blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f408aa8f4c17d9145482c1dcf6a069551a33f26 | 1d8f9448d80796df89cb903e01ba99fb7645bf9a | /BloodBank/Website/forms.py | 6b95761d83ded1d0a0539495c5c899abfd16283a | [] | no_license | bhusan-thapa/BloodBank | 94820361827d12d10f391d4d3e72a951c7a062a9 | b51485a857cad5798ad20f6a6be4187d11c4ba4a | refs/heads/master | 2020-07-10T14:31:46.156633 | 2013-02-28T05:02:43 | 2013-02-28T05:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,167 | py | from django import forms
from BloodBank import settings
import datetime
from datetime import timedelta
import re
from models import RegisteredUsers
from models import Feedback
#from recaptcha.client import captcha
class ContactForm(forms.Form):
    """Contact-page form: name, optional email/mobile, and a message."""
    con_name = forms.CharField(30, 3)  # positional: max_length=30, min_length=3
    con_emailid = forms.EmailField(required=False)
    con_mobile = forms.IntegerField(required=False)
    con_text = forms.CharField(300, 3)

    def clean(self):
        """Cross-field validation for name characters and mobile range.

        Replaces the previous bare ``except: pass`` wrappers (which hid
        every error) with explicit None guards: a value missing from
        cleaned_data means field-level validation already failed, so the
        extra check is simply skipped.
        """
        cleaned_data = super(ContactForm, self).clean()
        con_name = cleaned_data.get("con_name")
        con_mobile = cleaned_data.get("con_mobile")
        # Letters, dots and spaces only.
        if con_name is not None and re.match(r'^[A-Za-z. ]*$', con_name) is None:
            msg = u"Invalid Characters in Name"
            self._errors["con_name"] = self.error_class([msg])
        # 10-digit mobile numbers starting with 7/8/9.
        if con_mobile is not None and (con_mobile < 7000000000 or con_mobile > 9999999999):
            msg = u"Invalid Mobile Number"
            self._errors["con_mobile"] = self.error_class([msg])
        return cleaned_data
class ProfileForm(forms.Form):
    """Profile-edit form; prof_oldemail carries the account's current email
    so a changed address can be checked for uniqueness."""
    prof_oldemail=forms.EmailField()
    prof_name = forms.CharField(30,3)  # positional: max_length=30, min_length=3
    prof_emailid = forms.EmailField()
    prof_mobile = forms.IntegerField()
    prof_dolbd = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS,required=False)  # date of last blood donation
    prof_city = forms.CharField(20,3)
    def clean(self):
        """Cross-field checks; each one is wrapped in a bare try/except so a
        field that already failed validation (absent from cleaned_data) is
        silently skipped."""
        cleaned_data = super(ProfileForm, self).clean()
        prof_emailid = cleaned_data.get("prof_emailid")
        prof_oldemail = cleaned_data.get("prof_oldemail")
        # NOTE(review): this form declares no "prof_dob" field, so this is
        # always None and the birth-date check below never fires -- confirm
        # whether the field was dropped intentionally.
        prof_dob = cleaned_data.get("prof_dob")
        prof_dolbd = cleaned_data.get("prof_dolbd")
        today = datetime.date.today()
        prof_mobile = cleaned_data.get("prof_mobile")
        prof_name = cleaned_data.get("prof_name")
        #18 yrs minimum to donate blood
        year = timedelta(days=365*18)  # NOTE(review): unused in this form's checks
        try:
            if prof_dob > today :
                msg = u"Must be 1 day or Older "
                self._errors["prof_dob"] = self.error_class([msg])
        except:
            pass
        try:
            # Last blood donation date cannot lie in the future.
            if prof_dolbd !=None and prof_dolbd > today :
                msg = u"Enter a Resonable Date "
                self._errors["prof_dolbd"] = self.error_class([msg])
            else:
                # NOTE(review): bare expression -- looks the key up without
                # removing it; probably meant `del self._errors[...]`.
                self._errors["prof_dolbd"]
        except:
            pass
        try:
            # 10-digit mobile numbers starting with 7/8/9.
            if prof_mobile < 7000000000 or prof_mobile > 9999999999:
                msg = u"Invalid Mobile number "
                self._errors["prof_mobile"] = self.error_class([msg])
        except:
            pass
        try:
            # Letters, dots and spaces only.
            if re.match(r'^[A-Za-z. ]*$', prof_name) is None:
                msg = u"Invalid Characters in Name "
                self._errors["prof_name"] = self.error_class([msg])
        except:
            pass
        #checking if email id is already registerd or not
        try:
            # Only check uniqueness when the address actually changed.
            if prof_emailid != prof_oldemail :
                user = RegisteredUsers.objects.filter(email=prof_emailid)
                if len(user) > 0 :
                    msg = u"Email ID already registerd "
                    self._errors["prof_emailid"] = self.error_class([msg])
        except:
            pass
        # Always return the full collection of cleaned data.
        return cleaned_data
class PreregisterForm(forms.Form):
    """Donor signup form.

    NOTE(review): near-duplicate of RegisterForm below (which adds
    reg_location); consider sharing the validation logic.
    """
    pre_reg_name=forms.CharField(30,3)  # positional: max_length=30, min_length=3
    pre_reg_emailid = forms.EmailField()
    pre_reg_pswd = forms.CharField(None,5)  # no max_length, min_length=5
    pre_reg_cnfpswd= forms.CharField(None,5)
    pre_reg_bloodgroup = forms.CharField(4,2)
    pre_reg_mobile = forms.IntegerField()
    pre_reg_hidemob = forms.CharField(required=False)
    pre_reg_sex = forms.CharField(6,4)
    pre_reg_dob = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS)
    pre_reg_dolbd = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS,required=False)  # date of last blood donation
    pre_reg_city = forms.CharField(20,3)
    def clean(self):
        """Cross-field checks; bare try/except skips fields that already
        failed field-level validation (missing from cleaned_data)."""
        cleaned_data = super(PreregisterForm, self).clean()
        pre_reg_emailid = cleaned_data.get("pre_reg_emailid")
        pre_reg_pswd = cleaned_data.get("pre_reg_pswd")
        pre_reg_cnfpswd = cleaned_data.get("pre_reg_cnfpswd")
        pre_reg_dob = cleaned_data.get("pre_reg_dob")
        pre_reg_dolbd = cleaned_data.get("pre_reg_dolbd")
        today = datetime.date.today()
        pre_reg_mobile = cleaned_data.get("pre_reg_mobile")
        pre_reg_name = cleaned_data.get("pre_reg_name")
        #18 yrs minimum to donate blood
        year = timedelta(days=365*18)
        try:
            # Birth date cannot lie in the future.
            if pre_reg_dob > today :
                msg = u"Must be 1 day or Older "
                self._errors["pre_reg_dob"] = self.error_class([msg])
        except:
            pass
        try:
            # Last donation must be in the past and after the donor turned 18.
            if pre_reg_dolbd != None and (pre_reg_dolbd > today or pre_reg_dolbd < pre_reg_dob + year):
                msg = u"Enter a Resonable Date "
                self._errors["pre_reg_dolbd"] = self.error_class([msg])
        except:
            pass
        try:
            # 10-digit mobile numbers starting with 7/8/9.
            if pre_reg_mobile < 7000000000 or pre_reg_mobile > 9999999999:
                msg = u"Invalid Mobile number "
                self._errors["pre_reg_mobile"] = self.error_class([msg])
        except:
            pass
        try:
            # Letters, dots and spaces only.
            if re.match(r'^[a-zA-Z. ]*$', pre_reg_name) is None:
                msg = u"Invalid Characters in Name "
                self._errors["pre_reg_name"] = self.error_class([msg])
        except:
            pass
        #checking if email id is already registerd or not
        try:
            user = RegisteredUsers.objects.filter(email=pre_reg_emailid)
            if len(user) > 0 :
                msg = u"Email ID already registerd "
                self._errors["pre_reg_emailid"] = self.error_class([msg])
        except:
            pass
        if pre_reg_pswd != pre_reg_cnfpswd:
            # We know these are not in self._errors now (see discussion
            # below).
            msg = u"Password Doesnt match"
            self._errors["pre_reg_pswd"] = self.error_class([msg])
            self._errors["pre_reg_cnfpswd"] = self.error_class([msg])
            # These fields are no longer valid. Remove them from the
            # cleaned data.
            try:
                del cleaned_data["pre_reg_pswd"]
                del cleaned_data["pre_reg_cnfpswd"]
            except:
                pass
        # Always return the full collection of cleaned data.
        return cleaned_data
class RegisterForm(forms.Form):
    """Donor registration form.

    NOTE(review): near-duplicate of PreregisterForm plus the reg_location
    field; consider sharing the validation logic.
    """
    reg_name=forms.CharField(30,3)  # positional: max_length=30, min_length=3
    reg_emailid = forms.EmailField()
    reg_pswd = forms.CharField(None,5)  # no max_length, min_length=5
    reg_cnfpswd= forms.CharField(None,5)
    reg_bloodgroup = forms.CharField(4,2)
    reg_mobile = forms.IntegerField()
    reg_hidemob = forms.CharField(required=False)
    reg_sex = forms.CharField(6,4)
    reg_dob = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS)
    reg_dolbd = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS,required=False)  # date of last blood donation
    reg_city = forms.CharField(20,3)
    reg_location = forms.CharField(30,3)
    def clean(self):
        """Cross-field checks; bare try/except skips fields that already
        failed field-level validation (missing from cleaned_data)."""
        cleaned_data = super(RegisterForm, self).clean()
        reg_emailid = cleaned_data.get("reg_emailid")
        pswd = cleaned_data.get("reg_pswd")
        cnfpswd = cleaned_data.get("reg_cnfpswd")
        reg_dob = cleaned_data.get("reg_dob")
        reg_dolbd = cleaned_data.get("reg_dolbd")
        today = datetime.date.today()
        reg_mobile = cleaned_data.get("reg_mobile")
        reg_name = cleaned_data.get("reg_name")
        #18 yrs minimum to donate blood
        year = timedelta(days=365*18)
        try:
            # Birth date cannot lie in the future.
            if reg_dob > today :
                msg = u"Must be 1 day or Older "
                self._errors["reg_dob"] = self.error_class([msg])
        except:
            pass
        try:
            # Last donation must be in the past and after the donor turned 18.
            if reg_dolbd != None and (reg_dolbd > today or reg_dolbd < reg_dob + year):
                msg = u"Enter a Resonable Date "
                self._errors["reg_dolbd"] = self.error_class([msg])
        except:
            pass
        try:
            # 10-digit mobile numbers starting with 7/8/9.
            if reg_mobile < 7000000000 or reg_mobile > 9999999999:
                msg = u"Invalid Mobile number "
                self._errors["reg_mobile"] = self.error_class([msg])
        except:
            pass
        try:
            # Letters, dots and spaces only.
            if re.match(r'^[a-zA-Z. ]*$', reg_name) is None:
                msg = u"Invalid Characters in Name "
                self._errors["reg_name"] = self.error_class([msg])
        except:
            pass
        #checking if email id is already registerd or not
        try:
            user = RegisteredUsers.objects.filter(email=reg_emailid)
            if len(user) > 0 :
                msg = u"Email ID already registerd "
                self._errors["reg_emailid"] = self.error_class([msg])
        except:
            pass
        if pswd != cnfpswd:
            # We know these are not in self._errors now (see discussion
            # below).
            msg = u"Password Doesnt match"
            self._errors["reg_pswd"] = self.error_class([msg])
            self._errors["reg_cnfpswd"] = self.error_class([msg])
            # These fields are no longer valid. Remove them from the
            # cleaned data.
            try:
                del cleaned_data["reg_pswd"]
                del cleaned_data["reg_cnfpswd"]
            except:
                pass
        # Always return the full collection of cleaned data.
        return cleaned_data
class LoginForm(forms.Form):
    """Login form: authenticates by exact email + password match."""
    log_emailid= forms.EmailField()
    log_pswd = forms.CharField(30,5)  # positional: max_length=30, min_length=5
    def clean(self):
        """Look up the user by email/password pair.

        NOTE(review): the password is matched directly against the stored
        ``pswd`` column, i.e. compared in plain text -- a serious security
        concern. The bare except also turns any failure (including missing
        fields) into a generic bad-credentials error.
        """
        cleaned_data = super(LoginForm, self).clean()
        try:
            log_emailid = cleaned_data["log_emailid"]
            log_pswd = cleaned_data["log_pswd"]
            user =RegisteredUsers.objects.get(email=log_emailid,pswd=log_pswd)
        except:
            msg =u"Invalid User Name or Password "
            self._errors["log_pswd"] = self.error_class([msg])
        return cleaned_data
class SearchForm(forms.Form):
    """Donor search criteria: blood group, city, locality."""
    srch_bloodgroup = forms.CharField(6,3)  # positional: max_length, min_length
    srch_city = forms.CharField(20,3)
    # NOTE(review): named reg_location, unlike the srch_* siblings --
    # confirm the template posts this exact field name.
    reg_location = forms.CharField(30,3)
class PasswordForm(forms.Form):
    """Change-password form: verifies the old password, then the new pair."""
    emailid = forms.EmailField()
    old_pswd=forms.CharField(30,5)  # positional: max_length=30, min_length=5
    new_pswd=forms.CharField(30,5)
    cnf_new_pswd = forms.CharField(30,5)
    def clean(self):
        cleaned_data = super(PasswordForm, self).clean()
        # NOTE(review): a KeyError here (invalid/missing email) propagates
        # uncaught, unlike the guarded lookups below.
        emailid=cleaned_data["emailid"]
        old_pswd = ""
        try:
            old_pswd = cleaned_data["old_pswd"]
        except:
            # Old password failed field validation: report and stop early.
            old_pswd = None
            msg =u"Invalid Password "
            self._errors["old_pswd"] = self.error_class([msg])
            return cleaned_data
        new_pswd = ""
        cnf_new_pswd =""
        try:
            new_pswd = cleaned_data["new_pswd"]
            cnf_new_pswd = cleaned_data["cnf_new_pswd"]
        except:
            # One of the new-password fields failed validation: stop early.
            msg =u"Invalid Password or Empty"
            self._errors["cnf_new_pswd"] = self.error_class([msg])
            #self._errors["cnf_new_pswd"] = "Invalid Password or Empty"
            return cleaned_data
        try:
            # NOTE(review): the old password is matched against the stored
            # pswd column in plain text -- it does not appear to be hashed.
            users = RegisteredUsers.objects.filter(email=emailid,pswd=old_pswd)
            if len(users) == 0:
                msg =u"Wrong Password"
                self._errors["old_pswd"] = self.error_class([msg])
            if new_pswd != cnf_new_pswd :
                msg =u"Passwords do not match"
                self._errors["cnf_new_pswd"] = self.error_class([msg])
        except:
            msg =u"Unknown Error occured,Try after some time"
            self._errors["cnf_new_pswd"] = self.error_class([msg])
        return cleaned_data
class ForgotPassword(forms.Form):
    """Single-field form: email address to send a password reset to."""
    frgt_email = forms.EmailField()
| [
"uselessfelow@gmail.com"
] | uselessfelow@gmail.com |
fbf476c031c972f14fd9d161604a65b456f6cdb0 | f0cda71d45fa653791002e780b8ba6c4836f5705 | /proc_center/settings.py | afb0ca8aca5a267c79056e48d63924f1f236c162 | [] | no_license | jkoen/poc_request | ff4e6105b57e4716a5168f5ff8dfb0b72d8fb6e0 | 9a013dccd0ee6cb8acabb638eca0dca1c85e1f05 | refs/heads/master | 2021-01-19T04:32:46.486897 | 2016-08-01T08:04:50 | 2016-08-01T08:04:50 | 64,491,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,810 | py | """
Django settings for proc_center project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'gnt1gxz(v89&3uk!cpw#$mt(75n5k0&*340xmhloz30e$dt989'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Outbound SMTP relay (internal IP); no authentication or TLS configured.
EMAIL_HOST = '10.74.74.99' #'mail.hds.com'
EMAIL_PORT = 25
# EMAIL_HOST_USER = "POC.Request.EMEA@hds.com"
# EMAIL_HOST_PASSWORD = "password"
# EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'poc_request',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proc_center.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proc_center.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_env", "static_root")
#STATIC_ROOT = '/u01/poc_request/static_env/static_root/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static_pro", "static_app"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_env", "media_root")
ADMINS = (
('Johnny Koen', 'johnny.koen@hds.com'),
)
| [
"johnny.koen@gmail.com"
] | johnny.koen@gmail.com |
28f151b9a98a0578c9f221e0258ebc19e63eeae4 | 454894e9962b4194f449b32923ab774e925be8b4 | /blog/models.py | 83597a52f05a26a7716888a664bc6f64cc123c0b | [] | no_license | alexandrinekouame/blogdjango | 2d96eef1328e5fa4742ed69ffceca0fca8dc7950 | f032ac34ca0b322d067b2f549b43e73e1188a90b | refs/heads/master | 2020-03-20T01:31:46.333090 | 2018-06-12T14:05:17 | 2018-06-12T14:05:17 | 137,079,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):  # this class is a Django model
    """A blog post with an author, title and optional publication time."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    # NOTE: non-PEP8 capitalised name kept -- renaming would need a migration.
    Created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post as published now (caller is expected to save())."""
        self.published_date = timezone.now()

    def __str__(self):
        # BUG FIX: was misspelled `__str` (name-mangled, never called), so
        # Django displayed the default object repr instead of the title.
        return self.title
| [
"alexandrinekouame091@gmail.com"
] | alexandrinekouame091@gmail.com |
a21649ef125044c994a18467e0d481af42158f2f | 860b903e04ba83aa4cb391b43a8bbe290773fd8e | /floodSite/myTodo/views.py | 69c1e20c5b28db0feb24a3dd4292c16867d92c75 | [] | no_license | Archibald547/flood | 4ef809960617d480da411a8797cc176209c0f4df | 1046262c0e7cb3ef181aaa3bce4dfde195cf099c | refs/heads/master | 2022-12-12T17:42:15.452221 | 2017-11-03T03:14:46 | 2017-11-03T03:14:46 | 127,584,723 | 0 | 0 | null | 2022-12-08T00:39:53 | 2018-04-01T01:09:01 | CSS | UTF-8 | Python | false | false | 2,110 | py | from .models import todo
from django.shortcuts import render_to_response, render
from .add_task import PostForm
from django.shortcuts import redirect
from django.utils import timezone
from django.shortcuts import get_object_or_404
from schedule.models import Event, EventRelation, Calendar
import datetime
from home.models import MyProfile
def index(request):
    # Show the current user's unfinished todo items.
    user_list = todo.objects.filter(username=request.user.username)
    items = user_list.filter(completed=False)
    #return render(request,'todo.html')
    return render(request,'todo.html', {'items': items})
def completed(request, pk):
    # Mark the task done and award one experience point to the profile.
    post = todo.objects.get(pk=pk)
    post.completed = True
    post.save()
    acc = MyProfile.objects.get(user=request.user)
    acc.exp += 1
    acc.save()
    # Re-render the remaining open tasks (same query as index()).
    user_list = todo.objects.filter(username=request.user.username)
    items = user_list.filter(completed=False)
    return render(request, 'todo.html', {'items': items})
def delete_task(request, pk):
    # Remove the task outright, then re-render the remaining open tasks.
    task = todo.objects.get(pk=pk)
    task.delete()
    user_list = todo.objects.filter(username=request.user.username)
    items = user_list.filter(completed=False)
    return render(request, 'todo.html', {'items': items})
def post_task(request):
    # Create a new task for the logged-in user, then return to the list.
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # Stamp ownership and creation time before the real save.
            post.username = request.user.username
            post.published_date = timezone.now()
            post.save()
            return redirect('todo_view')
    else:
        form = PostForm()
    # Invalid POSTs fall through here with the bound form (errors shown).
    return render(request, 'add_task.html', {'form': form})
def edit_task(request, pk):
    # Edit an existing task in place; 404 when the pk does not exist.
    post = get_object_or_404(todo, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            # NOTE(review): sets `author`, but the other views use a
            # `username` field on todo -- confirm the model has both.
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('todo_view')
    else:
        form = PostForm(instance=post)
    return render(request, 'add_task.html', {'form': form})
| [
"Jessica Huang@localhost"
] | Jessica Huang@localhost |
55d35bcef573fd000e0f5e0421a8c339963d3ea0 | f6af12ec68ec60903ea9ff414abab599e64f1aaf | /production/migrations/0001_initial.py | 0a44c5d26db00458bb2d96181f177ee9e46cff85 | [] | no_license | reginaldbadua/Final-Project | d649cbe129b884cff226dcd95e09866aee1d8c25 | 265f96575ea621974510957e2e60bed973472fdd | refs/heads/master | 2021-06-18T08:48:55.263513 | 2019-08-01T18:39:27 | 2019-08-01T18:39:27 | 200,101,894 | 0 | 0 | null | 2021-06-10T21:58:24 | 2019-08-01T18:37:06 | Python | UTF-8 | Python | false | false | 1,145 | py | # Generated by Django 2.1 on 2019-07-13 16:08
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django's makemigrations; applied migrations should not
# be rewritten by hand.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        # Creates the production_detail and production_product tables;
        # Product.detail is a cascading foreign key to Detail.
        migrations.CreateModel(
            name='Detail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('abv', models.IntegerField()),
                ('ibu', models.IntegerField()),
                ('srm', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=100)),
                ('on_hand_qty', models.IntegerField()),
                ('price', models.FloatField()),
                ('detail', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='production.Detail')),
            ],
        ),
    ]
| [
"noreply@github.com"
] | reginaldbadua.noreply@github.com |
04651aedbb6826947d6f7ec17cb473d751090f82 | 59d1830314e5532ad48ad69bf7bbc0d035348bc3 | /my_decorator1.py | b1fbe8ea4c520309257cf26a2cdfb17fdbeb1473 | [
"MIT"
def my_decorator(func):
    """Transparent pass-through decorator.

    The original wrapper dropped both arguments and the return value, so
    it only worked on zero-argument procedures. Forwarding them (and
    preserving metadata with functools.wraps) is backward compatible.
    """
    from functools import wraps

    @wraps(func)
    def wrap_func(*args, **kwargs):
        return func(*args, **kwargs)
    return wrap_func
@my_decorator
def hello():
#print('hello baby duck')
print('''
Hello Baby Duck!
..---..
.' _ `.
__..' (o) :
`..__ ;
`. /
; `..---...___
.' `~-. .-')
. ' _.'
: :
\ '
+ J
`._ _.'
`~--....___...---~' mh
''')
hello()
| [
"noreply@github.com"
] | mosinu.noreply@github.com |
def rec_coin(target, coins):
    """Minimum number of coins from `coins` summing to `target`.

    Pure recursion with no memoisation -- exponential time; kept as the
    baseline for the memoised variant below.
    """
    # A single coin settles it when the target is itself a denomination.
    if target in coins:
        return 1
    # Worst case: pay everything in the smallest unit (assumes a 1-coin).
    best = target
    for coin in (c for c in coins if c <= target):
        best = min(best, 1 + rec_coin(target - coin, coins))
    return best
def rec_coin_dynamic(target, coins, known_result):
    """Memoised minimum-coin count.

    `known_result` is a caller-supplied, zero-filled list of length
    target+1; entry i caches the answer for amount i (0 = not computed).
    """
    # Base case: the target is itself a denomination.
    if target in coins:
        known_result[target] = 1
        return 1
    # Reuse a previously computed answer.
    if known_result[target] > 0:
        return known_result[target]
    best = target
    for coin in (c for c in coins if c <= target):
        best = min(best, 1 + rec_coin_dynamic(target - coin, coins, known_result))
    known_result[target] = best
    return best
#print(rec_coin(63, [1, 5, 10, 25]))
# Demo: the memoised version needs a zeroed cache of size target+1.
target = 63
coins = [1, 5, 10, 25]
known_result = [0]*(target+1)
print(rec_coin_dynamic(target, coins, known_result))
| [
"Rishabh9843@gmail.com"
] | Rishabh9843@gmail.com |
bc72c18944a978fc7e42bb86f4bfecb958283d94 | 71f9b644143312fd73f035356141ce037c3bcbd3 | /utils/c_data_loader.py | bd89bed740bd2299646663bacfcc39f57e0d305e | [] | no_license | lukas-jkl/sparsenn-lmc-robustness | 6b9dea1d6ccdcf0da1cab8afa367b0d78ff6abce | 3ffffe5bd762f8df82cb18c01852bbc46850d80c | refs/heads/master | 2023-09-06T04:49:05.137120 | 2021-11-25T16:32:57 | 2021-11-25T16:32:57 | 421,472,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,340 | py | import os
import wget
import tarfile
import zipfile
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
import PIL
cifar_c_corruptions = {
"brightness": "brightness.npy",
"contrast": "contrast.npy",
"defocus_blur": "defocus_blur.npy",
"elastic_transform": "elastic_transform.npy",
"fog": "fog.npy",
"frost": "frost.npy",
"gaussian_blur": "gaussian_blur.npy",
"gaussian_noise": "gaussian_noise.npy",
"glass_blur": "glass_blur.npy",
"impulse_noise": "impulse_noise.npy",
"jpeg_compression": "jpeg_compression.npy",
"labels": "labels.npy",
"motion_blur": "motion_blur.npy",
"pixelate": "pixelate.npy",
"saturate": "saturate.npy",
"shot_noise": "shot_noise.npy",
"snow": "snow.npy",
"spatter": "spatter.npy",
"speckle_noise": "speckle_noise.npy",
"zoom_blur": "zoom_blur.npy"
}
mnist_c_corruptions = {
"brightness": "brightness",
"canny_edges": "canny_edges",
"dotted_line": "dotted_line",
"fog": "fog",
"glass_blur": "glass_blur",
"identity": "identity",
"impulse_noise": "impulse_noise",
"motion_blur": "motion_blur",
"rotate": "rotate",
"scale": "scale",
"shear": "shear",
"shot_noise": "shot_noise",
"spatter": "spatter",
"stripe": "stripe",
"translate": "translate",
"zigzag": "zigzag"
}
def _download_and_unpack_dataset(datadir, file, url, unpackfolder):
    # Download `url` into datadir/file (skipped when already present) and
    # unpack it once into datadir/unpackfolder, dispatching on extension.
    outfile = os.path.join(datadir, file)
    if not os.path.isfile(outfile):
        wget.download(url, out=outfile)
    if not os.path.isdir(os.path.join(datadir, unpackfolder)):
        extension = os.path.splitext(file)[1]
        if "tar" in extension:
            with tarfile.open(outfile) as tar:
                # NOTE(review): tar.extract trusts member paths; fine for
                # the fixed zenodo archives, unsafe for untrusted tars.
                for member in tqdm(iterable=tar.getmembers(), total=len(tar.getmembers()), desc="Unpacking - "):
                    tar.extract(member, path=datadir)
        elif "zip" in extension:
            with zipfile.ZipFile(outfile) as zip:
                zip.extractall(path=datadir)
        else:
            raise Exception("Can't unpack file ", file)
class CIFAR10C(Dataset):
    """CIFAR-10-C: the CIFAR-10 test images under one corruption type.

    `severity` (1-based) selects a 10,000-image slice of the corruption
    file; severity levels are stacked contiguously in the .npy file.
    """
    def __init__(self, corruption, datadir="datasets", severity=2, transform=None, target_transform=None):
        unpackfolder = "CIFAR-10-C"
        # Fetch and unpack the archive on first use only.
        _download_and_unpack_dataset(datadir, "CIFAR-10-C.tar",
                                     "https://zenodo.org/record/2535967/files/CIFAR-10-C.tar", unpackfolder)
        datafile = os.path.join(datadir, unpackfolder, cifar_c_corruptions[corruption])
        # Slice out the 10,000 images belonging to the requested severity.
        self.data = np.load(datafile)[(severity-1)*10000:severity*10000]
        labelsfile = os.path.join(datadir, unpackfolder, "labels.npy")
        self.labels = torch.tensor(np.load(labelsfile)[(severity-1)*10000:severity*10000], dtype=torch.long)
        if transform is None:
            # Default preprocessing: resize + CIFAR-10 mean/std normalisation.
            normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616])
            transform = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Convert the raw array to PIL so torchvision transforms apply.
        image = self.data[idx]
        image = PIL.Image.fromarray(image)
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
class CIFAR100C(Dataset):
    """CIFAR-100-C: the CIFAR-100 test images under one corruption type.

    NOTE(review): duplicate of CIFAR10C apart from the archive URL, folder
    name and normalisation statistics -- consider a shared base class.
    """
    def __init__(self, corruption, datadir="datasets", severity=2, transform=None, target_transform=None):
        unpackfolder = "CIFAR-100-C"
        _download_and_unpack_dataset(datadir, "CIFAR-100-C.tar",
                                     "https://zenodo.org/record/3555552/files/CIFAR-100-C.tar", unpackfolder)
        datafile = os.path.join(datadir, unpackfolder, cifar_c_corruptions[corruption])
        # `severity` (1-based) selects a 10,000-image slice of the file.
        self.data = np.load(datafile)[(severity-1)*10000:severity*10000]
        labelsfile = os.path.join(datadir, unpackfolder, "labels.npy")
        self.labels = torch.tensor(np.load(labelsfile)[(severity-1)*10000:severity*10000], dtype=torch.long)
        if transform is None:
            # Default preprocessing: resize + CIFAR-100 mean/std normalisation.
            normalize = transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.2673, 0.2564, 0.2762])
            transform = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Convert the raw array to PIL so torchvision transforms apply.
        image = self.data[idx]
        image = PIL.Image.fromarray(image)
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
class MNISTC(Dataset):
    """MNIST-C: the MNIST test images under one corruption type.

    Unlike the CIFAR variants, MNIST-C ships a single severity level, so
    any `severity` other than 1 is rejected.
    """
    def __init__(self, corruption, datadir="datasets", severity=1, transform=None, target_transform=None):
        if severity != 1:
            raise Exception("MNISTC does only have severity 1")
        unpackfolder = "mnist_c"
        _download_and_unpack_dataset(datadir, "mnist_c.zip",
                                     "https://zenodo.org/record/3239543/files/mnist_c.zip", unpackfolder)
        # Each corruption has its own subfolder with images and labels.
        datafile = os.path.join(datadir, unpackfolder, mnist_c_corruptions[corruption], "test_images.npy")
        self.data = np.load(datafile)
        labelfile = os.path.join(datadir, unpackfolder, mnist_c_corruptions[corruption], "test_labels.npy")
        self.labels = torch.tensor(np.load(labelfile), dtype=torch.long)
        if transform is None:
            # Default preprocessing: resize + MNIST mean/std normalisation.
            normalize = transforms.Normalize(mean=[0.1307], std=[0.3081])
            transform = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Drop the trailing channel axis before converting to PIL.
        image = self.data[idx][:, :, 0]
        image = PIL.Image.fromarray(image)
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
| [
"lukas.timpl@student.tugraz.at"
] | lukas.timpl@student.tugraz.at |
34216650f80861c61c0d6aafd8fd5cb4209fb743 | afe271a34f1caf88fc980ddd38f48891d3bce32e | /parseargs.py | 7d7c4b5e86cc7c935e7fbac1ad230d9f2033797b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | bpsuntrup/pyutils | c6381fbb224d71679d09c2f4d71b15962ab427c8 | 6e01fb4bf21d122e225a43e809747586919afadc | refs/heads/master | 2020-03-21T21:52:05.360480 | 2018-08-06T11:55:01 | 2018-08-06T11:55:01 | 139,089,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | '''
pass a dictionary of single letter switches to multi-letter switch names.
This parses arguments
Right now it doesn't validate arguments belong to any specific set
right now it doesn't do anything
I'm just bored
'''
import sys
print(sys.argv)
class ArgError(Exception):
    """Raised for malformed command-line switches (e.g. a bare '-')."""
    pass
''' types of switches:
on/off
-a
with parameter
-a hi
with several parameters
-a hi there
preceding args
blah
trailing args
'''
switch_dict = {
'a': 'apple',
'g': 'great',
'd': 'day',
}
def parseargs(switch_dict):
    """Split sys.argv[1:] into (preceding_args, switches, trailing_args).

    switch_dict maps single-letter switch names to their long names.
    Long switches (--name) and lone short switches (-a) may take
    parameters; clustered short switches (-ag) may not.
    """
    pa = []               # positional args seen before any switch
    ta = []               # positional args seen after a clustered switch
    preceding_args = True
    expect_trailing_arg = False
    expecting_parameter = False
    current_switch = None
    switches = {}         # long switch name -> list of parameters
    for arg in sys.argv[1:]:
        if arg[0] == '-':
            expecting_parameter = False
            # NOTE(review): typo -- this binds a new name
            # `expecting_trailing_arg`; `expect_trailing_arg` (set below)
            # is never read anywhere, so both flags are currently dead.
            expecting_trailing_arg = False
            preceding_args = False
            # A switch that never received a parameter still gets an
            # (empty) entry in the result.
            if current_switch is not None:
                if current_switch not in switches.keys():
                    switches[current_switch] = []
                current_switch = None
            if len(arg) == 1:
                raise ArgError('- is not a valid arg')
            elif arg[1] == '-':
                print 'longswitch: {}'.format(arg)
                expecting_parameter = True
                current_switch = arg[2:]
                expect_trailing_arg = False
            else:
                print 'switch: {}'.format(arg)
                if len(arg) == 2:
                    expecting_parameter = True
                    current_switch = switch_dict[arg[1]]
                    expect_trailing_arg = False
                else:
                    # Clustered short switches, e.g. -ag -> apple, great.
                    for char in arg[1:]:
                        switches[switch_dict[char]] = []
                    expect_trailing_arg = True
                    current_switch = None
        else:
            if expecting_parameter:
                print 'parameter: {}'.format(arg)
                if current_switch in switches.keys():
                    switches[current_switch].append(arg)
                else:
                    switches[current_switch] = [arg,]
            elif preceding_args:
                print 'preceding arg: {}'.format(arg)
                pa.append(arg)
            else:
                print 'trailing arg: {}'.format(arg)
                ta.append(arg)
    return pa, switches, ta
if __name__ == '__main__':
print parseargs(switch_dict)
# try python parseargs.py hi there i am -ag --day hi again and again -ag hi there
| [
"benjamin.suntrup@garmin.com"
] | benjamin.suntrup@garmin.com |
def calculate_salary(person):
    # Placeholder payroll step: report which person is being processed.
    print('Calculate salary of {}'.format(person))
"darya.frolova@rt.ru"
] | darya.frolova@rt.ru |
97896f056e2d9e43f2b34416ee879e413f4b285e | 99baa1da516d9a5ef7cc3bf224022b919d2ab8c1 | /test/retry_t.py | 3e1873b9cacdb8a355218078e4eb3f4db150e6a9 | [] | no_license | AYiXi/SourceCode | f6597e7134cbe9fbc0cd778581feff8d976329ff | 733eb900b4ae740df1f86b72fb6d9bcca10b07b9 | refs/heads/master | 2022-03-08T23:09:33.799628 | 2022-03-04T01:37:51 | 2022-03-04T01:37:51 | 248,709,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import types
from tenacity import retry, TryAgain
import sys
# Scratch script exploring tenacity's retry decorator and introspection.
print(sys.stdin)
@retry
def f():
    # Raising TryAgain tells tenacity to call f() again -- this would
    # retry forever, which is why the call below is commented out.
    print(__name__)
    raise TryAgain('try again')
a = lambda i: i + 1
print(isinstance(f, types.FunctionType))
print(isinstance(print, types.BuiltinMethodType))
if __name__ == "__main__":
    pass
    # f()
| [
"821346679@qq.com"
] | 821346679@qq.com |
8e196523624782ec54dc56a9223e4f118282172a | 5b613cc64c743b75068e1f3290ef75d3c7f657b8 | /last_update_checker/shared/writers.py | a50edacfe3e706d17418c60ddc2a229d1f0209cc | [] | no_license | apollowesley/chunz | 7ed3eaa2aeb31e8bf82d4696cabf346e105c3fbf | 32674296ae711e21ed1ca5a81c19ba38d8fa6382 | refs/heads/master | 2022-03-16T21:37:30.360448 | 2019-08-23T03:22:04 | 2019-08-23T03:22:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import os
from shared.constants import FILE_DIR
def write_error(directory=FILE_DIR, error='', exception=None):
    """Append ``"<error>_<exception>"`` as one CRLF-terminated line to error.txt in *directory*."""
    suffix = '' if exception is None else str(exception)
    # NOTE(review): explicit '\r\n' through a text-mode file becomes CR CR LF
    # on Windows -- confirm the double terminator is intended.
    line = f'{error}_{suffix}\r\n'
    with open(os.path.join(directory, 'error.txt'), 'a') as log:
        log.write(line)
def write_debug(directory=FILE_DIR, msg='', exception=None):
    """Append ``"<msg>_<exception>"`` as one CRLF-terminated line to debug.txt in *directory*."""
    suffix = '' if exception is None else str(exception)
    with open(os.path.join(directory, 'debug.txt'), 'a') as log:
        log.write(f'{msg}_{suffix}\r\n')
def write_details(directory=FILE_DIR, title='', url=''):
    """Append *title* and *url* as two consecutive CRLF-terminated lines to details.txt in *directory*."""
    with open(os.path.join(directory, 'details.txt'), 'a') as log:
        log.writelines(f'{text}\r\n' for text in (title, url))
| [
"chunyuan90@gmail.com"
] | chunyuan90@gmail.com |
f02787a273b490951056524eb3ad1f28ce42171b | ce0a3ebb242fb18bb48bad68b1866afd9a7988cb | /chap09 Recursion and Dynamic Programming/9_08.py | cdc7511b833dd34249b1e021c0045b7007046717 | [] | no_license | libra202ma/cc150Python | b6c4fff9f41aa8b3f4b8f618c855c25570039e30 | e1e6be69cfb3974dfac9b0a3d27115924b78e393 | refs/heads/master | 2020-06-01T11:29:24.448832 | 2015-06-22T18:04:59 | 2015-06-22T18:04:59 | 37,871,587 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | """
Given an infinite number of quarters (25 cents), dimes (10 cents),
nickels (5 cents) and pennies (1 cent), write code to calculate the
number of ways of representing n cents.
- brute-force. Using for-loops to check if the sum of all possible
combinations equals to n.
- recursion. The base case is that representing n cents using only 1
cents. Then ways of representing of n cents using 1 and 5 cents = ways
of using at least one 5 cents + ways of using only 1 cents to
represent left parts.
<http://www.cs.ucf.edu/~dmarino/ucf/cop3503/lectures/DynProgChange.doc>
- dynamic programming. The key point is that the number of ways to make change for n cents using denomination d can be split up into counting two groups:
1. the number of ways to make changes for n cents using denominations LESS than d
2. the number of ways to make changes for n cents using at least ONE
coin of denomination d.
"""
def countways(n, coins=[1, 5, 10, 25]):
if coins == [1]:
return 1
nways = 0
for i in range(n/coins[-1] + 1):
nways += countways(n - i * coins[-1], coins[:-1])
return nways
def test_countways():
assert countways(1) == 1
assert countways(5) == 2
assert countways(6) == 2
assert countways(10) == 4
assert countways(15) == 6
# dynamic programming, bottom up
def countwaysDP(n):
waystable = [[], [], [], []]
denoms = [1, 5, 10, 25]
for cents in range(1, n+1):
for denomidx, denom in enumerate(denoms):
if denom == 1:
waystable[denomidx].append(1)
elif cents - denom >= 0:
waystable[denomidx].append(waystable[denomidx - 1][cents - 1] + waystable[denomidx][cents - 1 - denom])
else:
waystable[denomidx].append(waystable[denomidx - 1][cents - 1])
return waystable[-1][-1]
def test_countwaysDP():
assert countwaysDP(1) == 1
assert countwaysDP(15) == 6
# dynamic programming, top down
def countwaysDP2(n, maps={}, coins = [1, 5, 10, 25]):
# maps has keys (n, coin), which keep tracks of number of ways to
# represent n cents using largest coins equals to coin
if coins == [1]:
return 1
if (n, coins[-1]) in maps:
return maps[(n, coins[-1])]
nways = 0
for i in range(n/coins[-1] + 1):
nways += countwaysDP2(n - i * coins[-1], maps, coins[:-1])
maps[(n, coins[-1])] = nways
return nways
def test_countwaysDP2():
assert countwaysDP2(15) == 6
| [
"libra202ma@gmail.com"
] | libra202ma@gmail.com |
c0cdd8c93382c5984112075490371358e76e49b4 | 97a692612e0242279e63aeb5bf805fee0d3a1be9 | /JUNIPR_anders/JUNIPR-charge.py | 3fa4a7ea82a1b640d86fcd9e9592de70737abeb3 | [] | no_license | ABHIDEV006/jet-nn | cd992441cbe92567ee35edc1590d3f1c2a0efb62 | dae7a7e2e7d1905e60349b5a811ddaf370a766fb | refs/heads/master | 2020-04-04T18:40:07.912265 | 2018-08-09T14:25:16 | 2018-08-09T14:25:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,277 | py | import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--q_granularity', default=21, type=int)
parser.add_argument('-p', '--p_granularity', default=10, type=int)
parser.add_argument('-b', '--batch_size', default=100, type=int)
parser.add_argument('-n', '--n_events', default=10**4, type=int)
parser.add_argument('-d', '--data_path', default='../../data/junipr/longer_training')
parser.add_argument('-i', '--input_path', default='final_reclustered_practice_cut.out')
parser.add_argument('-x', '--times', action='store_true')
args = vars(parser.parse_args())
# Setup parameters
p_granularity = args['p_granularity']
q_granularity = args['q_granularity']
batch_size = args['batch_size']
n_events = args['n_events']
n_batches = n_events//batch_size
# path to where to save data
data_path = args['data_path']
# path to input jets file
input_path = args['input_path']
# boolean indicating if we are in the p times q or p plus q framework
times = args['times']
print('JUNIPR started', flush=True)
import tensorflow as tf
from utilities import load_data
import numpy as np
import time
# # Load Data
# Setup parameters
# NOTE(review): this second parameter block silently clobbers the values
# parsed from argparse above (p_granularity, q_granularity, batch_size,
# n_events) and repoints data_path at an input file, so the CLI flags are
# effectively ignored -- looks unintended; confirm which set is authoritative.
p_granularity = 10
q_granularity = 2*10 + 1  # 21 charge bins
batch_size = 100
n_events = 10**5
n_batches = n_events//batch_size
n_epochs = 1
# path to saved jets
data_path = '../../data/junipr/final_reclustered_practice.out'
# Load in jets from file
[daughters, endings, mothers, (discrete_p_splittings, discrete_q_splittings), mother_momenta] = load_data(data_path,
        n_events=n_events, batch_size=batch_size, split_p_q=True,
        p_granularity=p_granularity, q_granularity=q_granularity)
print('data loaded', flush=True)
# temporary hack having to do with mask values; this will change later.
for i in range(len(mothers)):
mothers[i][0][mothers[i][0]==-1] = 0
discrete_p_splittings[i][0][discrete_p_splittings[i][0]==p_granularity**4] = 0
discrete_q_splittings[i][0][discrete_q_splittings[i][0]==q_granularity] = 0
batch_number = 0
print(daughters[batch_number].shape, flush=True)
print(endings[batch_number][0].shape, flush=True)
print(mothers[batch_number][0].shape, flush=True)
print(discrete_p_splittings[batch_number][0].shape, flush=True)
print(discrete_q_splittings[batch_number][0].shape, flush=True)
print(mother_momenta[batch_number].shape, flush=True)
# # Simple RNN
# ## Build Model
def activation_average(x):
    """Normalize *x* along its last axis so each slice sums to 1; the
    denominator is clipped to [epsilon, 1] to avoid division by zero."""
    K = tf.keras.backend
    denom = K.clip(K.sum(x, axis=-1, keepdims=True), K.epsilon(), 1)
    return x / denom
def categorical_crossentropy2(target, output):
    """Categorical cross-entropy for masked targets: renormalizes *output*
    along the last axis (clipped away from 0), then rescales the per-step
    loss by the sequence length divided by the total target mass."""
    K = tf.keras.backend
    row_sums = K.clip(K.sum(output, axis=-1, keepdims=True), K.epsilon(), 1)
    probs = K.clip(output / row_sums, K.epsilon(), 1)
    seq_len = K.sum(K.ones_like(target[0, :, 0]))
    cross_entropy = -K.sum(target * K.log(probs), axis=-1)
    return cross_entropy * seq_len / K.sum(target)
# Define input to RNN cell
input_daughters = tf.keras.Input((None,8), name='Input_Daughters')
# Masking input daughters
masked_input = tf.keras.layers.Masking(mask_value=-1, name='Masked_Input_Daughters')(input_daughters)
# Define RNN cell
rnn_cell = tf.keras.layers.SimpleRNN(100, name='RNN', activation='tanh', return_sequences=True)(masked_input)
# End shower
end_hidden = tf.keras.layers.Dense(100, name='End_Hidden_Layer', activation='relu')(rnn_cell)
end_output = tf.keras.layers.Dense(1, name='End_Output_Layer', activation='sigmoid')(end_hidden)
## Choose Mother
mother_hidden = tf.keras.layers.Dense(100, name='Mother_Hidden_Layer', activation='relu')(rnn_cell)
mother_output = tf.keras.layers.Dense(100, name='Mother_Output_Layer', activation='softmax')(mother_hidden)
mother_weights = tf.keras.Input((None, 100), name='mother_weights')
mother_weighted_output = tf.keras.layers.multiply([mother_weights, mother_output])
normalization = tf.keras.layers.Activation(activation_average)(mother_weighted_output)
## Branching Function
input_mother_momenta = tf.keras.Input((None, 4), name='Input_Mother_Momenta')
# Masking Mother Momenta for branching function
masked_mother_momenta = tf.keras.layers.Masking(mask_value=-1, name='Masked_Mother_Momenta')(input_mother_momenta)
# Merge rnn & mother momenta inputs to branching function
branch_input = tf.keras.layers.concatenate([rnn_cell, masked_mother_momenta], axis=-1)
branch_hidden = tf.keras.layers.Dense(100, name='Branch_Hidden_Layer', activation='relu')(branch_input)
branch_output = tf.keras.layers.Dense(p_granularity**4, name='Branch_Output_Layer', activation='softmax')(branch_hidden)
# Use merged rnn & mother momenta concatenation for charge function
charge_hidden = tf.keras.layers.Dense(100, name='Charge_Hidden_Layer', activation='relu')(branch_input)
charge_output = tf.keras.layers.Dense(q_granularity, name='Charge_Output_Layer', activation='softmax')(charge_hidden)
model = tf.keras.models.Model(
inputs=[input_daughters, input_mother_momenta, mother_weights],
outputs=[end_output, normalization, branch_output, charge_output])
print(model.summary(), flush=True)
# ## Train Model
for lr in [1e-2, 1e-3, 1e-4]:
print('Using learning rate ', lr, flush=True)
model.compile(optimizer=tf.keras.optimizers.SGD(lr=lr),
loss=['binary_crossentropy', categorical_crossentropy2,
'sparse_categorical_crossentropy', 'sparse_categorical_crossentropy'])
for epoch in range(n_epochs):
print("Epoch: ", epoch, flush=True)
l = 0
for n in range(len(daughters)):
start_time = time.time()
batch_loss = model.train_on_batch(x=[daughters[n], mother_momenta[n], mothers[n][1]],
y=[endings[n][0], mothers[n][0], np.ma.masked_array(discrete_p_splittings[n][0], mask =
discrete_p_splittings[n][1]), discrete_q_splittings[n][0]])[0]
print("Batch {}: {}, took {} seconds".format(n, batch_loss, time.time() - start_time), flush=True)
l += batch_loss
if n%100==0 and n>0:
model.save(os.path.join(data_path, 'JUNIPR_plus_q_LR{}_E{}_B{}'.format(lr, epoch, n)))
if n%1000==0:
print("Batch: ", n, l/1000, flush=True)
l=0
model.save(os.path.join(data_path, 'JUNIPR_plus_q_LR{}_E{}'.format(lr, epoch)))
| [
"thomas.culp@cfa.harvard.edu"
] | thomas.culp@cfa.harvard.edu |
5ec2a61f6d862c6819b409b2198a8d635cfa8fd0 | a1f78e0834579ab380171c5102066031e763d2f4 | /development_scripts/OLE_try_1.py | d25cb5e5ec0b283745ec2ec0a43e028e4400c6b6 | [] | no_license | wachtlerlab/colortilt-model | cadfb42f5e23b6de286d4e7bee3e56903148934d | a43dc931d274b59313df57ac3a3bda28c03c02fc | refs/heads/master | 2021-07-09T17:06:59.452770 | 2020-10-21T13:50:27 | 2020-10-21T13:50:27 | 204,482,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 21 23:33:54 2020
@author: Ibrahim Alperen Tunc
"""
import cmath as c
import numpy as np
import matplotlib.pyplot as plt
import colclass as col
import sys
sys.path.insert(0,col.pathes.runpath)#!Change the directory accordingly
from supplementary_functions import std2kappa, kappa2std, depth_modulator, plotter, param_dict
#OLE TRY_1
#See Salinas&Abbot 1994
#Import a random model
model = col.colmod(None,2,None,stdInt=[2,1],bwType="gradient/sum",phase=0,avgSur=180,startAvg=1,endAvg=360,depInt=[0.2,0.6],depmod=True,stdtransform=False)
kapMod=(2-1)/2*np.cos(2*np.deg2rad(np.linspace(1,360,360)-0))+1+(2-1)/2#Kappa Modulator, see also depth_modulator() in supplementary_functions.py
def OLE(unitactivity,x):#SOMETHING NOT RIGHT, DEBUG SOME TIME LATER
    """Optimal Linear Estimator readout (see Salinas & Abbott 1994).

    Forms complex unit vectors for the stimulus angles in [0, 360), computes
    the center-of-mass vector L and the activity correlation matrix Q,
    derives decoding weights D = Q^-1 L, and returns the decoded angle in
    degrees for every stimulus in that range.

    :param unitactivity: array squeezable to (n_units, n_stim_samples)
    :param x: stimulus-angle axis; must contain the exact values 0 and 360
    :return: decoded angles in degrees, length idxend - idxst
    """
    #L_j=integral(stimulusVector*responseToStimulus)
    #unit activity array (n_units x n_stimang)
    unitactivity = np.squeeze(unitactivity) #so that the matrix notation can be used,
    #preallocate covariance matrix Q (n_units x n_units)
    Q = np.zeros([len(unitactivity),len(unitactivity)])
    #stimulus indexes for 0 degrees and 359.9 degrees
    idxst , idxend = (list(x).index(0),list(x).index(360))
    #print(idxend)
    #convert stimuli to vectors in complex plane (z=r*exp(i*stimang))
    stims = np.e**(1j * np.deg2rad(x[idxst:idxend]))
    #Compute the center of mass vector L
    L = np.sum(stims * unitactivity[:,idxst:idxend],1)
    for i in range(len(unitactivity)):
        for j in range(i,len(unitactivity)):
            #print(i,j)
            # Q is symmetric, so only the upper triangle is computed and mirrored.
            Q[i,j] = np.sum(unitactivity[i,idxst:idxend]*unitactivity[j,idxst:idxend])
            Q[j,i] = Q[i,j]
    # NOTE(review): np.sum(inv(Q)*L, 1) equals the matrix-vector product
    # inv(Q) @ L (row-wise broadcast, then sum).  Unlike the script-level
    # version of this computation later in the file, no noise-variance term
    # is added to Q's diagonal here -- possibly the suspected bug flagged in
    # the header comment; confirm.
    D = np.sum(np.linalg.inv(Q)*L,1)
    #preallocate v.est (n_stims x 1)
    v_est = np.zeros(idxend-idxst)
    for i in range(idxst,idxend):
        #print(i)
        v_est[i-idxst] = np.rad2deg(c.phase(np.sum(unitactivity[:,i]*D)))
        # NOTE(review): wrap threshold of -1 (rather than 0) leaves angles in
        # (-1, 0) unwrapped -- presumably a tolerance choice; verify.
        if v_est[i-idxst]<-1:
            v_est[i-idxst]+=360
    return v_est
unact = np.squeeze(model.centery) #all unit activities added here
#Preallocate L
L = np.ones(len(model.unitTracker))*1j
Langle = np.zeros(len(model.unitTracker))
#stimulus indexes for 0 degrees and 359.9 degrees
idxst , idxend = (list(model.x).index(0),list(model.x).index(360))
x = model.x[idxst:idxend]
stimuli = np.e**(1j*np.deg2rad(x))
angles=np.zeros(len(stimuli))
for i in range(angles.shape[0]):#works if angle is necessary, vector is now complex
angles[i] = np.rad2deg(c.polar(stimuli[i])[1])
if angles[i]<0:
angles[i]+=360
for i in range(L.shape[0]):
L[i] = np.sum(stimuli*unact[i,idxst:idxend])
for i in range(L.shape[0]):
Langle[i] = np.rad2deg(c.phase(L[i]))
if Langle[i]<0:
Langle[i]+=360
#preallocate covariance matrix Q (n_units x n_units)
Q = np.zeros([len(unact),len(unact)])
for i in range(len(unact)):
for j in range(len(unact)):
if i!=j:
var = 0
else:
var = np.deg2rad(kappa2std(kapMod[i])**2)
#print(i,j)
Q[i,j] = var+np.sum(unact[i,idxst:idxend]*unact[j,idxst:idxend])
#Q=Q/max(Q[0])
D = np.linalg.inv(Q)@L
#ri (popact) is the unit activity for the given stimulus!!!
popact = np.squeeze(col.decoder.nosurround(270,model.x,model.centery).noSur)
vest = np.sum(popact*D)
vests = np.zeros(360)
for i in range(360):
popact = np.squeeze(col.decoder.nosurround(i,model.x,model.centery).noSur)
vest = np.sum(popact*D)
if np.rad2deg(c.phase(vest))<0:
angle = np.rad2deg(c.phase(vest))+360
else:
angle = np.rad2deg(c.phase(vest))
vests[i] = angle
plt.plot(np.arange(360),vests-np.arange(360)) #either i did something wrong or the OLE does not work properly here!
| [
"tuncibrahimalperen@gmail.com"
] | tuncibrahimalperen@gmail.com |
0d5a174e5cec1db7ab67c95cc3571c0b464b5a33 | 0f61ebb523590099ad5dd15a0d33180117ff7437 | /record/callbacks/DatasetCB.py | 4d0b7ace9c42e2b24f2ce10938f8d681129d8a5e | [] | no_license | RECHE23/Constellation | 584af04961acca3146dceece283937a878ee7093 | e61d34314b64c8047f624f47446351deb755e8a3 | refs/heads/master | 2022-12-23T14:33:46.304614 | 2020-10-06T12:34:01 | 2020-10-06T12:34:01 | 290,214,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # -*- coding: utf-8 -*-
from poutyne.framework import Callback
from poutyne.utils import torch_to_numpy
class DatasetCB(Callback):
    """
    Callback that captures the dataset's contents and metadata and stores
    them on the shared experiment record.
    """
    def __init__(self, base_callback, dataset, batch_size):
        """
        Constructor of the class.
        :param base_callback: The base callback whose record/experiment are reused.
        :param dataset: The dataset being used.
        :param batch_size: The size of a batch.
        """
        super().__init__()
        self.base_callback = base_callback
        self.dataset = dataset
        self.data = dataset.data
        self.targets = torch_to_numpy(dataset.targets)
        self.record = base_callback.record
        experiment = base_callback.experiment
        self.experiment = experiment
        experiment['data'] = torch_to_numpy(dataset.data)
        experiment['targets'] = torch_to_numpy(dataset.targets)
        experiment['feature_names'] = dataset.feature_names
        # NOTE(review): 'target_names' is filled from feature_names in the
        # original code -- looks like a copy/paste slip; preserved as-is.
        experiment['target_names'] = dataset.feature_names
        experiment['dataset_name'] = dataset.name
        experiment['batch_size'] = batch_size
| [
"rene.chenard.1@ulaval.ca"
] | rene.chenard.1@ulaval.ca |
13a68583ca4ac6c7679b587c5ab1fe6e270b7a99 | 27d88f214158513ca83b29f71ba8f0670076ce4f | /EasyWay/settings.py | 0e259c615726ddd0d7da951fa94a34228c764ce4 | [] | no_license | ArclightMat/EasyWay | 899ee8210e06eb0f28639166b60a7695da3d3f62 | 558fd31c6736ffdf9002087d329d8d5df3f071ba | refs/heads/main | 2023-01-04T19:59:00.128928 | 2020-11-01T01:57:55 | 2020-11-01T01:57:55 | 307,208,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | """
Django settings for EasyWay project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']  # read from the environment; raises KeyError if unset
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # NOTE(review): hardcoded on -- drive from an env var before deploying
ALLOWED_HOSTS = []  # empty only works while DEBUG is True; list real hosts for production
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',  # GeoDjango
    'rest_framework',
    'crispy_forms',
    'geo.apps.GeoConfig',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EasyWay.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # the comma on its own line below is syntactically harmless
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
WSGI_APPLICATION = 'EasyWay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        # 'ENGINE': 'django.db.backends.sqlite3',
        'ENGINE': 'django.contrib.gis.db.backends.spatialite',  # GeoDjango backend on SQLite (needs the SpatiaLite library)
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'geo.User'  # custom user model from the geo app
LOGIN_REDIRECT_URL = '/'
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')  # collectstatic target directory
"mguedes@arclight.dev"
] | mguedes@arclight.dev |
f8187c532bd10bee4122b11b690cffea987f1b9c | 712c0f30bcb03809f752cba615d31d10e3a5f185 | /dataviz/eurovision_votes.py | 4b085a3c420c5599d736759e906d13b880973aab | [
"MIT"
] | permissive | dasbush/pudzu | dbe6ef0c3ca986918111eafff49fedcedc78bb47 | 72ab2571baf0128f6a77c87f058d4a003c632533 | refs/heads/master | 2020-03-27T11:22:58.655283 | 2018-08-24T13:43:15 | 2018-08-24T13:43:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | import sys
sys.path.append('..')
from charts import *
from bamboo import *
df = pd.read_csv("datasets/eurovision_votes.csv").set_index("country")
countries = pd.read_csv("datasets/countries.csv").split_columns(('nationality', 'tld', 'country'), "|").split_rows('country').set_index('country')
RANGES = [12, 10, 8, 6, 4, 2]
LABELS = ["twelve points (douze points)", ">ten points (dix points)", ">eight points (huit points)", ">six points (six points)", ">four points (quatre points)", ">two points (deux points)"]
PALETTE_TO = treversed(['#eff3ff','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#084594'])
PALETTE_FROM = PALETTE_TO
FONT = calibri
def colorfn(c, to=True):
    """Map a map-region name to a fill colour: white for sea/borders, grey for
    countries absent from the data, otherwise the palette bucket matching the
    country's highest average score (given or received, selected by *to*)."""
    if c in ('Sea', 'Borders'):
        return 'white'
    if c not in df.index:
        return "grey"
    score = df.to_score[c] if to else df.from_score[c]
    palette = PALETTE_TO if to else PALETTE_FROM
    return first(palette[i] for i in range(len(RANGES)) if score >= RANGES[i])
def labelfn(c, w, h, *args, to=True):
    """Build a label image (flag + average score) for country *c*, or None if
    *c* has no data.  Falls back to the bare flag when the full label does not
    fit in the available width *w* (*h* and *args* are unused).
    """
    if c not in df.index:
        return None
    # Hoist the repeated to/from lookups; the original also rendered an
    # unused `country` text image, which is dropped here.
    partner = df.to[c] if to else df['from'][c]
    avg_score = df.to_score[c] if to else df.from_score[c]
    flag = Image.from_url_with_cache(countries.flag[partner]).to_rgba().resize((24, 18))
    score = Image.from_text("{0:.1f}".format(avg_score), FONT(16), "black")
    # pick the widest variant that still fits: flag+score, else flag alone
    return first(i for i in [Image.from_row([flag, Rectangle(5, 0), score]), flag] if i.width <= w)
map_from = map_chart("maps/Europe2.png", partial(colorfn, to=False), partial(labelfn, to=False))
legend_from = generate_legend(PALETTE_FROM[:len(RANGES)], LABELS, header="HIGHEST AVERAGE POINTS RECEIVED", box_sizes=40, fonts=partial(FONT, 16))
chart_from = map_from.place(legend_from, align=(1,0), padding=10)
map_to = map_chart("maps/Europe2.png", partial(colorfn, to=True), partial(labelfn, to=True))
legend_to = generate_legend(PALETTE_FROM[:len(RANGES)], LABELS, header="HIGHEST AVERAGE POINTS GIVEN", box_sizes=40, fonts=partial(FONT, 16))
chart_to = map_to.place(legend_to, align=(1,0), padding=10)
chart = Image.from_row([chart_to, chart_from])
title = Image.from_column([
Image.from_text("Eurovision friendships: highest average points given (←) and received (→) per country".upper(), FONT(60, bold=True)),
Image.from_text("excludes ex-countries (Yugoslavia, S&M) and countries with <5 appearances (Morroco, Australia)", FONT(48, italics=True))],
bg="white", padding=4)
img = Image.from_column([title, chart], bg="white", padding=10)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/eurovision_votes.png")
| [
"uri.granta@hpe.com"
] | uri.granta@hpe.com |
a70c48187a78cb7b7949f22881f5719e418805ed | 9888b161ced0b5a1d33502dc9d26a4d861cf7b16 | /Sem3/Python/assignment4/1_max_three.py | 6cb9896d0e9e550c4c8063ecc649c716e20cc7da | [
"MIT"
] | permissive | nsudhanva/mca-code | bef8c3b1b4804010b4bd282896e07e46f498b6f8 | 812348ce53edbe0f42f85a9c362bfc8aad64e1e7 | refs/heads/master | 2020-03-27T13:34:07.562016 | 2019-11-25T04:23:25 | 2019-11-25T04:23:25 | 146,616,729 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | numbers = input('Enter a list of numbers (csv): ').replace(' ','').split(',')
numbers = [int(i) for i in numbers]
def max_three(numbers):
    """Return the largest value in *numbers*.

    Fixes the original sentinel bug: seeding the maximum with -1 returned -1
    for inputs whose values are all negative.  The maximum is seeded with the
    first element instead.

    :param numbers: non-empty iterable of comparable values
    :raises ValueError: if *numbers* is empty
    """
    it = iter(numbers)
    try:
        maxx = next(it)
    except StopIteration:
        raise ValueError('max_three() arg is an empty sequence') from None
    for n in it:
        if n > maxx:
            maxx = n
    return maxx
print('Max:', max_three(numbers)) | [
"nsudhanva@gmail.com"
] | nsudhanva@gmail.com |
65d52cdedac7d0a6460e1e1980f18c2c594b6c1b | aa1972e6978d5f983c48578bdf3b51e311cb4396 | /nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/stat/cluster/clusternode_stats.py | abe2d6e3e45fa778aaa226fb9adbf4931f703567 | [
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MayankTahil/nitro-ide | 3d7ddfd13ff6510d6709bdeaef37c187b9f22f38 | 50054929214a35a7bb19ed10c4905fffa37c3451 | refs/heads/master | 2020-12-03T02:27:03.672953 | 2017-07-05T18:09:09 | 2017-07-05T18:09:09 | 95,933,896 | 2 | 5 | null | 2017-07-05T16:51:29 | 2017-07-01T01:03:20 | HTML | UTF-8 | Python | false | false | 7,729 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusternode_stats(base_resource) :
r""" Statistics for cluster node resource.
"""
def __init__(self) :
self._nodeid = None
self._clearstats = None
self._clsyncstate = ""
self._clnodeeffectivehealth = ""
self._clnodeip = ""
self._clmasterstate = ""
self._cltothbtx = 0
self._cltothbrx = 0
self._nnmcurconn = 0
self._nnmtotconntx = 0
self._nnmtotconnrx = 0
self._clptpstate = ""
self._clptptx = 0
self._clptprx = 0
self._nnmerrmsend = 0
@property
def nodeid(self) :
r"""ID of the cluster node for which to display statistics. If an ID is not provided, statistics are shown for all nodes.<br/>Minimum value = 0<br/>Maximum value = 31.
"""
try :
return self._nodeid
except Exception as e:
raise e
@nodeid.setter
def nodeid(self, nodeid) :
r"""ID of the cluster node for which to display statistics. If an ID is not provided, statistics are shown for all nodes.
"""
try :
self._nodeid = nodeid
except Exception as e:
raise e
@property
def clearstats(self) :
r"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
r"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def clnodeip(self) :
r"""NSIP address of the cluster node.
"""
try :
return self._clnodeip
except Exception as e:
raise e
@property
def clsyncstate(self) :
r"""Sync state of the cluster node.
"""
try :
return self._clsyncstate
except Exception as e:
raise e
@property
def nnmcurconn(self) :
r"""Number of connections open for node-to-node communication.
"""
try :
return self._nnmcurconn
except Exception as e:
raise e
@property
def nnmerrmsend(self) :
r"""Number of errors in sending node-to-node multicast/broadcast messages. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmerrmsend
except Exception as e:
raise e
@property
def clnodeeffectivehealth(self) :
r"""Health of the cluster node.
"""
try :
return self._clnodeeffectivehealth
except Exception as e:
raise e
@property
def nnmtotconnrx(self) :
r"""Number of node-to-node messages received. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmtotconnrx
except Exception as e:
raise e
@property
def cltothbrx(self) :
r"""Number of heartbeats received. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._cltothbrx
except Exception as e:
raise e
@property
def clptprx(self) :
r"""Number of PTP packets received on the node. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptprx
except Exception as e:
raise e
@property
def nnmtotconntx(self) :
r"""Number of node-to-node messages sent. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmtotconntx
except Exception as e:
raise e
@property
def clmasterstate(self) :
r"""Operational state of the cluster node.
"""
try :
return self._clmasterstate
except Exception as e:
raise e
@property
def clptptx(self) :
r"""Number of PTP packets transmitted by the node. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptptx
except Exception as e:
raise e
@property
def clptpstate(self) :
r"""PTP state of the node. This state is Master for one node and Slave for the rest. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows UNKNOWN. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptpstate
except Exception as e:
raise e
@property
def cltothbtx(self) :
r"""Number of heartbeats sent. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._cltothbtx
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(clusternode_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.clusternode
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.nodeid is not None :
return str(self.nodeid)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch the statistics of all clusternode_stats resources that are configured on netscaler.
set statbindings=True in options to retrieve bindings.
"""
try :
obj = clusternode_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.nodeid = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class clusternode_response(base_response) :
    """NITRO API response wrapper carrying a list of clusternode_stats payloads."""
    def __init__(self, length=1) :
        """Initialise an empty response holding *length* clusternode_stats slots."""
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # single assignment replaces the original dead `self.clusternode = []`
        # that was immediately overwritten
        self.clusternode = [clusternode_stats() for _ in range(length)]
| [
"Mayank@Mandelbrot.local"
] | Mayank@Mandelbrot.local |
fdecc8791d1fe84d3a7a0b13de466a0681441769 | 97af32be868ceba728202fe122852e887841e20c | /posts/viewsets.py | 70d9194711b064d33b77fc0e298efcb2fb906e53 | [] | no_license | Paguru/paguru_challenge_api | 40e5b52300a260d463a19685addb50786f3acbe6 | 8f08a5f0e9f957402a7722dd9aa3846cdd018725 | refs/heads/main | 2023-01-13T19:15:11.720923 | 2020-11-23T14:25:30 | 2020-11-23T14:25:30 | 313,945,901 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | from rest_framework import viewsets, permissions
from .serializers import PostSerializer
from .models import Post
class IsAuthor(permissions.BasePermission):
    """Object-level permission: superusers always pass; otherwise only the
    object's author may act on it. Anonymous/missing users are denied."""
    def has_object_permission(self, request, view, obj):
        user = request.user
        if not user:
            return False
        if user.is_superuser:
            return True
        return obj.author == user
class PostViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Post.  The author is stamped from the request on
    create; update/destroy additionally require being the author (or a
    superuser) via IsAuthor."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer

    def perform_create(self, serializer):
        # Stamp the authenticated user as the post's author.
        serializer.save(author=self.request.user)

    def get_permissions(self):
        self.permission_classes = [permissions.IsAuthenticated]
        # NOTE(review): 'partial_update' (PATCH) is not listed, so PATCH only
        # requires authentication -- confirm that is intended.
        if self.action in ['update', 'destroy']:
            self.permission_classes = [IsAuthor]
        # Bug fix: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; use the zero-argument form instead.
        return super().get_permissions()
| [
"leonardofreitasdev@gmail.com"
] | leonardofreitasdev@gmail.com |
9e5f53b95d741c2ee30a38a7da958cc90bad660f | 06fb085df79c55de27af2dd4556ad322687987b1 | /source/conf.py | ab894d558ac834c5d98e3adb205663909625ac85 | [] | no_license | My-LearningNotes/CMake | 68cf2ba3e95f415fa5b9feff4f8f9b84a1dee57d | 38822f6083a59969758e37a535024566ade5c03c | refs/heads/master | 2022-07-29T10:15:24.623737 | 2020-05-14T10:03:00 | 2020-05-14T10:03:00 | 263,876,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# -- Project information -----------------------------------------------------
project = 'CMake笔记'
copyright = '2020, sylar.liu'
author = 'sylar.liu'

# The full version, including alpha/beta/rc tags
release = '1.0.0'

# -- General configuration ---------------------------------------------------

# The master toctree document.
master_doc = 'index'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. (None are enabled for this project.)
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
#
#templates_path = ['_templates']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. 'sphinx_rtd_theme' is the
# third-party Read the Docs theme and must be installed separately.
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#
#html_static_path = ['_static']
| [
"sylar_liu65@163.com"
] | sylar_liu65@163.com |
80b2caff1f7f6d0232067a5e7fc87dc00bf20efe | aaa162a8a772b1353bafce81698038a71d1fde23 | /one/gui.py | 3c6396570968532e68e75ae54d9d8713c0932684 | [] | no_license | jenkinss6/Commodity-parity-system | 167dcf21cfd874b9efa7d4e40ae4c58a8d07b20a | d4a57fb157c17f39f9cf2ba625ffd10cc9a59eaf | refs/heads/master | 2022-04-15T20:19:24.381988 | 2018-09-06T11:57:00 | 2018-09-06T11:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,334 | py | #########################################################################
#File Name: gui.py
#Author: Breeze
#mail: isduanhc@163.com
#github: https://github.com/breezeDHC
#csdn: https://blog.csdn.net/RBreeze
#Created Time: 2018年04月17日 星期二 09时24分55秒
#########################################################################
#!/usr/bin/env python3
#_*_ encoding:utf-8 _*_
from tkinter import *
from tkinter import messagebox
import crawl
import database
class gui(Frame):
    # Tkinter front-end for the price-comparison tool: the top pane
    # configures and launches the crawlers, the bottom pane filters and
    # displays the stored prices for Taobao / Tmall / JD.

    def __init__(self, master=None):
        # Crawl-parameter variables (site checkboxes, page depth, query).
        self.webvar1 = IntVar()
        self.webvar2 = IntVar()
        self.webvar3 = IntVar()
        self.webvar4 = IntVar()
        self.dp_var = IntVar()
        self.goods_var = StringVar()
        # Display-parameter variables (query, price range, sort flag).
        self.g_var = StringVar()
        self.min_var = IntVar()
        self.max_var = IntVar()
        self.sort_var = IntVar()
        # Placeholders; the real Text widgets are built in createWidgets().
        self.tb_text = Text()
        self.am_text = Text()
        self.jd_text = Text()
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()

    def crawler(self):
        # Read the UI selections and run the matching crawler(s); a
        # message box confirms each completed crawl.
        web_tb = self.webvar1.get()
        web_jd = self.webvar2.get()
        web_am = self.webvar3.get()
        web_all = self.webvar4.get()
        depth = self.dp_var.get()
        goods = self.goods_var.get()
        cr = crawl.crawl()
        if web_all == 1:
            cr.crawler_all(depth, goods)
            messagebox.showinfo(title='result', message='全部爬取完成')
        else:
            if web_jd == 1:
                cr.crawler_jd(depth, goods)
                messagebox.showinfo(title='result', message='jd爬取完成')
            if web_tb == 1:
                cr.crawler_tb(depth, goods)
                messagebox.showinfo(title='result', message='tb爬取完成')
            if web_am == 1:
                cr.crawler_am(depth, goods)
                messagebox.showinfo(title='result', message='TMALL爬取完成')

    # Sorting and price-range filtering.
    def show_sub1(self, lst, sort, min_v, max_v):
        # Entries look like (price, title) -- price assumed at index 0;
        # max_v == 0 means "no range filter".  TODO confirm row layout
        # against database.readInfo.
        if sort == 1:
            lst = sorted(lst, key=lambda x: x[0])
        if max_v != 0:
            lst_tem = lst
            lst = []
            for i in range(len(lst_tem)):
                if (lst_tem[i][0] >= min_v) and (lst_tem[i][0] <= max_v):
                    lst.append(lst_tem[i])
        return lst

    # Render the processed rows into the corresponding site's text box.
    def show_sub2(self, lst, web):
        if len(lst) != 0:
            # Column header: index / price / title.
            tem_str = '序号\t价格\t\t\t标题\n'
            count = 0
            if web == 'tb':
                self.tb_text.insert('end', tem_str)
            if web == 'tm':
                self.am_text.insert('end', tem_str)
            if web == 'jd':
                self.jd_text.insert('end', tem_str)
            for i in range(len(lst)):
                count = count + 1
                # Titles are truncated to 17 characters to fit the column.
                if web == 'tb':
                    self.tb_text.insert('end', str(count) + '\t')
                    self.tb_text.insert('end', str(lst[i][0]) + '\t')
                    self.tb_text.insert('end', str(lst[i][1][:17]) + '\n')
                if web == 'tm':
                    self.am_text.insert('end', str(count) + '\t')
                    self.am_text.insert('end', str(lst[i][0]) + '\t')
                    self.am_text.insert('end', str(lst[i][1][:17]) + '\n')
                if web == 'jd':
                    self.jd_text.insert('end', str(count) + '\t')
                    self.jd_text.insert('end', str(lst[i][0]) + '\t')
                    self.jd_text.insert('end', str(lst[i][1][:17]) + '\n')

    def showdata(self):
        # Fetch stored rows for each site, apply sort/range, and display.
        goods = self.g_var.get()
        min_v = self.min_var.get()
        max_v = self.max_var.get()
        sort = self.sort_var.get()
        lst_tb = []
        lst_jd = []
        lst_tm = []
        d = database.database()
        # Taobao results
        self.tb_text.delete(0.0, 'end')
        d.readInfo(lst_tb, goods, 'tb')
        lst_tb = self.show_sub1(lst_tb, sort, min_v, max_v)
        self.show_sub2(lst_tb, 'tb')
        # Tmall results (stored under 'am', displayed as 'tm')
        self.am_text.delete(0.0, 'end')
        d.readInfo(lst_tm, goods, 'am')
        lst_tm = self.show_sub1(lst_tm, sort, min_v, max_v)
        self.show_sub2(lst_tm, 'tm')
        # JD results
        self.jd_text.delete(0.0, 'end')
        d.readInfo(lst_jd, goods, 'jd')
        lst_jd = self.show_sub1(lst_jd, sort, min_v, max_v)
        self.show_sub2(lst_jd, 'jd')
        d.closeConn()

    def createWidgets(self):
        # --- frame layout for the crawl-parameter pane ---
        self.ft = Frame(self)
        self.ft.pack(side='top', pady=20, padx=30, fill='both')
        self.f1 = Frame(self.ft)
        self.f1.pack(side='top', anchor='ne', pady=10, fill='both')
        self.f2 = Frame(self.ft, bg='red')
        self.f2.pack(side='top', fill='both')
        self.f11 = Frame(self.f1)
        self.f11.pack(side='left', anchor='ne', fill='both')
        self.f12 = Frame(self.f1)
        self.f12.pack(side='left', anchor='ne', padx=20, fill='both')
        self.f13 = Frame(self.f1)
        self.f13.pack(side='left', anchor='ne', fill='both')
        self.f14 = Frame(self.f1)
        self.f14.pack(side='left', anchor='ne', padx=20, fill='both')
        # --- frame layout for the data-display pane ---
        self.fb = Frame(self)
        self.fb.pack(side='top', anchor='nw', padx=30)
        # Row 1: filter controls
        self.fb1 = Frame(self.fb)
        self.fb1.pack(side='top', fill='both')
        self.fb11 = Frame(self.fb1)
        self.fb11.pack(side='left')
        self.fb12 = Frame(self.fb1)
        self.fb12.pack(side='left', padx=20)
        self.fb13 = Frame(self.fb1)
        self.fb13.pack(side='left')
        self.fb14 = Frame(self.fb1)
        self.fb14.pack(side='left', padx=20)
        # Row 2: site labels
        self.fb2 = Frame(self.fb)
        self.fb2.pack(side='top', fill='both', pady=20)
        # Row 3: result boxes
        self.fb3 = Frame(self.fb)
        self.fb3.pack(side='top', fill='both')
        # --- crawl-parameter widgets ---
        # Website checkboxes
        Label(self.f11, text='Website:').pack(side='left', padx=0)
        Checkbutton(self.f11, text='Taobao', variable=self.webvar1, onvalue=1, offvalue=0).pack(side='left')
        Checkbutton(self.f11, text='JingDong', variable=self.webvar2, onvalue=1, offvalue=0).pack(side='left')
        Checkbutton(self.f11, text='TMALL', variable=self.webvar3, onvalue=1, offvalue=0).pack(side='left')
        Checkbutton(self.f11, text='ALL', variable=self.webvar4, onvalue=1, offvalue=0).pack(side='left')
        # Page-count entry
        Label(self.f12, text='Page:').pack(side='left', padx=10)
        Entry(self.f12, textvariable=self.dp_var, width=5).pack(side='left')
        # Goods (query) entry
        Label(self.f13, text='goods:').pack(side='left', padx=10)
        Entry(self.f13, textvariable=self.goods_var, width=10).pack(side='left')
        # Submit button -> crawler()
        Button(self.f14, text='submit', command=self.crawler).pack(side='left', padx=20)
        # Horizontal separator line
        Canvas(self.f2, bg='white', width=1093, height=3).pack(side='top')
        # --- data-display parameter widgets ---
        # Goods (query) entry
        Label(self.fb11, text='goods:').pack(side='left')
        Entry(self.fb11, textvariable=self.g_var, width=5).pack(side='left')
        # Price range (min / max)
        Label(self.fb12, text='Range:').pack(side='left')
        Entry(self.fb12, width=5, textvariable=self.min_var).pack(side='left', padx=10)
        Entry(self.fb12, width=5, textvariable=self.max_var).pack(side='left')
        # Sort-by-price toggle
        Checkbutton(self.fb13, text='Sort', variable=self.sort_var, onvalue=1, offvalue=0).pack(side='left')
        # Confirm button -> showdata()
        Button(self.fb14, text='confirm', command=self.showdata).pack(side='left')
        # Site labels above the result boxes
        Label(self.fb2, text='Taobao').pack(side='left', padx=150)
        Label(self.fb2, text='TMALL').pack(side='left', padx=170)
        Label(self.fb2, text='JingDong').pack(side='left', padx=150)
        # Result text boxes, one per site
        self.tb_text = Text(self.fb3, width=50, height=30)
        self.tb_text.pack(side='left')
        self.am_text = Text(self.fb3, width=50, height=30)
        self.am_text.pack(side='left', padx=10)
        self.jd_text = Text(self.fb3, width=50, height=30)
        self.jd_text.pack(side='left')
if __name__ == '__main__':
    # Build the Tk root window and hand it to the comparison GUI.
    window = Tk()
    window.title('商品比价')
    window.geometry('1150x700')
    g = gui(master=window)
    g.mainloop()
| [
"isduanhc@163.com"
] | isduanhc@163.com |
006975ec0ce5c55e6f135808e0f0aedad4e2f004 | c6584c9529d571458061661e125d7c52b6f968ee | /LeetcodeProblems/Add Two Numbers.py | 30bff0d914cfd0d050f9af2a89598e3e32e2389c | [] | no_license | sankarshan-bhat/DS_Algo_Practice | b6caad4075990491ada8ea8fd1d4c21bea560e6b | 3ed2221d38ed073939decf8dcadacb31c507d5ee | refs/heads/master | 2021-10-01T18:00:14.288330 | 2018-11-28T01:17:42 | 2018-11-28T01:17:42 | 115,923,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as reversed digit lists.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Each list node holds one digit, least-significant first; the sum
        is returned in the same representation.
        """
        # Dummy head simplifies appending; dummy.next is the real result.
        sumNode = dummy = ListNode(0)
        c = 0
        # Keep going while either list has digits or a carry remains.
        while (l1 or l2 or c == 1):
            a1, l1 = (l1.val, l1.next) if l1 else (0, None)
            a2, l2 = (l2.val, l2.next) if l2 else (0, None)
            # BUGFIX: removed the stray Python-2 debug statement
            # ``print s,c`` (a SyntaxError on Python 3, and noisy on 2).
            c, s = divmod(a1 + a2 + c, 10)
            sumNode.next = ListNode(s)
            sumNode = sumNode.next
        return dummy.next
| [
"sbhat@Ban-1Sbhat-m-2.local"
] | sbhat@Ban-1Sbhat-m-2.local |
7547d88badccaf38947a4ee584cd09926ea414f7 | ea795fdbf005d43abfffe3e62370650d825913de | /estagios/core/migrations/0014_job_candidatos.py | 5756b208792d8f539eda8f654b92e9c8b6964156 | [] | no_license | rjcjunior/dac-trabalho-final | a795856e81f0da7bda57d3e08e551574e13f26b4 | 74876f37f0cc7547cb7e80b723f0876ee416619e | refs/heads/master | 2020-03-23T20:29:18.209298 | 2018-07-16T14:46:27 | 2018-07-16T14:46:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # Generated by Django 2.0.6 on 2018-07-11 04:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0013_auto_20180710_2158'),
]
operations = [
migrations.AddField(
model_name='job',
name='candidatos',
field=models.ManyToManyField(blank=True, null=True, to='core.Student', verbose_name='Candidatos'),
),
]
| [
"rjcjunior@hotmail.com"
] | rjcjunior@hotmail.com |
d93165b87e5b1cc0ff1516e627ecc9fe72ad0be0 | bb258e36a9a1683398b27296c369d5a68fd1f2f8 | /ADT102/ADTassignmentButwithpython/Stack.py | 19b95ff175e7acf41cb75998b1e99b4f34eaa009 | [
"Unlicense"
] | permissive | PasakonPJ/HomeworkCollection | 507cb838d2c114eef0e675594dbbdd8c8c558cb4 | 12359eab28416b93f7da00c59fcc44b4e9b7723a | refs/heads/main | 2023-06-18T18:51:08.638939 | 2021-07-14T15:31:26 | 2021-07-14T15:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | class Stack:
st = []
top = 0
size = 0
def __init__(self, size):
self.size = size
def push(self, data):
self.st.append(data)
self.top = self.top + 1
def pop(self):
temp = self.st[-1]
del self.st[-1]
self.top = self.top - 1
return temp
    def peek(self):
        # Return the top element without removing it (IndexError if empty).
        return self.st[-1]
def is_empty(self):
return self.top == 0
    def is_full(self):
        # Full once the element count reaches the configured capacity.
        return self.top >= self.size
    def pop_all(self):
        # Drain the stack, printing each element (top first).
        while not self.is_empty():
            print(self.pop().__str__())
    def get_size(self):
        # Return the configured capacity (not the current element count).
        return self.size
| [
"oat431@gmail.com"
] | oat431@gmail.com |
b0b7904fdff451daf6abcf12c03a1fb40d296873 | 02b11d521d6b97ecd2adac1c27bd49b996e3d1eb | /decatur/config.py | 2695d2c404507aa0e4623578f7b382d532932adf | [
"MIT"
] | permissive | jadilia/decatur | bb419d8664309018d625622c804366495774b6c3 | 42525f3797aa7b6d60765bd1149209e4fcf50b75 | refs/heads/master | 2020-04-06T04:24:31.916882 | 2017-02-24T20:38:00 | 2017-02-24T20:38:00 | 72,668,902 | 0 | 0 | null | 2016-11-02T18:22:55 | 2016-11-02T18:22:55 | null | UTF-8 | Python | false | false | 821 | py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function, division, absolute_import

import os

# Data directory: DECATUR_DATA_DIR, defaulting to the working directory
# when the variable is missing (an empty value is kept as-is, exactly
# like the original is-None check).
data_dir = os.getenv('DECATUR_DATA_DIR', '.')

# Repository data directory (bundled alongside this module).
repo_data_dir = catalog_file = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'data'))

# Database / SSH-tunnel parameters, all sourced from the environment;
# each is None when the corresponding variable is unset.
host = os.getenv('DB_HOST')
user = os.getenv('DB_USER')
password = os.getenv('DB_PASSWORD')
domain = os.getenv('DB_DOMAIN')
tunnel_host = os.getenv('TUNNEL_HOST')
tunnel_user = os.getenv('TUNNEL_USER')

db_params = {
    'host': host,
    'user': user,
    'password': password,
    'domain': domain,
    'tunnel_host': tunnel_host,
    'tunnel_user': tunnel_user,
}
| [
"luriejcc@gmail.com"
] | luriejcc@gmail.com |
425263f799516e95483dfd2b406768fe3bb8a952 | 49d9757a44b5aca84de8fbb00df67d0529b5cff1 | /swarm_tasks/Examples/basic_tasks/aggregation.py | 89f866b3980642aeb21e2dab1535579a50d35060 | [
"MIT"
] | permissive | akagam1/swarm_tasks | e076782a1494b1a0216e633824e4f54de5d991d1 | 3335297ba8fcdbff756ae519002bcce919d54a84 | refs/heads/main | 2023-04-29T15:24:50.440730 | 2021-05-17T16:58:04 | 2021-05-17T16:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | print("Running foraging example 1\nUsing source files for package imports\nPARAMETERS:")
# Aggregation demo: configure swarm parameters, then run the aggregation
# behaviour in an endless visualisation loop.
import sys, os
# Make the repository root importable when running straight from the tree.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
print(sys.path)

import swarm_tasks

# Set demo parameters directly
import numpy as np
import random

swarm_tasks.utils.robot.DEFAULT_NEIGHBOURHOOD_VAL = 6
swarm_tasks.utils.robot.DEFAULT_SIZE = 0.4
swarm_tasks.utils.robot.MAX_SPEED = 1.5
# BUGFIX: this previously read ``...MAX_ANGULAR: 0.3`` -- an annotation
# statement that never assigned anything, so the parameter was silently
# left at its default.
swarm_tasks.utils.robot.MAX_ANGULAR = 0.3

# Fixed seeds so repeated runs are reproducible.
np.random.seed(42)
random.seed(42)

from swarm_tasks.simulation import simulation as sim
from swarm_tasks.simulation import visualizer as viz

# Required modules
import swarm_tasks.controllers.base_control as base_control
from swarm_tasks.modules.aggregation import aggr_centroid

# Initialize Simulation and GUI
s = sim.Simulation(env_name='rectangles')
gui = viz.Gui(s)
gui.show_env()

# Endless render loop: base control + obstacle avoidance + aggregation.
while True:
    for b in s.swarm:
        # Base control
        cmd = base_control.base_control(b)
        cmd += base_control.obstacle_avoidance(b)
        # Behaviour
        cmd += aggr_centroid(b)
        # Execute
        cmd.exec(b)
    gui.update()
| [
"rishikesh.vanarse@gmail.com"
] | rishikesh.vanarse@gmail.com |
51446c69f0f45868f9d4f60c5e8b92fab988a9c1 | d2faafa0cc3a7845d601d4ed3fb27a3dd90f89db | /libsvm/libsvm-3.20/python/ML_train.py | 6b4c8318edcd87b2ac30421acc95ad54fdac55cc | [
"BSD-3-Clause"
] | permissive | bob831009/NLP | 469b3da6b5d5792022f3a3ece3db9374fc12497d | c26d547ea4f949e5250c6e4a7480cb9dc07ed8dd | refs/heads/master | 2021-05-04T11:08:19.826682 | 2017-02-23T12:06:01 | 2017-02-23T12:06:01 | 54,315,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | #!/usr/bin/env python
from svm import *
from svmutil import *

# Feature/label containers; test_X / test_Y are declared but never
# filled in this script.
train_X = [];
train_Y = [];
test_X = [];
test_Y = [];

# Each training line: the first 10 whitespace-separated fields are
# skipped, the next field is the class tag ('A' -> +1, otherwise -1) and
# the remaining fields are numeric features.
f = open("./libsvm-3.20/python/train.txt", "r");
for line in f:
    line = line.strip().split();
    line = line[10:];
    if(line[0] == 'A'):
        train_Y.append(1);
    else:
        train_Y.append(-1);
    line.pop(0);
    # NOTE(review): `map` yields a list only on Python 2; on Python 3
    # this appends a lazy map object -- confirm the target interpreter.
    line = map(float, line);
    train_X.append(line);

# Polynomial kernel (-t 1), degree 2, C=1, gamma=1, coef0=1, quiet mode.
problem = svm_problem(train_Y, train_X);
tmp_param = '-t 1 -c 1 -g 1 -r 1 -d 2 -q';
param = svm_parameter(tmp_param);
model = svm_train(problem, param);
svm_save_model('./libsvm-3.20/python/LTTC.model', model); | [
"bob@LiangZhiHongdeMacBook-Pro.local"
] | bob@LiangZhiHongdeMacBook-Pro.local |
1c99962d45256fe6f29cdac2db7415ab822aa439 | 22c494a663c52e2b11eb309a2e51f641439b86e9 | /unb/items.py | ff6794309b02bef91bd1788d85a6c384689e8dde | [] | no_license | christianlmc/unb-crawler | e268376fc23609869e1b2eba4423c60250e88eae | 9d91dd25cf1e1a844a79d1310f0a469ef6e7096b | refs/heads/master | 2022-03-01T12:10:30.396568 | 2019-11-16T21:33:23 | 2019-11-16T21:33:23 | 204,311,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CursoItem(scrapy.Item):
    # Scraped degree programme: its name and the shift it is offered in.
    nome = scrapy.Field()
    turno = scrapy.Field()
class UnbItem(scrapy.Item):
    # Placeholder item generated by the Scrapy template; no fields yet.
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class DisciplinaItem(scrapy.Item):
    # A course/discipline: its name, offered class sections, and the
    # department it belongs to.
    nome = scrapy.Field()
    turmas = scrapy.Field()
    departamento = scrapy.Field()
class TurmaItem(scrapy.Item):
    # A single class section: its letter code plus seat counts
    # (total / taken / still available).
    letra = scrapy.Field()
    vagas_total = scrapy.Field()
    vagas_ocupadas = scrapy.Field()
    vagas_disponiveis = scrapy.Field()
"christianlmc1@gmail.com"
] | christianlmc1@gmail.com |
8891c63d771f6a79d7d21a6d4e1aa2b6b160da21 | a9fc606f6d86d87fe67290edc49265986a89b882 | /0x01-challenge/square.py | 968a4ca633933d88ff03bcf97ff32862dd42e645 | [] | no_license | Jilroge7/Fix_My_Code_Challenge | 5a4a1aae18ebc600fdb327bcf8958e67475562f5 | b9309addde32a714a533cbb43e87ba180b19e67a | refs/heads/master | 2022-12-25T22:23:09.815638 | 2020-10-01T18:28:50 | 2020-10-01T18:28:50 | 287,828,672 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | #!/usr/bin/python3
class square():
width = 0
height = 0
def __init__(self, *args, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def area_of_my_square(self):
""" Area of the square """
return self.width * self.height
def PermiterOfMySquare(self):
return (self.width * 2) + (self.height * 2)
def __str__(self):
return "{}/{}".format(self.width, self.height)
if __name__ == "__main__":
s = square(width=12, height=9)
print(s)
print(s.area_of_my_square())
print(s.PermiterOfMySquare())
| [
"1672@holbertonschool.com"
] | 1672@holbertonschool.com |
3a8b201be22a348feb2a278b38cd0dc961637695 | c98c2821042687d4f84bc135b3c929a33f1bf607 | /OWL2Vec_Standalone_Multi.py | 9795c2720813ba6ac3288149ffda789dc5c39ba4 | [
"Apache-2.0"
] | permissive | KRR-Oxford/OWL2Vec-Star | e971b1d411a15118f5ddfab8bf39c35455277ab4 | 948dd9ccbd3dc8ee85fb91270d68dbf5f03842c2 | refs/heads/master | 2023-04-16T19:43:46.084986 | 2023-04-09T11:58:57 | 2023-04-09T11:58:57 | 289,316,423 | 71 | 25 | Apache-2.0 | 2021-08-05T21:12:27 | 2020-08-21T16:32:37 | Python | UTF-8 | Python | false | false | 9,830 | py | import os
import time
import argparse
import random
import multiprocessing
import gensim
import configparser

from owl2vec_star.lib.RDF2Vec_Embed import get_rdf2vec_walks
from owl2vec_star.lib.Label import pre_process_words, URI_parse
from owl2vec_star.lib.Onto_Projection import Reasoner, OntologyProjection

# Command-line flags; any flag given here overrides the config file below.
parser = argparse.ArgumentParser()
parser.add_argument("--ontology_dir", type=str, default=None, help="The directory of input ontologies for embedding")
parser.add_argument("--embedding_dir", type=str, default=None, help="The output embedding directory")
parser.add_argument("--config_file", type=str, default='default_multi.cfg', help="Configuration file")
parser.add_argument("--URI_Doc", help="Using URI document", action="store_true")
parser.add_argument("--Lit_Doc", help="Using literal document", action="store_true")
parser.add_argument("--Mix_Doc", help="Using mixture document", action="store_true")
FLAGS, unparsed = parser.parse_known_args()

# read and combine configurations
# overwrite the parameters in the configuration file by the command parameters
config = configparser.ConfigParser()
config.read(FLAGS.config_file)
if FLAGS.ontology_dir is not None:
    config['BASIC']['ontology_dir'] = FLAGS.ontology_dir
if FLAGS.embedding_dir is not None:
    config['BASIC']['embedding_dir'] = FLAGS.embedding_dir
if FLAGS.URI_Doc:
    config['DOCUMENT']['URI_Doc'] = 'yes'
if FLAGS.Lit_Doc:
    config['DOCUMENT']['Lit_Doc'] = 'yes'
if FLAGS.Mix_Doc:
    config['DOCUMENT']['Mix_Doc'] = 'yes'
# Working directory for intermediate artefacts (entities, axioms, ...).
if 'cache_dir' not in config['DOCUMENT']:
    config['DOCUMENT']['cache_dir'] = './cache'
if not os.path.exists(config['DOCUMENT']['cache_dir']):
    os.mkdir(config['DOCUMENT']['cache_dir'])
if 'embedding_dir' not in config['BASIC']:
    config['BASIC']['embedding_dir'] = os.path.join(config['DOCUMENT']['cache_dir'], 'output')

start_time = time.time()
# Corpus pieces accumulated over every ontology in the input directory.
walk_sentences, axiom_sentences = list(), list()
uri_label, annotations = dict(), list()
for file_name in os.listdir(config['BASIC']['ontology_dir']):
    if not file_name.endswith('.owl'):
        continue
    ONTO_FILE = os.path.join(config['BASIC']['ontology_dir'], file_name)
    print('\nProcessing %s' % file_name)
    projection = OntologyProjection(ONTO_FILE, reasoner=Reasoner.STRUCTURAL, only_taxonomy=False,
                                    bidirectional_taxonomy=True, include_literals=True, avoid_properties=set(),
                                    additional_preferred_labels_annotations=set(),
                                    additional_synonyms_annotations=set(), memory_reasoner='13351')

    # Extract and save seed entities (classes and individuals)
    print('... Extract entities (classes and individuals) ...')
    projection.extractEntityURIs()
    classes = projection.getClassURIs()
    individuals = projection.getIndividualURIs()
    entities = classes.union(individuals)
    # NOTE: opened in append mode -- entries accumulate across ontologies
    # (and across repeated runs unless the cache directory is cleared).
    with open(os.path.join(config['DOCUMENT']['cache_dir'], 'entities.txt'), 'a') as f:
        for e in entities:
            f.write('%s\n' % e)

    # Extract and save axioms in Manchester Syntax
    print('... Extract axioms ...')
    projection.createManchesterSyntaxAxioms()
    with open(os.path.join(config['DOCUMENT']['cache_dir'], 'axioms.txt'), 'a') as f:
        for ax in projection.axioms_manchester:
            axiom_sentence = [item for item in ax.split()]
            axiom_sentences.append(axiom_sentence)
            f.write('%s\n' % ax)
    print('... %d axioms ...' % len(axiom_sentences))

    # Read annotations including rdfs:label and other literals from the ontology
    # Extract annotations: 1) English label of each entity, by rdfs:label or skos:preferredLabel
    #                      2) None label annotations as sentences of the literal document
    print('... Extract annotations ...')
    projection.indexAnnotations()
    with open(os.path.join(config['DOCUMENT']['cache_dir'], 'annotations.txt'), 'a') as f:
        # First pass: record each entity's preferred label (word list).
        for e in entities:
            if e in projection.entityToPreferredLabels and len(projection.entityToPreferredLabels[e]) > 0:
                label = list(projection.entityToPreferredLabels[e])[0]
                v = pre_process_words(words=label.split())
                uri_label[e] = v
                f.write('%s preferred_label %s\n' % (e, v))
        # Second pass: every other lexical annotation becomes a sentence.
        for e in entities:
            if e in projection.entityToAllLexicalLabels:
                for v in projection.entityToAllLexicalLabels[e]:
                    if (v is not None) and \
                            (not (e in projection.entityToPreferredLabels and v in projection.entityToPreferredLabels[e])):
                        annotation = [e] + v.split()
                        annotations.append(annotation)
                        f.write('%s\n' % ' '.join(annotation))

    # project ontology to RDF graph (optionally) and extract walks
    if 'ontology_projection' in config['DOCUMENT'] and config['DOCUMENT']['ontology_projection'] == 'yes':
        print('... Calculate the ontology projection ...')
        projection.extractProjection()
        onto_projection_file = os.path.join(config['DOCUMENT']['cache_dir'], 'projection.ttl')
        projection.saveProjectionGraph(onto_projection_file)
        ONTO_FILE = onto_projection_file
    print('... Generate walks ...')
    walks_ = get_rdf2vec_walks(onto_file=ONTO_FILE, walker_type=config['DOCUMENT']['walker'],
                               walk_depth=int(config['DOCUMENT']['walk_depth']), classes=entities)
    print('... %d walks for %d seed entities ...' % (len(walks_), len(entities)))
    walk_sentences += [list(map(str, x)) for x in walks_]

# collect URI documents
# two parts: axiom sentences + walk sentences
URI_Doc = list()
if 'URI_Doc' in config['DOCUMENT'] and config['DOCUMENT']['URI_Doc'] == 'yes':
    print('Extracted %d axiom sentences' % len(axiom_sentences))
    URI_Doc = walk_sentences + axiom_sentences
# Some entities have English labels; built-in W3C properties keep their
# URI fragment; anything else with a URI is parsed into words.
def label_item(item):
    """Return the list of label words standing in for *item*.

    Unlabelled non-URI items yield an empty list so callers can safely
    concatenate the result.
    """
    if item in uri_label:
        return uri_label[item]
    elif item.startswith('http://www.w3.org'):
        return [item.split('#')[1].lower()]
    elif item.startswith('http://'):
        return URI_parse(uri=item)
    else:
        # return [item.lower()]
        # BUGFIX: previously returned '' (a str); ``'' + list`` raises
        # TypeError where callers do ``label_item(x) + processed_words``.
        # An empty list behaves identically for ``+=`` callers.
        return []
# read literal document
# two parts: literals in the annotations (subject's label + literal words)
#            replacing walk/axiom sentences by words in their labels
Lit_Doc = list()
if 'Lit_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Lit_Doc'] == 'yes':
    print('\n\nGenerate literal document')
    for annotation in annotations:
        processed_words = pre_process_words(annotation[1:])
        if len(processed_words) > 0:
            Lit_Doc.append(label_item(item=annotation[0]) + processed_words)
    print('... Extracted %d annotation sentences ...' % len(Lit_Doc))

    # Replace every URI in walk/axiom sentences by its label words.
    for sentence in walk_sentences + axiom_sentences:
        lit_sentence = list()
        for item in sentence:
            lit_sentence += label_item(item=item)
        Lit_Doc.append(lit_sentence)

# for each axiom/walk sentence, generate mixture sentence(s) by two strategies:
#   all): for each entity, keep its entity URI, replace the others by label words
#   random): randomly select one entity, keep its entity URI, replace the others by label words
Mix_Doc = list()
if 'Mix_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Mix_Doc'] == 'yes':
    print('\n\nGenerate mixture document')
    for sentence in walk_sentences + axiom_sentences:
        if config['DOCUMENT']['Mix_Type'] == 'all':
            # One mixture sentence per position kept as a raw URI.
            for index in range(len(sentence)):
                mix_sentence = list()
                for i, item in enumerate(sentence):
                    mix_sentence += [item] if i == index else label_item(item=item)
                Mix_Doc.append(mix_sentence)
        elif config['DOCUMENT']['Mix_Type'] == 'random':
            random_index = random.randint(0, len(sentence) - 1)
            mix_sentence = list()
            for i, item in enumerate(sentence):
                mix_sentence += [item] if i == random_index else label_item(item=item)
            Mix_Doc.append(mix_sentence)

print('\n\nURI_Doc: %d, Lit_Doc: %d, Mix_Doc: %d' % (len(URI_Doc), len(Lit_Doc), len(Mix_Doc)))
all_doc = URI_Doc + Lit_Doc + Mix_Doc
print('Time for document construction: %s seconds' % (time.time() - start_time))
# Shuffle so the three document types are interleaved during training.
random.shuffle(all_doc)

# learn the embedding model (train a new model or fine tune the pre-trained model)
start_time = time.time()
if 'pre_train_model' not in config['MODEL'] or not os.path.exists(config['MODEL']['pre_train_model']):
    print('\n\nTrain the embedding model')
    model_ = gensim.models.Word2Vec(all_doc, vector_size=int(config['MODEL']['embed_size']),
                                    window=int(config['MODEL']['window']),
                                    workers=multiprocessing.cpu_count(),
                                    sg=1, epochs=int(config['MODEL']['iteration']),
                                    negative=int(config['MODEL']['negative']),
                                    min_count=int(config['MODEL']['min_count']), seed=int(config['MODEL']['seed']))
else:
    print('\n\nFine-tune the pre-trained embedding model')
    model_ = gensim.models.Word2Vec.load(config['MODEL']['pre_train_model'])
    if len(all_doc) > 0:
        model_.min_count = int(config['MODEL']['min_count'])
        model_.build_vocab(all_doc, update=True)
        # NOTE(review): fresh training reads config key 'iteration' but
        # fine-tuning reads 'epoch' -- confirm both keys exist.
        model_.train(all_doc, total_examples=model_.corpus_count, epochs=int(config['MODEL']['epoch']))
model_.save(config['BASIC']['embedding_dir'])
print('Time for learning the embedding model: %s seconds' % (time.time() - start_time))
print('Model saved. Done!')
| [
"chen00217@gmail.com"
] | chen00217@gmail.com |
725b00dcf831b8d8915cb513f457df96a8e21cd4 | be3681cb13eaf32e1f6f0ad2239875e1a2e0c260 | /hw1/hw1_v2.py | d2ab3971fd6b01879a2f9d570d82ea15810d2419 | [] | no_license | Wilson1211/DataScience | 531cd19169205aeff02d4a4554e2019c3763d11b | f9467f2a04b6e8615d7fd1226a5c8b06d901afc9 | refs/heads/master | 2021-08-23T04:34:50.954976 | 2017-12-03T09:14:59 | 2017-12-03T09:14:59 | 111,182,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,467 | py | #/usr/bin/python
# hw1: download the smoking-rate CSV, compute per-year smoking ratios for
# men, women and the combined population, and draw a grouped bar chart.
# BUGFIX: plain ``import urllib`` does not make ``urllib.request``
# available on Python 3 -- import the submodule explicitly.
import urllib.request
import ssl
import sys
import matplotlib.pyplot as plt

# The course server's certificate fails default verification; disable it
# for this one download.
ssl._create_default_https_context = ssl._create_unverified_context
link = "https://ceiba.ntu.edu.tw/course/481ea4/hw1_data.csv"
urllib.request.urlretrieve(link, "hw1_data")

# Parse the CSV by hand: skip blank lines, strip the trailing newline and
# split each remaining line into comma-separated fields.
data = []
with open("hw1_data", "r") as file:
    while True:
        datar = file.readline()
        if not datar:
            break
        if datar == "\n":
            continue
        datar = datar[:len(datar) - 1]  # drop the trailing '\n'
        print(datar)
        print(len(datar))
        data.append(datar.split(","))

for x in data:
    print(x)
print(len(data))

# Rows 2..6 hold the yearly figures; per row the layout appears to be
# [year, male population, male smoking rate(%), female population,
#  female smoking rate(%)] -- TODO confirm against the source CSV.
male_smoke_population = []
female_smoke_population = []
t = []
total_population = []
for i in range(2, 7):
    try:
        man_p = float(data[i][1])
    except ValueError:
        # Non-numeric row (e.g. a header line): skip it.
        pass
    else:
        man_p = float(data[i][1])
        woman_p = float(data[i][3])
        man_smo_r = float(data[i][2])
        woman_smo_r = float(data[i][4])
        man_smo_p = man_p * man_smo_r / 100
        print(man_smo_p)
        woman_smo_p = woman_p * woman_smo_r / 100
        print(woman_smo_p)
        total_p = man_p + woman_p
        print(total_p)
        print(man_smo_p + woman_smo_p)
        # Combined smoking rate, weighted by the two population sizes.
        total_smo_r = (man_smo_p + woman_smo_p) / total_p * 100
        total_population.append(total_smo_r)
        male_smoke_population.append(str(man_smo_r))
        female_smoke_population.append(str(woman_smo_r))
        t.append(str(data[i][0]))

print(total_population)
print(t)
print(male_smoke_population)
list1 = [float(x) for x in male_smoke_population]
list2 = [float(x) for x in female_smoke_population]
list3 = [float(x) for x in total_population]

# Grouped bar chart: male / female / combined rate for each year.
width = 0.25  # the width of the bars
ind = range(len(list1))  # the x locations of the groups
ind_left = ind
ind_middle = [x + width for x in ind]
ind_right = [x + width * 2 for x in ind]
fig, ax = plt.subplots()
rects1 = ax.bar(ind_left, list1, width, color='b')
rects2 = ax.bar(ind_middle, list2, width, color='r')
rects3 = ax.bar(ind_right, list3, width, color='y')
# Put the year labels under the middle bar of each group.
ax.set_xticks(ind_middle)
ax.set_xticklabels(t, minor=False)
plt.legend()
plt.show()
| [
"raito244@gmail.com"
] | raito244@gmail.com |
25cc6bb5a430478db159df2bb42d75cbe1419b5b | 868756e273db24b49216876d6984cbbaa5d64443 | /OpenGLControl.py | 19f3f473cc39cc47a83a7bb83b2e2370011b215e | [] | no_license | Thewilf/RobotDesign | a31b3705d677ac0eb283c734b8ef6866a1e24c80 | 40e29b1be238db93a1ce11c7f262b0b1965cc9fd | refs/heads/master | 2020-03-17T17:34:34.678405 | 2018-05-17T09:57:13 | 2018-05-17T09:57:13 | 133,793,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,608 | py | from PyQt5 import QtCore, QtGui
from PyQt5 import QtOpenGL
from OpenGL import GLU
from OpenGL.GL import *
from numpy import array, arange
from STLFile import *
# from ConfigRobot import *
from GlobalFunc import *
class GLWidget(QtOpenGL.QGLWidget):
xRotationChanged = QtCore.pyqtSignal(int)
yRotationChanged = QtCore.pyqtSignal(int)
zRotationChanged = QtCore.pyqtSignal(int)
def __init__(self, parent=None, objRobot=None):
super(GLWidget, self).__init__(parent)
self.objRobot = objRobot
self.xRot = -2584
self.yRot = -512
self.zRot = 0.0
self.z_zoom = -3500
self.xTran = 0
self.yTran = 0
self.model0 = loader('STLFile/Link0.STL')
self.model1 = loader('STLFile/Link1.STL')
self.model2 = loader('STLFile/Link2.STL')
self.model3 = loader('STLFile/Link3.STL')
self.model4 = loader('STLFile/Link4.STL')
def setXRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.updateGL()
def setYRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# self.updateGL()
def setZRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.updateGL()
def setXYTranslate(self, dx, dy):
self.xTran += 3.0 * dx
self.yTran -= 3.0 * dy
self.updateGL()
def setZoom(self, zoom):
self.z_zoom = zoom
self.updateGL()
def updateJoint(self):
self.updateGL()
def initializeGL(self):
lightPos = (5.0, 5.0, 10.0, 1.0)
reflectance1 = (0.8, 0.1, 0.0, 1.0)
reflectance2 = (0.0, 0.8, 0.2, 1.0)
reflectance3 = (0.2, 0.2, 1.0, 1.0)
ambientLight = [0.7, 0.7, 0.7, 1.0]
diffuseLight = [0.7, 0.8, 0.8, 1.0]
specularLight = [0.4, 0.4, 0.4, 1.0]
positionLight = [20.0, 20.0, 20.0, 0.0]
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)
glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight)
glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, 1.0)
glLightfv(GL_LIGHT0, GL_POSITION, positionLight)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_DEPTH_TEST)
glEnable(GL_NORMALIZE)
glClearColor(0.0, 0.0, 0.0, 1.0)
def drawGL(self):
self.drawGrid()
self.setupColor([96.0 / 255, 96 / 255.0, 192.0 / 255])
self.model0.draw()
self.setupColor([169.0 / 255, 169.0 / 255, 169.0 / 255])
# Link1
glTranslatef(0.0, 0.0, self.objRobot.d[1]);
glRotatef(RadToDeg(self.objRobot.q[1]), 0.0, 0.0, 1.0)
glTranslatef(self.objRobot.a[1], 0.0, 0.0)
glRotatef(RadToDeg(self.objRobot.alpha[1]), 1.0, 0.0, 0.0);
self.model1.draw()
#Link2
# self.setupColor([90.0 / 255, 150.0 / 255, 9.0 / 255])
glTranslatef(0.0, 0.0, self.objRobot.d[2]);
glRotatef(RadToDeg(self.objRobot.q[2]), 0.0, 0.0, 1.0)
glTranslatef(self.objRobot.a[2], 0.0, 0.0)
glRotatef(RadToDeg(self.objRobot.alpha[2]), 1.0, 0.0, 0.0);
self.model2.draw()
#Link3
# self.setupColor([255.0 / 255, 255.0 / 255, 9.0 / 255])
glTranslatef(0.0, 0.0, self.objRobot.d[3]);
glRotatef(RadToDeg(self.objRobot.q[3]), 0.0, 0.0, 1.0)
glTranslatef(self.objRobot.a[3], 0.0, 0.0)
glRotatef(RadToDeg(self.objRobot.alpha[3]), 1.0, 0.0, 0.0);
self.model3.draw()
#Link4
self.setupColor([0 / 255, 0 / 255, 255 / 255])
glTranslatef(0.0, 0.0, self.objRobot.d[4]);
glRotatef(RadToDeg(self.objRobot.q[4]), 0.0, 0.0, 1.0)
glTranslatef(self.objRobot.a[4], 0.0, 0.0)
glRotatef(RadToDeg(self.objRobot.alpha[4]), 1.0, 0.0, 0.0);
self.model4.draw()
def paintGL(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glPushMatrix()
glTranslate(0, 0, self.z_zoom)
glTranslate(self.xTran, self.yTran, 0)
glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
glRotated(+90.0, 1.0, 0.0, 0.0)
self.drawGL()
glPopMatrix()
def resizeGL(self, width, height):
side = min(width, height)
if side < 0:
return
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
GLU.gluPerspective(35.0, width / float(height), 1.0, 20000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslated(0.0, 0.0, -40.0)
def mousePressEvent(self, event):
self.lastPos = event.pos()
def drawGrid(self):
glPushMatrix()
color = [0.0, 1.0, 1.0]
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color);
step = 50
num = 15
for i in arange(-num, num+1):
glBegin(GL_LINES)
glVertex3f(i*step, -num * step, 0)
glVertex3f(i*step, num*step, 0)
glVertex3f(-num * step, i*step, 0)
glVertex3f(num*step, i*step, 0)
glEnd()
glPopMatrix()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & QtCore.Qt.LeftButton:
self.setXRotation(self.xRot + 4 * dy)
self.setYRotation(self.yRot - 4 * dx)
elif event.buttons() & QtCore.Qt.RightButton:
self.setZoom(self.z_zoom + 5.0*dy)
elif event.buttons() & QtCore.Qt.MidButton:
self.setXYTranslate(dx, dy)
self.lastPos = event.pos()
def setupColor(self, color):
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color);
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def normalizeAngle(self, angle):
while (angle < 0):
angle += 360 * 16
while (angle > 360 * 16):
angle -= 360 * 16 | [
"trantuanvu96@gmail.com"
] | trantuanvu96@gmail.com |
e1bbf8182e588f5f52ffb8cadb97eaa892c613ba | 1e263d605d4eaf0fd20f90dd2aa4174574e3ebce | /plugins/support-acl/setup.py | f1476d2d80a32ac08318c6525eb595d35a343679 | [] | no_license | galiminus/my_liveblog | 698f67174753ff30f8c9590935d6562a79ad2cbf | 550aa1d0a58fc30aa9faccbfd24c79a0ceb83352 | refs/heads/master | 2021-05-26T20:03:13.506295 | 2013-04-23T09:57:53 | 2013-04-23T09:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | '''
Created on June 14, 2012
@package: support acl
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
'''
# --------------------------------------------------------------------
from setuptools import setup, find_packages
# --------------------------------------------------------------------
setup(
name="support_acl",
version="1.0",
packages=find_packages(),
install_requires=['ally_core >= 1.0'],
platforms=['all'],
zip_safe=True,
# metadata for upload to PyPI
author="Gabriel Nistor",
author_email="gabriel.nistor@sourcefabric.org",
description="Support for acl",
long_description='Support for acl definitions',
license="GPL v3",
keywords="Ally REST framework support acl plugin",
url="http://www.sourcefabric.org/en/superdesk/", # project home page
)
| [
"etienne@spillemaeker.com"
] | etienne@spillemaeker.com |
8c0dfa2b18581cc492049ac281d6762985bf95f8 | 38a4b0b758125419b21ea417530bd418c5eeb0d1 | /Homework/Barker HW4-2.py | bff7894f6c2ab84939f7169fb608e58e411a08ae | [] | no_license | acbarker19/PHY299-Class-Assignments | 4c12146bf83ac3737748dc25587a720ac2a61da8 | d17d445e70c8228c247f92c772bb69471e69024f | refs/heads/master | 2021-07-03T04:32:57.555060 | 2021-05-18T01:59:59 | 2021-05-18T01:59:59 | 236,010,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # HW 4-2
# Alec Barker
import math
# P2.5.8
n = 10000
p = 2
prime_nums = list(range(1, n + 1))
while p <= n / 2:
for m in range(2, math.ceil(n / p) + 1):
try:
prime_nums.remove(m * p)
except:
pass
for num in prime_nums:
if num > p:
p = num
break
print(prime_nums) | [
"acbarker19@gmail.com"
] | acbarker19@gmail.com |
1ce8bf6c370887cc8f954363447c62a185ac05c7 | 8dc6a81bf8316f05731a990f73bfd838763a886f | /Capitulos/Cap_02/app.py | 81cca32fcebca2cc740725e4867932004bef137a | [] | no_license | riff-imaginario/Studying_Livro-Intro_VC_com_Python_e_OpenCV | c9558d0ab90ec0744d36494350780c7daab91c5e | e84e1a3b860f441d30ce220372dad1553a41a3d5 | refs/heads/master | 2020-07-07T00:30:00.966071 | 2019-08-21T14:55:36 | 2019-08-21T14:55:36 | 203,186,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import cv2
img = cv2.imread('../../_img/terminator_hand.jpg')
for i in range(0, img.shape[0], 1):
for j in range(0, img.shape[1], 1):
img[i, j] = (0, (i * j) % 256, 0)
cv2.imshow('Mod', img)
cv2.waitKey(0) | [
"lucasn@spi.local"
] | lucasn@spi.local |
d5457b11c1175531559679deb03ace99c49aae45 | 0d2e11835cb1a7548b9d767743e3d746b17af715 | /return_as_a_break.py | 51337567c5d64e4b87ed341cf6f7c3315d7d4b52 | [] | no_license | Archanaaarya/function | f588142e953f8126bb373022fa788369002bf85f | f18d78aa8a4a3ed36ae24fe0816eda2897e4de55 | refs/heads/main | 2023-07-03T11:23:43.234532 | 2021-08-13T10:21:55 | 2021-08-13T10:21:55 | 389,851,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def add_numbers_more(number_x, number_y):
number_sum = number_x + number_y
print ("Hello from NavGurukul ;)")
return number_sum
number_sum = number_x + number_x
print ("Kya main yahan tak pahunchunga?")
return number_sum
sum6 = add_numbers_more(100, 20) | [
"swati.Singh@simplifyvms.com"
] | swati.Singh@simplifyvms.com |
9795733fcec86d243b19960e37b75bee838be0fb | a3a3c2fe68a9744872613c43899d92b9099ae2f3 | /python.py | 5a38cd46baa21f770590710763db51a6fb7f0246 | [] | no_license | daysifebles/machine_learning | 3e24ee47de87c273b3820eb1c3f833b812a89d0c | 7ce34a88f618f4d923fe61a7adcef3780d234e66 | refs/heads/master | 2020-12-22T14:34:10.357359 | 2020-03-05T18:56:36 | 2020-03-05T18:56:36 | 236,825,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,551 | py | # -*- coding: utf-8 -*-
"""Untitled1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-jast8_Z1PK_gvUMnwOc3k784BYUXlj9
# Listas
valores o items separados por comas entre corchetes. Puede contener items de diferentes tipos, pero usualmente todos sus items son del mismo tipo.
"""
squares = [1,4,9,16,25]
squares
#acceder a los elementos de la lista
squares[0] #valor en la primera posición
squares[3]
squares[-1] #valor de la última posición
squares[-3:] #corta la lista y retorna una nueva lista
squares[:] #retorna una copia de la lista original
# Concatenación
squares + [36,49,64,81,100]
#Cambiar un valor o varios
squares[1] = 16
squares
squares[2:4] = [1,0] # no toma en cuenta la última posición
squares
#remover un elemento o todos los elementos de la lista
squares[0:1] = []
squares
squares[:] = []
squares
lista = [1,'dos',3.2,5]
lista
"""Es posible crear listas que contienen otras listas"""
a = ['a', 'b', 'c']
n = [1, 2, 3]
x = [a, n]
x
x[0] #acceder a la primera lista
x[0][1] #acceder a un elemento de la primera lista
"""### `append(x)`
Agregar un item al final de la lista.
"""
lista.append('x')
lista
#equivalente a lista[len(lista):]=['x']
"""### `extend(iterable)`
Extiende la lista agregando todos los elementos que se le agregen en el argumento iterable.
"""
lista.extend([1,2])
lista
#equivalente a lista[len(lista):]=['x']
"""### `insert(i,x)`
Inserta un item en la posición dada. El primer argumento corresponde al índice del elemento donde se va a insertar el item.
"""
lista.insert(1,'a')
lista
#lista.insert(len(lista),'a') es equivalente a lista.append('a')
"""### `remove(x)`
Eliminia el primer item de la lista que es igual a el argumento x, y devuelve `ValueError` si no hay items iguales a dicho valor.
"""
lista.remove('a')
lista
"""### `pop(i)`
Elimina el item de la posición dada y devuelve dicho valor. Si no se específica la posición elimina y devuelve el último item de la lista.
"""
lista.pop(2)
lista
"""### `clear()`
Elimina todos los items de la lista.
"""
lista.clear()
lista
#equivalente a del lista[:]
"""### `index(x[,start[,end]])`
Devuelve el índice basado en cero de la lista cuyo primer valor es igual a x, devuelve un ValueError si no se encutra dicho valor. Los argumentos start y end son opcionales y se usan para limitar la busqueda en una subsecuencia de la lista.
"""
lista = [1,'dos',3.2,5]
lista.index('dos')
"""### `count(x)`
Revuelve el número de veces que se encuentra el elemento x en la lista.
"""
lista.count('dos')
"""### `sort(key=None, reverse=False)`
ordena los items de la lista.
"""
lista1 = [1,5,8,9,63.2,52.1,47.2]
lista1.sort()
lista1
"""### `reverse()`
Devuelve los elementos de la lista en forma reversa.
"""
lista.reverse()
lista
"""### `copy()`
Devuelve una copia superficial de la lista.
"""
lista2 = lista.copy()
lista2
"""### `len()`
Retorna el tamaño de la lista.
"""
len(lista)
"""### `max()` and `min()`
Retorna el valor máximo y mínimo respectivamente de una lista númerica.
"""
max(n)
min(n)
"""### Ejemplos con listas"""
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
fruits
#cuántas veces aparece 'apple en la lista'
fruits.count('apple')
#En qué indice se encuentra 'banana'
fruits.index('banana')
#Encuentre el indice después de la posicion 4 que se encuentre 'banana'
fruits.index('banana',4)
#Colocar en forma reversa la lista fruits
fruits.reverse()
fruits
#Agregar en la última posición 'grape'
fruits.append('grape')
fruits
#Ordenar los elementos de la lista
fruits.sort()
fruits
#Eliminar el último item de la lista fruits
fruits.pop()
fruits
stack = [3, 4, 5]
#Agregar el número 6
stack.append(6)
#Agregar el número 7
stack.append(7)
stack
stack.pop()
stack.pop()
stack.pop()
stack
"""## Compresión de Listas
Proporcionan un camino conciso para crear listas. Los elementos de la lista son el resultado de alguna operación aplicada de otra secuencia o iteración, o para crear una secuencia de esos elementos que satisfacen una cierta condición.
"""
cuadrados = []
for x in range(10):
cuadrados.append(x**2)
cuadrados
cuadrado = list((map(lambda x: x**2, range(10))))
cuadrado
cuad = [x**2 for x in range(10)]
cuad
[(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]
vec = [-4, -2, 0, 2, 4]
[x*2 for x in vec]
"""# Expresión `lambda`
Puede tener cualquier número de argumentos, pero solamente puede tener una expresión.
`lambda argumentos : expresión`
"""
x = lambda a : a+10
x(5)
y = lambda a, b : a * b
y(2,5)
(lambda x: x + 1)(2)
"""# Funciones
Como ejemplo se creara la función de Fibonacci, recordemos que es una serie que comienza con 0 y 1 y los números siguientes son la suma de los dos anteriores.
0, 1, 1, 2, 3, 5, 8, 13, 21 ...
"""
#n será el número de parada
def fibonacci(n):
a,b =0,1 #valores iniciales de la serie de fibonacci
while a <= n:
print(a, end=' ')
a, b = b, a+b
fibonacci(2000)
#vamos a retornar la serie en una lista
def fibonacci_lista(n):
resultado = [] #inicializar la lista
a,b =0,1 #valores iniciales de la serie de fibonacci
while a <= n:
resultado.append(a)
# resultado = resultado + [a] # es equivalente a lo anterior
a, b = b, a+b
return resultado
fibonacci_lista(2000)
"""# Diccionario
Son memorias o matrices asociativas. Son indexados por llaves (keys).
"""
#diccionario = { clave1 : valor1, clave2 : valor2, ...}
tel = { 'jack' : 4098, 'sape' : 4139}
#Agregar una clave nueva
tel['guido'] = 2147
tel['irv'] = 4127
tel
# El valor de una clave
tel['jack']
#Borrar una key:value
del tel['sape']
tel
#Para retornar todas las llaves, keys o claves
list(tel)
sorted(tel) # este también los ordena
#para verificar si una key está en el diccionario
'guido' in tel
'jack' not in tel
# otra forma de definir un diccionario, por pares de keys-value
dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
{x: x**2 for x in (2, 4, 6)}
dict(sape=4139, guido=4127, jack=4098)
"""# Grupos, conjuntos o sets
Un conjunto es una colección desordenada sin elementos duplicados. Soportan operaciones matemáticas como unión, intersección, diferencia y diferencia simétrica.
"""
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
basket
a = {x for x in 'abracadabra' if x not in 'abc'}
a
# para crear un conjunto vacio se debe usar set() no {} porque crea un diccionario bacio.
# ver si un elemento está en el conjunto
'orange' in basket
#Operaciones entre conjuntos
x = set('abracadabra')
y = set('alacazam')
# ver los elementos unicos de un conjunto
x
#Diferencia entre conjuntos
x-y #están en x pero no en y
#Unión de conjuntos
x | y
#Intersección de conjuntos
x & y
#Elementos de ambos conjuntos menos los comunes entre ellos
x ^ y
"""# Ciclo `for`
Ietración sobre una progresión aritmética de números, tiene la capacidad de definir el paso de iteración como la condición de parada.
Itera sobre los elementos de una secuencia, que puede ser cadena, lista o tupla, en el orden que aparecen.
"""
for w in [1,2,3]:
print(w, 'hi')
words = ['cat','window','defenestrate']
for w in words:
print(w, len(w))
for item in basket:
print(item)
for i in range(0,3):
print(i)
for a,b in [(1,2),(3,4)]:
print(a,b+a)
"""# Declaración `if`
Retorna un valor si se cumple una condición o otro si este no se cumple.
Dentro de las condiciones podemos encontrar:
`>=` mayor igual.
`<=` menor igual.
`==` igual.
`!=` diferente.
`and` Y, ambas condiciones se cumplen.
`or` O, suficiente que una se cumpla.
"""
if 3<2 :
print('hello')
else:
print('la condición no es verdadera')
age = 29
if age<13:
print('you are young')
elif age>=13 and age<18 : #else if
print('you are teenager')
else:
print('you are adult')
"""# Formateo de Cadenas
Cuando a una cadena queremos agregar un número y queremos que retorne una cadena. Para que el número se convierta en tipo cada se unas `%`.
`%s` denota una cadena. Si dicho valor no es una cadena este automaticamente lo cambia con la función `str()`.
`%d` denota un entero de base 10.
`%f` un número flotante. `%.2f` indica que son dos números después de la coma, dos decimales.
`%%` si se quiere incluir el `%` en el texto.
`%x` números hexadecimales.
"""
import math
print('The value of pi is approximately %5.3f.' % math.pi)
#Otra forma de hacerlo usando str()
nombre='Juany'
edad=20
"Tu nombre es " + nombre + " y tienes " + str(edad) + " años."
"Tu nombre es %s" % nombre
"""# Tipos de Datos
## Números
Se pueden realizar operaciones con números igual que una calculadora. Las operaciones con las que trabaja son `+`, `-`, `*`, `**`, `/`, `//` y `%`.
"""
2+2
50 -5*7
(60+7*9)/4
8/4
17//3 #descarta la parte decimal
17%3 # devuelve el residuo
5 ** 2 # potencia de un número
5**(1/2)
math.sqrt(5) #raíz cuadrada
3+5J #números complejos
str(25) #convertir números en string
"""## Strings, cadena de carácter
Se colocan entre `'...'` o `"..."`.
"""
'spam eggs'
'doesn\'t' # para imprimir '
"doesn't"
'"Yes," they said.'
"\"Yes,\" they said." #para imprimir las comillas
'"Isn\'t," they said.'
#Usando la función print() para mostrar caracteres
print('"Isn\'t," they said.')
s = 'spam eggs\n hola' #\n salto de línea
s
print(s)
print('C:\some\name') # muestra el salto de linea de \n
print(r'C:\some\name') #no toma en cuenta el salto de línea
#Para mostrar multiples líneas con print(), se usa """... """ o '''... '''
print("""\
Usage: thingy [OPTIONS]
-h Display this usage message
-H hostname Hostname to connect to
""")
3 * 'un' + 'ium' #se concatena con + y se replica con *
'Py' 'thon' # se concatenan
#Para concatenar texto guardado en variables se debe hacer con +
prefix = 'Pyt'
prefix + 'hon'
#acceder a los elementos de una cadena
prefix[0]
prefix[1]
prefix[2]
prefix[-1] #último carácter
prefix[0:2] #no toma la última posición
prefix[:2]
prefix[2:]
prefix[-2:]
# Las cadenas de caracteres en python no son inmutables, es decir no se pueden cambiar.
#prefix[0] = 'K'
len(prefix) #tamaño de la cadena
"""## float"""
float(10)
float(11.22)
float('-13.33') #convierte un carácter en un flotante
float(" -24.45\n")
#float("abc") #no se puede convertir en un flotante
"""## bool
Retorna falso si el valor que se omite es falso, y verdadero si dicho valor es verdadero
"""
#Los elementos nulos o vacíos se condideran False
bool(0)
bool(0.0)
bool("")
bool([])
bool({})
#El resto se consideran True
bool(25)
bool('abc')
bool((1,2,3))
bool([27, "octubre", 1997])
#Operadores lógicos
## and
True and True #True
True and False #False
False and True #False
False and False #True
## or (sólo puede darse una de las dos alternativas)
True or True #True
True or False #True
False or True #True
False or False #False
## Negación
not True # False
not False # True
4 == 3 + 1 and 3 + 1 > 2
"""Función `type()` paa conocer que tipo de dato es el objeto que pasamos dentro de ella."""
type(10.0) # Float
type(10) # Entero
type('a') # Carácter
"""# Declaración `pass`
No hace nada, se usa cuando la declaración se requiere sistematicamente pero el programa no requiere una acción.
"""
for letter in 'Python':
if letter == 'h':
pass
print('This is pass block')
print('Current Letter :', letter)
"""# Para saber la versión de Python"""
#Para saber la versión de Python que usas
import sys
print(sys.version) | [
"daysilorenafeblesr@gmail.com"
] | daysilorenafeblesr@gmail.com |
9ef335479513bfd0e95172dcbc515989e2208930 | 505ce732deb60c4cb488c32d10937a5faf386dce | /di_website/place/migrations/0006_auto_20190819_0751.py | ae04259a02373cbe7e974ab6654f35a3725dfcff | [] | no_license | davidebukali/di_web_test | cbdbb92b2d54c46771b067a24480e6699f976a15 | a826817a553d035140bb8b6768f3fd2b451199d8 | refs/heads/develop | 2023-02-11T13:21:26.281899 | 2021-01-08T04:37:34 | 2021-01-08T04:37:34 | 319,560,677 | 0 | 0 | null | 2021-01-08T04:37:35 | 2020-12-08T07:30:51 | HTML | UTF-8 | Python | false | false | 3,476 | py | # Generated by Django 2.2.3 on 2019-08-19 07:51
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('place', '0005_auto_20190814_1527'),
]
operations = [
migrations.AlterField(
model_name='placespage',
name='body',
field=wagtail.core.fields.StreamField([('paragraph_block', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('section_paragraph_block', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'])), ('center', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock())])), ('section_block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock()), ('center', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('banner_block', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-s15', required=False, template='blocks/embed_block.html')), ('text', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.TextBlock(template='blocks/banner/text.html')), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.TextBlock()), ('content', wagtail.core.blocks.TextBlock(required=False))], template='blocks/banner/list_item.html'), template='blocks/banner/list.html'))])), ('meta', wagtail.core.blocks.CharBlock(help_text='Anything from a name, location e.t.c - usually to provide credit for the text', required=False)), ('buttons', wagtail.core.blocks.StreamBlock([('button', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('document_box', wagtail.core.blocks.StructBlock([('document_box_heading', 
wagtail.core.blocks.CharBlock(icon='title', required=False)), ('documents', wagtail.core.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.core.blocks.BooleanBlock(default=False, help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('media_orientation', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')], required=False))])), ('button_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))])), ('link_block', wagtail.core.blocks.StructBlock([('caption', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False))]))], blank=True, null=True, verbose_name='Page Body'),
),
]
| [
"edwinm_p@yahoo.com"
] | edwinm_p@yahoo.com |
7221e205c04cbe5aa4f8912b8546d5695d5db384 | fbb94335807c55c154ed6c5c5c308a7106d5e0ce | /example/crawl/crawl_image.py | b2eefd955f9b4e386838fb11281d5e4cfcce5796 | [] | no_license | white5168/crawl_course | 5e6342b6684f23f2261ae1a3767e8d20ab915aad | a69c63003b156380aac7d6f579b624fd4f524f81 | refs/heads/master | 2021-05-01T21:51:21.131065 | 2017-12-08T17:25:47 | 2017-12-08T17:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
url = 'https://www.thsrc.com.tw/tw/TimeTable/SearchResult'
response = requests.get(url)
response.encoding = 'utf-8'
soup = BeautifulSoup(response.text, 'html.parser')
#print(soup)
all_imgs = soup.find_all('img')
for index, img in enumerate(all_imgs):
if index!=0:
print(img['src'])
print('https://www.thsrc.com.tw'+img['src'])
urlretrieve('https://www.thsrc.com.tw'+img['src'], img['src'].split('/')[-1]) | [
"isaac60103@gmail.com"
] | isaac60103@gmail.com |
2e8b12f1688ccdc4e2dbffa82c03704c1569082b | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/messenger/gui/scaleform/channels/bw/__init__.py | baf94f407f20cf5ee2e4f40fcc8f9542bff22cb2 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 492 | py | # 2016.08.04 19:54:00 Střední Evropa (letní čas)
# Embedded file name: scripts/client/messenger/gui/Scaleform/channels/bw/__init__.py
from messenger.gui.Scaleform.channels.bw.factories import LobbyControllersFactory
__all__ = 'LobbyControllersFactory'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\gui\scaleform\channels\bw\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:54:00 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
f26b0a2349aecb9851bb1aaa098c6fda0e89cbcc | 457822b1eb6694d7291b546d77286269c272e1d9 | /team/views.py | a9e31d3491951dfa6909bd448362ead3524b204a | [] | no_license | sanujsood/Zeal | ccdf56a1a7ab065b407f8ba240feea277df3a5eb | 71b5f8a3bd8660cbf6d254b15a71912ca69b4c8c | refs/heads/master | 2022-12-23T03:57:05.489968 | 2020-10-05T17:28:16 | 2020-10-05T17:28:16 | 295,974,229 | 0 | 3 | null | 2020-10-05T17:28:18 | 2020-09-16T08:39:49 | HTML | UTF-8 | Python | false | false | 569 | py | from django.shortcuts import render
from .models import Team
# Create your views here.
def about(request):
member = Team.objects.filter(status = 'member')
dev = Team.objects.filter(status = 'developer')
coordinator = Team.objects.filter(status = 'cord')
faculty = Team.objects.filter(status = 'fac')
context = {'member':member,
'dev':dev,
'coordinator':coordinator,
'faculty':faculty,
}
return render(request,'about.html',context)
| [
"matharooamrit098@gmail.com"
] | matharooamrit098@gmail.com |
9c15ddb27367f839bac38d79e303f8b8a4bc83f6 | 73e47b80dbd1b72543c2090e479d547a19cbf3e0 | /res_search_firstversion.py | fa5e5c0535551fe14cfd92b936614ea3f46885ee | [
"MIT"
] | permissive | K4CZP3R/vita-resolutionchanger | c7bc7ecbe4a6ad9a06c3926ba295e8974682be8c | 08f238c97f8ef42a656bdfa6617d2beb3940978d | refs/heads/master | 2020-03-21T23:45:30.456687 | 2018-06-30T12:31:46 | 2018-06-30T12:31:46 | 139,205,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | #First version of script, don't use it. Just use startme.py
from time import sleep, time
from colorama import Fore, Back, init
import os
hardcoded_filepath="jak3_eboot.txt"
hardcoded_sf1=""
hardcoded_sf2="#0x280"
hardcoded_sf3="#0x170"
hardcoded_maxspc=100
#define for later
filepath=""
maxspc=0
sf1=""
sf2=""
sf3=""
line_normalColor=Fore.GREEN
line_foundColor=Fore.RED
line_color=line_normalColor
locationa=0
valuea=""
locationb=0
valueb=""
info=""
count=0
closeList=list()
#on start clean and init of colorama
os.system("cls")
init(autoreset=True)
def askVars():
global filepath, maxspc,sf1, sf2, sf3
filepath=input("Filepath ({}): ".format(hardcoded_filepath))
if(len(filepath) == 0):
filepath=hardcoded_filepath
maxspc=input("Max space between lines ({}): ".format(str(hardcoded_maxspc)))
if(len(maxspc)==0):
maxspc=hardcoded_maxspc
sf1=input("Search filter 1 [function] ({}): ".format(hardcoded_sf1))
if(len(sf1)== 0):
sf1=hardcoded_sf1
sf2=input("Search filter 2 [value] ({}): ".format(hardcoded_sf2))
if(len(sf2)== 0):
sf2=hardcoded_sf2
sf3=input("Search filter 3 [value] ({}): ".format(hardcoded_sf3))
if(len(sf3)== 0):
sf3=hardcoded_sf3
def showIntro():
print("Input file needs to be output of prxtool")
print("In search filter 1 choose asm function (mov)")
print("In another search filters enter resolution x and y (example: #0x198 or 408)")
def showSummary():
print("Program will search in {0} using following filters {1},{2},{3} | max space: {4} \n{5}Default Color {6} Found Color".format(filepath,sf1,sf2,sf3,maxspc,line_normalColor,line_foundColor))
def showResults():
print(*closeList,sep='\n')
showIntro()
askVars()
showSummary()
print("Opening... ({})".format(filepath))
f = open(filepath)
print("Counting lines... (might take a while)")
f_lines = len(f.readlines())
print("File contains {0} lines".format(str(f_lines)))
f.close()
f=open(filepath)
skiplines = input("Want to skip lines? n/lines: ")
if("n" not in skiplines):
print("Skipping...")
for x in range(0, int(skiplines)):
f.readline()
print("Skipped {0} lines".format(skiplines))
start_time=time()
while count<f_lines:
line=f.readline().strip('\n')
if sf1 in line:
if sf2 in line or sf3 in line:
locationa=count
valuea=line
if((locationa-locationb)<maxspc):
line_color=line_foundColor
if sf2 in valuea and sf2 in valueb:
info="{0} copy".format(sf2)
elif sf3 in valuea and sf3 in valueb:
info="{0} copy".format(sf3)
else:
closeList.append("===\nvaluea:{0} [@{1}] \nvalueb:{2} [@{3}]\n".format(str(valuea),str(locationa),str(valueb),str(locationb)))
else:
info=""
line_color=line_normalColor
print("{0}{1} * [delta:{2}, line:{3}/{5}] {4}*".format(line_color,line,str(locationa-locationb),str(count),str(info),str(f_lines-count)))
locationb=locationa
valueb=valuea
count=count+1
end_time=time()-start_time
print("Took {0}s".format(str(end_time)))
showResults()
| [
"k4czp3r.dev@gmail.com"
] | k4czp3r.dev@gmail.com |
1386cbf1e3d239b1300180618860c3be4f8d3292 | 78a08e7d67c6c6426afb5e1fb69b30fb750d13e5 | /escuela/migrations/0003_auto_20170405_1905.py | 36aafc9d3437d8c8bfcd6b69cf340d0174879476 | [] | no_license | misaelnieto/DjangoAPIREST-Demo | e79a0ae40be49fde6a7ede36cb39c919d241f8c9 | 6feb65b7536b6f25492a07780249c608e9242ff3 | refs/heads/master | 2021-01-19T04:12:54.104118 | 2017-04-05T22:15:10 | 2017-04-05T22:15:10 | 87,358,003 | 0 | 0 | null | 2017-04-05T21:25:48 | 2017-04-05T21:25:48 | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-05 19:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('escuela', '0002_auto_20170405_1803'),
]
operations = [
migrations.AlterField(
model_name='alumno',
name='carrera',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='escuela.Carrera'),
),
]
| [
"atempaitm@gmail.com"
] | atempaitm@gmail.com |
f904268c0d3fb608dd430ec403fa447f1854a32a | fbac580a7d47d2d9d22221df76143c6f6364ca8a | /Test1/venv/bin/cythonize | 811f297235946918a91b9f81eb925b75e4597e70 | [] | no_license | duyenthaind/crawl-data-from-web | 64aeb72b8e2eb01749e47f60d8240a26a8d063e7 | 9049221f1021c5cf8ef1a26d9906bc886b608820 | refs/heads/master | 2022-12-07T04:36:56.542347 | 2020-08-18T11:26:09 | 2020-08-18T12:03:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | #!/Users/kieuduykhanh/Desktop/github/crawl-data-from-web/Test1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from Cython.Build.Cythonize import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kieuduykhanh@MacBook-Pro-cua-Kieu.local"
] | kieuduykhanh@MacBook-Pro-cua-Kieu.local | |
f5414a7b48746434a7782d7536d9dbd9b7408df0 | a5e71a333a86476b9cb1bdf6989bb5f47dd5e409 | /ScrapePlugins/M/MangaStreamLoader/ContentLoader.py | a34c254432bb1bbfe774256e4929aa4e787bc847 | [] | no_license | GDXN/MangaCMS | 0e797299f12c48986fda5f2e7de448c2934a62bd | 56be0e2e9a439151ae5302b3e6ceddc7868d8942 | refs/heads/master | 2021-01-18T11:40:51.993195 | 2017-07-22T12:55:32 | 2017-07-22T12:55:32 | 21,105,690 | 6 | 1 | null | 2017-07-22T12:55:33 | 2014-06-22T21:13:19 | Python | UTF-8 | Python | false | false | 4,185 | py |
import logSetup
import runStatus
import bs4
import nameTools as nt
import os
import os.path
import processDownload
import ScrapePlugins.RetreivalBase
import settings
import traceback
import urllib.parse
import webFunctions
import zipfile
class ContentLoader(ScrapePlugins.RetreivalBase.RetreivalBase):
loggerPath = "Main.Manga.Ms.Cl"
pluginName = "MangaStream.com Content Retreiver"
tableKey = "ms"
dbName = settings.DATABASE_DB_NAME
tableName = "MangaItems"
wg = webFunctions.WebGetRobust(logPath=loggerPath+".Web")
retreivalThreads = 1
def getImage(self, imageUrl, referrer):
if imageUrl.startswith("//"):
imageUrl = "http:" + imageUrl
content, handle = self.wg.getpage(imageUrl, returnMultiple=True, addlHeaders={'Referer': referrer})
if not content or not handle:
raise ValueError("Failed to retreive image from page '%s'!" % referrer)
fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1])
fileN = bs4.UnicodeDammit(fileN).unicode_markup
self.log.info("retreived image '%s' with a size of %0.3f K", fileN, len(content)/1000.0)
return fileN, content
def getImageUrls(self, baseUrl):
pages = set()
nextUrl = baseUrl
chapBase = baseUrl.rstrip('0123456789.')
imnum = 1
while 1:
soup = self.wg.getSoup(nextUrl)
imageDiv = soup.find('div', class_='page')
if not imageDiv.a:
raise ValueError("Could not find imageDiv?")
pages.add((imnum, imageDiv.img['src'], nextUrl))
nextUrl = imageDiv.a['href']
if not chapBase in nextUrl:
break
imnum += 1
self.log.info("Found %s pages", len(pages))
return pages
def getLink(self, link):
sourceUrl = link["sourceUrl"]
seriesName = link["seriesName"]
chapterVol = link["originName"]
try:
self.log.info( "Should retreive url - %s", sourceUrl)
self.updateDbEntry(sourceUrl, dlState=1)
imageUrls = self.getImageUrls(sourceUrl)
if not imageUrls:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Page not found - 404")
self.updateDbEntry(sourceUrl, dlState=-1)
return
self.log.info("Downloading = '%s', '%s' ('%s images)", seriesName, chapterVol, len(imageUrls))
dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
if link["flags"] == None:
link["flags"] = ""
if newDir:
self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
chapterName = nt.makeFilenameSafe(chapterVol)
fqFName = os.path.join(dlPath, chapterName+" [MangaStream.com].zip")
loop = 1
while os.path.exists(fqFName):
fqFName, ext = os.path.splitext(fqFName)
fqFName = "%s (%d)%s" % (fqFName, loop, ext)
loop += 1
self.log.info("Saving to archive = %s", fqFName)
images = []
for imgNum, imgUrl, referrerUrl in imageUrls:
imageName, imageContent = self.getImage(imgUrl, referrerUrl)
images.append([imgNum, imageName, imageContent])
if not runStatus.run:
self.log.info( "Breaking due to exit flag being set")
self.updateDbEntry(sourceUrl, dlState=0)
return
self.log.info("Creating archive with %s images", len(images))
if not images:
self.updateDbEntry(sourceUrl, dlState=-1, seriesName=seriesName, originName=chapterVol, tags="error-404")
return
#Write all downloaded files to the archive.
arch = zipfile.ZipFile(fqFName, "w")
for imgNum, imageName, imageContent in images:
arch.writestr("{:03} - {}".format(imgNum, imageName), imageContent)
arch.close()
dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, rowId=link['dbId'])
self.log.info( "Done")
filePath, fileName = os.path.split(fqFName)
self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, seriesName=seriesName, originName=chapterVol, tags=dedupState)
return
except Exception:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Traceback = %s", traceback.format_exc())
self.updateDbEntry(sourceUrl, dlState=-1)
if __name__ == '__main__':
import utilities.testBase as tb
with tb.testSetup():
cl = ContentLoader()
cl.do_fetch_content()
| [
"something@fake-url.com"
] | something@fake-url.com |
f8ea189c44059b460e306561366d19b6465f2e32 | d660ccb43c3d0441ad0ef277e43245e780a562ec | /microcontroller/src-atmel/automatization2.0/candpy/trunk/candpy.py | 0ce2f5ca331ebfb4aad98848fd2783f6ebe499c4 | [] | no_license | astaliarou/legacy | 0d83670a3e8a5f1a408cc28858321bfbbd4f868c | f4e6ab9e4c2aef6ffa20e0028084aa584c2c968b | refs/heads/master | 2022-05-21T13:37:27.882589 | 2020-02-11T19:31:37 | 2020-02-11T19:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,810 | py | #!/usr/bin/env python
# for commandline parsing
import optparse
# for sleeping
from time import sleep
# everything is handled via plugins
from core.peak.util import plugins
# in general two components are needed to activate a plugin
# the first component is the parsing and providing of commandline args
# the second component is the starting of the plugin with the actual
# commandline args
#from plugins.serialcon.SerialPlugin import serialinit, serialargs
from core.LoggingPlugin import fileloggerargsrs232pkg
from core.LoggingPlugin import fileloggerinitrs232pkg
from core.LoggingPlugin import fileloggerrs232pkgread
from core.LoggingPlugin import fileloggerrs232pkgwrite
#from plugins.rawserver.RawTCPClientPlugin import rawtcpclientinit
#from plugins.rawserver.RawTCPClientPlugin import rawtcpclientargs
from plugins.tcpclient import *
from plugins.tcpserver import *
from packets.events import *
from core import *
if __name__ == '__main__':
"""
The main candpy application. It's basicaly a routing between the main plugins.
The Plugins itself can also do some extra routing
"""
parser = optparse.OptionParser(
usage="%prog [options] [port [baudrate]]",
description="Labor Cand in python ",
epilog="""\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
""")
parser.add_option("-q", "--quiet",
dest="quiet",
action="store_true",
help="suppress non error messages",
default=False
)
# get port and baud rate from command line arguments or the option switches
### registering rs232readplugins here
def dummyhook(data):
#print "dummyhook was called"
#print data
pass
# create a hook for the event if something appears on the
# serial device
rs232readplugins = plugins.Hook('daslabor.cand.rs232.read')
# send the data to plugins which can perform the data
# rs232readplugins.register(dummyhook, 'localparser')
# rs232readplugins.register(dummyhook, 'remoterawclient')
rs232readplugins.register(fileloggerrs232pkgread, 'logger')
### register raw input
rawinputreadplugins = plugins.Hook('daslabor.cand.rawtcpclient.read')
### register cmdargs plugins
# every plugin maybe needs some commandlinearguments
# so we pass the global optparser to them
argsplugins = plugins.Hook('daslabor.cand.cmdargs')
#argsplugins.register(serialargs, 'serial')
argsplugins.register(fileloggerargsrs232pkg, 'logger')
#argsplugins.register(rawtcpclientargs, 'rawtcpclient')
argsplugins.register(tcpClientArgs, 'rawtcpclient')
argsplugins.register(localparserargs, 'localparser')
argsplugins.register(caneventargs, 'canevent')
### register init plugins
# here the actual plugins are startet with the
# parsed commandlinearguments
initplugins = plugins.Hook('daslabor.cand.init')
#initplugins.register(serialinit, 'serial')
initplugins.register(fileloggerinitrs232pkg, 'logger')
#initplugins.register(rawtcpclientinit, 'rawtcpclient')
initplugins.register(tcpClientInit, 'rawtcpclient')
initplugins.register(localparserinit, 'localparser')
initplugins.register(caneventinit, 'canevent')
### register write plugins
wrplugin = plugins.Hook('daslabor.cand.rs232.write')
wrplugin.register(fileloggerrs232pkgwrite, 'logger')
### notify about init
plugins.Hook('daslabor.cand.cmdargs').notify([parser, plugins])
### parse commandline args and inform plugins
(options, args) = parser.parse_args()
plugins.Hook('daslabor.cand.init').notify([options, args, parser, plugins])
try:
while True:
print "mainloop"
sleep(10)
except KeyboardInterrupt:
pass
| [
"asklepios@b1d182e4-1ff8-0310-901f-bddb46175740"
] | asklepios@b1d182e4-1ff8-0310-901f-bddb46175740 |
a113c2aea8c97a1c80a807f2750da7124223041f | 21a9f4e314c2c84bc9c8850c0c05da92a2d8e43f | /chnSegment.py | 017beb1f6cb5e2897cf4b39a58f1e5397a8543bb | [] | no_license | CSUST-LingYi/analysis_processing | c68927c842586b9dfd284414ad84fb6b6b83d0dd | 8c23f5aa304c29612add5c0c0268177e7a63dd90 | refs/heads/master | 2023-01-06T20:33:46.983957 | 2020-11-05T09:05:11 | 2020-11-05T09:05:11 | 310,240,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # coding:utf-8
from collections import Counter
from os import path
import jieba
jieba.load_userdict(path.join(path.dirname(__file__),'userdict//userdict.txt')) # 导入用户自定义词典
def word_segment(text):
'''
通过jieba进行分词并通过空格分隔,返回分词后的结果
'''
# 计算每个词出现的频率,并存入txt文件
# jieba_word=jieba.cut(text,cut_all=False) # cut_all是分词模式,True是全模式,False是精准模式,默认False
# data=[]
# for word in jieba_word:
# data.append(word)
# dataDict=Counter(data)
# with open('doc//词频统计.txt', 'w') as fw:
# for k,v in dataDict.items():
# fw.write("%s,%d\n" % (k,v))
# fw.write("%s"%dataDict)
# 返回分词后的结果
jieba_word=jieba.cut(text,cut_all=False) # cut_all是分词模式,True是全模式,False是精准模式,默认False
seg_list=' '.join(jieba_word)
return seg_list
# 批量添加用户自定义字典
def add_userdict_as_list(word_list):
try:
with open('userdict//userdict.txt', 'a', encoding='utf8') as fw:
for i in range(len(word_list)):
fw.write("%s\n" % (word_list[i],))
except IOError:
print("用户字典添加错误");
return False
else:
fw.close()
return True
| [
"1417522511@qq.com"
] | 1417522511@qq.com |
a18d6a2dec529d3ec7e607d68fa268b6e10ab14f | fea398a9638acdfa2fb06e7a9695d5894452ded7 | /0x03-python-data_structures/6-print_matrix_integer.py | 94690f8578ca1d676f2b335b5640454178a148b3 | [] | no_license | OscarDRT/holbertonschool-higher_level_programming | d15585aa93ced9bc04464ced9bfd4197e73c42fa | f57ef3344df6350bded78ffce975eea693e67727 | refs/heads/master | 2020-09-30T19:56:30.788311 | 2020-05-14T19:52:10 | 2020-05-14T19:52:10 | 227,360,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | #!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
if (matrix):
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print("{:d}".format(matrix[i][j]), end='')
if (j < len(matrix[i]) - 1):
print(' ', end='')
print()
| [
"oscarnetworkingpro@gmail.com"
] | oscarnetworkingpro@gmail.com |
fd7d056dca6eb683dac51fb9acb8977975310b3c | 872cd13f25621825db0c598268ecd21b49cc2c79 | /Lesson_11/unit_tests/test_client.py | 10a7ec604a3caf9f471dcc27973a5e6aa6a5b511 | [] | no_license | ss2576/client_server_applications_Python | c4e9ebe195d23c8ca73211894aa50a74014013d5 | 9b599e37e5dae5af3dca06e197916944f12129d5 | refs/heads/master | 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 | Python | UTF-8 | Python | false | false | 1,934 | py | import sys
import os
sys.path.append(os.path.join(os.getcwd(), '..'))
from unittest import TestCase, main
from common.classes import *
from common.variables import *
from common.utils import *
from common.codes import *
class TestJimClasses(TestCase):
def test_request_dict(self):
body = 'test'
time = dt.timestamp(dt.now())
request = Request(RequestAction.PRESENCE, body)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.PRESENCE, TIME: time, BODY: body})
request = Request(RequestAction.QUIT)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.QUIT, TIME: time, BODY: ''})
self.assertRaises(TypeError, Request)
def test_response_dict(self):
time = dt.timestamp(dt.now())
response = Response(OK)
self.assertEqual(response.get_dict(), {CODE: 200, TIME: time, MESSAGE: 'OK'})
self.assertRaises(TypeError, Response)
class TestJimFunctions(TestCase):
class TestSocket:
encoded_data = None
request = None
def __init__(self, data):
self.data = data
def send(self, request):
json_str = json.dumps(self.data)
self.encoded_data = json_str.encode(ENCODING)
self.request = request
def recv(self, buf):
json_str = json.dumps(self.data)
return json_str.encode(ENCODING)
def test_send_request(self):
request = Request(RequestAction.MESSAGE)
socket = self.TestSocket(request.get_dict())
send_data(socket, request)
self.assertEqual(socket.encoded_data, socket.request)
def test_get_data(self):
response = Response(BASIC)
socket = self.TestSocket(response.get_dict())
self.assertEqual(get_data(socket), response.get_dict())
self.assertEqual(Response.from_dict(get_data(socket)), response)
if __name__ == '__main__':
main() | [
"ss2576@mail.ru"
] | ss2576@mail.ru |
257516fced57a6cff358210829bb1ad4f0052a33 | fed4e357cfbe58c0716ec3e5df5e48a3c885eddb | /Server/Config.py | 1b31776b58214088a4c88aab1bbb3c188d26f74b | [] | no_license | betteroutthanin/SpaceCommand | 975922f033d96bdeb12c164f3f21d75928d0d0ff | 31198267184fab3c2a4e50b871f02cd86415711c | refs/heads/master | 2020-04-23T01:10:41.756449 | 2019-02-15T05:03:34 | 2019-02-15T05:03:34 | 170,804,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | # Gob Path
GOBpath = "Data/GOBS/"
GOBZeros = 10
# How many frame in 1 second
ServerFPS = 1.0
# Length of 1 server frame in Milli Seconds
ServerFrameMS = 1000.0 / ServerFPS
# Lenth of 1 server frame in Seconds
ServerFrameSec = 1.0 / ServerFPS
# Frame Multiplier (sec)
FrameMultiSec = ServerFrameSec
# Jump drive upper charge level
JumpDriveUpperCharge = 100
# Km in 1 light minute
# note the forcing to float
LightMinuteInKm = 17987548.0
# View radius
ViewRadius = 4 | [
"betteroutthanin@gmail.com"
] | betteroutthanin@gmail.com |
2f07645844113c62897b33114cef7c03ca4b7b31 | 7d172bc83bc61768a09cc97746715b8ec0e13ced | /facebook/views.py | bc76a115e91554ace708d6e9fc2227bacf2b21cf | [] | no_license | shivam1111/jjuice | a3bcd7ee0ae6647056bdc62ff000ce6e6af27594 | 6a2669795ed4bb4495fda7869eeb221ed6535582 | refs/heads/master | 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from django.shortcuts import render
from django.views import View
from django.http import JsonResponse
import requests
from django.conf import settings
class Ratings(View):
def get(self, request, template_name="index.html"):
response = {}
payload ={
'access_token':settings.FACEBOOK_ACCESSS_TOKEN,
'fields':"has_rating,has_review,rating,review_text,reviewer"
}
res = requests.get('https://graph.facebook.com/v2.9/vapejjuice/ratings',params=payload)
return JsonResponse(data=res.json(), status=200, safe=False)
| [
"shivam1111@gmail.com"
] | shivam1111@gmail.com |
3d2b3dcef65761e440ad2e5a0f58cfde09653fe3 | d90fe24703ec8c70c01343f6a4b5ae8d0c3a4cc6 | /dzien12/kalkulator_tester.py | 7e587336402cabcc984d8b440061cf7752be7ec5 | [] | no_license | blue-mica/kurs_Python | cc89d79053d4281ab177bec0bed7a5e6b4c5361c | e88312e0dbd86f4a9a274e0f7f7d6758204fea55 | refs/heads/master | 2021-08-31T14:51:10.927756 | 2017-12-21T19:27:22 | 2017-12-21T19:27:22 | 109,871,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from unittest import TestCase
from dzien12.kalkulator import *
class KalkulatorTesty(TestCase):
def setUp(self):
self.a = 33
self.b = 3
def test_dodaj(self):
# arrange
wynik_oczekiwany = self.a + self.b
# act
wynik_rzeczywisty = dodaj(self.a, self.b)
#assert
self.assertEqual(wynik_rzeczywisty, wynik_oczekiwany, msg="Wartosci obliczone sa rozne")
def test_odejmij(self):
# arrange
wynik_oczekiwany = self.a - self.b
#act
wynik_rzeczywisty = odejmi(self.a, self.b)
#asser
self.assertEqual(wynik_rzeczywisty, wynik_oczekiwany) | [
"bluemicam3@gmail.com"
] | bluemicam3@gmail.com |
1dc802022a2096fe6390e9c8c00491b79e22fd57 | c7a5448821669b2fdebf5c2a4eb0ea70bba545d3 | /creme/optim/adam.py | 3c29444f912d89f1b786e209b735cfb90c961960 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brp-sara/creme | e5eb44e5d75cea0120c8fd17c20a963a1fe6c153 | 56c3baf6ee160015b72ab8ebedc0e03da32a6eae | refs/heads/master | 2020-09-08T17:10:18.903069 | 2019-11-11T12:14:32 | 2019-11-11T12:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | import collections
from . import base
__all__ = ['Adam']
class Adam(base.Optimizer):
"""Adam optimizer.
Example:
::
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> from creme import stream
>>> from sklearn import datasets
>>> X_y = stream.iter_sklearn_dataset(
... dataset=datasets.load_breast_cancer(),
... shuffle=True,
... random_state=42
... )
>>> optimizer = optim.Adam()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer)
... )
>>> metric = metrics.F1()
>>> model_selection.online_score(X_y, model, metric)
F1: 0.959554
References:
1. `Adam: A method for stochastic optimization <https://arxiv.org/pdf/1412.6980.pdf>`_
"""
def __init__(self, lr=0.1, beta_1=0.9, beta_2=0.999, eps=1e-8):
super().__init__(lr)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.m = collections.defaultdict(float)
self.v = collections.defaultdict(float)
def _update_after_pred(self, w, g):
for i, gi in g.items():
self.m[i] = self.beta_1 * self.m[i] + (1 - self.beta_1) * gi
self.v[i] = self.beta_2 * self.v[i] + (1 - self.beta_2) * gi ** 2
m = self.m[i] / (1 - self.beta_1 ** (self.n_iterations + 1))
v = self.v[i] / (1 - self.beta_2 ** (self.n_iterations + 1))
w[i] -= self.learning_rate * m / (v ** 0.5 + self.eps)
return w
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
ba7c61607f868a2aac822aba553d8c31efe34216 | 8778b7ffafc578a4c5e44a6933aa815ee2f9fbcc | /uniflocpy/uWell/Self_flow_well.py | 7cfe153cdbc27c5f3c300d2c527e19377f663522 | [
"MIT"
] | permissive | hah029/unifloc_py | c4aceeec3ecaaa7ea353102bd4725be4fc781880 | 7338c12788e3f3340bf8d1cb1db15d0471b62434 | refs/heads/master | 2023-08-31T06:58:11.571694 | 2020-11-14T12:09:56 | 2020-11-14T12:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,208 | py | """
Модуль-интерфейс описания работы фонтанирующей скважины
Кобзарь О.С. Хабибуллин Р.А. 20.07.2019 г
"""
# TODO добавить возможноть извлечения всех доступных данных через единственный self.data
# TODO добавить расчет методом снизу вверх
# TODO добавить возможность добавления нескольких колонн НКТ и ОК
# TODO добавить конструкцию трубы - толщины, диаметры внешние и внутренние
import uniflocpy.uTools.uconst as uc
import uniflocpy.uTools.data_workflow as data_workflow
import uniflocpy.uWell.uPipe as uPipe
import uniflocpy.uWell.deviation_survey as deviation_survey
import uniflocpy.uPVT.PVT_fluids as PVT_fluids
import uniflocpy.uPVT.BlackOil_model as BlackOil_model
import numpy as np
import uniflocpy.uReservoir.IPR_simple_line as IPR_simple_line
import uniflocpy.uTemperature.temp_cor_simple_line as temp_cor_simple_line
import uniflocpy.uTemperature.temp_cor_Hasan_Kabir as temp_cor_Hasan_Kabir
import uniflocpy.uMultiphaseFlow.hydr_cor_Beggs_Brill as hydr_cor_Beggs_Brill
from scipy.integrate import solve_ivp
import time
class self_flow_well():
def __init__(self, fluid=0,
well_profile=0,
hydr_corr=0,
temp_corr=0,
pipe=0,
reservoir=-1,
gamma_oil=0.86, gamma_gas=0.6, gamma_wat=1.0, rsb_m3m3=200.0,
h_conductor_mes_m=500, h_conductor_vert_m=500,
h_intake_mes_m=1000, h_intake_vert_m=1000,
h_bottomhole_mes_m=1500, h_bottomhole_vert_m=1500,
d_casing_inner_m=0.120, d_tube_inner_m=0.062,
qliq_on_surface_m3day=100, fw_on_surface_perc=10,
p_bottomhole_bar=200, t_bottomhole_c=92,
p_wellhead_bar=20, t_wellhead_c=20,
t_earth_init_on_surface_c=3, t_earth_init_in_reservoir_c=90, geothermal_grad_cm=0.03,
p_reservoir_bar=None,
well_work_time_sec=60 * 24 * 60 * 60,
step_lenth_in_calc_along_wellbore_m=10,
without_annulus_space=False,
save_all=True,
solver_using=0,
activate_rus_mode=0,
multiplier_for_pi=1,
pb_bar=90):
"""
При создании модели скважины необходимо задать ее конструкцию, PVT свойства флюидов и режим работы
вместе с граничными условиями. Кроме параметров, которые предлагается задать при
инициализации, можно изменить и другие, входящие в состав модели, путем обращения к необходимым
модулям. На что стоит обрать внимание: некоторые параметры выставлены по умолчанию и изменение
всех интересующих параметров необходимо выполнить до процесса расчета.
:param h_conductor_mes_m: измеренная глубина конца кондуктора, м
:param h_conductor_vert_m: вертикальная глубина конца кондуктора, м
:param h_intake_mes_m: измеренная глубина конца колонны НКТ (спуска НКТ), м
:param h_intake_vert_m: вертикальная глубина конца колонны НКТ (спуска НКТ), м
:param h_bottomhole_mes_m: измеренная глубина забоя, м
:param h_bottomhole_vert_m: вертикальная глубина забоя, м
:param qliq_on_surface_m3day: дебит жидкости на поверхности, м3/сутки
:param fw_on_surface_perc: обводненность продукции на поверхности, %
:param d_casing_inner_m: внутренний диаметр обсадной колонны, м
:param d_tube_inner_m: внутренни диаметр НКТ
:param p_bottomhole_bar: давление на забое, бар
:param t_bottomhole_c: температура на забое, С
:param p_wellhead_bar: давление на устье, бар
:param t_wellhead_c: температура на устье, С
:param t_earth_init_on_surface_c: начальная температура земли на поверхности (нейтрального слоя), С
:param t_earth_init_in_reservoir_c: начальная температура пласта, С
:param geothermal_grad_cm: геотермический градиент, С/м
:param well_work_time_sec: время работы скважины, сек
:param step_lenth_in_calc_along_wellbore_m: длина шага вдоль ствола скважины в расчете, м
"""
self.h_conductor_mes_m = h_conductor_mes_m
self.h_conductor_vert_m = h_conductor_vert_m
self.h_intake_mes_m = h_intake_mes_m
self.h_intake_vert_m = h_intake_vert_m
self.h_bottomhole_mes_m = h_bottomhole_mes_m
self.h_bottomhole_vert_m = h_bottomhole_vert_m
self.d_casing_inner_m = d_casing_inner_m
self.d_tube_inner_m = d_tube_inner_m
self.p_bottomhole_bar = p_bottomhole_bar
self.t_bottomhole_c = t_bottomhole_c
self.p_wellhead_bar = p_wellhead_bar
self.t_wellhead_c = t_wellhead_c
self.well_work_time_sec = well_work_time_sec
self.step_lenth_in_calc_along_wellbore_m = step_lenth_in_calc_along_wellbore_m
self.t_earth_init_on_surface_c = t_earth_init_on_surface_c
self.t_earth_init_in_reservoir_c = t_earth_init_in_reservoir_c
self.geothermal_grad_cm = geothermal_grad_cm
self.p_reservoir_bar = p_reservoir_bar
if well_profile == 0:
self.well_profile = deviation_survey.simple_well_deviation_survey()
elif well_profile == 1:
self.well_profile = deviation_survey.well_deviation_survey()
if pipe == 0:
self.pipe = uPipe.Pipe()
if hydr_corr == 0:
self.pipe.hydr_cor = hydr_cor_Beggs_Brill.Beggs_Brill_cor()
if temp_corr == 0:
self.pipe.temp_cor = temp_cor_Hasan_Kabir.Hasan_Kabir_cor()
elif temp_corr == 1:
self.pipe.temp_cor = temp_cor_simple_line.SimpleLineCor()
if fluid == 0:
self.pipe.fluid_flow.fl = PVT_fluids.FluidStanding(gamma_oil=gamma_oil, gamma_gas=gamma_gas,
gamma_wat=gamma_wat, rsb_m3m3=rsb_m3m3)
elif fluid == 1:
self.pipe.fluid_flow.fl = BlackOil_model.Fluid(gamma_oil=gamma_oil, gamma_gas=gamma_gas,
gamma_wat=gamma_wat, rsb_m3m3=rsb_m3m3,
t_res_c=t_bottomhole_c, pb_bar=pb_bar)
if activate_rus_mode:
self.pipe.fluid_flow.fl = BlackOil_model.Fluid(gamma_oil=gamma_oil, gamma_gas=gamma_gas,
gamma_wat=gamma_wat, rsb_m3m3=rsb_m3m3,
t_res_c=t_bottomhole_c, pb_bar=pb_bar, activate_rus_cor=1)
self.data = data_workflow.Data()
self.qliq_on_surface_m3day = qliq_on_surface_m3day
self.fw_on_surface_perc = fw_on_surface_perc
self.h_calculated_vert_m = None
self.h_calculated_mes_m = None
self.p_calculated_bar = None
self.t_calculated_c = None
self.t_calculated_earth_init = None
self.t_grad_calculated_cm = None
self.p_grad_calculated_barm = None
self.without_annulus_space = without_annulus_space
self.save_all = save_all
if reservoir == -1:
self.ipr = None
elif reservoir == 0:
self.ipr = IPR_simple_line.IPRSimpleLine()
if self.p_reservoir_bar == None:
self.p_reservoir_bar = 1000 * uc.g * self.h_bottomhole_vert_m / 100000
self.ipr.pi_m3daybar = self.ipr.calc_pi_m3daybar(self.qliq_on_surface_m3day, self.p_bottomhole_bar,
self.p_reservoir_bar)
self.direction_up = None
self.solver_using = solver_using
self.multiplier_for_pi = multiplier_for_pi
self.time_calculated_sec = None
self.calculation_number_in_one_step = None
def __transfer_data_to_pipe__(self, pipe_object, section_casing, d_inner_pipe_m):
"""
Происходит изменение параметров в используемом подмодуле - трубе -
используя данные, заданые в классе self_flow_well
:param pipe_object: экземпляр класса Pipe - НКТ или ОК
:param section_casing: определение типа Pipe: True - ОК, False - НКТ
:param d_inner_pipe_m: внутренний диаметр трубы, м
:return: None
"""
pipe_object.section_casing = section_casing
pipe_object.fluid_flow.qliq_on_surface_m3day = self.qliq_on_surface_m3day
pipe_object.fluid_flow.fw_on_surface_perc = self.fw_on_surface_perc
pipe_object.time_sec = self.well_work_time_sec
pipe_object.fluid_flow.d_m = d_inner_pipe_m
pipe_object.t_in_c = self.t_bottomhole_c
pipe_object.t_out_c = self.t_wellhead_c
pipe_object.h_mes_in_m = self.h_bottomhole_mes_m
pipe_object.h_mes_out_m = 0
def __init_construction__(self):
self.well_profile.h_conductor_mes_m = self.h_conductor_mes_m
self.well_profile.h_conductor_vert_m = self.h_conductor_vert_m
self.well_profile.h_pump_mes_m = self.h_intake_mes_m
self.well_profile.h_pump_vert_m = self.h_intake_vert_m
self.well_profile.h_bottomhole_mes_m = self.h_bottomhole_mes_m
self.well_profile.h_bottomhole_vert_m = self.h_bottomhole_vert_m
self.well_profile.lenth_of_one_part = self.step_lenth_in_calc_along_wellbore_m
self.well_profile.calc_all()
return None
def calc_p_grad_pam_for_scipy(self, h_m, p_bar, t_c, pipe_object):
p_bar = float(p_bar)
t_c = float(t_c)
p_grad_pam = pipe_object.calc_p_grad_pam(p_bar, t_c)
return uc.Pa2bar(p_grad_pam)
    def __calc_pipe__(self, pipe_object, option_last_calc_boolean=False):
        """
        Calculate all pipe (tubing or casing) parameters at the current point,
        store them in the class attributes and in the data_workflow storage
        (self.data), then march one step along the wellbore to obtain pressure
        and temperature at the next point.

        :param pipe_object: Pipe instance - tubing or casing
        :param option_last_calc_boolean: last-calculation option - when True, no
            step to the next point is taken (only gradients are evaluated)
        :return: None
        """
        start_calculation_time = time.time()
        # Step direction: +1 marching bottom-up (depth decreases),
        # -1 marching top-down (depth increases).
        if self.direction_up:
            sign = 1
        else:
            sign = - 1
        pipe_object.t_earth_init_c = self.t_calculated_earth_init
        pipe_object.angle_to_horizontal_grad = self.well_profile.get_angle_to_horizontal_grad(self.h_calculated_mes_m)
        # Pressure gradient (bar/m) and temperature gradient (C/m) at the current point
        self.p_grad_calculated_barm = uc.Pa2bar(pipe_object.calc_p_grad_pam(self.p_calculated_bar,
                                                                            self.t_calculated_c))
        self.t_grad_calculated_cm = pipe_object.calc_t_grad_cm(self.p_calculated_bar, self.t_calculated_c)
        if self.save_all:
            self.data.get_data(self)
        if not option_last_calc_boolean:
            # Vertical projection of the current measured-depth step (trajectory-aware)
            self.step_lenth_calculated_along_vert_m = np.abs(self.well_profile.get_h_vert_m(self.h_calculated_mes_m -
                                                     self.step_lenth_in_calc_along_wellbore_m) -
                                                             self.well_profile.get_h_vert_m(self.h_calculated_mes_m))
            if self.solver_using == 1:
                # Integrate dp/dh with scipy's ODE solver over one wellbore step;
                # temperature is held constant over the whole step, which is an
                # approximation (noted as incorrect in the original comment)
                new_p_calculated_bar_solve_output = solve_ivp(self.calc_p_grad_pam_for_scipy,
                                              t_span=(self.h_calculated_mes_m,
                                                      self.h_calculated_mes_m - self.step_lenth_in_calc_along_wellbore_m * sign),
                                              y0=[self.p_calculated_bar],
                                              args=(self.t_calculated_c, pipe_object),
                                              rtol=0.001, atol=0.001
                                              )
                # Pressure at the end of the integration interval
                new_p_calculated_bar = new_p_calculated_bar_solve_output.y[-1][-1]
                self.p_calculated_bar = new_p_calculated_bar
                self.calculation_number_in_one_step = new_p_calculated_bar_solve_output.nfev
            else:
                # Plain Euler step using the gradient evaluated at the current point
                self.p_calculated_bar -= self.p_grad_calculated_barm * self.step_lenth_in_calc_along_wellbore_m * sign
                self.calculation_number_in_one_step = 1
            self.t_calculated_c -= self.t_grad_calculated_cm * self.step_lenth_in_calc_along_wellbore_m * sign
            self.h_calculated_mes_m -= self.step_lenth_in_calc_along_wellbore_m * sign
            self.h_calculated_vert_m = self.well_profile.get_h_vert_m(self.h_calculated_mes_m)
            self.t_calculated_earth_init -= self.geothermal_grad_cm * self.step_lenth_calculated_along_vert_m * sign
            # Clamp pressure to a small positive floor (presumably to keep the
            # PVT correlations in a valid range - TODO confirm)
            if self.p_calculated_bar < 1.1:
                self.p_calculated_bar = 1.1
        self.time_calculated_sec = time.time() - start_calculation_time
def calc_all_from_down_to_up(self):
"""
Расчет фонтанирующей скважины методом снизу-вверх
:return: None
"""
self.direction_up = True
self.well_profile.h_conductor_mes_m = self.h_conductor_mes_m
self.well_profile.h_conductor_vert_m = self.h_conductor_vert_m
self.well_profile.h_pump_mes_m = self.h_intake_mes_m
self.well_profile.h_pump_vert_m = self.h_intake_vert_m
self.well_profile.h_bottomhole_mes_m = self.h_bottomhole_mes_m
self.well_profile.h_bottomhole_vert_m = self.h_bottomhole_vert_m
self.well_profile.lenth_of_one_part = self.step_lenth_in_calc_along_wellbore_m
self.well_profile.calc_all()
self.h_calculated_mes_m = self.h_bottomhole_mes_m
self.h_calculated_vert_m = self.h_bottomhole_vert_m
if self.ipr != None:
self.p_calculated_bar = self.ipr.calc_p_bottomhole_bar(self.qliq_on_surface_m3day, self.multiplier_for_pi)
self.p_bottomhole_bar = self.p_calculated_bar
else:
self.p_calculated_bar = self.p_bottomhole_bar
self.t_calculated_c = self.t_bottomhole_c
self.t_calculated_earth_init = self.t_earth_init_in_reservoir_c
self.step_lenth_calculated_along_vert_m = (self.well_profile.get_h_vert_m(self.h_calculated_mes_m -
self.step_lenth_in_calc_along_wellbore_m) -
self.well_profile.get_h_vert_m(self.h_calculated_mes_m))
self.data.clear_data()
self.__transfer_data_to_pipe__(self.pipe, section_casing=True, d_inner_pipe_m=self.d_casing_inner_m)
# casing calc
while self.h_calculated_mes_m >= self.h_intake_mes_m + self.step_lenth_in_calc_along_wellbore_m:
self.__calc_pipe__(self.pipe)
# last calc in casing
step_lenth_in_calc_along_wellbore_m = self.step_lenth_in_calc_along_wellbore_m
self.step_lenth_in_calc_along_wellbore_m = self.h_calculated_mes_m-self.h_intake_mes_m * 0.9999
self.__calc_pipe__(self.pipe)
self.step_lenth_in_calc_along_wellbore_m = step_lenth_in_calc_along_wellbore_m
if self.without_annulus_space:
self.__transfer_data_to_pipe__(self.pipe, section_casing=True, d_inner_pipe_m=self.d_tube_inner_m)
else:
self.__transfer_data_to_pipe__(self.pipe, section_casing=False, d_inner_pipe_m=self.d_tube_inner_m)
# tubing calc
while self.h_intake_mes_m > self.h_calculated_mes_m >= self.step_lenth_in_calc_along_wellbore_m:
self.__calc_pipe__(self.pipe)
# last step in tubing before 0 point
step_lenth_in_calc_along_wellbore_m = self.step_lenth_in_calc_along_wellbore_m
self.step_lenth_in_calc_along_wellbore_m = self.h_calculated_mes_m
self.__calc_pipe__(self.pipe, option_last_calc_boolean=False)
self.step_lenth_in_calc_along_wellbore_m = step_lenth_in_calc_along_wellbore_m
# calc grad in 0 point and save
self.__calc_pipe__(self.pipe, option_last_calc_boolean=True)
if not self.save_all:
self.data.get_data(self)
self.p_wellhead_bar = self.p_calculated_bar
self.t_wellhead_c = self.t_calculated_c
def calc_all_from_up_to_down(self):
"""
Расчет фонтанирующей скважины методом снизу-вверх
:return: None
"""
self.direction_up = False
self.__init_construction__()
self.h_calculated_mes_m = 0
self.h_calculated_vert_m = 0
self.p_calculated_bar = self.p_wellhead_bar
self.t_calculated_c = self.t_wellhead_c
self.t_calculated_earth_init = self.t_wellhead_c # TODO
self.step_lenth_calculated_along_vert_m = (self.well_profile.get_h_vert_m(self.h_calculated_mes_m +
self.step_lenth_in_calc_along_wellbore_m) -
self.well_profile.get_h_vert_m(self.h_calculated_mes_m))
self.data.clear_data()
# tubing calc
if self.without_annulus_space:
self.__transfer_data_to_pipe__(self.pipe, section_casing=True, d_inner_pipe_m=self.d_tube_inner_m)
else:
self.__transfer_data_to_pipe__(self.pipe, section_casing=False, d_inner_pipe_m=self.d_tube_inner_m)
while self.h_calculated_mes_m <= self.h_intake_mes_m - self.step_lenth_in_calc_along_wellbore_m:
self.__calc_pipe__(self.pipe)
# last calc in tubing
step_lenth_in_calc_along_wellbore_m = self.step_lenth_in_calc_along_wellbore_m
self.step_lenth_in_calc_along_wellbore_m = self.h_intake_mes_m - self.h_calculated_mes_m * 0.9999
self.__calc_pipe__(self.pipe)
self.step_lenth_in_calc_along_wellbore_m = step_lenth_in_calc_along_wellbore_m
# casing calc
self.__transfer_data_to_pipe__(self.pipe, section_casing=True, d_inner_pipe_m=self.d_casing_inner_m)
while self.h_calculated_mes_m <= self.h_bottomhole_mes_m - self.step_lenth_in_calc_along_wellbore_m:
self.__calc_pipe__(self.pipe)
# last step in casing before 0 point
step_lenth_in_calc_along_wellbore_m = self.step_lenth_in_calc_along_wellbore_m
self.step_lenth_in_calc_along_wellbore_m = self.h_bottomhole_mes_m - self.h_calculated_mes_m
self.__calc_pipe__(self.pipe, option_last_calc_boolean=False)
self.step_lenth_in_calc_along_wellbore_m = step_lenth_in_calc_along_wellbore_m
# calc grad in 0 point and save
self.__calc_pipe__(self.pipe, option_last_calc_boolean=True)
if not self.save_all:
self.data.get_data(self)
self.p_bottomhole_bar = self.p_calculated_bar
self.t_bottomhole_c = self.t_calculated_c | [
"oleg.kobzarius@gmail.com"
] | oleg.kobzarius@gmail.com |
81eff45dface1cc77149b38692253a13f88601ea | 10d98fecb882d4c84595364f715f4e8b8309a66f | /neural_additive_models/nam_train.py | 227023ece7cb38a085e91249181db0bf08cbda5e | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 14,352 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Training script for Neural Additive Models.
"""
import operator
import os
from typing import Tuple, Iterator, List, Dict
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from neural_additive_models import data_utils
from neural_additive_models import graph_builder
gfile = tf.io.gfile
DatasetType = data_utils.DatasetType
FLAGS = flags.FLAGS
flags.DEFINE_integer('training_epochs', None,
'The number of epochs to run training for.')
flags.DEFINE_float('learning_rate', 1e-2, 'Hyperparameter: learning rate.')
flags.DEFINE_float('output_regularization', 0.0, 'Hyperparameter: feature reg')
flags.DEFINE_float('l2_regularization', 0.0, 'Hyperparameter: l2 weight decay')
flags.DEFINE_integer('batch_size', 1024, 'Hyperparameter: batch size.')
flags.DEFINE_string('logdir', None, 'Path to dir where to store summaries.')
flags.DEFINE_string('dataset_name', 'Teleco',
'Name of the dataset to load for training.')
flags.DEFINE_float('decay_rate', 0.995, 'Hyperparameter: Optimizer decay rate')
flags.DEFINE_float('dropout', 0.5, 'Hyperparameter: Dropout rate')
flags.DEFINE_integer(
'data_split', 1, 'Dataset split index to use. Possible '
'values are 1 to `FLAGS.num_splits`.')
flags.DEFINE_integer('tf_seed', 1, 'seed for tf.')
flags.DEFINE_float('feature_dropout', 0.0,
'Hyperparameter: Prob. with which features are dropped')
flags.DEFINE_integer(
'num_basis_functions', 1000, 'Number of basis functions '
'to use in a FeatureNN for a real-valued feature.')
flags.DEFINE_integer('units_multiplier', 2, 'Number of basis functions for a '
'categorical feature')
flags.DEFINE_boolean(
'cross_val', False, 'Boolean flag indicating whether to '
'perform cross validation or not.')
flags.DEFINE_integer(
'max_checkpoints_to_keep', 1, 'Indicates the maximum '
'number of recent checkpoint files to keep.')
flags.DEFINE_integer(
'save_checkpoint_every_n_epochs', 10, 'Indicates the '
'number of epochs after which an checkpoint is saved')
flags.DEFINE_integer('n_models', 1, 'the number of models to train.')
flags.DEFINE_integer('num_splits', 3, 'Number of data splits to use')
flags.DEFINE_integer('fold_num', 1, 'Index of the fold to be used')
flags.DEFINE_string(
'activation', 'exu', 'Activation function to used in the '
'hidden layer. Possible options: (1) relu, (2) exu')
flags.DEFINE_boolean(
'regression', False, 'Boolean flag indicating whether we '
'are solving a regression task or a classification task.')
flags.DEFINE_boolean('debug', False, 'Debug mode. Log additional things')
flags.DEFINE_boolean('shallow', False, 'Whether to use shallow or deep NN.')
flags.DEFINE_boolean('use_dnn', False, 'Deep NN baseline.')
flags.DEFINE_integer('early_stopping_epochs', 60, 'Early stopping epochs')
_N_FOLDS = 5
GraphOpsAndTensors = graph_builder.GraphOpsAndTensors
EvaluationMetric = graph_builder.EvaluationMetric
@flags.multi_flags_validator(['data_split', 'cross_val'],
message='Data split should not be used in '
'conjunction with cross validation')
def data_split_with_cross_validation(flags_dict):
return (flags_dict['data_split'] == 1) or (not flags_dict['cross_val'])
def _get_train_and_lr_decay_ops(
graph_tensors_and_ops,
early_stopping):
"""Returns training and learning rate decay ops."""
train_ops = [
g['train_op']
for n, g in enumerate(graph_tensors_and_ops)
if not early_stopping[n]
]
lr_decay_ops = [
g['lr_decay_op']
for n, g in enumerate(graph_tensors_and_ops)
if not early_stopping[n]
]
return train_ops, lr_decay_ops
def _update_latest_checkpoint(checkpoint_dir,
best_checkpoint_dir):
"""Updates the latest checkpoint in `best_checkpoint_dir` from `checkpoint_dir`."""
for filename in gfile.glob(os.path.join(best_checkpoint_dir, 'model.*')):
gfile.remove(filename)
for name in gfile.glob(os.path.join(checkpoint_dir, 'model.*')):
gfile.copy(
name,
os.path.join(best_checkpoint_dir, os.path.basename(name)),
overwrite=True)
def _create_computation_graph(
x_train, y_train, x_validation,
y_validation, batch_size
):
"""Build the computation graph."""
graph_tensors_and_ops = []
metric_scores = []
for n in range(FLAGS.n_models):
graph_tensors_and_ops_n, metric_scores_n = graph_builder.build_graph(
x_train=x_train,
y_train=y_train,
x_test=x_validation,
y_test=y_validation,
activation=FLAGS.activation,
learning_rate=FLAGS.learning_rate,
batch_size=batch_size,
shallow=FLAGS.shallow,
output_regularization=FLAGS.output_regularization,
l2_regularization=FLAGS.l2_regularization,
dropout=FLAGS.dropout,
num_basis_functions=FLAGS.num_basis_functions,
units_multiplier=FLAGS.units_multiplier,
decay_rate=FLAGS.decay_rate,
feature_dropout=FLAGS.feature_dropout,
regression=FLAGS.regression,
use_dnn=FLAGS.use_dnn,
trainable=True,
name_scope=f'model_{n}')
graph_tensors_and_ops.append(graph_tensors_and_ops_n)
metric_scores.append(metric_scores_n)
return graph_tensors_and_ops, metric_scores
def _create_graph_saver(graph_tensors_and_ops,
logdir, num_steps_per_epoch):
"""Create saving hook(s) as well as model and checkpoint directories."""
saver_hooks, model_dirs, best_checkpoint_dirs = [], [], []
save_steps = num_steps_per_epoch * FLAGS.save_checkpoint_every_n_epochs
# The MonitoredTraining Session counter increments by `n_models`
save_steps = save_steps * FLAGS.n_models
for n in range(FLAGS.n_models):
scaffold = tf.train.Scaffold(
saver=tf.train.Saver(
var_list=graph_tensors_and_ops[n]['nn_model'].trainable_variables,
save_relative_paths=True,
max_to_keep=FLAGS.max_checkpoints_to_keep))
model_dirs.append(os.path.join(logdir, 'model_{}').format(n))
best_checkpoint_dirs.append(os.path.join(model_dirs[-1], 'best_checkpoint'))
gfile.makedirs(best_checkpoint_dirs[-1])
saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=model_dirs[-1], save_steps=save_steps, scaffold=scaffold)
saver_hooks.append(saver_hook)
return saver_hooks, model_dirs, best_checkpoint_dirs
def _update_metrics_and_checkpoints(sess,
epoch,
metric_scores,
curr_best_epoch,
best_validation_metric,
best_train_metric,
model_dir,
best_checkpoint_dir,
metric_name = 'RMSE'):
"""Update metric scores and latest checkpoint."""
# Minimize RMSE and maximize AUROC
compare_metric = operator.lt if FLAGS.regression else operator.gt
# Calculate the AUROC/RMSE on the validation split
validation_metric = metric_scores['test'](sess)
if FLAGS.debug:
tf.logging.info('Epoch %d %s Val %.4f', epoch, metric_name,
validation_metric)
if compare_metric(validation_metric, best_validation_metric):
curr_best_epoch = epoch
best_validation_metric = validation_metric
best_train_metric = metric_scores['train'](sess)
# copy the checkpoints files *.meta *.index, *.data* each time
# there is a better result
_update_latest_checkpoint(model_dir, best_checkpoint_dir)
return curr_best_epoch, best_validation_metric, best_train_metric
def training(x_train, y_train, x_validation,
y_validation,
logdir):
"""Trains the Neural Additive Model (NAM).
Args:
x_train: Training inputs.
y_train: Training labels.
x_validation: Validation inputs.
y_validation: Validation labels.
logdir: dir to save the checkpoints.
Returns:
Best train and validation evaluation metric obtained during NAM training.
"""
tf.logging.info('Started training with logdir %s', logdir)
batch_size = min(FLAGS.batch_size, x_train.shape[0])
num_steps_per_epoch = x_train.shape[0] // batch_size
# Keep track of the best validation RMSE/AUROC and train AUROC score which
# corresponds to the best validation metric score.
if FLAGS.regression:
best_train_metric = np.inf * np.ones(FLAGS.n_models)
best_validation_metric = np.inf * np.ones(FLAGS.n_models)
else:
best_train_metric = np.zeros(FLAGS.n_models)
best_validation_metric = np.zeros(FLAGS.n_models)
# Set to a large value to avoid early stopping initially during training
curr_best_epoch = np.full(FLAGS.n_models, np.inf)
# Boolean variables to indicate whether the training of a specific model has
# been early stopped.
early_stopping = [False] * FLAGS.n_models
# Classification: AUROC, Regression : RMSE Score
metric_name = 'RMSE' if FLAGS.regression else 'AUROC'
tf.reset_default_graph()
with tf.Graph().as_default():
tf.compat.v1.set_random_seed(FLAGS.tf_seed)
# Setup your training.
graph_tensors_and_ops, metric_scores = _create_computation_graph(
x_train, y_train, x_validation, y_validation, batch_size)
train_ops, lr_decay_ops = _get_train_and_lr_decay_ops(
graph_tensors_and_ops, early_stopping)
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver_hooks, model_dirs, best_checkpoint_dirs = _create_graph_saver(
graph_tensors_and_ops, logdir, num_steps_per_epoch)
if FLAGS.debug:
summary_writer = tf.summary.FileWriter(os.path.join(logdir, 'tb_log'))
with tf.train.MonitoredSession(hooks=saver_hooks) as sess:
for n in range(FLAGS.n_models):
sess.run([
graph_tensors_and_ops[n]['iterator_initializer'],
graph_tensors_and_ops[n]['running_vars_initializer']
])
for epoch in range(1, FLAGS.training_epochs + 1):
if not all(early_stopping):
for _ in range(num_steps_per_epoch):
sess.run(train_ops) # Train the network
# Decay the learning rate by a fixed ratio every epoch
sess.run(lr_decay_ops)
else:
tf.logging.info('All models early stopped at epoch %d', epoch)
break
for n in range(FLAGS.n_models):
if early_stopping[n]:
sess.run(increment_global_step)
continue
# Log summaries
if FLAGS.debug:
global_summary, global_step = sess.run([
graph_tensors_and_ops[n]['summary_op'],
graph_tensors_and_ops[n]['global_step']
])
summary_writer.add_summary(global_summary, global_step)
if epoch % FLAGS.save_checkpoint_every_n_epochs == 0:
(curr_best_epoch[n], best_validation_metric[n],
best_train_metric[n]) = _update_metrics_and_checkpoints(
sess, epoch, metric_scores[n], curr_best_epoch[n],
best_validation_metric[n], best_train_metric[n], model_dirs[n],
best_checkpoint_dirs[n], metric_name)
if curr_best_epoch[n] + FLAGS.early_stopping_epochs < epoch:
tf.logging.info('Early stopping at epoch {}'.format(epoch))
early_stopping[n] = True # Set early stopping for model `n`.
train_ops, lr_decay_ops = _get_train_and_lr_decay_ops(
graph_tensors_and_ops, early_stopping)
# Reset running variable counters
sess.run(graph_tensors_and_ops[n]['running_vars_initializer'])
tf.logging.info('Finished training.')
for n in range(FLAGS.n_models):
tf.logging.info(
'Model %d: Best Epoch %d, Individual %s: Train %.4f, Validation %.4f',
n, curr_best_epoch[n], metric_name, best_train_metric[n],
best_validation_metric[n])
return np.mean(best_train_metric), np.mean(best_validation_metric)
def create_test_train_fold(
fold_num
):
"""Splits the dataset into training and held-out test set."""
data_x, data_y, _ = data_utils.load_dataset(FLAGS.dataset_name)
tf.logging.info('Dataset: %s, Size: %d', FLAGS.dataset_name, data_x.shape[0])
tf.logging.info('Cross-val fold: %d/%d', FLAGS.fold_num, _N_FOLDS)
# Get the training and test set based on the StratifiedKFold split
(x_train_all, y_train_all), test_dataset = data_utils.get_train_test_fold(
data_x,
data_y,
fold_num=fold_num,
num_folds=_N_FOLDS,
stratified=not FLAGS.regression)
data_gen = data_utils.split_training_dataset(
x_train_all,
y_train_all,
FLAGS.num_splits,
stratified=not FLAGS.regression)
return data_gen, test_dataset
def single_split_training(data_gen,
logdir):
"""Uses a specific (training, validation) split for NAM training."""
for _ in range(FLAGS.data_split):
(x_train, y_train), (x_validation, y_validation) = next(data_gen)
curr_logdir = os.path.join(logdir, 'fold_{}',
'split_{}').format(FLAGS.fold_num,
FLAGS.data_split)
training(x_train, y_train, x_validation, y_validation, curr_logdir)
def main(argv):
del argv # Unused
tf.logging.set_verbosity(tf.logging.INFO)
data_gen, _ = create_test_train_fold(FLAGS.fold_num)
single_split_training(data_gen, FLAGS.logdir)
if __name__ == '__main__':
flags.mark_flag_as_required('logdir')
flags.mark_flag_as_required('training_epochs')
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
05c65b10c2a21c8337ddb3aec63a2a46093b9be1 | 45a6e5a362c4b895915fc4449fffa22a785549c8 | /rango/migrations/0001_initial.py | 285a3e08cdb102fd52d552430077c9c9f3e2c399 | [] | no_license | yangfan111111/tango_with_django_project_1 | dae22a395f45dcb4eb43075eb46440b08a38bb9c | 35e3d8fa4d52203d9e7764ed2998b60e214c9306 | refs/heads/master | 2020-04-15T15:48:35.869770 | 2019-01-22T16:39:16 | 2019-01-22T16:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-01-22 12:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
],
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rango.Category')),
],
),
]
| [
"2359448y@student.gla.ac.uk"
] | 2359448y@student.gla.ac.uk |
651a75bfc94fabb5d571f9aad6eea05fed745248 | f8addc2b28c4e228cd587ccbb14b5bd62572c5a9 | /DIN_Demo/utils/1_convert_pd.py | cef78455acfa15c677933df6bebafde14f358aff | [] | no_license | minmintemuerzhao/machine-learning-in-action | 17697a36fae0a35d283693c6a5e623b0806d655e | a1163424ef1e8fc10e8dff2e59ba84c575b4bb7b | refs/heads/master | 2022-07-26T04:37:01.248183 | 2021-07-20T06:07:17 | 2021-07-20T06:07:17 | 207,340,015 | 1 | 1 | null | 2022-06-22T04:12:05 | 2019-09-09T15:19:31 | Python | UTF-8 | Python | false | false | 667 | py | import pickle
import pandas as pd
def to_df(file_path):
with open(file_path, 'r') as fin:
df = {}
i = 0
for line in fin:
df[i] = eval(line)
i += 1
df = pd.DataFrame.from_dict(df, orient='index')
return df
reviews_df = to_df('../raw_data/reviews_Electronics_5.json')
with open('../raw_data/reviews.pkl', 'wb') as f:
pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL)
meta_df = to_df('../raw_data/meta_Electronics.json')
meta_df = meta_df[meta_df['asin'].isin(reviews_df['asin'].unique())]
meta_df = meta_df.reset_index(drop=True)
with open('../raw_data/meta.pkl', 'wb') as f:
pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)
| [
"tdzm1351@163.com"
] | tdzm1351@163.com |
cb5e41a729ecdd9fa04a36a655a3095798f99b9f | e8d989faff8bf46559cb3694c9aa2a4adbd9d5ec | /Programa_cliente/cliente.py | d198c2c61fc0999ba2be6ebf2891432283e06d4d | [] | no_license | Gary-Joan/Proyecto1_SOPES1 | 6e1926891c0f79ddcadb54a13e8dec5a8da6c9bb | 9711614f4715ca782175fb7652b1452fb991d75e | refs/heads/master | 2022-12-26T14:09:32.759238 | 2020-10-11T15:09:02 | 2020-10-11T15:09:02 | 296,085,404 | 0 | 0 | null | 2020-09-21T22:01:04 | 2020-09-16T16:15:35 | C | UTF-8 | Python | false | false | 1,448 | py | import nltk
from nltk.tokenize import sent_tokenize
import json
import requests
def main():
opcion = "";
while opcion != "n":
#inicio del programa que pide la ruta del archivo y la direccion del balanceador de google cloud
print("PROGRAMA CLIENTE")
autor = input("Nombre del autor del archivo: ")
ruta_archivo = input("Ingrese ruta del archivo: ")
ruta_balanceador = input("Ingrese IP del balanceador con (https): ")
archivo = open("hello.txt", 'r')
contenido = archivo.read()
#iteramos la lista de oraciones para enviarlos al balanceador
lista_contenido= sent_tokenize(contenido)
for item in lista_contenido:
json_publicacion ={
"autor": autor,
"nota" : item
}
publicacion=json.dumps(json_publicacion)
newHeaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}
try:
rq = requests.post(ruta_balanceador+'/balanceador',data=publicacion,headers=newHeaders)
print(rq.status_code)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
print("Nota Ingresada: " +publicacion)
archivo.close()
opcion = input("Desea enviar otro archivo? (y/n): ")
print("Fin del programa!!")
if __name__ == "__main__":
main() | [
"goritz1490@gmail.com"
] | goritz1490@gmail.com |
1b7d6740019c90cf416b282c274c98311dd776b7 | 7aead390305eaf8b860566ef755bef3f0dd8902a | /importers/insert_closed_user_and_date.py | 656d919dcc3f603930d7f7507af8e1719c0431db | [] | no_license | wikiteams/github-data-tools | ef30fcbcd497207a6e525c32eba12dfc3897a2db | 83c61d20f8bffbc22c6ed2093951c907191fea65 | refs/heads/master | 2020-05-07T22:04:05.289132 | 2015-04-23T10:22:56 | 2015-04-23T10:23:43 | 18,237,643 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | __author__ = 'snipe'
import csv, glob, sys, urllib2
from pymongo import MongoClient
from github import Github
import github, threading, time, math, requests
import dateutil.parser
from lxml import html
class MongoImporter(threading.Thread):
def __init__(self, threadId):
self.threadId = threadId
threading.Thread.__init__(self)
self.daemon = True
self.db = MongoClient(host='localhost', port=27017)
def run(self):
events = list(self.db.wikiteams.events.find({ "web_fetched": False }).limit(100).skip(100* self.threadId))
#events = list(self.db.wikiteams.events.find({}).limit(5000).skip(5000 * self.threadId))
for event in events:
page = requests.get(event['url'], verify=False)
if page.status_code != 200:
print "ommit"
continue
print 'Updating issue: %s' % event['url']
tree = html.fromstring(page.text)
openedDate = tree.xpath('//time[@class = "js-relative-date"]/@datetime')
closedDate = tree.xpath('//div[contains(@class, "discussion-event-status-closed")][last()]//time[@class = "js-relative-date"]/@datetime')
closedAuthor = tree.xpath('//div[contains(@class, "discussion-event-status-closed")][last()]//a[@class = "author"]/text()')
if len(closedDate) == 0 or len(closedAuthor) == 0:
event['web_fetched'] = True
self.db.wikiteams.events.save(event)
continue
dateParsed = dateutil.parser.parse(closedDate[0])
event['web_closed_date'] = dateParsed
event['web_closed_author'] = closedAuthor[0]
event['web_fetched'] = True
self.db.wikiteams.events.save(event)
if __name__ == "__main__":
threads = []
db = MongoClient(host='localhost', port=27017)
eventsCount = db.wikiteams.events.count()
threadsCount = int(math.ceil(float(eventsCount) / 100))
for num in xrange(0, threadsCount):
threads.append(MongoImporter(num).start())
while True:
time.sleep(10)
| [
"blazej@gruszka.info"
] | blazej@gruszka.info |
0c84a9d6e3298e137bf520780a4fa47a312b78ad | 2324d8e4544a9b813153ce0ed0f858972ea7f909 | /135-分发糖果.py | fc857a4ba5410fc1316af0a1170fd5c03458002d | [] | no_license | Terry-Ma/Leetcode | af8a4ad8059975f8d12b0351610336f1f5f01097 | cc7f41e2fb3ed5734c2a5af97e49a5bc17afbceb | refs/heads/master | 2021-08-10T16:40:20.482851 | 2021-07-03T08:35:56 | 2021-07-03T08:35:56 | 225,814,239 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | class Solution:
def candy(self, ratings: List[int]) -> int:
left = [1] * len(ratings)
right = 1
for i in range(1, len(ratings)):
if ratings[i] > ratings[i - 1]:
left[i] = left[i - 1] + 1
res = left[-1]
for i in range(len(ratings) - 2, -1, -1):
if ratings[i] > ratings[i + 1]:
right += 1
else:
right = 1
res += max(right, left[i])
return res
| [
"rssmyq@aliyun.com"
] | rssmyq@aliyun.com |
a8aa4e97e33b881a9b38f21ae5f3aae49ea25d5b | 706e5ca651b133e1b4cdd394ce5dfecb1894047a | /apps/books/models.py | 8ad333df7ac5c0f6d63dd43c0d09b38fc072261a | [] | no_license | ethanyjoh/Python-Belt-Review | cd37dd25e0f510fd6911226710494d2bbaa61105 | a2b2339f40ddca8c05ee7934ad3441822e92f6c9 | refs/heads/master | 2021-08-17T11:15:02.512386 | 2017-11-21T04:18:11 | 2017-11-21T04:18:11 | 111,498,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
import bcrypt
class UserManager(models.Manager):
def register(self, datafromhtml):
errors = []
if(len(datafromhtml['name']) < 2):
errors.append("Your name should be at least 2 characters")
if(len(datafromhtml['password']) < 8):
errors.append("Your password should be at least 8 characters")
if(datafromhtml['password'] != datafromhtml['password_confirm']):
errors.append("Your password and you password confirmation must match")
try:
validate_email(datafromhtml['email'])
except ValidationError as e:
errors.append("your email must be in a valid format")
if errors:
return {'err_messages': errors}
else:
hash_password = bcrypt.hashpw(datafromhtml['password'].encode(), bcrypt.gensalt())
user = User.objects.create(name=datafromhtml['name'], email=datafromhtml['email'], password=hash_password)
return {'new_user': user}
def login(self, datafromhtml):
try:
user = User.objects.get(email=datafromhtml['email'])
if bcrypt.checkpw(datafromhtml['password'].encode(), user.password.encode()):
return {'logged_user': user}
else:
return {'err_messages': ['Email/Password invalid. Please try again']}
except:
return {'err_messages': ['Email you have entered does not exists. Please register your email']}
class BookManager(models.Manager):
def add_book_review(self, datafromhtml, user_id):
if len(datafromhtml['new_author']) > 1:
author = Author.objects.create(name=datafromhtml['new_author'])
else:
author = Author.objects.get(id=datafromhtml['author_id'])
new_book = self.create(title=datafromhtml['title'], author=author)
user = User.objects.get(id=user_id)
new_review = Review.objects.create(review=datafromhtml['review'], rating=datafromhtml['rating'], user=user, book=new_book)
return {'new_book': new_book}
class User(models.Model):
name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class Author(models.Model):
name = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author, related_name='books')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = BookManager()
class Review(models.Model):
review = models.TextField()
rating = models.IntegerField()
user = models.ForeignKey(User, related_name='reviews')
book = models.ForeignKey(Book, related_name='reviews')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| [
"ethanyjoh@gmail.com"
] | ethanyjoh@gmail.com |
3b4f12988d20c1ed2dcb52b45dc31e95746f5a24 | c63539f299cb889ac570bccbb49709621ff7b755 | /python_algorithm/dictionary.py | 76b97374eb267d319b1f082eaa52460255ad5d90 | [] | no_license | CPUDoHyeong/Python | efcb5f458b8db063d244db2a9475fe6244baa458 | 36aebff110455c2465668feaaddda9b1a4555b27 | refs/heads/master | 2023-01-08T23:28:58.363310 | 2020-11-20T05:13:00 | 2020-11-20T05:13:00 | 281,676,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # 딕셔너리 테스트
# 딕셔너리 생성
info = {
1 : 'Kim',
2 : 'Lee',
3 : 'Park'
}
# 빈 딕셔너리 생성
d = dict()
e = {}
# 원소 추가
info[4] = 'Yu'
# 원소 삭제
del info[4]
# 길이 확인
print(len(info))
# 키값이 딕셔너리에 있는지 확인
print(1 in info)
print(4 in info)
# in과 반대
print(4 not in info)
# 모든 자료 삭제
info.clear()
print(info)
| [
"kdh0074@naver.com"
] | kdh0074@naver.com |
81ad5b3571300ae6ab88c2e3561e8bc2183a6c4b | e71d50a2399128f3927a26a0348d5a00ee7142d6 | /superblog/manage.py | 8949eb9af46b409dac91167f1c99f02db73ab6ba | [] | no_license | lorenzobianchi/superblog | a947a7e1f30c65eadef76e58f42f2a8b41f8bd4f | 5dea4eacdecda7cfc1c553024174984b0b5a8b59 | refs/heads/master | 2021-09-11T16:31:28.888417 | 2018-04-09T17:23:35 | 2018-04-09T17:23:35 | 111,206,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superblog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"lorenzo.bianchi@inmagik.com"
] | lorenzo.bianchi@inmagik.com |
f61caaf7302bda93ce12e0e98e8ec61ca87ffdfc | cde11aea86ce9e1e370b02fb14553358b4aaab8b | /practice/hard/_51_disk_stacking.py | 69b5dbd133b93eaa83f31887401f81f3562c17be | [] | no_license | pavankumarag/ds_algo_problem_solving_python | 56f9a2bb64dd62f16028c3f49a72542b8588369a | cbd323de31f2f4a4b35334ce3249bb3e9525dbf8 | refs/heads/master | 2023-06-21T20:29:41.317005 | 2023-06-10T18:11:39 | 2023-06-10T18:11:39 | 223,919,558 | 2 | 1 | null | 2023-06-10T18:11:40 | 2019-11-25T10:16:27 | Python | UTF-8 | Python | false | false | 780 | py | """
Tower of Honoi
we have three rods and n disks the objective of the puzzle is to move the entire stack to another rod,
obeying the following simple rules:
1) Only one disk can be moved at a time.
2) Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack i.e.
a disk can only be moved if it is the uppermost disk on a stack.
3) No disk may be placed on top of a smaller disk.
"""
def tower_of_honoi(n, from_rod, to_rod, aux_rod):
if n == 1:
print "Move disk 1 from ", from_rod, "to ", to_rod
return
tower_of_honoi(n-1, from_rod, aux_rod, to_rod)
print "Move disk",n, "from ", from_rod, "to ", to_rod
tower_of_honoi(n-1, aux_rod, to_rod, from_rod)
if __name__ == "__main__":
n = 4
tower_of_honoi(n, 'A', 'C', 'B') | [
"pavan.govindraj@nutanix.com"
] | pavan.govindraj@nutanix.com |
ec25fbfa0846875e29b7c321050a45e0d6c05ffb | 65e54ca14ac21d2c2572ba35ba351df5903cb667 | /src/petronia/core/layout/binding/bootstrap.py | 7fc0858f4fa68c43fe1e660bdcc50a8a0f177cf0 | [
"MIT"
] | permissive | groboclown/petronia | 29b93e88b82d2732bb529621ad8bff50334d36b9 | 486338023d19cee989e92f0c5692680f1a37811f | refs/heads/master | 2022-07-25T10:08:58.468385 | 2020-01-23T14:59:03 | 2020-01-23T14:59:03 | 71,741,212 | 22 | 3 | NOASSERTION | 2022-07-13T15:27:32 | 2016-10-24T01:30:01 | Python | UTF-8 | Python | false | false | 7,016 | py |
"""
Bootstrap the hotkey bindings for the layout events.
"""
from typing import List
from ....aid.std import i18n as _
from ....aid.std import (
EventBus,
EventId,
ParticipantId,
ErrorReport,
report_error,
create_user_error,
)
from ....aid.bootstrap import (
ANY_VERSION,
create_singleton_identity,
)
from ....aid.lifecycle import create_module_listener_helper
from ....base.internal_.internal_extension import petronia_extension
from ....base.util.simple_type import (
PersistTypeSchemaItem,
PERSISTENT_TYPE_SCHEMA_NAME__DOC,
PERSISTENT_TYPE_SCHEMA_TYPE__BOOL,
PERSISTENT_TYPE_SCHEMA_TYPE__STR,
PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT,
optional_str, optional_int, optional_bool,
collect_errors,
)
from ...hotkeys.api import (
HotkeyEventTriggeredEvent,
BoundServiceActionSchema,
as_hotkey_event_triggered_listener,
)
from ..tile.api import (
RequestMoveResizeFocusedWindowEvent,
send_request_move_resize_focused_window_event,
RequestShiftLayoutFocusEvent,
send_request_shift_layout_focus_event,
RequestSetFocusedWindowVisibilityEvent,
send_request_set_window_visibility_event,
)
from ..window.api import (
)
from ..navigation.api import (
)
TARGET_ID_LAYOUT_HOTKEYS = create_singleton_identity("core.layout.binding")
HOTKEY_ACTION_MOVE_ACTIVE = 'move-active'
HOTKEY_ACTION_SHIFT_FOCUS = 'shift-focus'
HOTKEY_ACTION_SET_VISIBILITY = 'set-visible'
def bootstrap_layout_handlers(bus: EventBus) -> None:
    """Register the layout hotkey handler and publish its action schemas.

    The handler translates hotkey-triggered events addressed to
    ``TARGET_ID_LAYOUT_HOTKEYS`` into layout requests on the event bus:

    * ``move-active`` - move/resize/re-order the focused window by the
      ``dx``/``dy``/``dw``/``dh``/``dz`` deltas.
    * ``shift-focus`` - shift the layout focus by ``name`` and ``index``.
    * ``set-visible`` - show or hide the focused window per ``visible``.

    Invalid or missing parameters fall back to 0 / '' / False; the
    corresponding user errors are collected and reported on the bus after
    the request has been sent.
    """
    listeners = create_module_listener_helper(bus, TARGET_ID_LAYOUT_HOTKEYS)

    def handler(
            _event_id: EventId,
            _target_id: ParticipantId,
            event_obj: HotkeyEventTriggeredEvent
    ) -> None:
        errors: List[ErrorReport] = []
        # -------------------------------------------------------------------
        if event_obj.data.action == HOTKEY_ACTION_MOVE_ACTIVE:
            dx = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'dx',
                lambda: create_user_error(handler, _('"dx" must be a number'))
            )) or 0
            dy = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'dy',
                lambda: create_user_error(handler, _('"dy" must be a number'))
            )) or 0
            dw = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'dw',
                lambda: create_user_error(handler, _('"dw" must be a number'))
            )) or 0
            dh = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'dh',
                lambda: create_user_error(handler, _('"dh" must be a number'))
            )) or 0
            dz = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'dz',
                lambda: create_user_error(handler, _('"dz" must be a number'))
            )) or 0
            send_request_move_resize_focused_window_event(bus, dx, dy, dw, dh, dz)
        # -------------------------------------------------------------------
        elif event_obj.data.action == HOTKEY_ACTION_SHIFT_FOCUS:
            name = collect_errors(errors, optional_str(
                event_obj.data.parameters, 'name',
                lambda: create_user_error(handler, _('"name" must be a string'))
            )) or ''
            index = collect_errors(errors, optional_int(
                event_obj.data.parameters, 'index',
                lambda: create_user_error(handler, _('"index" must be a number'))
            )) or 0
            # FIX: removed a leftover debug print of the raw hotkey parameters.
            send_request_shift_layout_focus_event(bus, name, index)
        # -------------------------------------------------------------------
        elif event_obj.data.action == HOTKEY_ACTION_SET_VISIBILITY:
            visible = collect_errors(errors, optional_bool(
                event_obj.data.parameters, 'visible',
                lambda: create_user_error(handler, _('"visible" must be true or false'))
            )) or False
            send_request_set_window_visibility_event(bus, visible)
        # Report parameter problems after dispatch; defaults were substituted.
        for error in errors:
            report_error(bus, error)
    listeners.listen(TARGET_ID_LAYOUT_HOTKEYS, as_hotkey_event_triggered_listener, handler)
    # Advertise the parameter schema of each action so hotkey bindings can be
    # validated and configured by the hotkey extension.
    listeners.bind_hotkey(
        BoundServiceActionSchema(
            TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_MOVE_ACTIVE, {
                PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
                    RequestMoveResizeFocusedWindowEvent.__doc__ or '',
                    PERSISTENT_TYPE_SCHEMA_TYPE__STR
                ),
                "dx": PersistTypeSchemaItem(
                    "Change in window x position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
                "dy": PersistTypeSchemaItem(
                    "Change in window y position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
                "dw": PersistTypeSchemaItem(
                    "Change in window width (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
                "dh": PersistTypeSchemaItem(
                    "Change in window height (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
                "dz": PersistTypeSchemaItem(
                    "Change in window z-order (focus)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
            }
        )
    )
    listeners.bind_hotkey(
        BoundServiceActionSchema(
            TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SHIFT_FOCUS, {
                PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
                    RequestShiftLayoutFocusEvent.__doc__ or '',
                    PERSISTENT_TYPE_SCHEMA_TYPE__STR
                ),
                "name": PersistTypeSchemaItem(
                    "Layout focus shift name", PERSISTENT_TYPE_SCHEMA_TYPE__STR
                ),
                "index": PersistTypeSchemaItem(
                    "Layout focus shift index", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
                ),
            }
        )
    )
    listeners.bind_hotkey(
        BoundServiceActionSchema(
            TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SET_VISIBILITY, {
                PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
                    RequestSetFocusedWindowVisibilityEvent.__doc__ or '',
                    PERSISTENT_TYPE_SCHEMA_TYPE__STR
                ),
                "visible": PersistTypeSchemaItem(
                    "True to make the window visible, False to make it hidden", PERSISTENT_TYPE_SCHEMA_TYPE__BOOL
                ),
            }
        )
    )
# Registers this module with the Petronia extension loader, declaring its
# dependencies on the hotkey and layout APIs (any version accepted).
EXTENSION_METADATA = petronia_extension({
    "name": "core.layout.binding",
    "type": "standalone",
    "version": (1, 0, 0,),
    "depends": ({
        "extension": "core.hotkeys.api",
        "minimum": ANY_VERSION,
    }, {
        "extension": "core.layout.api",
        "minimum": ANY_VERSION,
    },),
})
| [
"matt@groboclown.net"
] | matt@groboclown.net |
fccfc6819c6bf1890cfcab359444b4d829be2cc7 | ae3d328a6dcd2596ce39601138cdb361208d174e | /backtranslate.py | d4bcd7389b446801b0ee6a4d8499126a9e15e6d8 | [
"MIT"
] | permissive | alexFocus92/spainAI_hackathon_2020_pln | 31a13b8737342b4e6ae4ddccb380af8e30f2784d | 6d09896cbe67939f5732d25f665c459e880dc710 | refs/heads/main | 2023-04-18T18:01:21.929901 | 2021-05-02T14:33:02 | 2021-05-02T14:33:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | """
Generate a backtranslated version of the data
"""
import argparse
from BackTranslation import BackTranslation
from data import load_full_train
import pandas as pd
from spacy.lang.en import English
from time import sleep
from tqdm import tqdm
def backtranslate_dataset(output_file, source_lang="en", target_lang="es"):
    """Backtranslate every description in the full training dataset and
    write the augmented copy to ``output_file`` as CSV.

    :param output_file: path of the CSV file to write
    :param source_lang: language the descriptions are written in
    :param target_lang: pivot language used for the round-trip translation
    """
    print(f"Backtranslating from {source_lang} to {target_lang}")
    full = load_full_train()
    backtranslated = backtranslate_texts(full["description"], source_lang, target_lang)
    # Names are kept as-is; only the descriptions are augmented.  The
    # original index is preserved so rows stay aligned with the source data.
    backtranslated_df = pd.DataFrame(
        index=full.index,
        data={
            "name": full["name"],
            "description": backtranslated
        }
    )
    backtranslated_df.to_csv(output_file, index=False)
def backtranslate_texts(texts, source_lang="en", target_lang="es"):
    """Backtranslate an iterable of texts between two languages.

    Each text is split into sentences with spaCy's sentencizer; every
    sentence is translated source -> target -> source and the lower-cased
    results are re-joined with single spaces.  Failed translation calls
    (rate limits, transient network errors) are retried after a one-second
    pause until they succeed.

    :param texts: iterable of strings to backtranslate
    :param source_lang: language the texts are written in
    :param target_lang: pivot language for the round trip
    :return: list of backtranslated strings, one per input text
    """
    nlp = English()
    nlp.add_pipe(nlp.create_pipe('sentencizer'))
    backtranslator = BackTranslation()
    backtranslations = []
    for text in tqdm(texts):
        # Split text into sentences
        sentences = [sent.string.strip() for sent in nlp(text).sents]
        # Back translate each sentence, retrying until the service responds.
        sentences_backtranslations = []
        for sentence in sentences:
            translated = False
            while not translated:
                try:
                    backtranslation = backtranslator.translate(sentence, src=source_lang, tmp=target_lang)
                    sentences_backtranslations.append(backtranslation.result_text.lower())
                    translated = True
                except Exception:
                    # FIX: was a bare 'except:', which also caught
                    # KeyboardInterrupt/SystemExit and made this retry loop
                    # impossible to interrupt.  Retry only on ordinary errors.
                    sleep(1)
        # Join backtranslations
        backtranslations.append(" ".join(sentences_backtranslations))
    return backtranslations
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generates backtranslated samples for the full training dataset')
parser.add_argument('output_file', type=str, help='output file in which to write backtranslated texts')
parser.add_argument('--source_lang', type=str, default="en", help='source language')
parser.add_argument('--target_lang', type=str, default="es", help='target language')
args = parser.parse_args()
backtranslate_dataset(args.output_file, args.source_lang, args.target_lang)
| [
"albarjip@gmail.com"
] | albarjip@gmail.com |
c71122287edf1d8c87b1091ce0bfc95cfae5c5c7 | 9710283202232b782d7015d1f46622fd99dc544f | /Desafio080.py | 1bfab258ca808216d01cabe5a557c12875b585ca | [
"MIT"
] | permissive | sidneyalex/Desafios-do-Curso | f48cdfafa6c03f694d63ff62670a3f76d83cf52c | 11605caf59fb8b456adaca78a41eae2f7469ab7b | refs/heads/main | 2023-06-02T12:58:24.941287 | 2021-06-18T21:10:14 | 2021-06-18T21:10:14 | 341,619,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | #Crie um pgm onde o usuario possa digitar cinco valores numericos e cadastre-os em uma lista, já na posição correta de inserção(sem usar o sort()).
# Exercise: read five numeric values and insert each one into a list at its
# correct sorted position (without using sort()); finally, print the sorted
# list.
lista = []
for c in range(0, 5):
    num = int(input('Digite um valor: '))
    # Empty list or a new maximum: the value simply goes at the end.
    if len(lista) == 0 or num >= lista[-1]:
        print('Valor adicionado ao final da lista')
        lista.append(num)
    else:
        # Otherwise find the index just after the last element <= num and
        # insert there, keeping the list sorted.
        p = 0
        for i, v in enumerate(lista):
            if v <= num:
                p = i + 1
        print(f'Valor adicionado a posição {p}')
        lista.insert(p,num)
print(f'{lista}')
| [
"79478176+sidneyalex@users.noreply.github.com"
] | 79478176+sidneyalex@users.noreply.github.com |
175c097af091e42bd0783bc4942604c52d49b01d | 7b234b4f6074f6aaedaf892d2759db74d910c860 | /Widgets/Buttons/RubberBandButton.py | 2bbc713d9c0ec8637be900f97b3964c2d5c2c1ca | [] | no_license | mazhou/pythonApp | 4cb7847bdb640c983effe58dc2c8d515e002efbd | b7d73f5a829f4fc53e8d33d90c198579dd29e16a | refs/heads/master | 2020-06-09T01:38:37.580270 | 2019-06-23T13:38:22 | 2019-06-23T13:38:22 | 193,344,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,925 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtCore import Qt, pyqtProperty, QRectF, QPropertyAnimation, \
QEasingCurve, QParallelAnimationGroup
from PyQt5.QtGui import QPainter, QColor
from PyQt5.QtWidgets import QPushButton, QStylePainter, QStyleOptionButton, \
QStyle
class RubberBandButton(QPushButton):
    """Flat circular push button with a 'rubber band' hover effect.

    On mouse-enter, the circular background briefly stretches wider and
    squashes shorter (an elastic keyframe animation) before settling back
    to its resting size.  The background colour is exposed as the
    ``bgColor`` Qt property so it can be set from style sheets.
    """

    def __init__(self, *args, **kwargs):
        super(RubberBandButton, self).__init__(*args, **kwargs)
        self.setFlat(True)
        self.setCursor(Qt.PointingHandCursor)
        # Current ellipse size; initialised lazily in _initAnimate() on the
        # first paint, once the widget's minimum size is known.
        self._width = 0
        self._height = 0
        self._bgcolor = QColor(Qt.green)

    def paintEvent(self, event):
        """Draw the animated circular background, then the text and icon."""
        self._initAnimate()
        painter = QStylePainter(self)
        painter.setRenderHint(QPainter.Antialiasing, True)
        painter.setRenderHint(QPainter.HighQualityAntialiasing, True)
        painter.setRenderHint(QPainter.SmoothPixmapTransform, True)
        painter.setBrush(QColor(self._bgcolor))
        painter.setPen(QColor(self._bgcolor))
        # The ellipse stays centred while _width/_height are animated.
        painter.drawEllipse(QRectF(
            (self.minimumWidth() - self._width) / 2,
            (self.minimumHeight() - self._height) / 2,
            self._width,
            self._height
        ))
        # Draw the button's own text and icon.
        options = QStyleOptionButton()
        options.initFrom(self)
        size = options.rect.size()
        size.transpose()
        options.rect.setSize(size)
        options.features = QStyleOptionButton.Flat
        options.text = self.text()
        options.icon = self.icon()
        options.iconSize = self.iconSize()
        painter.drawControl(QStyle.CE_PushButton, options)
        event.accept()

    def _initAnimate(self):
        """Build the stretch/squash animation group (runs once, lazily)."""
        if hasattr(self, '_animate'):
            return
        # Resting ellipse size: 7/8 of the widget's minimum size.
        self._width = self.minimumWidth() * 7 / 8
        self._height = self.minimumHeight() * 7 / 8
        # Width animation: bulge out by 6px at the midpoint, then snap back
        # with an elastic overshoot.
        wanimate = QPropertyAnimation(self, b'rWidth')
        wanimate.setEasingCurve(QEasingCurve.OutElastic)
        wanimate.setDuration(700)
        wanimate.valueChanged.connect(self.update)
        wanimate.setKeyValueAt(0, self._width)
        wanimate.setKeyValueAt(0.5, self._width + 6)
        wanimate.setKeyValueAt(1, self._width)
        # Height animation: squash in by 6px in counterpoint to the width.
        hanimate = QPropertyAnimation(self, b'rHeight')
        hanimate.setEasingCurve(QEasingCurve.OutElastic)
        hanimate.setDuration(700)
        hanimate.setKeyValueAt(0, self._height)
        hanimate.setKeyValueAt(0.5, self._height - 6)
        hanimate.setKeyValueAt(1, self._height)
        # Run both dimensions in parallel.
        self._animate = QParallelAnimationGroup(self)
        self._animate.addAnimation(wanimate)
        self._animate.addAnimation(hanimate)

    def enterEvent(self, event):
        """Restart the rubber-band animation each time the mouse enters."""
        super(RubberBandButton, self).enterEvent(event)
        self._animate.stop()
        self._animate.start()

    # Animated ellipse width (driven by QPropertyAnimation).
    @pyqtProperty(int)
    def rWidth(self):
        return self._width

    @rWidth.setter
    def rWidth(self, value):
        self._width = value

    # Animated ellipse height (driven by QPropertyAnimation).
    @pyqtProperty(int)
    def rHeight(self):
        return self._height

    @rHeight.setter
    def rHeight(self, value):
        self._height = value

    # Background colour of the circle, settable from style sheets.
    @pyqtProperty(QColor)
    def bgColor(self):
        return self._bgcolor

    @bgColor.setter
    def bgColor(self, color):
        self._bgcolor = QColor(color)
"mazhoumz@gmail.com"
] | mazhoumz@gmail.com |
50340cd165507a44fb3215b1b15bb7aa5d395f98 | 92acd45ae30d75ef24d0ec57be89b85675cd0fc8 | /class_1_exercises.py | 1d700608357b6107e9946a3fb16a2bb5df8b9a48 | [] | no_license | nogayair/nogas_repo | 9f472c812219282c30cb8df4ecfd22c905da8c66 | 264840c3678bf27a23e3c25156170f48325af184 | refs/heads/master | 2021-03-03T19:20:55.713144 | 2020-04-01T12:52:25 | 2020-04-01T12:52:25 | 245,981,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | letters=['a','b','c']
# Exercise 1: substitution cipher — map each letter through the table.
replace = {'a': 'd', 'b': 'e', 'c': 'f'}
enigmad = [replace[letter] for letter in letters]
print(enigmad)

# Exercise 2: split a list into two halves; for an odd length the first
# half receives the extra element.
numbers = [1, 2, 3, 4, 5, 6, 7]
midpoint = (len(numbers) + 1) // 2  # == len//2 when even, len//2 + 1 when odd
list1 = numbers[:midpoint]
list2 = numbers[midpoint:]
print(list1)
print(list2)
| [
"nogayair23@gmail.com"
] | nogayair23@gmail.com |
093cbcd2ffcfb9696d478edb2b5115379409b091 | 0b4e81296d111f336f7857bc1763c89741ae903e | /policy.py | b28418af1a0919b7656e09b640c65778bafeab7a | [] | no_license | arunrajansharma/HistoryBasedPolicyEvaluation | 733625b45be638e612b0d5bbb4fd340363c0d708 | f4a426110c4ae85aa8d754c09eed4a9c26950268 | refs/heads/master | 2020-01-23T21:49:50.491502 | 2017-07-17T14:22:05 | 2017-07-17T14:22:05 | 74,741,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,302 | py |
import da
# Compiled DistAlgo message-pattern objects.  Each PatternExpr_* constant is
# generated by the DistAlgo compiler from a 'receive' pattern in the original
# .da source; the numeric suffixes are compiler-assigned.  They are matched
# against incoming messages by the event tables registered in the process
# classes below.  Do not edit by hand.
PatternExpr_346 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_351 = da.pat.FreePattern('p')
PatternExpr_370 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_375 = da.pat.FreePattern('p')
PatternExpr_387 = da.pat.TuplePattern([da.pat.ConstantPattern('policyDecisionFromSub_Co'), da.pat.FreePattern('decision')])
PatternExpr_394 = da.pat.FreePattern('sub_coord_id')
PatternExpr_461 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_466 = da.pat.FreePattern('p')
PatternExpr_485 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_490 = da.pat.FreePattern('p')
PatternExpr_502 = da.pat.TuplePattern([da.pat.ConstantPattern('evalRequestFromApp'), da.pat.FreePattern('req')])
PatternExpr_509 = da.pat.FreePattern('app_id')
PatternExpr_633 = da.pat.TuplePattern([da.pat.ConstantPattern('decisionFromWorker'), da.pat.FreePattern('req')])
PatternExpr_640 = da.pat.FreePattern('w_id')
PatternExpr_830 = da.pat.TuplePattern([da.pat.ConstantPattern('conflictEvalReplyFromRes_Co'), da.pat.FreePattern('conflict_decision'), da.pat.FreePattern('req')])
PatternExpr_839 = da.pat.FreePattern('res_coord_id')
PatternExpr_1016 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_1021 = da.pat.FreePattern('p')
PatternExpr_1040 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_1045 = da.pat.FreePattern('p')
PatternExpr_1057 = da.pat.TuplePattern([da.pat.ConstantPattern('evalRequestFromSub_Co'), da.pat.FreePattern('req')])
PatternExpr_1064 = da.pat.FreePattern('sub_coord_id')
PatternExpr_1132 = da.pat.TuplePattern([da.pat.ConstantPattern('conflictEvalRequestFromSub_Co'), da.pat.FreePattern('req')])
PatternExpr_1139 = da.pat.FreePattern('sub_coord_id')
PatternExpr_1270 = da.pat.TuplePattern([da.pat.ConstantPattern('subAttrsFromDB'), da.pat.FreePattern('sub_attr_dict')])
PatternExpr_1276 = da.pat.FreePattern('dbEmulator')
PatternExpr_1296 = da.pat.TuplePattern([da.pat.ConstantPattern('resAttrsFromDB'), da.pat.FreePattern('res_attr_dict')])
PatternExpr_1302 = da.pat.FreePattern('dbEmulator')
PatternExpr_1672 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_1677 = da.pat.FreePattern('p')
PatternExpr_1696 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_1701 = da.pat.FreePattern('p')
PatternExpr_1713 = da.pat.TuplePattern([da.pat.ConstantPattern('evalRequestFromRes_Co'), da.pat.FreePattern('req')])
PatternExpr_1720 = da.pat.FreePattern('res_coord_id')
PatternExpr_1837 = da.pat.TuplePattern([da.pat.ConstantPattern('updateFromDB'), da.pat.FreePattern('sub_attr_dict'), da.pat.FreePattern('res_attr_dict')])
PatternExpr_1844 = da.pat.FreePattern('dbEmulator')
PatternExpr_2110 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_2115 = da.pat.FreePattern('p')
PatternExpr_2134 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_2139 = da.pat.FreePattern('Master')
PatternExpr_2167 = da.pat.TuplePattern([da.pat.ConstantPattern('getSubAttrs'), da.pat.FreePattern('sub_attrs'), da.pat.FreePattern('sub_id')])
PatternExpr_2176 = da.pat.FreePattern('w_id')
PatternExpr_2196 = da.pat.TuplePattern([da.pat.ConstantPattern('getResAttrs'), da.pat.FreePattern('res_attrs'), da.pat.FreePattern('res_id')])
PatternExpr_2205 = da.pat.FreePattern('w_id')
PatternExpr_2225 = da.pat.TuplePattern([da.pat.ConstantPattern('updateSubAttrs'), da.pat.FreePattern('sub_attrs'), da.pat.FreePattern('sub_id')])
PatternExpr_2234 = da.pat.FreePattern('sub_coord_id')
PatternExpr_2298 = da.pat.TuplePattern([da.pat.ConstantPattern('updateResAttrs'), da.pat.FreePattern('res_attrs'), da.pat.FreePattern('res_id')])
PatternExpr_2307 = da.pat.FreePattern('res_coord_id')
PatternExpr_2825 = da.pat.TuplePattern([da.pat.ConstantPattern('okay')])
PatternExpr_2830 = da.pat.FreePattern('p')
PatternExpr_2866 = da.pat.TuplePattern([da.pat.ConstantPattern('okay')])
PatternExpr_2871 = da.pat.FreePattern('p')
# Compiler-generated module configuration placeholder.
_config_object = {}
import sys
import time
import random
import logging
import configparser
import xml.etree.ElementTree as ET
class Request():
    """An access-control evaluation request routed between coordinators.

    Carries the subject/resource identifiers, the attribute values read so
    far (tentative state, coordinator cache, or database), and the
    attribute updates (obligations) produced by policy evaluation.
    """

    def __init__(self, sub_id, res_id, h, sub_attrs=None, res_attrs=None, action=None):
        # Identity of the request and the parties involved.
        self.sub_id = sub_id
        self.res_id = res_id
        self.action = action
        self.hashMap = h            # maps subject/resource ids to coordinator processes
        self.app_id = None          # filled in by the subject coordinator on first receipt
        self.dbEmulator = None      # database emulator process; set by the application
        # Attribute names/values supplied with the request.
        self.sub_attrs = sub_attrs
        self.res_attrs = res_attrs
        # Bookkeeping populated as the request moves through evaluation.
        self.timestamp = time.time()
        self.attrs_read_from_tent = {}
        self.attrs_read_from_cache = {}
        self.sub_attrs_for_policy_eval = {}
        self.sub_attrs_to_update = []
        self.res_attrs_to_update = []
class Rule():
    """A single access-control rule parsed from the XML policy file.

    Holds the subject/resource conditions under which the rule applies,
    the action it governs, and the attribute updates (obligations) to
    apply when it fires.
    """

    def __init__(self, rulename, sub_id, res_id, action, sub_attrs, res_attrs, sub_attrs_to_update, res_attrs_to_update):
        self.rulename = rulename
        self.sub_id = sub_id
        self.res_id = res_id
        self.action = action
        self.sub_attrs = sub_attrs
        self.res_attrs = res_attrs
        self.sub_attrs_to_update = sub_attrs_to_update
        self.res_attrs_to_update = res_attrs_to_update
        self.dbEmulator = None  # never set at construction time
class Application(da.DistProcess):
    """Compiled DistAlgo process: the client application.

    Sends a single policy-evaluation request to the subject coordinator
    that owns the request's subject (looked up in ``req.hashMap``) and then
    waits for the decision or a 'done' shutdown message.
    """

    def __init__(self, procimpl, props):
        super().__init__(procimpl, props)
        self._ApplicationReceivedEvent_0 = []
        # Compiler-generated event table: slot 0 records 'done' history for
        # the await in run(); the other entries dispatch to the handlers
        # below.
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_ApplicationReceivedEvent_0', PatternExpr_346, sources=[PatternExpr_351], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_ApplicationReceivedEvent_1', PatternExpr_370, sources=[PatternExpr_375], destinations=None, timestamps=None, record_history=None, handlers=[self._Application_handler_369]), da.pat.EventPattern(da.pat.ReceivedEvent, '_ApplicationReceivedEvent_2', PatternExpr_387, sources=[PatternExpr_394], destinations=None, timestamps=None, record_history=None, handlers=[self._Application_handler_386])])
    def setup(self, hashMap, sub_co, res_co, master_id, dbEmulator, req):
        # Record configuration in the compiled per-process state object.
        self._state.hashMap = hashMap
        self._state.sub_co = sub_co
        self._state.res_co = res_co
        self._state.master_id = master_id
        self._state.dbEmulator = dbEmulator
        self._state.req = req
        pass
    def run(self):
        # Attach the DB emulator to the request and route it to the subject
        # coordinator responsible for this subject id.
        self._state.req.dbEmulator = self._state.dbEmulator
        self.output(('Application sending Req to Sub_Co ' + str(self._state.req.hashMap[self._state.req.sub_id])))
        self._send(('evalRequestFromApp', self._state.req), self._state.req.hashMap[self._state.req.sub_id])
        # Compiled form of a DistAlgo 'await received done' busy-wait loop.
        super()._label('_st_label_341', block=False)
        _st_label_341 = 0
        while (_st_label_341 == 0):
            _st_label_341 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern362_,)) in self._ApplicationReceivedEvent_0 if (_ConstantPattern362_ == 'done')]) == 1):
                _st_label_341 += 1
            else:
                super()._label('_st_label_341', block=True)
                _st_label_341 -= 1
    def _Application_handler_369(self, p):
        # ('done',): orderly shutdown notification.
        self.output((str(self.id) + ' shutting Down'))
    _Application_handler_369._labels = None
    _Application_handler_369._notlabels = None
    def _Application_handler_386(self, decision, sub_coord_id):
        # ('policyDecisionFromSub_Co', decision): final evaluation outcome.
        # On success, acknowledge to the master process.
        self.output(('Received Policy decision from Sub_Co at App-> ' + str(self.id)))
        if (decision == 'Success'):
            self.output('Policy Evaluated')
            self._send(('okay',), self._state.master_id)
    _Application_handler_386._labels = None
    _Application_handler_386._notlabels = None
class Sub_Co(da.DistProcess):
    """Compiled DistAlgo process: the subject coordinator.

    Receives evaluation requests from applications, enriches them with
    tentatively-updated and cached subject attributes, forwards them to
    the resource coordinator, applies subject-side obligations from the
    worker's decision (restarting on subject conflicts), and commits
    updates to the DB once the resource coordinator reports no conflict.
    """

    def __init__(self, procimpl, props):
        super().__init__(procimpl, props)
        self._Sub_CoReceivedEvent_0 = []
        # Compiler-generated event table; slot 0 keeps 'done' history for
        # the await in run(), the rest dispatch to the handlers below.
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_Sub_CoReceivedEvent_0', PatternExpr_461, sources=[PatternExpr_466], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Sub_CoReceivedEvent_1', PatternExpr_485, sources=[PatternExpr_490], destinations=None, timestamps=None, record_history=None, handlers=[self._Sub_Co_handler_484]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Sub_CoReceivedEvent_2', PatternExpr_502, sources=[PatternExpr_509], destinations=None, timestamps=None, record_history=None, handlers=[self._Sub_Co_handler_501]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Sub_CoReceivedEvent_3', PatternExpr_633, sources=[PatternExpr_640], destinations=None, timestamps=None, record_history=None, handlers=[self._Sub_Co_handler_632]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Sub_CoReceivedEvent_4', PatternExpr_830, sources=[PatternExpr_839], destinations=None, timestamps=None, record_history=None, handlers=[self._Sub_Co_handler_829])])
    def setup(self):
        self._state.procs = dict()              # request/process -> status string
        self._state.app_id = None
        self._state.updated_cache = dict()      # committed subject attribute values
        self._state.has_read_from_tent = []     # NOTE(review): never appended to in this file
        self._state.tent_updated_attrs = {}     # attr -> {'value':..., 'timestamp':...} (uncommitted)
    def run(self):
        # Compiled 'await received done' busy-wait loop.
        super()._label('_st_label_456', block=False)
        _st_label_456 = 0
        while (_st_label_456 == 0):
            _st_label_456 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern477_,)) in self._Sub_CoReceivedEvent_0 if (_ConstantPattern477_ == 'done')]) == 1):
                _st_label_456 += 1
            else:
                super()._label('_st_label_456', block=True)
                _st_label_456 -= 1
    def performCleanup(self, req):
        """Reset a request's per-evaluation bookkeeping (and refresh its
        timestamp) before it is restarted after a conflict."""
        req.attrs_read_from_tent = {}
        req.attrs_read_from_cache = {}
        req.timestamp = time.time()
        req.sub_attrs_to_update = []
        req.res_attrs_to_update = []
        req.sub_attrs_for_policy_eval = {}
        return req
    def _Sub_Co_handler_484(self, p):
        # ('done',): orderly shutdown notification.
        self.output((str(self.id) + ' shutting Down'))
    _Sub_Co_handler_484._labels = None
    _Sub_Co_handler_484._notlabels = None
    def _Sub_Co_handler_501(self, req, app_id):
        # ('evalRequestFromApp', req): new (or restarted) evaluation.
        # Record the sender once, then annotate the request with subject
        # attribute values from the tentative store (preferred) or the
        # committed cache before forwarding to the resource coordinator.
        self.output(('Received Eval Req from App at Sub_Co-> ' + str(self.id)))
        if (req.app_id is None):
            req.app_id = app_id
        self._state.procs[req] = 'Running'
        self.output(('reading tent' + str(self._state.tent_updated_attrs)))
        for attr in req.sub_attrs:
            if (attr in self._state.tent_updated_attrs):
                req.attrs_read_from_tent[attr] = self._state.tent_updated_attrs[attr]['value']
            elif (attr in self._state.updated_cache):
                req.attrs_read_from_cache[attr] = self._state.updated_cache[attr]
        self.output('Request Updated with Tent and Cache')
        self.output(('Final Req Object is-> ' + str(req)))
        self.output(('Sub_Co Sending Eval Request to Res_Co-> ' + str(req.hashMap[req.res_id])))
        self._send(('evalRequestFromSub_Co', req), req.hashMap[req.res_id])
    _Sub_Co_handler_501._labels = None
    _Sub_Co_handler_501._notlabels = None
    def _Sub_Co_handler_632(self, req, w_id):
        # ('decisionFromWorker', req): the worker's policy decision with
        # obligations attached.  Detect subject-side conflicts: any attr
        # the policy read that has since diverged in the tentative store
        # forces a restart of this request.
        self.output(('Decision from Worker Recvd at-> ' + str(self.id)))
        self.output(('Sub obligations-> ' + str(req.sub_attrs_to_update)))
        self.output(('Res obligations-> ' + str(req.res_attrs_to_update)))
        conflict = None
        self.output(('Tent_attr BEFORE-> ' + str(self._state.tent_updated_attrs)))
        self.output(('My Time Stamp - ' + str(req.timestamp)))
        if req.sub_attrs_for_policy_eval:
            for attr in req.sub_attrs_for_policy_eval:
                if (attr in self._state.tent_updated_attrs):
                    if (not (req.sub_attrs_for_policy_eval[attr] == self._state.tent_updated_attrs[attr]['value'])):
                        conflict = 'Present'
                        self._state.procs[self.id] = 'Restarted'
                        self.output(('Subject Conflict Found. Restart-> ' + str(req.app_id)))
                        req = self.performCleanup(req)
                        self._send(('evalRequestFromApp', req), self.id)
                        break
        if (not (conflict == 'Present')):
            # No subject conflict: tentatively record the subject-side
            # obligations (tagged with this request's timestamp) and ask the
            # resource coordinator to check for resource-side conflicts.
            self.output('Subject Conflict-> Absent.')
            if req.sub_attrs_to_update:
                for attr in req.sub_attrs_to_update:
                    if (attr in self._state.tent_updated_attrs):
                        self._state.tent_updated_attrs[attr]['value'] = req.sub_attrs_to_update[attr]
                        self._state.tent_updated_attrs[attr]['timestamp'] = req.timestamp
                    else:
                        val = req.sub_attrs_to_update[attr]
                        ts = req.timestamp
                        self._state.tent_updated_attrs[attr] = dict()
                        self._state.tent_updated_attrs[attr]['value'] = val
                        self._state.tent_updated_attrs[attr]['timestamp'] = ts
            self.output(('Updated the tent_attr-> ' + str(self._state.tent_updated_attrs)))
            self.output(('Sending Req for Res Conflict eval to Res_Co-> ' + str(req.hashMap[req.res_id])))
            self._send(('conflictEvalRequestFromSub_Co', req), req.hashMap[req.res_id])
    _Sub_Co_handler_632._labels = None
    _Sub_Co_handler_632._notlabels = None
    def _Sub_Co_handler_829(self, conflict_decision, req, res_coord_id):
        # ('conflictEvalReplyFromRes_Co', decision, req): resource-side
        # conflict verdict.  On conflict, roll back this request's tentative
        # entries (and those of any later requests that read from tent) and
        # restart them; otherwise commit the updates and report success.
        self.output(('Received Conflict eval reply from Res_Co at-> ' + str(self.id)))
        if (conflict_decision == 'Present'):
            self.output('Resource Conflict was found at Res_Co.')
            # NOTE(review): has_read_from_tent is never populated in this
            # file, so this cascade-restart loop appears dead as written.
            for r in self._state.has_read_from_tent:
                if (r.timestamp > req.timestamp):
                    self.output(('Clearing Adminstration for request-> ' + str(r)))
                    for attr in r.sub_attrs:
                        if (attr in self._state.tent_updated_attrs):
                            if (self._state.tent_updated_attrs[attr]['timestamp'] == r.timestamp):
                                del self._state.tent_updated_attrs[attr]
                    self.output(('Restarting request ' + str(r)))
                    self._send(('evalRequestFromApp', r), self.id)
            self.output(('Clearing Adminstration for request-> ' + str(req)))
            for attr in req.sub_attrs:
                if (attr in self._state.tent_updated_attrs):
                    if (self._state.tent_updated_attrs[attr]['timestamp'] == req.timestamp):
                        del self._state.tent_updated_attrs[attr]
            self.output(('Restarting request-> ' + str(req)))
            self._send(('evalRequestFromApp', req), self.id)
        else:
            # Commit: fold the obligations into the committed cache, push
            # them to the DB emulator, and notify the application.
            self.output('No Conflict. Going to Commit.')
            if req.sub_attrs_to_update:
                for attr in req.sub_attrs_to_update:
                    self._state.updated_cache[attr] = req.sub_attrs_to_update[attr]
            self.output('Sub_Co Committing updates to DB')
            self._send(('updateSubAttrs', self._state.updated_cache, req.sub_id), req.dbEmulator)
            self.output(('Sending Policy Decision from Sub_Co to App-> ' + str(req.app_id)))
            self._send(('policyDecisionFromSub_Co', 'Success'), req.app_id)
    _Sub_Co_handler_829._labels = None
    _Sub_Co_handler_829._notlabels = None
class Res_Co(da.DistProcess):
    """Compiled DistAlgo process: the resource coordinator.

    Refreshes requests with cached resource attributes and hands them to a
    randomly chosen worker; later checks the worker's resource-side
    obligations for timestamp conflicts, commits them on success, and
    reports the verdict back to the subject coordinator.
    """

    def __init__(self, procimpl, props):
        super().__init__(procimpl, props)
        self._Res_CoReceivedEvent_0 = []
        # Compiler-generated event table; slot 0 keeps 'done' history for
        # the await in run(), the rest dispatch to the handlers below.
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_Res_CoReceivedEvent_0', PatternExpr_1016, sources=[PatternExpr_1021], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Res_CoReceivedEvent_1', PatternExpr_1040, sources=[PatternExpr_1045], destinations=None, timestamps=None, record_history=None, handlers=[self._Res_Co_handler_1039]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Res_CoReceivedEvent_2', PatternExpr_1057, sources=[PatternExpr_1064], destinations=None, timestamps=None, record_history=None, handlers=[self._Res_Co_handler_1056]), da.pat.EventPattern(da.pat.ReceivedEvent, '_Res_CoReceivedEvent_3', PatternExpr_1132, sources=[PatternExpr_1139], destinations=None, timestamps=None, record_history=None, handlers=[self._Res_Co_handler_1131])])
    def setup(self, workers):
        self._state.workers = workers           # pool of Worker processes
        self._state.ongoingEvals = []           # requests currently in flight
        self._state.updated_cache = {}          # committed resource attribute values
    def run(self):
        # Compiled 'await received done' busy-wait loop.
        super()._label('_st_label_1011', block=False)
        _st_label_1011 = 0
        while (_st_label_1011 == 0):
            _st_label_1011 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern1032_,)) in self._Res_CoReceivedEvent_0 if (_ConstantPattern1032_ == 'done')]) == 1):
                _st_label_1011 += 1
            else:
                super()._label('_st_label_1011', block=True)
                _st_label_1011 -= 1
    def _Res_Co_handler_1039(self, p):
        # ('done',): orderly shutdown notification.
        self.output((str(self.id) + ' shutting Down'))
    _Res_Co_handler_1039._labels = None
    _Res_Co_handler_1039._notlabels = None
    def _Res_Co_handler_1056(self, req, sub_coord_id):
        # ('evalRequestFromSub_Co', req): refresh the request's resource
        # attributes from the local cache and dispatch it to a random worker.
        self.output(('Request from Sub_Co recvd at-> ' + str(self.id)))
        self._state.ongoingEvals.append(req)
        for attr in req.res_attrs:
            if (attr in self._state.updated_cache):
                req.res_attrs[attr] = self._state.updated_cache[attr]
        workers_list = [p for p in self._state.workers]
        w_id = workers_list[random.randint(0, (len(workers_list) - 1))]
        self.output(('Sending Request from Res_Co Worker-> ' + str(w_id)))
        self._send(('evalRequestFromRes_Co', req), w_id)
    _Res_Co_handler_1056._labels = None
    _Res_Co_handler_1056._notlabels = None
    def _Res_Co_handler_1131(self, req, sub_coord_id):
        # ('conflictEvalRequestFromSub_Co', req): check whether any resource
        # attribute this request wants to update was committed by a newer
        # request; if so report a conflict, otherwise commit and push to DB.
        conflict_decision = None
        self.output(('Received Req for Res Conflict eval at-> ' + str(self.id)))
        if req.res_attrs_to_update:
            for attr in req.res_attrs_to_update:
                if (attr in self._state.updated_cache):
                    # NOTE(review): handler_1056 stores raw values in
                    # updated_cache, yet this indexes ['timestamp'] —
                    # inconsistent representations; confirm intended schema.
                    if (self._state.updated_cache[attr]['timestamp'] > req.timestamp):
                        conflict_decision = 'Present'
                        break
        if (conflict_decision == 'Present'):
            self.output('Resource Conflict-> Present')
            self.output(('Send Conflict msg to Sub_Co-> ' + str(sub_coord_id)))
            self._send(('conflictEvalReplyFromRes_Co', conflict_decision, req), sub_coord_id)
        else:
            self.output('No Conflict at the Resource Coordinator')
            # NOTE(review): the guard tests sub_attrs_to_update but the loop
            # iterates res_attrs_to_update — likely a copy-paste slip; kept
            # as-is since this file is compiler output.
            if req.sub_attrs_to_update:
                for attr in req.res_attrs_to_update:
                    self.output('Res_Co## Updating the Cache')
                    self._state.updated_cache[attr] = req.res_attrs_to_update[attr]
            self.output('Res Co Committing updates to DB')
            self._send(('updateResAttrs', self._state.updated_cache, req.res_id), req.dbEmulator)
            self.output(('Send No Conflict msg to Sub_Co-> ' + str(sub_coord_id)))
            self._send(('conflictEvalReplyFromRes_Co', conflict_decision, req), sub_coord_id)
    _Res_Co_handler_1131._labels = None
    _Res_Co_handler_1131._notlabels = None
class Worker(da.DistProcess):
    def __init__(self, procimpl, props):
        """Compiler-generated constructor: registers the message-pattern
        event table (DB attribute replies, shutdown, evaluation requests,
        and DB update notifications)."""
        super().__init__(procimpl, props)
        self._WorkerReceivedEvent_2 = []
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_0', PatternExpr_1270, sources=[PatternExpr_1276], destinations=None, timestamps=None, record_history=None, handlers=[self._Worker_handler_1269]), da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_1', PatternExpr_1296, sources=[PatternExpr_1302], destinations=None, timestamps=None, record_history=None, handlers=[self._Worker_handler_1295]), da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_2', PatternExpr_1672, sources=[PatternExpr_1677], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_3', PatternExpr_1696, sources=[PatternExpr_1701], destinations=None, timestamps=None, record_history=None, handlers=[self._Worker_handler_1695]), da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_4', PatternExpr_1713, sources=[PatternExpr_1720], destinations=None, timestamps=None, record_history=None, handlers=[self._Worker_handler_1712]), da.pat.EventPattern(da.pat.ReceivedEvent, '_WorkerReceivedEvent_5', PatternExpr_1837, sources=[PatternExpr_1844], destinations=None, timestamps=None, record_history=None, handlers=[self._Worker_handler_1836])])
def setup(self):
self._state.rules = []
self._state.sub_attr_dict = dict()
self._state.res_attr_dict = dict()
    def run(self):
        # Load the policy rules from the XML file, then block until a
        # 'done' message arrives (compiled DistAlgo 'await' busy-wait).
        self._state.rules = self.create_rules('policy-example.xml')
        super()._label('_st_label_1667', block=False)
        _st_label_1667 = 0
        while (_st_label_1667 == 0):
            _st_label_1667 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern1688_,)) in self._WorkerReceivedEvent_2 if (_ConstantPattern1688_ == 'done')]) == 1):
                _st_label_1667 += 1
            else:
                super()._label('_st_label_1667', block=True)
                _st_label_1667 -= 1
def create_rules(self, filename):
tree = ET.parse(filename)
root = tree.getroot()
self._state.rules = []
for rule in root.iter('rule'):
sub_attrs = {}
res_attrs = {}
action = {}
sub_attrs_to_update = {}
res_attrs_to_update = {}
rule_name = rule.attrib['name']
sc = rule.find('subjectCondition')
sub_id = sc.attrib['position']
for key in sc.attrib.keys():
if (not (key == 'position')):
sub_attrs[key] = sc.attrib[key]
rc = rule.find('resourceCondition')
if ('id' in rc.attrib.keys()):
res_id = rc.attrib['id']
else:
res_id = rc.attrib['type']
for key in rc.attrib.keys():
if ((not (key == 'id')) and (not (key == 'type'))):
res_attrs[key] = rc.attrib[key]
act = rule.find('action')
for key in act.attrib.keys():
action = act.attrib[key]
su = rule.find('subjectUpdate')
if (not (su == None)):
for key in su.attrib.keys():
sub_attrs_to_update[key] = su.attrib[key]
ru = rule.find('resourceUpdate')
if (not (ru == None)):
for key in ru.attrib.keys():
res_attrs_to_update[key] = ru.attrib[key]
temp_rule = Rule(rule_name, sub_id, res_id, action, sub_attrs, res_attrs, sub_attrs_to_update, res_attrs_to_update)
self._state.rules.append(temp_rule)
return self._state.rules
def policy(self, req, sub_attrs_for_policy_eval):
out = False
for rule in self._state.rules:
if ((rule.sub_id == req.sub_id) and (rule.res_id == req.res_id) and (rule.action == req.action)):
for attr in rule.sub_attrs:
if ((not (attr in sub_attrs_for_policy_eval)) or (not (rule.sub_attrs[attr] == sub_attrs_for_policy_eval[attr]))):
out = True
break
if (out == False):
for attr in req.res_attrs:
if ((not (attr in rule.res_attrs)) or (not (rule.res_attrs[attr] == req.res_attrs[attr]))):
out = True
break
if (out == False):
self.output('Found a Matching Rule')
return (rule.sub_attrs_to_update, rule.res_attrs_to_update)
return (None, None)
    def _Worker_handler_1269(self, sub_attr_dict, dbEmulator):
        """Merge a subject-attribute broadcast from DB_Emulator into the local replica."""
        for attr in sub_attr_dict:
            self._state.sub_attr_dict[attr] = sub_attr_dict[attr]
    # DistAlgo handler metadata (set by the compiler).
    _Worker_handler_1269._labels = None
    _Worker_handler_1269._notlabels = None
    def _Worker_handler_1295(self, res_attr_dict, dbEmulator):
        """Merge a resource-attribute broadcast from DB_Emulator into the local replica."""
        for attr in res_attr_dict:
            self._state.res_attr_dict[attr] = res_attr_dict[attr]
    # DistAlgo handler metadata (set by the compiler).
    _Worker_handler_1295._labels = None
    _Worker_handler_1295._notlabels = None
    def _Worker_handler_1695(self, p):
        """Log receipt of the 'done' shutdown message; the process then terminates."""
        self.output((str(self.id) + ' shutting Down'))
    # DistAlgo handler metadata (set by the compiler).
    _Worker_handler_1695._labels = None
    _Worker_handler_1695._notlabels = None
    def _Worker_handler_1712(self, req, res_coord_id):
        """Handle a request forwarded by a resource coordinator.

        Assembles the subject attributes used for policy evaluation,
        evaluates the policy, records the resulting updates on the request,
        and sends the decision to the request's subject coordinator.
        """
        self.output(('Received Request from Res_Co at-> ' + str(self.id)))
        sub_attrs_for_policy_eval = {}
        for attr in req.sub_attrs:
            # Precedence: tentative value > coordinator cache > local DB replica.
            if (attr in req.attrs_read_from_tent):
                sub_attrs_for_policy_eval[attr] = req.attrs_read_from_tent[attr]
            elif (attr in req.attrs_read_from_cache):
                sub_attrs_for_policy_eval[attr] = req.attrs_read_from_cache[attr]
            elif ((req.sub_id in self._state.sub_attr_dict) and (attr in self._state.sub_attr_dict[req.sub_id])):
                sub_attrs_for_policy_eval[attr] = self._state.sub_attr_dict[req.sub_id][attr]
        self.output(('Value Read before Rule-- ' + str(sub_attrs_for_policy_eval)))
        req.sub_attrs_for_policy_eval = sub_attrs_for_policy_eval
        (sub_attrs_to_update, res_attrs_to_update) = self.policy(req, sub_attrs_for_policy_eval)
        req.sub_attrs_to_update = sub_attrs_to_update
        req.res_attrs_to_update = res_attrs_to_update
        # hashMap maps subject/resource ids to their coordinator processes.
        self.output(('Sending Decision from Worker to Sub_Co-> ' + str(req.hashMap[req.sub_id])))
        self._send(('decisionFromWorker', req), req.hashMap[req.sub_id])
    # DistAlgo handler metadata (set by the compiler).
    _Worker_handler_1712._labels = None
    _Worker_handler_1712._notlabels = None
    def _Worker_handler_1836(self, sub_attr_dict, res_attr_dict, dbEmulator):
        """Replace both attribute replicas with a full snapshot from DB_Emulator."""
        self._state.sub_attr_dict = sub_attr_dict
        self._state.res_attr_dict = res_attr_dict
    # DistAlgo handler metadata (set by the compiler).
    _Worker_handler_1836._labels = None
    _Worker_handler_1836._notlabels = None
class DB_Emulator(da.DistProcess):
    """DistAlgo-generated attribute-database process.

    Loads subject/resource attributes from an INI-style config file, pushes
    them to every Worker, and serves read/update requests from the
    coordinators, simulating database latency with a random sleep.
    (Machine-generated code -- compiled from a .da source.)
    """

    def __init__(self, procimpl, props):
        super().__init__(procimpl, props)
        self._DB_EmulatorReceivedEvent_0 = []
        # Compiled event-pattern table: routes received messages to the
        # handler methods below ('done' messages are recorded, not handled).
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_0', PatternExpr_2110, sources=[PatternExpr_2115], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_1', PatternExpr_2134, sources=[PatternExpr_2139], destinations=None, timestamps=None, record_history=None, handlers=[self._DB_Emulator_handler_2133]), da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_2', PatternExpr_2167, sources=[PatternExpr_2176], destinations=None, timestamps=None, record_history=None, handlers=[self._DB_Emulator_handler_2166]), da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_3', PatternExpr_2196, sources=[PatternExpr_2205], destinations=None, timestamps=None, record_history=None, handlers=[self._DB_Emulator_handler_2195]), da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_4', PatternExpr_2225, sources=[PatternExpr_2234], destinations=None, timestamps=None, record_history=None, handlers=[self._DB_Emulator_handler_2224]), da.pat.EventPattern(da.pat.ReceivedEvent, '_DB_EmulatorReceivedEvent_5', PatternExpr_2298, sources=[PatternExpr_2307], destinations=None, timestamps=None, record_history=None, handlers=[self._DB_Emulator_handler_2297])])

    def setup(self, workers, db_config_file):
        """Remember the worker set and config path; initialize the attribute stores."""
        self._state.workers = workers
        self._state.db_config_file = db_config_file
        self._state.sub_attr_dict = dict()
        self._state.res_attr_dict = dict()
        # Latency bounds (seconds); overwritten from the [Latency] section in run().
        self._state.minDBLatency = 0.0
        self._state.maxDBLatency = 1.0

    def run(self):
        """Load the DB config, broadcast initial attributes, then wait for 'done'."""
        config = configparser.ConfigParser()
        config.read(self._state.db_config_file)
        # [Subject] section: sub_attrs is a comma list of "name:value" pairs.
        sub_section = config['Subject']
        sub_id = sub_section['sub_id'].strip()
        sub_attrs_list = sub_section['sub_attrs'].strip().split(',')
        attr_dict = {}
        if (len(sub_attrs_list) >= 1):
            for attrs in sub_attrs_list:
                attr_dict[attrs.strip().split(':')[0]] = attrs.strip().split(':')[1]
        self._state.sub_attr_dict[sub_id] = attr_dict
        for w_id in self._state.workers:
            self.output(('Sending updates to worker-> ' + str(w_id)))
            self._send(('subAttrsFromDB', self._state.sub_attr_dict), w_id)
        # [Resource] section, same format; the all() guard skips empty entries.
        res_section = config['Resource']
        res_id = res_section['res_id'].strip()
        res_attrs_list = res_section['res_attrs'].strip().split(',')
        attr_dict = {}
        if ((len(res_attrs_list) >= 1) and all((v for v in res_attrs_list))):
            for attrs in res_attrs_list:
                attr_dict[attrs.strip().split(':')[0]] = attrs.strip().split(':')[1]
        self._state.res_attr_dict[res_id] = attr_dict
        for w_id in self._state.workers:
            self.output(('Sending updates to worker-> ' + str(w_id)))
            self._send(('resAttrsFromDB', self._state.res_attr_dict), w_id)
        latency_section = config['Latency']
        self._state.minDBLatency = int(latency_section['minDBLatency'])
        self._state.maxDBLatency = int(latency_section['maxDBLatency'])
        # Compiled form of await(some(received(('done', _)))): block until
        # exactly one 'done' message has been recorded.
        super()._label('_st_label_2105', block=False)
        _st_label_2105 = 0
        while (_st_label_2105 == 0):
            _st_label_2105 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern2126_,)) in self._DB_EmulatorReceivedEvent_0 if (_ConstantPattern2126_ == 'done')]) == 1):
                _st_label_2105 += 1
            else:
                super()._label('_st_label_2105', block=True)
                _st_label_2105 -= 1

    def _DB_Emulator_handler_2133(self, Master):
        """On shutdown, dump both attribute stores for inspection."""
        self.output('DONE recvd at the dbEmulator')
        self.output('Dumping the DataBase')
        self.output(('Subject DataBase-> ' + str(self._state.sub_attr_dict)))
        self.output(('Resource DataBase-> ' + str(self._state.res_attr_dict)))
        self.output((str(self.id) + ' shutting Down'))
    # DistAlgo handler metadata (set by the compiler).
    _DB_Emulator_handler_2133._labels = None
    _DB_Emulator_handler_2133._notlabels = None

    def _DB_Emulator_handler_2166(self, sub_attrs, sub_id, w_id):
        """Serve a worker's read of one subject's attributes."""
        self.output(('Sending sub_attrs to worker->' + str(w_id)))
        self._send(('subAttrsFromDB', self._state.sub_attr_dict[sub_id]), w_id)
    _DB_Emulator_handler_2166._labels = None
    _DB_Emulator_handler_2166._notlabels = None

    def _DB_Emulator_handler_2195(self, res_attrs, res_id, w_id):
        """Serve a worker's read of one resource's attributes."""
        self.output(('Sending res_attrs to worker->' + str(w_id)))
        self._send(('resAttrsFromDB', self._state.res_attr_dict[res_id]), w_id)
    _DB_Emulator_handler_2195._labels = None
    _DB_Emulator_handler_2195._notlabels = None

    def _DB_Emulator_handler_2224(self, sub_attrs, sub_id, sub_coord_id):
        """Apply a subject-attribute update, sleep to simulate DB latency,
        then broadcast the new subject store to all workers."""
        for attr in sub_attrs:
            self._state.sub_attr_dict[sub_id][attr] = sub_attrs[attr]
        self.output(('Recvd req to update by Sub_Co-> ' + str(sub_coord_id)))
        waittime = random.uniform(self._state.minDBLatency, self._state.maxDBLatency)
        self.output(('Latency Chosen by the DB-> ' + str(waittime)))
        time.sleep(waittime)
        for w_id in self._state.workers:
            self.output(('Sending updates to worker-> ' + str(w_id)))
            self._send(('subAttrsFromDB', self._state.sub_attr_dict), w_id)
    _DB_Emulator_handler_2224._labels = None
    _DB_Emulator_handler_2224._notlabels = None

    def _DB_Emulator_handler_2297(self, res_attrs, res_id, res_coord_id):
        """Apply a resource-attribute update, sleep to simulate DB latency,
        then broadcast the new resource store to all workers."""
        for attr in res_attrs:
            self._state.res_attr_dict[res_id][attr] = res_attrs[attr]
        self.output(('Recvd req to update by res co-> ' + str(res_coord_id)))
        waittime = random.uniform(self._state.minDBLatency, self._state.maxDBLatency)
        self.output(('Latency Chosen by the DB-> ' + str(waittime)))
        time.sleep(waittime)
        for w_id in self._state.workers:
            self.output(('Sending updates to worker-> ' + str(w_id)))
            self._send(('resAttrsFromDB', self._state.res_attr_dict), w_id)
    _DB_Emulator_handler_2297._labels = None
    _DB_Emulator_handler_2297._notlabels = None
class Master(da.DistProcess):
    """DistAlgo-generated orchestrator process.

    Reads the scenario config, spawns workers, subject/resource
    coordinators, the DB emulator and one Application per request, then
    waits for all applications to report 'okay' before broadcasting 'done'.
    (Machine-generated code -- compiled from a .da source.)
    """

    def __init__(self, procimpl, props):
        super().__init__(procimpl, props)
        self._MasterReceivedEvent_0 = []
        # Compiled event-pattern table: 'okay' messages are recorded;
        # shutdown requests are routed to _Master_handler_2865.
        self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_MasterReceivedEvent_0', PatternExpr_2825, sources=[PatternExpr_2830], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_MasterReceivedEvent_1', PatternExpr_2866, sources=[PatternExpr_2871], destinations=None, timestamps=None, record_history=None, handlers=[self._Master_handler_2865])])

    def setup(self, config_file_name, db_config_file):
        """Remember the scenario and DB-emulator config file paths."""
        self._state.config_file_name = config_file_name
        self._state.db_config_file = db_config_file
        pass

    def run(self):
        """Spawn the whole process topology and drive the scenario to completion."""
        config = configparser.ConfigParser()
        config.read(self._state.config_file_name)
        self.output(str(config.sections()))
        master_section = config['Master']
        num_of_workers = int(master_section['num_of_workers'])
        num_of_sub_co = int(master_section['num_of_sub_co'])
        num_of_res_co = int(master_section['num_of_res_co'])
        sub_id_section = config['sub-id-list']
        res_id_section = config['res-id-list']
        # hashMap: subject/resource id -> coordinator process (round-robin).
        hashMap = {}
        workers = da.new(Worker, [], num=num_of_workers)
        sub_co = da.new(Sub_Co, [], num=num_of_sub_co)
        res_co = da.new(Res_Co, [workers], num=num_of_res_co)
        dbEmulator = da.new(DB_Emulator, [workers, self._state.db_config_file], num=1)
        sub_co_list = [p for p in sub_co]
        res_co_list = [p for p in res_co]
        sub_id_list = sub_id_section['sub_id_list'].strip().split(',')
        res_id_list = res_id_section['res_id_list'].strip().split(',')
        i = 0
        for sub_id in sub_id_list:
            hashMap[sub_id] = sub_co_list[i]
            i = (i + 1)
            i = (i % num_of_sub_co)
        i = 0
        for res_id in res_id_list:
            hashMap[res_id] = res_co_list[i]
            i = (i + 1)
            i = (i % num_of_res_co)
        self.output(('Mapping: ' + str(hashMap)))
        # [Application] lists numbered request specs of the form
        # "sub_id=..., sub_attrs=a:1|b:2, res_id=..., res_attrs=..., action=...".
        app_section = config['Application']
        num_of_requests = int(app_section['num_of_request'])
        self.output(('number of Request:' + str(num_of_requests)))
        for i in range(1, (num_of_requests + 1)):
            req = app_section[str(i)].strip().split(',')
            sub_attrs = {}
            res_attrs = {}
            for elem in req:
                elem = elem.split('=')
                if (elem[0].strip() == 'sub_id'):
                    sub_id = elem[1]
                    sub_id_list.append(sub_id)
                elif ((elem[0].strip() == 'sub_attrs') and (not (elem[1].strip() == 'None'))):
                    sub_attrs_list = elem[1].split('|')
                    for e in sub_attrs_list:
                        sub_attrs[e.split(':')[0]] = e.split(':')[1]
                elif (elem[0].strip() == 'res_id'):
                    res_id = elem[1]
                    res_id_list.append(res_id)
                elif ((elem[0].strip() == 'res_attrs') and (not (elem[1].strip() == 'None'))):
                    res_attrs_list = elem[1].split('|')
                    for e in res_attrs_list:
                        res_attrs[e.split(':')[0]] = e.split(':')[1]
                elif (elem[0].strip() == 'action'):
                    action = elem[1]
            req = Request(sub_id, res_id, hashMap, sub_attrs, res_attrs, action)
            app = da.new(Application, [hashMap, sub_co, res_co, self.id, dbEmulator, req], num=1)
            self.output(str(app))
            da.start(app)
        da.start((((sub_co | res_co) | workers) | dbEmulator))
        # Compiled await: block until every application has reported 'okay'.
        super()._label('_st_label_2820', block=False)
        _st_label_2820 = 0
        while (_st_label_2820 == 0):
            _st_label_2820 += 1
            if (len([p for (_, (_, _, p), (_ConstantPattern2841_,)) in self._MasterReceivedEvent_0 if (_ConstantPattern2841_ == 'okay')]) == num_of_requests):
                _st_label_2820 += 1
            else:
                super()._label('_st_label_2820', block=True)
                _st_label_2820 -= 1
        # Tell every spawned process to shut down.
        self._send(('done',), (((sub_co | res_co) | workers) | dbEmulator))
        self.output((str(self.id) + ' shutting Down'))

    def _Master_handler_2865(self, p):
        """Forward a 'done' to the process *p* that requested shutdown."""
        self._send(('done',), p)
    # DistAlgo handler metadata (set by the compiler).
    _Master_handler_2865._labels = None
    _Master_handler_2865._notlabels = None
class _NodeMain(da.DistProcess):
    """DistAlgo-generated entry-point process.

    Reads the scenario and DB config file names from argv[1]/argv[2]
    (falling back to 'basic.config' / 'dbconfig.config'), redirects
    logging to <config-basename>.log, and spawns the single Master.
    """

    def run(self):
        config_file_name = (str(sys.argv[1]) if (len(sys.argv) > 1) else 'basic.config')
        db_config_file = (str(sys.argv[2]) if (len(sys.argv) > 2) else 'dbconfig.config')
        # Log file name is derived from the config file's base name.
        log_file_name = (config_file_name.strip().split('.')[0] + '.log')
        self.output(log_file_name)
        # Drop any previously installed handlers so basicConfig takes effect.
        logging.getLogger('').handlers = []
        logging.basicConfig(filename=log_file_name, filemode='w', level=logging.INFO)
        master = da.new(Master, [config_file_name, db_config_file], num=1)
        da.start(master)
| [
"arunrajan@Aruns-MacBook-Pro.local"
] | arunrajan@Aruns-MacBook-Pro.local |
c86d530627921e754323bf7a558a07ba700e0cb8 | ed322b05c25e9cac244e603eb8582df376e1e650 | /array_to_png.py | 7224d4e01f712f58bf698d945ebd39ad91d2e24d | [] | no_license | prateek-77/Array-PNG | 4ba29d82ecc9a91de069199c8598fbba73fc351c | 047da6ceeed95a32aae9ff999b400651349bf3b7 | refs/heads/master | 2020-08-12T03:15:21.095255 | 2019-10-12T16:23:08 | 2019-10-12T16:23:08 | 214,677,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | import os
import matplotlib.pyplot  # bug fix: bare `import matplotlib` does not make matplotlib.pyplot available

# NOTE(review): this excerpt also relies on `Image` (PIL), `MODEL` (a
# DeepLab wrapper) and `vis_segmentation`, none of which are imported or
# defined here -- confirm the missing setup lives elsewhere in the project.

# Input tree of per-class image folders and the mirrored output tree.
path = '/home/prateek/image'
out_path = '/home/prateek/image_out/output/'

a = os.listdir(path)
print(a)
for i, name in enumerate(a):
    os.chdir(path + '/' + name)
    # NOTE: `a` and `i` are rebound below, shadowing the outer loop's
    # variables; enumerate() already captured the original list, so the
    # outer iteration is unaffected, but the reuse is confusing.
    a = os.listdir(os.getcwd())
    os.mkdir(out_path + name)
    for i, img in enumerate(a):
        img1 = os.path.splitext(img)[0]  # file name without extension
        path1 = os.getcwd() + "/" + img
        try:
            original_im = Image.open(path1)
        except IOError:
            print('Cannot retrieve image. Please check url: ')
            # Bug fix: skip unreadable files; previously execution fell
            # through and ran the model on a stale (or undefined) image.
            continue
        print('running deeplab on image %s...')
        # Run segmentation; seg_map holds per-pixel class labels.
        resized_im, seg_map = MODEL.run(original_im)
        print(type(seg_map))
        '''
        vis_segmentation(resized_im, seg_map)
        im = Image.fromarray(seg_map)
        im.save(out_path + a + "/img" + "." + png)
        '''
        os.chdir(path + '/' + name)
        # Save the label map as a grayscale PNG mirroring the input layout.
        matplotlib.pyplot.imsave(out_path + name + "/" + img1 + ".png", seg_map, cmap="gray", vmin=0, vmax=255)
| [
"prateek.garg108@gmail.com"
] | prateek.garg108@gmail.com |
89c7a300c60f7a866df0c475697b95922971bb21 | 36470cb1962ba819ec4fee3960400f822f4c3832 | /correlation-for-icecream.py | fc509bf30e5896f7a4387e233ffce6f6e93f80f7 | [] | no_license | Cra2yAJ/p106 | fc0dad24db6ea92cd6cbe00dd9304f67adc08e0c | caf5095c841bc8dc8bd4abdac2ccdd64ee7ed003 | refs/heads/main | 2023-05-06T10:03:54.566466 | 2021-05-21T04:09:41 | 2021-05-21T04:09:41 | 369,409,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | import plotly.express as px
import csv
import numpy as np
def plotFigure(data_path):
    """Show a Plotly scatter of Temperature vs ice-cream sales from the CSV.

    NOTE(review): `df` is a csv.DictReader (a one-shot iterator), not a
    DataFrame; plotly accepts iterable/dict-like inputs, but confirm this
    renders as intended before relying on it.
    """
    with open(data_path) as csv_file:
        df = csv.DictReader(csv_file)
        fig = px.scatter(df,x="Temperature",y="Ice-cream Sales( ₹ )")
        fig.show()
def getDataSource(data_path):
    """Load the CSV at *data_path*.

    Returns a dict with "x" (ice-cream sales) and "y" (cold-drink sales),
    both as lists of floats in file order.
    """
    with open(data_path) as handle:
        records = list(csv.DictReader(handle))
    return {
        "x": [float(record["Ice-cream Sales( ₹ )"]) for record in records],
        "y": [float(record["Cold drink sales( ₹ )"]) for record in records],
    }
def findCorrelation(datasource):
    """Print and return the Pearson correlation between the two series.

    Args:
        datasource: mapping with "x" and "y" keys holding equal-length
            numeric sequences (as produced by getDataSource).

    Returns:
        float: the x/y correlation coefficient.  (Previously the value was
        only printed and discarded, making the function unusable
        programmatically; returning it is backward compatible.)
    """
    # np.corrcoef returns the 2x2 correlation matrix; the off-diagonal
    # entry is the x/y coefficient.
    correlation = np.corrcoef(datasource["x"], datasource["y"])
    print("Correlation between ice-cream sales and cold drink sales is -- >", correlation[0, 1])
    return correlation[0, 1]
def setup():
    """Entry point: load the dataset, report the correlation, and plot it."""
    csv_path = "./data/file1.csv"
    source = getDataSource(csv_path)
    findCorrelation(source)
    plotFigure(csv_path)


setup()
"noreply@github.com"
] | Cra2yAJ.noreply@github.com |
c1d39ebc5f1174152c28d88c2a6e92745f8fea7c | 1e35944fcd9a0e2209e069fb0056f23597e3196c | /0x02-python-import_modules/4-hidden_discovery.py | 9f95073b69cc6970c576aeb2f8a13779a4a17885 | [] | no_license | sonnentag/holbertonschool-higher_level_programming | 1496be9390f557cfa7a3e31bb74b208a7dfbb98f | 5992e3c7ff97ab3fefe33bec5632bdca4d3d8a05 | refs/heads/master | 2022-12-23T12:47:02.957781 | 2020-09-25T04:01:27 | 2020-09-25T04:01:27 | 259,382,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/usr/bin/python3
if __name__ == "__main__":
    import hidden_4

    # Print every name defined by the hidden module, excluding dunder
    # names such as __name__/__doc__.  The original condition checked only
    # the second character (func[1] != "_"), which happens to skip dunders
    # but also wrongly skips any public name whose second character is an
    # underscore; startswith("__") matches the actual intent.
    for func in dir(hidden_4):
        if not func.startswith("__"):
            print(func)
| [
"zocle@zocle.net"
] | zocle@zocle.net |
919078d7b56ca845ac4d22fcaa4f1a78a15a1fd6 | 8f4710009ca956bd3780cb423d9f4aa896d61183 | /hsds/servicenode_lib.py | 787f486f609faa18da7934fb8a1bc1f1498e4e69 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | murlock/hsds | b5482ee3f36680728f1b64034c2f6b9c3bd4cad7 | 9f5fc3cdb64017d07e34eb422eee5398553d213c | refs/heads/master | 2020-06-18T06:35:06.817652 | 2019-07-09T02:01:06 | 2019-07-09T02:01:06 | 196,197,570 | 0 | 0 | Apache-2.0 | 2019-07-10T12:02:51 | 2019-07-10T12:02:50 | null | UTF-8 | Python | false | false | 10,606 | py | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
#
# service node of hsds cluster
#
import os.path as op
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound, HTTPInternalServerError
from util.idUtil import getDataNodeUrl, getCollectionForId, isSchema2Id, getS3Key
from util.s3Util import getS3JSONObj
from util.authUtil import aclCheck
from util.httpUtil import http_get
from util.domainUtil import getBucketForDomain
import hsds_logger as log
async def getDomainJson(app, domain, reload=False):
    """Fetch the JSON record for *domain*, using the SN-side cache.

    A cache hit is returned directly unless *reload* is true, in which
    case the entry is evicted and re-fetched from the data node.  Only
    valid on service nodes.
    """
    log.info(f"getDomainJson({domain}, reload={reload})")
    if app["node_type"] != "sn":
        log.error("wrong node_type")
        raise HTTPInternalServerError()

    cache = app["domain_cache"]
    if domain in cache:
        if not reload:
            log.debug("returning domain_cache value")
            return cache[domain]
        # Stale-by-request: evict and fall through to a fresh fetch.
        del cache[domain]

    req = getDataNodeUrl(app, domain) + "/domains"
    params = {"domain": domain}
    log.debug(f"sending dn req: {req}")
    domain_json = await http_get(app, req, params=params)

    # Sanity-check the DN response before caching it.
    if 'owner' not in domain_json:
        log.warn("No owner key found in domain")
        raise HTTPInternalServerError()
    if 'acls' not in domain_json:
        log.warn("No acls key found in domain")
        raise HTTPInternalServerError()

    cache[domain] = domain_json
    return domain_json
async def validateAction(app, domain, obj_id, username, action):
    """Check that *obj_id* belongs to *domain* and that *username* may
    perform *action* (create, read, update, delete, readACL, updateACL).

    Raises HTTPBadRequest when the object is not a member of the domain,
    HTTPInternalServerError for an unknown action, and whatever aclCheck
    raises (HTTPForbidden) when the user lacks permission.
    """
    meta_cache = app['meta_cache']
    log.info(f"validateAction(domain={domain}, obj_id={obj_id}, username={username}, action={action})")
    # get domain JSON
    domain_json = await getDomainJson(app, domain)
    if "root" not in domain_json:
        msg = f"Expected root key for domain: {domain}"
        log.warn(msg)
        raise HTTPBadRequest(reason=msg)
    obj_json = None
    if obj_id in meta_cache:
        obj_json = meta_cache[obj_id]
    else:
        # fetch from DN
        collection = getCollectionForId(obj_id)
        req = getDataNodeUrl(app, obj_id)
        req += '/' + collection + '/' + obj_id
        bucket = getBucketForDomain(domain)
        params = {}
        if bucket:
            params["bucket"] = bucket
        obj_json = await http_get(app, req, params=params)
        meta_cache[obj_id] = obj_json
    log.debug("obj_json[root]: {} domain_json[root]: {}".format(obj_json["root"], domain_json["root"]))
    if obj_json["root"] != domain_json["root"]:
        # The cached domain may be stale (e.g. domain deleted/recreated);
        # refetch once before declaring a mismatch.
        log.info("unexpected root, reloading domain")
        domain_json = await getDomainJson(app, domain, reload=True)
        if "root" not in domain_json or obj_json["root"] != domain_json["root"]:
            msg = "Object id is not a member of the given domain"
            log.warn(msg)
            raise HTTPBadRequest(reason=msg)
    if action not in ("create", "read", "update", "delete", "readACL", "updateACL"):
        log.error(f"unexpected action: {action}")
        raise HTTPInternalServerError()
    reload = False
    try:
        aclCheck(domain_json, action, username)  # throws exception if not allowed
    except HTTPForbidden:
        # The ACL may have been updated since we cached the domain; retry
        # once against a freshly loaded copy before giving up.
        log.info(f"got HttpProcessing error on validate action for domain: {domain}, reloading...")
        # just in case the ACL was recently updated, refetch the domain
        reload = True
    if reload:
        domain_json = await getDomainJson(app, domain, reload=True)
        aclCheck(domain_json, action, username)
async def getObjectJson(app, obj_id, bucket=None, refresh=False, include_links=False, include_attrs=False):
    """Return the top-level JSON for *obj_id*.

    Serves from the meta_cache unless *refresh* is set (or implied);
    fetched results are written back to the cache.  Cached values may be
    stale, but immutable fields (e.g. a dataset's type) remain valid.
    Raises HTTPNotFound when the object does not exist.
    """
    meta_cache = app['meta_cache']
    # Links and attributes can change at any time, so they always force a
    # fresh fetch from the data node.
    if include_links or include_attrs:
        refresh = True
    log.info(f"getObjectJson {obj_id}")

    if not refresh and obj_id in meta_cache:
        log.debug(f"found {obj_id} in meta_cache")
        obj_json = meta_cache[obj_id]
    else:
        collection = getCollectionForId(obj_id)
        req = getDataNodeUrl(app, obj_id) + '/' + collection + '/' + obj_id
        params = {}
        for enabled, key in ((include_links, "include_links"), (include_attrs, "include_attrs")):
            if enabled:
                params[key] = 1
        if bucket:
            params["bucket"] = bucket
        obj_json = await http_get(app, req, params=params)  # throws 404 if doesn't exist
        meta_cache[obj_id] = obj_json

    if obj_json is None:
        msg = f"Object: {obj_id} not found"
        log.warn(msg)
        raise HTTPNotFound()
    return obj_json
async def getObjectIdByPath(app, obj_id, h5path, bucket=None, refresh=False):
    """Resolve *h5path* relative to group *obj_id* by following hard links.

    Args:
        app: application state map.
        obj_id: id of the group to start the traversal from.
        h5path: slash-separated path; a leading "./" is stripped.
        bucket: optional bucket name forwarded to the DN request.
        refresh: accepted for interface compatibility; not used here.

    Returns:
        The id of the object the path resolves to.

    Raises:
        HTTPNotFound: if a path component is missing, the traversal hits a
            non-group object, or a component is a soft/external link
            (only hard links are followed).
    """
    log.info(f"getObjectIdByPath obj_id: {obj_id} h5path: {h5path} refresh: {refresh}")
    if h5path.startswith("./"):
        h5path = h5path[2:]  # treat as relative path
    links = h5path.split('/')
    for link in links:
        if not link:
            continue  # skip empty link
        log.debug(f"getObjectIdByPath for objid: {obj_id} got link: {link}")
        if getCollectionForId(obj_id) != "groups":
            # not a group, so won't have links
            msg = f"h5path: {h5path} not found"
            log.warn(msg)
            raise HTTPNotFound()
        req = getDataNodeUrl(app, obj_id)
        req += "/groups/" + obj_id + "/links/" + link
        log.debug("get LINK: " + req)
        params = {}
        if bucket:
            params["bucket"] = bucket
        link_json = await http_get(app, req, params=params)
        log.debug("got link_json: " + str(link_json))
        if link_json["class"] != 'H5L_TYPE_HARD':
            # don't follow soft/external links
            msg = f"h5path: {h5path} not found"
            log.warn(msg)
            # Bug fix: this is a client-visible "not found" (as the log and
            # message already say), not a server error -- raise 404 to match
            # the docstring and the non-group branch above, instead of the
            # previous HTTPInternalServerError.
            raise HTTPNotFound()
        obj_id = link_json["id"]
    # if we get here, we've traversed the entire path and found the object
    return obj_id
async def getPathForObjectId(app, parent_id, idpath_map, tgt_id=None, bucket=None):
    """Depth-first search of the link graph starting at *parent_id*.

    *idpath_map* must already map parent_id to its h5path; it is extended
    in place with the path of every object visited.  When *tgt_id* is
    given, returns the first h5path found for it (or None if absent).

    NOTE(review): when tgt_id is None the return value is whatever the
    last recursive call produced (often None) -- callers wanting the full
    map should rely on the in-place mutation of idpath_map, not on the
    return value.
    """
    if not parent_id:
        log.error("No parent_id passed to getPathForObjectId")
        raise HTTPInternalServerError()
    if parent_id not in idpath_map:
        msg = f"Obj {parent_id} expected to be found in idpath_map"
        log.error(msg)
        raise HTTPInternalServerError()
    parent_path = idpath_map[parent_id]
    if parent_id == tgt_id:
        return parent_path
    req = getDataNodeUrl(app, parent_id)
    req += "/groups/" + parent_id + "/links"
    params = {}
    if bucket:
        params["bucket"] = bucket
    log.debug("getPathForObjectId LINKS: " + req)
    links_json = await http_get(app, req, params=params)
    log.debug(f"getPathForObjectId got links json from dn for parent_id: {parent_id}")
    links = links_json["links"]
    h5path = None
    for link in links:
        if link["class"] != "H5L_TYPE_HARD":
            continue  # ignore everything except hard links
        link_id = link["id"]
        if link_id in idpath_map:
            continue  # this node has already been visited
        title = link["title"]
        if tgt_id is not None and link_id == tgt_id:
            # found it!
            h5path = op.join(parent_path, title)
            break
        idpath_map[link_id] = op.join(parent_path, title)
        if getCollectionForId(link_id) != "groups":
            # only groups have links; no need to recurse into datasets/types
            continue
        h5path = await getPathForObjectId(app, link_id, idpath_map, tgt_id=tgt_id, bucket=bucket)  # recursive call
        if tgt_id is not None and h5path:
            break
    return h5path
async def getRootInfo(app, root_id, bucket=None):
    """Return the summary info object stored alongside a schema-v2 root.

    Returns None for schema-v1 ids, for unexpectedly formatted keys, or
    when no .info.json object exists for the root.
    """
    log.debug(f"getRootInfo {root_id}")

    # Detail objects only exist for the v2 storage schema.
    if not isSchema2Id(root_id):
        log.info(f"no dataset details not available for schema v1 id: {root_id} returning null results")
        return None

    root_key = getS3Key(root_id)
    key_parts = root_key.split('/')
    # Root keys are expected to have three components (db/<root>/<name>);
    # the info object then lives at db/<root>/.info.json.
    if len(key_parts) != 3:
        log.error(f"Unexpected s3key format: {root_key}")
        return None
    info_key = f"db/{key_parts[1]}/.info.json"

    try:
        return await getS3JSONObj(app, info_key, bucket=bucket)
    except HTTPNotFound:
        log.warn(f"info.json not found for key: {info_key}")
        return None
| [
"jreadey@hdfgroup.org"
] | jreadey@hdfgroup.org |
a897fbb8f1cb4baa75f28128d345369671e67439 | 7d5caac5338d52f848bba7cae737e3fd0cd142e5 | /cv/pytorch/unet.py | 49a5fbe8dd50d1639e372e0b30c7f48438df7af1 | [
"MIT"
] | permissive | sebastiancepeda/pedro | a99fba453ee2865bbf6f9e29a604ce99dff634a2 | f606db53aa7ad4f77cd813ac771a0aef3f98b4d8 | refs/heads/master | 2023-02-06T04:06:02.395819 | 2020-12-19T17:57:12 | 2020-12-19T17:57:12 | 280,280,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | """
@author: Sebastian Cepeda
@email: sebastian.cepeda.fuentealba@gmail.com
"""
import torch
import torch.nn as nn
from loguru import logger
def double_conv(in_c, out_c):
    """Two unpadded 3x3 convolutions, each followed by an in-place ReLU.

    Maps *in_c* input channels to *out_c* output channels; each conv
    shrinks the spatial dims by 2 (no padding).
    """
    layers = [
        nn.Conv2d(in_c, out_c, kernel_size=3),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_c, out_c, kernel_size=3),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def crop_img(in_tensor, out_tensor):
    """Center-crop *in_tensor*'s spatial dims to match *out_tensor*.

    Args:
        in_tensor: (N, C, H, W) tensor from the contracting path.
        out_tensor: tensor whose spatial size is the crop target; only its
            size is read.

    Returns:
        A view of in_tensor cropped to out_tensor's spatial size.

    NOTE: only dim 2 is inspected, so square feature maps are assumed, as
    in the classic U-Net.
    """
    out_size = out_tensor.size()[2]
    in_size = in_tensor.size()[2]
    delta = (in_size - out_size) // 2
    # Bug fix: slice an exact out_size window.  The previous
    # `delta:in_size - delta` form returned out_size + 1 rows/cols
    # whenever the size difference was odd, breaking the subsequent
    # torch.cat with the decoder feature map.
    return in_tensor[:, :, delta:delta + out_size, delta:delta + out_size]
class UNet(nn.Module):
    """U-Net encoder/decoder for semantic segmentation.

    Uses unpadded 3x3 convolutions, so every stage shrinks the feature
    map; skip connections from the contracting path are therefore
    center-cropped (crop_img) before concatenation in the expanding path,
    and the output map is smaller than the input.
    """

    def __init__(self, in_channels, out_channels):
        """Build the contracting path (64..1024 channels), the expanding
        path of transposed convolutions, and the final 1x1 classifier
        producing *out_channels* score maps."""
        super(UNet, self).__init__()
        # Down convolutions
        self.max_pool_2x2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.down_conv_1 = double_conv(in_channels, 64)
        self.down_conv_2 = double_conv(64, 128)
        self.down_conv_3 = double_conv(128, 256)
        self.down_conv_4 = double_conv(256, 512)
        self.down_conv_5 = double_conv(512, 1024)
        # Up convolutions (each ConvTranspose2d doubles the spatial size
        # and halves the channel count; each up_conv fuses the skip input)
        self.up_trans_1 = nn.ConvTranspose2d(
            in_channels=1024, out_channels=512,
            kernel_size=2, stride=2)
        self.up_conv_1 = double_conv(1024, 512)
        self.up_trans_2 = nn.ConvTranspose2d(
            in_channels=512, out_channels=256,
            kernel_size=2, stride=2)
        self.up_conv_2 = double_conv(512, 256)
        self.up_trans_3 = nn.ConvTranspose2d(
            in_channels=256, out_channels=128,
            kernel_size=2, stride=2)
        self.up_conv_3 = double_conv(256, 128)
        self.up_trans_4 = nn.ConvTranspose2d(
            in_channels=128, out_channels=64,
            kernel_size=2, stride=2)
        self.up_conv_4 = double_conv(128, 64)
        # Final 1x1 conv maps 64 features to per-class score maps.
        self.out = nn.Conv2d(
            in_channels=64,
            out_channels=out_channels,
            kernel_size=1
        )

    def forward(self, image):
        """Run the full encoder/decoder on *image* (N, C, H, W)."""
        # Encoder: x1/x3/x5/x7 are kept as skip connections.
        x1 = self.down_conv_1(image)
        x3 = self.max_pool_2x2(x1)
        x3 = self.down_conv_2(x3)
        x5 = self.max_pool_2x2(x3)
        x5 = self.down_conv_3(x5)
        x7 = self.max_pool_2x2(x5)
        x7 = self.down_conv_4(x7)
        x = self.max_pool_2x2(x7)
        x = self.down_conv_5(x)
        # Decoder: upsample, crop the matching skip tensor, concatenate
        # along the channel dim, then fuse with a double conv.
        x = self.up_trans_1(x)
        y = crop_img(x7, x)
        x = self.up_conv_1(torch.cat([x, y], 1))
        x = self.up_trans_2(x)
        y = crop_img(x5, x)
        x = self.up_conv_2(torch.cat([x, y], 1))
        x = self.up_trans_3(x)
        y = crop_img(x3, x)
        x = self.up_conv_3(torch.cat([x, y], 1))
        x = self.up_trans_4(x)
        y = crop_img(x1, x)
        x = self.up_conv_4(torch.cat([x, y], 1))
        x = self.out(x)
        return x
| [
"sebastian.cepeda.fuentealba@gmail.com"
] | sebastian.cepeda.fuentealba@gmail.com |
2a25bf6b9a51231b30a3293542d3b07a346b599e | 69d8613caa231879040594b559b073af97729c39 | /courseinfo/migrations/0001_initial.py | 559fcb3e3774aac7a033aa87a59494a9c3fac183 | [] | no_license | jiajunc/Chen_Jiajun_ez_university | 999bd0fef5b11e6fd89d5d78934fb75a8ad43e97 | 95afe340e5ff865398911b71f61735d8aa2fbfcd | refs/heads/master | 2022-12-11T21:17:57.299143 | 2018-12-10T00:13:21 | 2018-12-10T00:13:21 | 161,076,229 | 0 | 0 | null | 2018-12-09T20:02:50 | 2018-12-09T19:59:39 | Python | UTF-8 | Python | false | false | 3,048 | py | # Generated by Django 2.0.5 on 2018-09-22 18:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the courseinfo app (auto-generated by Django).

    Creates the Course, Instructor, Registration, Section, Semester and
    Student tables; some foreign keys are added afterwards via AddField
    (Django orders them to satisfy creation dependencies).  Applied
    migrations should not be hand-edited.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('course_id', models.AutoField(primary_key=True, serialize=False)),
                ('course_number', models.CharField(max_length=20)),
                ('course_name', models.CharField(max_length=225)),
            ],
        ),
        migrations.CreateModel(
            name='Instructor',
            fields=[
                ('instructor_id', models.AutoField(primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=45)),
                ('last_name', models.CharField(max_length=45)),
            ],
        ),
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('registration_id', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('section_id', models.AutoField(primary_key=True, serialize=False)),
                ('section_name', models.CharField(max_length=10)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sections', to='courseinfo.Course')),
                ('instructor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sections', to='courseinfo.Instructor')),
            ],
        ),
        migrations.CreateModel(
            name='Semester',
            fields=[
                ('semester_id', models.AutoField(primary_key=True, serialize=False)),
                ('semester_name', models.CharField(max_length=45, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('student_id', models.AutoField(primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=45)),
                ('last_name', models.CharField(max_length=45)),
                ('nickname', models.CharField(blank=True, default='', max_length=45)),
            ],
        ),
        migrations.AddField(
            model_name='section',
            name='semester',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sections', to='courseinfo.Semester'),
        ),
        migrations.AddField(
            model_name='registration',
            name='section',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='registrations', to='courseinfo.Section'),
        ),
        migrations.AddField(
            model_name='registration',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='registrations', to='courseinfo.Student'),
        ),
    ]
| [
"jiajunc2@illinois.edu"
] | jiajunc2@illinois.edu |
b6593f0ee2bf3873100eb8247b962bc8e6c053d0 | 961b871a1244e79ce923152724d507f8332886b4 | /minimal-ci.py | 3f1ee2a9a40cb2185237dbbe3d95ff0927eed514 | [] | no_license | EmbLux-Kft/tbot-tbot2go | 9adc597507529a7868a35ae175f843a1181716eb | 63c0ee8fdae7e2ea39ae39da3e70cb73004130c4 | refs/heads/master | 2022-12-26T01:22:30.368411 | 2020-06-15T08:18:46 | 2020-06-17T05:51:51 | 259,633,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,107 | py | #!/usr/bin/env python3
import argparse
import json
import os
import schedule
import subprocess
import sys
import time
# pip3 install schedule --user
# https://schedule.readthedocs.io/en/stable/
def parse_arguments():
    """Parse the minimal-CI command line and return the argparse namespace.

    Options: -f force a board build, -l list all boards, -n board name,
    -c path to the JSON config file.
    """
    cli = argparse.ArgumentParser(description='minimal CI')
    cli.add_argument('-f', action='store_true', default=False, dest='force',
                     help='force board build')
    cli.add_argument('-l', action='store_true', default=False, dest='list',
                     help='list all boards')
    cli.add_argument("-n", dest="name", type=str,
                     help="boardname for force")
    cli.add_argument("-c", dest="cfgfile", default='./minimal-ci.json', type=str,
                     help="cfgfile")
    return cli.parse_args()
def parse_config(filename):
    """Load the CI configuration from the JSON file *filename* and return it."""
    with open(filename) as cfg_file:
        return json.load(cfg_file)
def test_one_board(cfg, name):
    """Run the tbot test for one board and push its results to the server.

    Looks up *name* in ``cfg["tests"]``, then drives an interactive bash
    subshell: exports the server credentials and per-board log paths,
    runs tbot under ``timeout``, copies the System.map via scp, and
    uploads everything with ``push-testresult.py``.

    Args:
        cfg: parsed JSON configuration (server settings + "tests" list).
        name: board name; must match a "name" entry in ``cfg["tests"]``.

    Exits the whole process with status 1 when the board is unknown.

    NOTE(review): config/test values are interpolated unquoted into shell
    command lines — this assumes a fully trusted configuration file.
    """
    print(f'Test board {name}')
    # Locate the per-board test description.
    tests = cfg["tests"]
    test = None
    for t in tests:
        if t["name"] == name:
            test = t
            break
    if test == None:
        print(f'board {name} not found in config.')
        sys.exit(1)
    # cfg supplies the environment variables, test the per-board parameters.
    print("Test ", test)
    # Expand the {boarddir} placeholder in the configured path templates.
    tbotlog = cfg["TBOT_LOGFILE"].format(boarddir=name)
    tbotoutput = cfg["TBOT_STDIO_LOGFILE"].format(boarddir=name)
    systemmap = cfg["TBOT_SYSTEMMAP"].format(boarddir=name)
    print("tbotlog ", tbotlog)
    print("tbotout ", tbotoutput)
    print("sysemm ", systemmap)
    path = os.path.dirname(tbotlog)
    print("PATH ", path)
    # Start an unbuffered interactive subshell; all commands below are fed
    # through its stdin and their combined output is read back from stdout.
    bash = subprocess.Popen(["bash"],
            stdin =subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            bufsize=0)
    # Feed the shell commands line by line through stdin.
    bash.stdin.write("uname -a\n")
    bash.stdin.write(f'export SERVER_PORT={cfg["SERVER_PORT"]}\n')
    bash.stdin.write(f'export SERVER_URL={cfg["SERVER_URL"]}\n')
    bash.stdin.write(f'export SERVER_USER={cfg["SERVER_USER"]}\n')
    bash.stdin.write(f'export SERVER_PASSWORD={cfg["SERVER_PASSWORD"]}\n')
    bash.stdin.write(f'export TBOT_STDIO_LOGFILE={tbotoutput}\n')
    bash.stdin.write(f'export TBOT_LOGFILE={tbotlog}\n')
    bash.stdin.write(f'export TBOT_SYSTEMMAP={systemmap}\n')
    # Echo the exports so they show up in the captured output for debugging.
    bash.stdin.write("echo $TBOT_LOGFILE\n")
    bash.stdin.write("echo $TBOT_SYSTEMMAP\n")
    bash.stdin.write("echo $SERVER_PORT\n")
    # Clear out stale results and (re)create the temporary result path.
    bash.stdin.write(f'rm {path}/*\n')
    bash.stdin.write(f'mkdir -p {path}\n')
    # Run tbot with a hard kill 9s after the configured timeout expires;
    # tee mirrors the console output into the stdio log file.
    bash.stdin.write(f'timeout -k 9 {test["timeout"]} tbot {test["tbotargs"]} {test["tbottest"]} --log {tbotlog} | tee {tbotoutput}\n')
    bash.stdin.write(f'sync\n')
    # copy tbot output into tbotoutput
    # ToDo do this in tbot with shell_copy
    bash.stdin.write(f'scp {test["systemmappath"]} {systemmap}\n')
    # Upload the tbot log to the result server.
    bash.stdin.write(f'./push-testresult.py -p {cfg["TBOTPATH"]} -f {tbotlog}\n')
    bash.stdin.write(f'cat results/pushresults/tbot.txt\n')
    # Closing stdin lets bash finish the queued commands and exit.
    bash.stdin.close()
    # Relay the subshell's output to our own stdout.
    for line in bash.stdout:
        print(line)
def main() -> None:  # noqa: C901
    """Entry point: list boards, force a single run, or start the scheduler."""
    options = parse_arguments()
    cfg = parse_config(options.cfgfile)

    # Listing mode: print every configured board name and quit.
    if options.list:
        print("Boards :")
        for entry in cfg["tests"]:
            print(entry["name"])
        sys.exit(0)

    # Force mode: test the requested board immediately and quit.
    if options.force:
        print(f'Force testing board {options.name}')
        test_one_board(cfg, options.name)
        sys.exit(0)

    # Default mode: register one daily job per board, then poll forever.
    for entry in cfg["tests"]:
        print(f'Schedule {entry["name"]} @ {entry["starttime"]}')
        schedule.every().day.at(entry["starttime"]).do(
            test_one_board, cfg=cfg, name=entry["name"])
    while True:
        schedule.run_pending()
        time.sleep(1)
# Run the CI entry point only when executed as a script.
if __name__ == "__main__":
    main()
| [
"hs@denx.de"
] | hs@denx.de |
ba9cb4e07e522b73623d797209c7b5ec2a962c34 | 19229619c160d4625226205696120b9ce594e711 | /build/catkin_generated/order_packages.py | 74e26e55f252e05387e83a90778e0e6a1cea08ca | [] | no_license | BetterEthan/elevation_map | 843cb5b06ee40991c8165860fe3a3e2826e9207a | cc937f4f723d5974a45ce912e16c23612c48e659 | refs/heads/master | 2020-03-27T06:12:44.332991 | 2018-08-25T11:45:48 | 2018-08-25T11:45:48 | 146,088,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Workspace layout used by catkin when ordering packages.
source_root_dir = "/home/xp/Desktop/catkin_learning/src"

# catkin substitutes ';'-separated lists into this template; an empty
# substitution must yield an empty list rather than [''].
_raw_whitelist = ""
_raw_blacklist = ""
_raw_underlays = "/home/xp/Desktop/catkin_learning/devel;/home/xp/catkin_ws/devel;/opt/ros/kinetic"

whitelisted_packages = _raw_whitelist.split(';') if _raw_whitelist != "" else []
blacklisted_packages = _raw_blacklist.split(';') if _raw_blacklist != "" else []
underlay_workspaces = _raw_underlays.split(';') if _raw_underlays != "" else []
| [
"1002531620@qq.com"
] | 1002531620@qq.com |
b970cb7a9421a179fb53f5272a8b21908a4e9e7e | 8b81588cea990aca1ecc4ce3fe45847cc46e7d00 | /x11/library/libXScrnSaver/actions.py | 676e427c37687edf597e963446797e167056b929 | [] | no_license | Zaryob/SulinRepository | 67a4a6d15d909422f73d5ec4bbc8bd16f40057a9 | c89c643b9773d191996d721b262dd739e4203bc0 | refs/heads/main | 2021-06-12T19:30:34.281242 | 2019-04-18T17:56:24 | 2019-04-18T17:56:24 | 201,469,580 | 11 | 2 | null | 2021-06-02T16:51:13 | 2019-08-09T13:08:57 | Roff | UTF-8 | Python | false | false | 507 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
    """Regenerate the autotools build system and configure the build."""
    # -vif: verbose, install missing auxiliary files, force regeneration.
    autotools.autoreconf("-vif")
    # Build shared libraries only; static archives are not packaged.
    autotools.configure("--disable-static")
def build():
    """Compile the sources with the generated Makefiles."""
    autotools.make()
def install():
    """Install into the package staging directory and add documentation."""
    # DESTDIR redirects the install into inary's staging root, not /.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Ship changelog and license files with the package.
    inarytools.dodoc("ChangeLog", "COPYING", "README")
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
2cacc35dad927239826dea74300b3926c7cc1092 | cbca22133ba7c02ba0532bc046d7e6b0524c2f4c | /Matplotlib_With_PYQT/封装toolbar功能/fnag.py | 5b9a1a4d3a75baf71b162e3e9c3e93eb74751638 | [] | no_license | Inc175/ll_crowluya-Matplotlib_With_PYQT-master | a923c195121f5e1d382b702b6a9ea0732c60c204 | dcf1fd6725f4fffd0b7ff6b9298cc3635735b30d | refs/heads/master | 2021-09-24T23:58:02.044255 | 2018-10-05T16:00:11 | 2018-10-05T16:00:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | import sys
import matplotlib
import PyQt5.sip
# matplotlib的键盘按压事件引入到pyqt5中
# from matplotlib.backend_bases import key_press_handler
matplotlib.use("Qt5Agg")
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QSizePolicy, QAction, QLabel,
QWidget,QStackedWidget, QPushButton,QTabWidget, QAction, QMessageBox, QFileDialog, QHBoxLayout)
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from MatploWidget import PlotCanvas # qt绘制matplotlib图像的类
from mainFrom import Ui_MainWindow # 弹出为屏幕中心主窗口
from loadlines import load_all_lines # 加载数据
# 添加曲线到画布上
from PyQt5.QtWidgets import QDesktopWidget, QApplication, QMainWindow, QPushButton
from utils import log
# from MatploWidget import PlotCanvas
# Scratch demo: draw the first loaded curve on a Qt-embedded PlotCanvas.
fig = plt.figure()
# NOTE(review): `fig` and `ax` are created but never used below —
# presumably leftovers from an earlier pyplot-only version; confirm
# before removing.
ax = fig.add_subplot(111)
# Project data loader; assumed to return a sequence of curves — TODO confirm.
lines = load_all_lines()
tab1 = PlotCanvas(width=9, height=6, dpi=100)
# Render only the first curve on the canvas widget.
tab1.draw_one_line(lines[0])
# fig.add_subplot(tab1)
# Flush the canvas so the drawn figure becomes visible.
tab1.draw()
# plt.show()
"s1107308633@gmail.com"
] | s1107308633@gmail.com |
25a3487a2b2eed5f4cec9a42f90a93353f29c9d9 | 6d2d1d4c4f82392c59b9028e6cf75071a3ad6e9b | /common/email_handler.py | 2f74fda2c4e2f8823102e1e56f6531bfdc404c27 | [] | no_license | tiangan0529/myProject | 8ab294e653bd0ae121470f2cd114a1bbff0ad99d | 8e9564b3c69b65eb4e01203b5ab33a91a54efe34 | refs/heads/master | 2023-03-21T01:58:30.982769 | 2021-03-12T03:08:12 | 2021-03-12T03:08:12 | 296,203,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | # coding: utf-8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
from datetime import datetime
from middleware.handler import Handler
def send_mail(report_path, project_name):
    '''Send the HTML test report by email.

    The report file is used both as the HTML mail body and attached as a
    downloadable file.  SMTP server, credentials and addresses come from
    the middleware Handler's YAML configuration (``email`` section).

    Args:
        report_path: path of the generated HTML test report.
        project_name: project name shown in the mail subject.

    Returns:
        True when the mail was sent, False on any SMTP error.
    '''
    with open(report_path, 'rb') as f:
        # The report HTML doubles as the mail body.
        mail_body = f.read()
    # Timestamp keeps repeated report subjects distinct so sends don't collide.
    now = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Pull all mail settings from the shared YAML configuration.
    hd = Handler()
    mail_cfg = hd.yaml['email']
    smtp_server = mail_cfg['smtp_server']
    sender = mail_cfg['smtp_sender']
    password = mail_cfg['smtp_sender_password']  # mailbox authorization code
    receiver = mail_cfg['smtp_receiver']

    smtp = None
    try:
        smtp = smtplib.SMTP(smtp_server, 25)
        smtp.login(sender, password)

        msg = MIMEMultipart()
        msg['Subject'] = Header('【测试用例执行结果】:' + project_name + '测试报告' + now, 'utf-8')
        msg['From'] = sender
        msg['To'] = receiver

        # HTML report as the mail body.
        msg.attach(MIMEText(mail_body, 'html', 'utf-8'))

        # Same report attached as a file.
        # Bug fix: the Content-Disposition parameter is 'filename', not
        # 'file_name' — the misspelled parameter was ignored by clients.
        attachment = MIMEText(mail_body, 'html', 'utf-8')
        attachment['Content-Type'] = 'application/octet-stream'
        attachment['Content-Disposition'] = 'attachment; filename="测试报告.html"'
        msg.attach(attachment)

        # 'To' may list several addresses separated by ';'.
        smtp.sendmail(msg['From'], msg['To'].split(';'), msg.as_string())
        return True
    except smtplib.SMTPException as e:
        print(str(e))
        return False
    finally:
        # Bug fix: always close the connection; the original leaked the
        # socket whenever login/sendmail raised.
        if smtp is not None:
            try:
                smtp.quit()
            except smtplib.SMTPException:
                pass
| [
"tiangan_529@163.com"
] | tiangan_529@163.com |
460914ffc6f670eac2f2f32e1ed4a7573c45911f | a31f6f825c82e7e7cb5bf5989f611bab671adac4 | /ssbio/protein/sequence/properties/tmhmm.py | bef39421d1ae655a2e9cda894c5819798cde3a2b | [
"MIT"
] | permissive | i386uk/ssbio | f7f8c634e2fa18f4a93613c82930cc321f067742 | 8b98339346d25effa1375e8c1f375e8fd3372b17 | refs/heads/master | 2020-04-01T21:21:13.798977 | 2018-10-21T10:24:42 | 2018-10-21T10:24:42 | 153,650,917 | 0 | 0 | null | 2018-10-18T16:00:55 | 2018-10-18T16:00:54 | null | UTF-8 | Python | false | false | 6,453 | py | from collections import defaultdict
import logging
log = logging.getLogger(__name__)
# Parsing TMHMM results
# There are two output formats: Long and short. Long output format
#
# For the long format (default), tmhmm gives some statistics and a list of the location of the predicted transmembrane helices and the predicted location of the intervening loop regions. Here is an example:
#
# # COX2_BACSU Length: 278
# # COX2_BACSU Number of predicted TMHs: 3
# # COX2_BACSU Exp number of AAs in TMHs: 68.6888999999999
# # COX2_BACSU Exp number, first 60 AAs: 39.8875
# # COX2_BACSU Total prob of N-in: 0.99950
# # COX2_BACSU POSSIBLE N-term signal sequence
# COX2_BACSU TMHMM2.0 inside 1 6
# COX2_BACSU TMHMM2.0 TMhelix 7 29
# COX2_BACSU TMHMM2.0 outside 30 43
# COX2_BACSU TMHMM2.0 TMhelix 44 66
# COX2_BACSU TMHMM2.0 inside 67 86
# COX2_BACSU TMHMM2.0 TMhelix 87 109
# COX2_BACSU TMHMM2.0 outside 110 278
#
# If the whole sequence is labeled as inside or outside, the prediction is that it contains no membrane
# helices. It is probably not wise to interpret it as a prediction of location. The prediction gives the most probable location and orientation of transmembrane helices in the sequence. It is found by an algorithm called N-best (or 1-best in this case) that sums over all paths through the model with the same location and direction of the helices.
#
# The first few lines gives some statistics:
#
# Length: the length of the protein sequence.
#
# Number of predicted TMHs: The number of predicted transmembrane helices.
#
# Exp number of AAs in TMHs: The expected number of amino acids intransmembrane helices. If this number is larger than 18 it is very likely to be a transmembrane protein (OR have a signal peptide).
#
# Exp number, first 60 AAs: The expected number of amino acids in transmembrane helices in the first 60 amino acids of the protein. If this number more than a few, you should be warned that a predicted transmembrane helix in the N-term could be a signal peptide.
#
# Total prob of N-in: The total probability that the N-term is on the cytoplasmic side of the membrane.
#
# POSSIBLE N-term signal sequence: a warning that is produced when "Exp number, first 60 AAs" is larger than 10.
def parse_tmhmm_long(tmhmm_results):
    """Parse TMHMM long-format output into per-gene annotations.

    Args:
        tmhmm_results (str): Path to a TMHMM long-format results file.

    Returns:
        defaultdict: gene ID -> dict with keys:
            - 'num_tm_helices' (int): predicted number of TM helices
            - 'sequence' (str): per-residue annotation string built from the
              predicted regions ('I' inside, 'O' outside, 'T' TM helix,
              '-' for an unrecognized region type)
    """
    # Region label -> single-character residue annotation.
    region_codes = {'outside': 'O', 'inside': 'I', 'TMhelix': 'T'}

    with open(tmhmm_results) as f:
        lines = f.read().splitlines()

    infodict = defaultdict(dict)
    for l in lines:
        if 'Number of predicted TMHs:' in l:
            gene = l.split(' Number')[0].strip('# ')
            infodict[gene]['num_tm_helices'] = int(l.split(': ')[1])

        if 'WARNING' in l:
            log.warning('{}: no TMHMM predictions'.format(l))
            continue

        # TODO: POSSIBLE N-term signal sequence - parse this
        # Non-comment lines hold the predicted regions:
        #   <gene> TMHMM2.0 <region> <start> <end>
        if '#' not in l:
            stuff = l.split()
            # Robustness fix: guard the field count — the original raised
            # IndexError on blank or malformed lines.
            if len(stuff) >= 5 and stuff[1] == 'TMHMM2.0':
                gene = stuff[0]
                region = stuff[2]
                region_start = stuff[3]
                region_end = stuff[4]
                if region not in region_codes:
                    # Bug fix: the original logged the (possibly undefined)
                    # annotation char `info` instead of the region name.
                    log.error('{}: unknown region type'.format(region))
                info = region_codes.get(region, '-')
                # Extend the per-residue annotation; TMHMM coordinates are
                # 1-based and inclusive, hence the +1 in the length.
                infodict[gene]['sequence'] = (
                    infodict[gene].get('sequence', '')
                    + info * (int(region_end) - int(region_start) + 1)
                )
    return infodict
def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq):
    """Locate TM-helix boundary residues and assign them to membrane leaflets.

    Args:
        tmhmm_seq: per-residue TMHMM annotation string ('I' inside,
            'O' outside, 'T' TM helix), e.g.
            g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm']

    Returns:
        tuple:
            - dict mapping 'TM_helix_<n>' to [first_residue, last_residue]
              (1-based, inclusive) for each predicted helix
            - dict mapping 'tmhmm_leaflet_O'/'tmhmm_leaflet_I' to the list
              of helix boundary residues flanked by that leaflet's label

    TODO:
        untested method!
    """
    # 1-based residue numbers of every 'T'-annotated position.
    helix_positions = [pos for pos, label in enumerate(tmhmm_seq, start=1)
                       if label == 'T']

    # Group the 'T' positions into helices: a gap in the numbering closes
    # the current helix and starts the next one.
    TM_boundary_dict = {}
    helix_no = 0
    start = None
    last = len(helix_positions) - 1
    for idx, pos in enumerate(helix_positions):
        if idx == 0:
            # First 'T' residue opens the first helix.
            start = pos
            continue
        if idx == last:
            # Last 'T' residue always closes the final helix.
            helix_no += 1
            TM_boundary_dict['TM_helix_' + str(helix_no)] = [start, pos]
            break
        if pos + 1 != helix_positions[idx + 1]:
            # Numbering gap ahead: close this helix, open the next one.
            helix_no += 1
            TM_boundary_dict['TM_helix_' + str(helix_no)] = [start, pos]
            start = helix_positions[idx + 1]

    # A boundary residue belongs to a leaflet when the annotation directly
    # before or after it carries that leaflet's label ('O' or 'I').
    leaflet_dict = {}
    for side in ['O', 'I']:
        members = []
        for bounds in TM_boundary_dict.values():
            for residue_num in bounds:
                seq_idx = residue_num - 1
                if (tmhmm_seq[seq_idx - 1] == side
                        or tmhmm_seq[seq_idx + 1] == side):
                    members.append(residue_num)
        leaflet_dict['tmhmm_leaflet_' + side] = members
    return TM_boundary_dict, leaflet_dict
"nmih@ucsd.edu"
] | nmih@ucsd.edu |
bd1fddb4123c81763480c846e78233894f1f62fa | a9f44dfa5cca5ca52d2cc8d30a3d12bd614fdcad | /hangman/words.py | c7f4985ab523c1fc6a485c79c5ad684740cd281c | [] | no_license | Williams44T/12-python-projects | c6d065297fbdcde23dc66e25ed308c16af3475f9 | 9c527b7c80599ce3dedfc03a46f82807295691c4 | refs/heads/main | 2023-04-07T18:24:48.129748 | 2021-04-21T18:39:45 | 2021-04-21T18:39:45 | 357,683,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,132 | py | # a list of 5000(?) words pulled from a file on stackoverflow
words = ["aback","abaft","abandoned","abashed","aberrant","abhorrent","abiding","abject","ablaze","able","abnormal","aboard","aboriginal","abortive","abounding","abrasive","abrupt","absent","absorbed","absorbing","abstracted","absurd","abundant","abusive","acceptable","accessible","accidental","accurate","acid","acidic","acoustic","acrid","actually","ad hoc","adamant","adaptable","addicted","adhesive","adjoining","adorable","adventurous","afraid","aggressive","agonizing","agreeable","ahead","ajar","alcoholic","alert","alike","alive","alleged","alluring","aloof","amazing","ambiguous","ambitious","amuck","amused","amusing","ancient","angry","animated","annoyed","annoying","anxious","apathetic","aquatic","aromatic","arrogant","ashamed","aspiring","assorted","astonishing","attractive","auspicious","automatic","available","average","awake","aware","awesome","awful","axiomatic","bad","barbarous","bashful","bawdy","beautiful","befitting","belligerent","beneficial","bent","berserk","best","better","bewildered","big","billowy","bite-sized","bitter","bizarre","black","black-and-white","bloody","blue","blue-eyed","blushing","boiling","boorish","bored","boring","bouncy","boundless","brainy","brash","brave","brawny","breakable","breezy","brief","bright","bright","broad","broken","brown","bumpy","burly","bustling","busy","cagey","calculating","callous","calm","capable","capricious","careful","careless","caring","cautious","ceaseless","certain","changeable","charming","cheap","cheerful","chemical","chief","childlike","chilly","chivalrous","chubby","chunky","clammy","classy","clean","clear","clever","cloistered","cloudy","closed","clumsy","cluttered","coherent","cold","colorful","colossal","combative","comfortable","common","complete","complex","concerned","condemned","confused","conscious","cooing","cool","cooperative","coordinated","courageous","cowardly","crabby","craven","crazy","creepy","crooked","crowded","cruel","cuddly","cultured","cumbersome","curious","curly","curved","cu
rvy","cut","cute","cute","cynical","daffy","daily","damaged","damaging","damp","dangerous","dapper","dark","dashing","dazzling","dead","deadpan","deafening","dear","debonair","decisive","decorous","deep","deeply","defeated","defective","defiant","delicate","delicious","delightful","demonic","delirious","dependent","depressed","deranged","descriptive","deserted","detailed","determined","devilish","didactic","different","difficult","diligent","direful","dirty","disagreeable","disastrous","discreet","disgusted","disgusting","disillusioned","dispensable","distinct","disturbed","divergent","dizzy","domineering","doubtful","drab","draconian","dramatic","dreary","drunk","dry","dull","dusty","dusty","dynamic","dysfunctional","eager","early","earsplitting","earthy","easy","eatable","economic","educated","efficacious","efficient","eight","elastic","elated","elderly","electric","elegant","elfin","elite","embarrassed","eminent","empty","enchanted","enchanting","encouraging","endurable","energetic","enormous","entertaining","enthusiastic","envious","equable","equal","erect","erratic","ethereal","evanescent","evasive","even","excellent","excited","exciting","exclusive","exotic","expensive","extra-large","extra-small","exuberant","exultant","fabulous","faded","faint","fair","faithful","fallacious","false","familiar","famous","fanatical","fancy","fantastic","far","far-flung","fascinated","fast","fat","faulty","fearful","fearless","feeble","feigned","female","fertile","festive","few","fierce","filthy","fine","finicky","first","five","fixed","flagrant","flaky","flashy","flat","flawless","flimsy","flippant","flowery","fluffy","fluttering","foamy","foolish","foregoing","forgetful","fortunate","four","frail","fragile","frantic","free","freezing","frequent","fresh","fretful","friendly","frightened","frightening","full","fumbling","functional","funny","furry","furtive","future","futuristic","fuzzy","gabby","gainful","gamy","gaping","garrulous","gaudy","general","gentle","giant","giddy","g
ifted","gigantic","glamorous","gleaming","glib","glistening","glorious","glossy","godly","good","goofy","gorgeous","graceful","grandiose","grateful","gratis","gray","greasy","great","greedy","green","grey","grieving","groovy","grotesque","grouchy","grubby","gruesome","grumpy","guarded","guiltless","gullible","gusty","guttural","habitual","half","hallowed","halting","handsome","handsomely","handy","hanging","hapless","happy","hard","hard-to-find","harmonious","harsh","hateful","heady","healthy","heartbreaking","heavenly","heavy","hellish","helpful","helpless","hesitant","hideous","high","highfalutin","high-pitched","hilarious","hissing","historical","holistic","hollow","homeless","homely","honorable","horrible","hospitable","hot","huge","hulking","humdrum","humorous","hungry","hurried","hurt","hushed","husky","hypnotic","hysterical","icky","icy","idiotic","ignorant","ill","illegal","ill-fated","ill-informed","illustrious","imaginary","immense","imminent","impartial","imperfect","impolite","important","imported","impossible","incandescent","incompetent","inconclusive","industrious","incredible","inexpensive","infamous","innate","innocent","inquisitive","insidious","instinctive","intelligent","interesting","internal","invincible","irate","irritating","itchy","jaded","jagged","jazzy","jealous","jittery","jobless","jolly","joyous","judicious","juicy","jumbled","jumpy","juvenile","kaput","keen","kind","kindhearted","kindly","knotty","knowing","knowledgeable","known","labored","lackadaisical","lacking","lame","lamentable","languid","large","last","late","laughable","lavish","lazy","lean","learned","left","legal","lethal","level","lewd","light","like","likeable","limping","literate","little","lively","lively","living","lonely","long","longing","long-term","loose","lopsided","loud","loutish","lovely","loving","low","lowly","lucky","ludicrous","lumpy","lush","luxuriant","lying","lyrical","macabre","macho","maddening","madly","magenta","magical","magnificent","majestic","makes
hift","male","malicious","mammoth","maniacal","many","marked","massive","married","marvelous","material","materialistic","mature","mean","measly","meaty","medical","meek","mellow","melodic","melted","merciful","mere","messy","mighty","military","milky","mindless","miniature","minor","miscreant","misty","mixed","moaning","modern","moldy","momentous","motionless","mountainous","muddled","mundane","murky","mushy","mute","mysterious","naive","nappy","narrow","nasty","natural","naughty","nauseating","near","neat","nebulous","necessary","needless","needy","neighborly","nervous","new","next","nice","nifty","nimble","nine","nippy","noiseless","noisy","nonchalant","nondescript","nonstop","normal","nostalgic","nosy","noxious","null","numberless","numerous","nutritious","nutty","oafish","obedient","obeisant","obese","obnoxious","obscene","obsequious","observant","obsolete","obtainable","oceanic","odd","offbeat","old","old-fashioned","omniscient","one","onerous","open","opposite","optimal","orange","ordinary","organic","ossified","outgoing","outrageous","outstanding","oval","overconfident","overjoyed","overrated","overt","overwrought","painful","painstaking","pale","paltry","panicky","panoramic","parallel","parched","parsimonious","past","pastoral","pathetic","peaceful","penitent","perfect","periodic","permissible","perpetual","petite","petite","phobic","physical","picayune","pink","piquant","placid","plain","plant","plastic","plausible","pleasant","plucky","pointless","poised","polite","political","poor","possessive","possible","powerful","precious","premium","present","pretty","previous","pricey","prickly","private","probable","productive","profuse","protective","proud","psychedelic","psychotic","public","puffy","pumped","puny","purple","purring","pushy","puzzled","puzzling","quack","quaint","quarrelsome","questionable","quick","quickest","quiet","quirky","quixotic","quizzical","rabid","racial","ragged","rainy","rambunctious","rampant","rapid","rare","raspy","ratty","ready","
real","rebel","receptive","recondite","red","redundant","reflective","regular","relieved","remarkable","reminiscent","repulsive","resolute","resonant","responsible","rhetorical","rich","right","righteous","rightful","rigid","ripe","ritzy","roasted","robust","romantic","roomy","rotten","rough","round","royal","ruddy","rude","rural","rustic","ruthless","sable","sad","safe","salty","same","sassy","satisfying","savory","scandalous","scarce","scared","scary","scattered","scientific","scintillating","scrawny","screeching","second","second-hand","secret","secretive","sedate","seemly","selective","selfish","separate","serious","shaggy","shaky","shallow","sharp","shiny","shivering","shocking","short","shrill","shut","shy","sick","silent","silent","silky","silly","simple","simplistic","sincere","six","skillful","skinny","sleepy","slim","slimy","slippery","sloppy","slow","small","smart","smelly","smiling","smoggy","smooth","sneaky","snobbish","snotty","soft","soggy","solid","somber","sophisticated","sordid","sore","sore","sour","sparkling","special","spectacular","spicy","spiffy","spiky","spiritual","spiteful","splendid","spooky","spotless","spotted","spotty","spurious","squalid","square","squealing","squeamish","staking","stale","standing","statuesque","steadfast","steady","steep","stereotyped","sticky","stiff","stimulating","stingy","stormy","straight","strange","striped","strong","stupendous","stupid","sturdy","subdued","subsequent","substantial","successful","succinct","sudden","sulky","super","superb","superficial","supreme","swanky","sweet","sweltering","swift","symptomatic","synonymous","taboo","tacit","tacky","talented","tall","tame","tan","tangible","tangy","tart","tasteful","tasteless","tasty","tawdry","tearful","tedious","teeny","teeny-tiny","telling","temporary","ten","tender","tense","tense","tenuous","terrible","terrific","tested","testy","thankful","therapeutic","thick","thin","thinkable","third","thirsty","thirsty","thoughtful","thoughtless","threatening","thre
e","thundering","tidy","tight","tightfisted","tiny","tired","tiresome","toothsome","torpid","tough","towering","tranquil","trashy","tremendous","tricky","trite","troubled","truculent","true","truthful","two","typical","ubiquitous","ugliest","ugly","ultra","unable","unaccountable","unadvised","unarmed","unbecoming","unbiased","uncovered","understood","undesirable","unequal","unequaled","uneven","unhealthy","uninterested","unique","unkempt","unknown","unnatural","unruly","unsightly","unsuitable","untidy","unused","unusual","unwieldy","unwritten","upbeat","uppity","upset","uptight","used","useful","useless","utopian","utter","uttermost","vacuous","vagabond","vague","valuable","various","vast","vengeful","venomous","verdant","versed","victorious","vigorous","violent","violet","vivacious","voiceless","volatile","voracious","vulgar","wacky","waggish","waiting","wakeful","wandering","wanting","warlike","warm","wary","wasteful","watery","weak","wealthy","weary","well-groomed","well-made","well-off","well-to-do","wet","whimsical","whispering","white","whole","wholesale","wicked","wide","wide-eyed","wiggly","wild","willing","windy","wiry","wise","wistful","witty","woebegone","womanly","wonderful","wooden","woozy","workable","worried","worthless","wrathful","wretched","wrong","wry","yellow","yielding","young","youthful","yummy","zany","zealous","zesty","zippy","zonked","account","achiever","acoustics","act","action","activity","actor","addition","adjustment","advertisement","advice","aftermath","afternoon","afterthought","agreement","air","airplane","airport","alarm","amount","amusement","anger","angle","animal","ants","apparatus","apparel","appliance","approval","arch","argument","arithmetic","arm","army","art","attack","attraction","aunt","authority","babies","baby","back","badge","bag","bait","balance","ball","base","baseball","basin","basket","basketball","bat","bath","battle","bead","bear","bed","bedroom","beds","bee","beef","beginner","behavior","belief","believe","bell"
,"bells","berry","bike","bikes","bird","birds","birth","birthday","bit","bite","blade","blood","blow","board","boat","bomb","bone","book","books","boot","border","bottle","boundary","box","boy","brake","branch","brass","breath","brick","bridge","brother","bubble","bucket","building","bulb","burst","bushes","business","butter","button","cabbage","cable","cactus","cake","cakes","calculator","calendar","camera","camp","can","cannon","canvas","cap","caption","car","card","care","carpenter","carriage","cars","cart","cast","cat","cats","cattle","cause","cave","celery","cellar","cemetery","cent","chalk","chance","change","channel","cheese","cherries","cherry","chess","chicken","chickens","children","chin","church","circle","clam","class","cloth","clover","club","coach","coal","coast","coat","cobweb","coil","collar","color","committee","company","comparison","competition","condition","connection","control","cook","copper","corn","cough","country","cover","cow","cows","crack","cracker","crate","crayon","cream","creator","creature","credit","crib","crime","crook","crow","crowd","crown","cub","cup","current","curtain","curve","cushion","dad","daughter","day","death","debt","decision","deer","degree","design","desire","desk","destruction","detail","development","digestion","dime","dinner","dinosaurs","direction","dirt","discovery","discussion","distance","distribution","division","dock","doctor","dog","dogs","doll","dolls","donkey","door","downtown","drain","drawer","dress","drink","driving","drop","duck","ducks","dust","ear","earth","earthquake","edge","education","effect","egg","eggnog","eggs","elbow","end","engine","error","event","example","exchange","existence","expansion","experience","expert","eye","eyes","face","fact","fairies","fall","fang","farm","fear","feeling","field","finger","finger","fire","fireman","fish","flag","flame","flavor","flesh","flight","flock","floor","flower","flowers","fly","fog","fold","food","foot","force","fork","form","fowl","frame","friction","
friend","friends","frog","frogs","front","fruit","fuel","furniture","gate","geese","ghost","giants","giraffe","girl","girls","glass","glove","gold","government","governor","grade","grain","grandfather","grandmother","grape","grass","grip","ground","group","growth","guide","guitar","gun","hair","haircut","hall","hammer","hand","hands","harbor","harmony","hat","hate","head","health","heat","hill","history","hobbies","hole","holiday","home","honey","hook","hope","horn","horse","horses","hose","hospital","hot","hour","house","houses","humor","hydrant","ice","icicle","idea","impulse","income","increase","industry","ink","insect","instrument","insurance","interest","invention","iron","island","jail","jam","jar","jeans","jelly","jellyfish","jewel","join","judge","juice","jump","kettle","key","kick","kiss","kittens","kitty","knee","knife","knot","knowledge","laborer","lace","ladybug","lake","lamp","land","language","laugh","leather","leg","legs","letter","letters","lettuce","level","library","limit","line","linen","lip","liquid","loaf","lock","locket","look","loss","love","low","lumber","lunch","lunchroom","machine","magic","maid","mailbox","man","marble","mark","market","mask","mass","match","meal","measure","meat","meeting","memory","men","metal","mice","middle","milk","mind","mine","minister","mint","minute","mist","mitten","mom","money","monkey","month","moon","morning","mother","motion","mountain","mouth","move","muscle","name","nation","neck","need","needle","nerve","nest","night","noise","north","nose","note","notebook","number","nut","oatmeal","observation","ocean","offer","office","oil","orange","oranges","order","oven","page","pail","pan","pancake","paper","parcel","part","partner","party","passenger","payment","peace","pear","pen","pencil","person","pest","pet","pets","pickle","picture","pie","pies","pig","pigs","pin","pipe","pizzas","place","plane","planes","plant","plantation","plants","plastic","plate","play","playground","pleasure","plot","plough","pocket","p
oint","poison","pollution","popcorn","porter","position","pot","potato","powder","power","price","produce","profit","property","prose","protest","pull","pump","punishment","purpose","push","quarter","quartz","queen","question","quicksand","quiet","quill","quilt","quince","quiver","rabbit","rabbits","rail","railway","rain","rainstorm","rake","range","rat","rate","ray","reaction","reading","reason","receipt","recess","record","regret","relation","religion","representative","request","respect","rest","reward","rhythm","rice","riddle","rifle","ring","rings","river","road","robin","rock","rod","roll","roof","room","root","rose","route","rub","rule","run","sack","sail","salt","sand","scale","scarecrow","scarf","scene","scent","school","science","scissors","screw","sea","seashore","seat","secretary","seed","selection","self","sense","servant","shade","shake","shame","shape","sheep","sheet","shelf","ship","shirt","shock","shoe","shoes","shop","show","side","sidewalk","sign","silk","silver","sink","sister","sisters","size","skate","skin","skirt","sky","slave","sleep","sleet","slip","slope","smash","smell","smile","smoke","snail","snails","snake","snakes","sneeze","snow","soap","society","sock","soda","sofa","son","song","songs","sort","sound","soup","space","spade","spark","spiders","sponge","spoon","spot","spring","spy","square","squirrel","stage","stamp","star","start","statement","station","steam","steel","stem","step","stew","stick","sticks","stitch","stocking","stomach","stone","stop","store","story","stove","stranger","straw","stream","street","stretch","string","structure","substance","sugar","suggestion","suit","summer","sun","support","surprise","sweater","swim","swing","system","table","tail","talk","tank","taste","tax","teaching","team","teeth","temper","tendency","tent","territory","test","texture","theory","thing","things","thought","thread","thrill","throat","throne","thumb","thunder","ticket","tiger","time","tin","title","toad","toe","toes","tomatoes","tongue"
,"tooth","toothbrush","toothpaste","top","touch","town","toy","toys","trade","trail","train","trains","tramp","transport","tray","treatment","tree","trees","trick","trip","trouble","trousers","truck","trucks","tub","turkey","turn","twig","twist","umbrella","uncle","underwear","unit","use","vacation","value","van","vase","vegetable","veil","vein","verse","vessel","vest","view","visitor","voice","volcano","volleyball","voyage","walk","wall","war","wash","waste","watch","water","wave","waves","wax","way","wealth","weather","week","weight","wheel","whip","whistle","wilderness","wind","window","wine","wing","winter","wire","wish","woman","women","wood","wool","word","work","worm","wound","wren","wrench","wrist","writer","writing","yak","yam","yard","yarn","year","yoke","zebra","zephyr","zinc","zipper","zoo","accept","add","admire","admit","advise","afford","agree","alert","allow","amuse","analyse","announce","annoy","answer","apologise","appear","applaud","appreciate","approve","argue","arrange","arrest","arrive","ask","attach","attack","attempt","attend","attract","avoid","back","bake","balance","ban","bang","bare","bat","bathe","battle","beam","beg","behave","belong","bleach","bless","blind","blink","blot","blush","boast","boil","bolt","bomb","book","bore","borrow","bounce","bow","box","brake","branch","breathe","bruise","brush","bubble","bump","burn","bury","buzz","calculate","call","camp","care","carry","carve","cause","challenge","change","charge","chase","cheat","check","cheer","chew","choke","chop","claim","clap","clean","clear","clip","close","coach","coil","collect","colour","comb","command","communicate","compare","compete","complain","complete","concentrate","concern","confess","confuse","connect","consider","consist","contain","continue","copy","correct","cough","count","cover","crack","crash","crawl","cross","crush","cry","cure","curl","curve","cycle","dam","damage","dance","dare","decay","deceive","decide","decorate","delay","delight","deliver","depend","de
scribe","desert","deserve","destroy","detect","develop","disagree","disappear","disapprove","disarm","discover","dislike","divide","double","doubt","drag","drain","dream","dress","drip","drop","drown","drum","dry","dust","earn","educate","embarrass","employ","empty","encourage","end","enjoy","enter","entertain","escape","examine","excite","excuse","exercise","exist","expand","expect","explain","explode","extend","face","fade","fail","fancy","fasten","fax","fear","fence","fetch","file","fill","film","fire","fit","fix","flap","flash","float","flood","flow","flower","fold","follow","fool","force","form","found","frame","frighten","fry","gather","gaze","glow","glue","grab","grate","grease","greet","grin","grip","groan","guarantee","guard","guess","guide","hammer","hand","handle","hang","happen","harass","harm","hate","haunt","head","heal","heap","heat","help","hook","hop","hope","hover","hug","hum","hunt","hurry","identify","ignore","imagine","impress","improve","include","increase","influence","inform","inject","injure","instruct","intend","interest","interfere","interrupt","introduce","invent","invite","irritate","itch","jail","jam","jog","join","joke","judge","juggle","jump","kick","kill","kiss","kneel","knit","knock","knot","label","land","last","laugh","launch","learn","level","license","lick","lie","lighten","like","list","listen","live","load","lock","long","look","love","man","manage","march","mark","marry","match","mate","matter","measure","meddle","melt","memorise","mend","mess 
up","milk","mine","miss","mix","moan","moor","mourn","move","muddle","mug","multiply","murder","nail","name","need","nest","nod","note","notice","number","obey","object","observe","obtain","occur","offend","offer","open","order","overflow","owe","own","pack","paddle","paint","park","part","pass","paste","pat","pause","peck","pedal","peel","peep","perform","permit","phone","pick","pinch","pine","place","plan","plant","play","please","plug","point","poke","polish","pop","possess","post","pour","practise","pray","preach","precede","prefer","prepare","present","preserve","press","pretend","prevent","prick","print","produce","program","promise","protect","provide","pull","pump","punch","puncture","punish","push","question","queue","race","radiate","rain","raise","reach","realise","receive","recognise","record","reduce","reflect","refuse","regret","reign","reject","rejoice","relax","release","rely","remain","remember","remind","remove","repair","repeat","replace","reply","report","reproduce","request","rescue","retire","return","rhyme","rinse","risk","rob","rock","roll","rot","rub","ruin","rule","rush","sack","sail","satisfy","save","saw","scare","scatter","scold","scorch","scrape","scratch","scream","screw","scribble","scrub","seal","search","separate","serve","settle","shade","share","shave","shelter","shiver","shock","shop","shrug","sigh","sign","signal","sin","sip","ski","skip","slap","slip","slow","smash","smell","smile","smoke","snatch","sneeze","sniff","snore","snow","soak","soothe","sound","spare","spark","sparkle","spell","spill","spoil","spot","spray","sprout","squash","squeak","squeal","squeeze","stain","stamp","stare","start","stay","steer","step","stir","stitch","stop","store","strap","strengthen","stretch","strip","stroke","stuff","subtract","succeed","suck","suffer","suggest","suit","supply","support","suppose","surprise","surround","suspect","suspend","switch","talk","tame","tap","taste","tease","telephone","tempt","terrify","test","thank","thaw","tick","t
ickle","tie","time","tip","tire","touch","tour","tow","trace","trade","train","transport","trap","travel","treat","tremble","trick","trip","trot","trouble","trust","try","tug","tumble","turn","twist","type","undress","unfasten","unite","unlock","unpack","untidy","use","vanish","visit","wail","wait","walk","wander","want","warm","warn","wash","waste","watch","water","wave","weigh","welcome","whine","whip","whirl","whisper","whistle","wink","wipe","wish","wobble","wonder","work","worry","wrap","wreck","wrestle","wriggle","x-ray","yawn","yell","zip","zoom"] | [
"williams44t@gmail.com"
] | williams44t@gmail.com |
42e748ffc45d9278916009d2483b54f316602368 | 7133de159c5cdc06b92bc5b168fe193caf0bea2a | /packages/grid_control/parameters/psource_data.py | f2f1003f2b7798eb21834106677ade5e27e87a17 | [] | no_license | thomas-mueller/grid-control | fac566c21bb79b0bd4439d36421a0c0b14bc8776 | 36f01d19b71c41c8dd55eddd190181db8849f920 | refs/heads/master | 2020-12-28T23:34:59.983357 | 2016-04-22T06:28:57 | 2016-04-22T06:28:57 | 56,689,010 | 0 | 0 | null | 2016-04-20T13:26:29 | 2016-04-20T13:26:29 | null | UTF-8 | Python | false | false | 4,408 | py | # | Copyright 2009-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, time
from grid_control import utils
from grid_control.datasets import DataProvider
from grid_control.gc_exceptions import UserError
from grid_control.parameters.psource_base import ParameterSource
from python_compat import md5_hex
class DataParameterSource(ParameterSource):
    """Parameter source that turns dataset partitions into job parameters.

    Blocks delivered by *dataProvider* are split into partitions by
    *dataSplitter*; partition number ``pNum`` supplies the parameters of job
    ``pNum``. Both the block list ('<srcName>cache.dat') and the partition
    map ('<srcName>map.tar') are persisted below *dataDir* so later runs and
    resync() can compare against them.
    """

    def __init__(self, dataDir, srcName, dataProvider, dataSplitter, dataProc, keepOld = True):
        """Load the cached partition map, or create cache + map from scratch.

        A falsy *dataProvider* puts the source into debug mode (used by
        scripts): nothing is written and resync is effectively disabled.
        """
        ParameterSource.__init__(self)
        (self._dataDir, self._srcName, self._dataProvider, self._dataSplitter, self._part_proc) = \
            (dataDir, srcName, dataProvider, dataSplitter, dataProc)
        if not dataProvider:
            pass # debug mode - used by scripts - disables resync
        elif os.path.exists(self.getDataPath('cache.dat')) and os.path.exists(self.getDataPath('map.tar')):
            # BUGFIX: the original tested os.path.exists(a and b), which only
            # checked the map file - a missing cache file then went unnoticed
            # until resync() tried to load it.
            self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
        else:
            DataProvider.saveToFile(self.getDataPath('cache.dat'), self._dataProvider.getBlocks(silent = False))
            self._dataSplitter.splitDataset(self.getDataPath('map.tar'), self._dataProvider.getBlocks())
        self._maxN = self._dataSplitter.getMaxJobs()
        self._keepOld = keepOld

    def getNeededDataKeys(self):
        """Keys the partition processor requires from the splitter."""
        return self._part_proc.getNeededKeys(self._dataSplitter)

    def getMaxParameters(self):
        """Number of parameter sets == number of partitions."""
        return self._maxN

    def fillParameterKeys(self, result):
        """Append the parameter keys provided by the partition processor."""
        result.extend(self._part_proc.getKeys())

    def fillParameterInfo(self, pNum, result):
        """Fill *result* with the parameters of partition *pNum*."""
        splitInfo = self._dataSplitter.getSplitInfo(pNum)
        self._part_proc.process(pNum, splitInfo, result)

    def getHash(self):
        """Hash over source name, partition count and resync state."""
        return md5_hex(str(self._srcName) + str(self._dataSplitter.getMaxJobs()) + str(self.resyncEnabled()))

    def show(self):
        return ['%s: src = %s' % (self.__class__.__name__, self._srcName)]

    def __repr__(self):
        # The default source ('data') is displayed as plain 'data()'.
        return 'data(%s)' % utils.QM(self._srcName == 'data', '', self._srcName)

    def getDataPath(self, postfix):
        """Path of a source-specific work file below the data directory."""
        return os.path.join(self._dataDir, self._srcName + postfix)

    def resync(self):
        """Synchronize the partitions with the current dataset content.

        Returns:
            (jobs to redo, jobs to disable, partition-count-changed flag)
        """
        (result_redo, result_disable, result_sizeChange) = ParameterSource.resync(self)
        if self.resyncEnabled() and self._dataProvider:
            # Get old and new dataset information
            old = DataProvider.loadFromFile(self.getDataPath('cache.dat')).getBlocks()
            self._dataProvider.clearCache()
            new = self._dataProvider.getBlocks()
            self._dataProvider.saveToFile(self.getDataPath('cache-new.dat'), new)
            # Use old splitting information to synchronize with new dataset infos
            jobChanges = self._dataSplitter.resyncMapping(self.getDataPath('map-new.tar'), old, new)
            if jobChanges:
                # Move current splitting to backup and use the new splitting from now on
                def backupRename(old, cur, new):
                    # Keep a timestamped backup of the current file if requested,
                    # then promote the freshly written file into place.
                    if self._keepOld:
                        os.rename(self.getDataPath(cur), self.getDataPath(old))
                    os.rename(self.getDataPath(new), self.getDataPath(cur))
                backupRename( 'map-old-%d.tar' % time.time(), 'map.tar', 'map-new.tar')
                backupRename('cache-old-%d.dat' % time.time(), 'cache.dat', 'cache-new.dat')
                old_maxN = self._dataSplitter.getMaxJobs()
                self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
                self._maxN = self._dataSplitter.getMaxJobs()
                result_redo.update(jobChanges[0])
                result_disable.update(jobChanges[1])
                result_sizeChange = result_sizeChange or (old_maxN != self._maxN)
            self.resyncFinished()
        return (result_redo, result_disable, result_sizeChange)

    def create(cls, pconfig = None, src = 'data'): # pylint:disable=arguments-differ
        """Look up a previously registered dataset parameter source by name."""
        if src not in DataParameterSource.datasetsAvailable:
            raise UserError('Dataset parameter source "%s" not setup!' % src)
        result = DataParameterSource.datasetsAvailable[src]
        DataParameterSource.datasetsUsed.append(result)
        return result
    create = classmethod(create)
# Registry of configured dataset parameter sources, keyed by source name.
DataParameterSource.datasetsAvailable = {}
# Instances handed out via create() - presumably tracked for resync bookkeeping;
# confirm against the setup code that populates datasetsAvailable.
DataParameterSource.datasetsUsed = []
# Register this class with the parameter source factory under the 'data' keyword.
ParameterSource.managerMap['data'] = 'DataParameterSource'
| [
"stober@cern.ch"
] | stober@cern.ch |
751ea6fcb29344879059d248d2c4a189ecaaeddb | 92ce549784dd4a7a7b42d453c1aa0d874e1cf907 | /selenium_driver.py | 6f0f24ac5b44b6f1b0f41cabfdd39cfe6c4a2eda | [] | no_license | joaovb/python-examples | 751a3daf7d6b14f448397ffcc4c7180781165706 | c2896dfcea5dfa47799ecbf105cff12342407b5c | refs/heads/master | 2020-05-19T20:30:30.751557 | 2019-05-06T13:33:35 | 2019-05-06T13:33:35 | 185,203,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import time
from selenium import webdriver

# Smoke test: open google.com.br in Chrome, print the page title,
# keep the window visible for a few seconds, then shut the browser down.
driver = webdriver.Chrome("C:/Pyautomators/drivers/chromedriver.exe")
try:
    driver.get("https://google.com.br")
    print(driver.title)
    time.sleep(8)
finally:
    # FIX: quit() used to be unconditional straight-line code, so a failed
    # navigation left a zombie Chrome process behind; now it always runs.
    driver.quit()
| [
"jvictorr@indracompany.com"
] | jvictorr@indracompany.com |
85907292c2ef3fca5acf6f665fb89a56d1f2d833 | 1cb2b065ac33f66a1041c1bf56bb7ecd8d3d6977 | /constants.py | aee5071d738c056a575260eaeb620bbeb898c6bc | [] | no_license | hjlloveff/function-write | 2ab2cb478fd5ebab07d089f456795865e9d4c056 | 62673228ef8580199eb656796e20850d64809212 | refs/heads/master | 2020-03-19T10:57:26.363089 | 2018-04-13T07:49:36 | 2018-04-13T07:49:36 | 136,416,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # -*- coding: utf-8 -*-
# Names of environment variables that override the content-service settings.
ENV_CONTENT_SERVER = 'CONTENT_SERVER'
ENV_CONTENT_KEY = 'CONTENT_KEY'
# Keys looked up in the configuration for the content server connection.
CFG_CONTENT_SERVER = 'server_url'
CFG_CONTENT_KEY = 'api_key'
# Configuration keys for the per-domain text templates.
CFG_WEATHER_TEMPLATE = 'weather_template'
CFG_SOCCER_TEMPLATE = 'soccer_template'
CFG_TIMEINFO_TEMPLATE = 'timeinfo_template'
# Fallbacks used when neither the environment nor the config provides a value.
CONTENT_SERVER_DEFAULT = 'https://content.emotibot.com'
CONTENT_KEY_DEFAULT = '2WDGS5SCH68RWDLC76BI9J6CZEKJM5QM'
# DataDog/statsd agent endpoint (presumably the Docker bridge gateway - verify
# against the deployment) and metric reporting settings.
DATADOG_HOST = '172.17.0.1'
DATADOG_PORT = 8125
DATADOG_NAMESPACE = 'emotibot'
DATADOG_TAGS = ['module:robotwriter']
DATADOG_USE_MS = True
| [
"mikechiu@emotibot.com"
] | mikechiu@emotibot.com |
697feaa0dac075b4ea26728acb624472073f183e | 4042de53b4afc486ae7a7d59383d30b6a96f2e1e | /.venv/bin/django-admin | e259cc1172a82763066558944257a0118163df3a | [] | no_license | alesalg/Customer-Manager | 3c0d5da25985051fc8e0f812765f499f9b9054ee | 6621a3a61ef8b450f9aa77d3adad3521c2239904 | refs/heads/master | 2023-08-05T12:59:14.157871 | 2021-09-19T13:48:27 | 2021-09-19T13:48:27 | 395,099,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | #!/home/alexandre/Documentos/Projetos/Python/crm/.venv/bin/python3
# -*- coding: utf-8 -*-
"""Pip-generated console script that launches Django's ``django-admin`` CLI."""
import re
import sys

from django.core.management import execute_from_command_line

if __name__ == '__main__':
    # Normalise argv[0]: strip the setuptools wrapper suffixes
    # ('-script.pyw' on Windows, '.exe' for frozen launchers) if present.
    program_name = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(execute_from_command_line())
| [
"allesalg@gmail.com"
] | allesalg@gmail.com | |
8cbd47f6b0e64d3a5da4f3a2a5dd1093b20b0441 | d194b4c7e53f01a2dcfbc39f034ede6fa90308cd | /com/pdf_word_test/test.py | 46fe3c51204744660d80ba5c3cbf1f9dfd5aa5dd | [] | no_license | quan327886341/test | 3d09dbbbb1da5ff99792b214032fb51c703f5761 | dcd612bf2c1937bd0bc77bbf16e0fea685246953 | refs/heads/master | 2020-04-18T01:39:49.870213 | 2019-10-17T07:18:47 | 2019-10-17T07:18:47 | 167,128,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | import PyPDF2
# Overlay the first page of 'watermark.pdf' onto the first page of
# 'meetingminutes.pdf', keep the remaining pages unchanged, encrypt the
# result and write it to 'watermarkedCover.pdf'.
#
# The input files must stay open until pdf_writer.write() finishes because
# PyPDF2 reads page contents lazily.
with open('meetingminutes.pdf', 'rb') as minutes_file, \
        open('watermark.pdf', 'rb') as watermark_file:
    minutes_reader = PyPDF2.PdfFileReader(minutes_file)
    watermark_reader = PyPDF2.PdfFileReader(watermark_file)

    # Merge the watermark onto the cover page only.
    first_page = minutes_reader.getPage(0)
    first_page.mergePage(watermark_reader.getPage(0))

    pdf_writer = PyPDF2.PdfFileWriter()
    pdf_writer.addPage(first_page)
    for page_num in range(1, minutes_reader.numPages):
        pdf_writer.addPage(minutes_reader.getPage(page_num))
    pdf_writer.encrypt('quan1984')

    # FIX: the output file was never closed, the watermark handle leaked,
    # and minutesFile.close() was called twice; 'with' blocks now guarantee
    # every handle is released exactly once. The dead commented-out
    # experiments that preceded this code were removed.
    with open('watermarkedCover.pdf', 'wb') as result_file:
        pdf_writer.write(result_file)
| [
"hanyouquan@MacBook-Pro-3.local"
] | hanyouquan@MacBook-Pro-3.local |
c0813fd49ac616816e2eb56f488dfe6c1fa96928 | d70a0b001587c732f2ba7a436bcd106d4bd6e034 | /matrix.py | 8ec1398a6a77d3ce7567b28905ab6b66ca39da38 | [] | no_license | babiperina/page_rank | e7ea469c1cac04ddc7fce1e03a90ce1d278f8d74 | d0c8b9824e9eabe48c78d987d814ade035fd800e | refs/heads/master | 2020-09-29T07:27:40.883066 | 2019-12-09T23:17:04 | 2019-12-09T23:17:04 | 226,986,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,657 | py | import math
from abstracts.abstract_matrix import AbstractMatrix
class Matrix(AbstractMatrix):
    """A dense matrix with 1-based (row, col) indexing.

    Entries are stored row-major in ``self.data``; ``self.rows`` and
    ``self.cols`` hold the dimensions (storage is set up by AbstractMatrix -
    confirm against abstracts/abstract_matrix.py). Arithmetic operators work
    elementwise and accept either another Matrix of the same size or a scalar.
    by: Bárbara Perina
    """
    # Error message printed by the elementwise operators on a size mismatch.
    MSG_DIFFERENT_SIZES = "The matrixs doesn't have the same size"

    def __len__(self):
        # Total number of stored elements (rows * cols).
        return len(self.data)

    def __getitem__(self, key):
        # key is a 1-based (row, col) pair.
        i, j = key
        return self.data[(j - 1) + (i - 1) * self.cols]

    def __setitem__(self, key, value):
        try:
            i, j = key
            self.data[(j - 1) + (i - 1) * self.cols] = value
        except Exception:  # narrowed from a bare except:; still best-effort
            print(Exception, " occurred")

    def __repr__(self):
        return str(self)

    def __str__(self):
        # One tab-separated line per row; every line is newline-terminated.
        lines = []
        for row in range(1, self.rows + 1):
            line = ""
            for col in range(1, self.cols + 1):
                line += str(self[row, col]) + "\t"
            lines.append(line + "\n")
        return "".join(lines)

    def _elementwise(self, other, op, err):
        """Apply op(self[i,j], x) with x = other[i,j] (Matrix) or other (scalar).

        On a size mismatch the class-level message is printed and *err* is
        returned (preserving the original operators' behaviour).
        """
        res = Matrix(self.rows, self.cols)
        if type(other) == Matrix:
            if self.rows != other.rows or self.cols != other.cols:
                print(self.MSG_DIFFERENT_SIZES)
                return err
            for i in range(1, self.rows + 1):
                for j in range(1, self.cols + 1):
                    res[i, j] = op(self[i, j], other[i, j])
        else:
            for i in range(1, self.rows + 1):
                for j in range(1, self.cols + 1):
                    res[i, j] = op(self[i, j], other)
        return res

    # matrix + other
    def __add__(self, other):
        return self._elementwise(other, lambda a, b: a + b, "error __add__ matrix")

    # other + matrix (addition commutes)
    def __radd__(self, other):
        return self + other

    # matrix - other
    def __sub__(self, other):
        return self._elementwise(other, lambda a, b: a - b, "error __sub__ matrix")

    # other - matrix
    def __rsub__(self, other):
        # BUGFIX: the original returned self - other, i.e. the wrong sign,
        # whenever the scalar was on the left (e.g. 5 - m).
        return self._elementwise(other, lambda a, b: b - a, "error __sub__ matrix")

    # matrix * other (elementwise)
    def __mul__(self, other):
        return self._elementwise(other, lambda a, b: a * b, "error __mul__ matrix")

    # other * matrix (elementwise multiplication commutes)
    def __rmul__(self, other):
        return self * other

    # matrix / other (elementwise)
    def __truediv__(self, other):
        return self._elementwise(other, lambda a, b: a / b, "error __truediv__ matrix")

    # other / matrix (elementwise)
    def __rtruediv__(self, other):
        # BUGFIX: the original computed self / other instead of other / self.
        return self._elementwise(other, lambda a, b: b / a, "error __truediv__ matrix")

    def dot(self, other):
        """Matrix product self @ other; self.cols must equal other.rows."""
        if type(other) == Matrix:
            if self.cols != other.rows:
                return "matrix A must be the same amount of columns as amount matrix B rows amount"
            # Matrix(rows, cols) zero-fills, so += accumulates safely.
            res = Matrix(self.rows, other.cols)
            for col in range(1, self.cols + 1):
                for row in range(1, self.rows + 1):
                    for othercol in range(1, other.cols + 1):
                        res[row, othercol] += self[row, col] * other[col, othercol]
            return res
        else:
            return "the dot method only works between matrixs"

    def rdot(self, other):
        """Matrix product other @ self; other.cols must equal self.rows."""
        if type(other) == Matrix:
            if self.rows != other.cols:
                return "matrix A must be the same amount of columns as amount matrix B rows amount"
            # BUGFIX: other @ self has other.rows x self.cols entries; the
            # original allocated Matrix(self.rows, other.cols), which is
            # wrong for non-square operands.
            res = Matrix(other.rows, self.cols)
            for col in range(1, other.cols + 1):
                for row in range(1, other.rows + 1):
                    for selfcol in range(1, self.cols + 1):
                        res[row, selfcol] += other[row, col] * self[col, selfcol]
            return res
        else:
            return "the dot method only works between matrixs"

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        res = Matrix(self.cols, self.rows)
        for i in range(1, self.rows + 1):
            for j in range(1, self.cols + 1):
                res[j, i] = self[i, j]
        return res

    def gauss_jordan(self):
        """Apply Gauss-Jordan elimination to this matrix.

        Intended to reduce the matrix to reduced row-echelon form (useful for
        solving linear systems, computing inverses, etc.), e.g.::

            >>> a = Matrix(3, 4, [1, -2, 1, 0, 0, 2, -8, 8, 5, 0, -5, 10])
            >>> a.gauss_jordan()
            1.0  0.0  0.0   1.0
            0.0  1.0  0.0   0.0
            0.0  0.0  1.0  -1.0

        NOTE: not implemented yet - currently returns None.
        """
        pass

    def inverse(self):
        """Compute the inverse of this matrix via Gauss-Jordan, e.g.::

            >>> a = Matrix(2, 2, [1, 2, 3, 4])
            >>> a.inverse()
            -2.0   1.0
             1.5  -0.5

        NOTE: not implemented yet - currently returns None.
        """
        pass

    def float_format(self):
        """Return a copy with every entry rounded to 2 decimal places."""
        res = Matrix(self.rows, self.cols)
        for i in range(1, self.rows + 1):
            for j in range(1, self.cols + 1):
                res[i, j] = float("{0:.2f}".format(self[i, j]))
        return res

    def normalized(self):
        """Euclidean norm of the stored entries (duplicate of selfmodule())."""
        soma = 0
        for v in self.data:
            soma += v * v
        return math.sqrt(soma)

    def selfmodule(self):
        """Euclidean norm of this matrix's entries."""
        module = 0
        for d in self.data:
            module = module + (d * d)
        return math.sqrt(module)

    def module(self, eigenvector):
        """Euclidean norm of another matrix/vector's entries."""
        module = 0
        for d in eigenvector.data:
            module = module + (d * d)
        return math.sqrt(module)

    def get_matrix_bigger_element(self):
        """Largest stored entry."""
        return max(self.data)

    def eigen(self, count = 3):
        """Estimate dominant eigenpairs by power iteration plus deflation.

        Args:
            count: number of eigenpairs to extract (default 3, as before;
                   the constant is now a parameter).

        Returns:
            (eigenvalues, eigenvectors) - two parallel lists.
        """
        A = self
        eigenvalues = []
        eigenvectors = []
        for _ in range(count):
            eigenvalue, eigenvector = A.power_method()
            eigenvalues.append(eigenvalue)
            eigenvectors.append(eigenvector)
            A = A.deflation(eigenvalue, eigenvector)
        return eigenvalues, eigenvectors

    def power_method(self, max_iterations = 7):
        """Dominant eigenpair via the power method.

        Args:
            max_iterations: normalised power steps to run (default 7, as
                            before; the constant is now a parameter).

        Returns:
            (eigenvalue, eigenvector): the eigenvalue rounded to 2 decimals
            and the eigenvector scaled so its largest entry is 1, rounded too.
        """
        size = self.rows
        x = Matrix(size, 1, [1] * size)
        for _ in range(max_iterations):
            y = self.dot(x)
            # Rescale so the largest component is 1 to avoid over/underflow.
            x = (1 / y.get_matrix_bigger_element()) * y
        y = self.dot(x)
        eigenvalue = float("{0:.2f}".format(y.get_matrix_bigger_element()))
        eigenvector = ((1 / y.get_matrix_bigger_element()) * y).float_format()
        return eigenvalue, eigenvector

    def deflation(self, eigenvalue, eigenvector):
        """Remove a found eigenpair's contribution: B = A - lambda * v * v^T."""
        norm = self.module(eigenvector)
        unit = eigenvector / norm
        return self - eigenvalue * (unit.dot(unit.transpose()))

    def get_vetor_autoridades(self):
        """Authority-vector seed: row sums of the transposed adjacency matrix."""
        print("------ENTRANDO NO MÉTODO GET VETOR AUTORIDADES------")
        At = self.transpose()
        print("MATRIZ TRANSPOSTA: \n" + str(At))
        return At.get_vetor_centros()

    def get_vetor_centros(self):
        """Hub-vector seed: sum of every row of this matrix."""
        print("------ENTRANDO NO MÉTODO GET VETOR CENTROS------")
        print("------CALCULANDO A SOMA DAS LINHAS DA MATRIZ ACIMA------")
        a = []
        for i in range(1, self.rows + 1):
            soma = 0
            for j in range(1, self.cols + 1):
                print("["+str(i)+","+str(j)+"]" + str(self[i, j]))
                soma += self[i, j]
            a.append(soma)
        return a

    def pagerank(self):
        """HITS-style hub/authority iteration; returns the authority vector."""
        print("------ENTRANDO NO MÉTODO PAGE RANK------")
        print("MATRIZ DE ADJACÊNCIA: \n" + str(self))
        h = self.get_vetor_centros()
        print("VETOR h0: \n" + str(h))
        a = self.get_vetor_autoridades()
        print("VETOR a0: \n" + str(a))
        max_iterations = self.rows + self.cols
        iteration = 1
        a = Matrix(len(a), 1, a)
        while iteration <= max_iterations:
            if iteration > 1:
                a = Matrix(a.rows, 1, a.data)
            Aa = self.dot(a)
            h = Aa / Aa.selfmodule()  # normalised hub scores
            print("MATRIZ h"+str(iteration)+": \n" + str(h))
            a = self.transpose().dot(h) / (self.transpose().dot(h)).selfmodule()  # authority scores
            print("MATRIZ a"+str(iteration)+": \n" + str(a))
            iteration += 1
        return a
"babiperina@MacBook-Pro-de-Barbara.local"
] | babiperina@MacBook-Pro-de-Barbara.local |
3c195b3da62260fc1e84d2ad053e5d8789c58061 | 61cef52a05bc9669a83ee0fc2e7e73352aef3110 | /pythonAssignment/dataTypes/1st.py | 24ecd772bac1b781d40afecf6834da7b5c487885 | [] | no_license | Maadaan/IW-pythonAssignment | f1b501243928535c1ed21a53c779ee6b9d5d4ea5 | 164312ae6697fc73bbe567b1e455e7c6600b8ef9 | refs/heads/master | 2022-11-07T18:47:54.939217 | 2020-06-27T11:52:01 | 2020-06-27T11:52:01 | 275,361,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
Write a Python program to count the number of characters (character
frequency) in a string. Sample String : 'google.com'
Expected Result : {'o': 3, 'g': 2, '.': 1, 'e': 1, 'l': 1, 'm': 1, 'c': 1}
"""
strings = 'google.com'
# Single pass over the string: increment each character's running count.
# FIX: replaces the original O(n^2) nested rescan of the whole string for
# every character; keys still appear in first-occurrence order, so the
# resulting dict (and its printed form) is identical.
countTheWord = {}
for char in strings:
    countTheWord[char] = countTheWord.get(char, 0) + 1
print(countTheWord)
"chapagainmadan7@gmail.com"
] | chapagainmadan7@gmail.com |
74cdb6bab6b4969197a34fbfee92461b69a5b9b5 | e02f9a40efaa75aae10d57a52372c1d4758e1a1a | /ZameenScraper/ZameenScraper/middlewares.py | 2617f2c03cfc97fb1f4403a9e98d9fccf6265aef | [] | no_license | Usman-Ghani-Mughal/Zameen.com-Scraper | 75b9e722c08ed30cf05bcdf0711bd20e21d5c740 | bf80de14cd62249d06fdc07fd5838f0a630dfe35 | refs/heads/master | 2023-05-11T00:59:47.119704 | 2021-05-30T09:39:06 | 2021-05-30T09:39:06 | 372,174,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class ZameenscraperSpiderMiddleware:
    """Scrapy spider middleware for the Zameen scraper.

    Generated from Scrapy's project template; every hook passes the data
    through unchanged. Scrapy treats a missing hook as "do not modify",
    so none of these methods is strictly required.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so crawl start-up gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response entering the spider; None means "continue".
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request the spider produced, untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # A spider (or earlier middleware) raised; returning None lets
        # Scrapy's default error handling take over.
        pass

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the seed requests
        # (there is no associated response yet).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class ZameenscraperDownloaderMiddleware:
    """Scrapy downloader middleware for the Zameen scraper.

    Generated from Scrapy's project template; every hook is a pass-through.
    Scrapy treats a missing hook as "do not modify".
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so crawl start-up gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None -> hand the request on to the next middleware / downloader.
        return None

    def process_response(self, request, response, spider):
        # Return the downloaded response unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # None -> fall through to Scrapy's default exception handling.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"usmangm087@gmail.com"
] | usmangm087@gmail.com |
705f4bcf20babd9e198ff784dd9dade6e2ffb566 | eb7b0c71c5bddec430b82a4f7eb3d7d963b15c2c | /displacement.py | fd4339ef7acfbf5db2b853813f4bdb3f5e8a08dd | [] | no_license | fbolanos/ImagingAnalysis | f8a116f511547942b08ab3c154bbad47754e54e1 | 0c178f779186ae3110c8eb26765dd420610c0e46 | refs/heads/master | 2021-01-10T12:12:57.416689 | 2015-09-27T22:03:09 | 2015-09-27T22:03:09 | 43,264,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,404 | py | from filter import *
from math import hypot, pow, sqrt
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
# Recording-session directories to scan for head-fixed imaging videos.
base_dir = ["/media/user/DataFB/AutoHeadFix_Data/0731/EL_LRL/",
            "/media/user/DataFB/AutoHeadFix_Data/0730/EL_LRL_fluc/",
            "/media/user/DataFB/AutoHeadFix_Data/0806/EL_LRL/",
            "/media/user/DataFB/AutoHeadFix_Data/0812/EP_LRL/",
            "/media/user/DataFB/AutoHeadFix_Data/0618/"]
# RFID tags of the mice whose recordings are analysed.
mice = ["1312000377", "1302000245", "1312000300", "2015050202", "2015050115"]
# Per-mouse reference ("master") recordings, parallel to `mice`.
master_filenames = ["/media/user/DataFB/AutoHeadFix_Data/377_master.raw",
                    "/media/user/DataFB/AutoHeadFix_Data/245_master.raw",
                    "/media/user/DataFB/AutoHeadFix_Data/300_master.raw",
                    "/media/user/DataFB/AutoHeadFix_Data/202_master.raw",
                    "/media/user/DataFB/AutoHeadFix_Data/115_master.raw"]
#mice = ["1312000377", "2015050115", "1302000245", "1312000159", "1312000300", "2015050202", "1312000573"]
# Index of the frame used for image registration within each trial.
frame_oi = 400
# Unix-timestamp cut-off; recordings at or before this time are rejected.
limit_time = 1438365391.603202
class Position:
    """A 2-D displacement between an image and its reference frame.

    Attributes:
        dx, dy: shift along each axis (pixels).
        dd: Euclidean magnitude of the shift.
    """
    def __init__(self, dx, dy):
        self.dx = dx
        self.dy = dy
        # hypot is numerically safer than sqrt(pow(dx, 2) + pow(dy, 2)):
        # no intermediate overflow/underflow when squaring.
        self.dd = hypot(dx, dy)
def get_file_list(base_dir, mouse):
    """Recursively collect the .raw/.mp4 video files belonging to one mouse.

    Walks base_dir and keeps every file whose name contains the mouse tag
    and ends in '.raw' or '.mp4'.

    Returns:
        (full_paths, filenames): two parallel lists.
    """
    print(base_dir)
    full_paths = []
    filenames = []
    for root, _dirs, files in os.walk(base_dir):
        # Cleanup: removed the dead commented-out check_time_is_valid branch
        # and stopped shadowing the builtin name 'file'.
        for name in files:
            if name.endswith((".raw", ".mp4")) and mouse in name:
                full_paths.append(os.path.join(root, name))
                filenames.append(name)
    return full_paths, filenames
def check_time_is_valid(video_file):
    """Return True when the timestamp embedded in the filename is after limit_time.

    Assumes the layout <12-char prefix><unix timestamp><4-char extension>,
    e.g. '<mouse id>_<epoch>.raw' -- TODO confirm against the recordings.
    """
    timestamp = video_file[12:-4]
    # Valid only for recordings made strictly after the cut-off.
    return float(timestamp) > limit_time
def get_video_frames(lof):
    """Load raw frames for every trial video, truncated to the first 800 frames.

    Trials with fewer than 800 frames are reported and dropped.
    """
    trials = []
    for video_file in lof:
        print("Getting frames: " + video_file)
        frames = get_frames(video_file)[:800, :, :]
        print(np.shape(frames))
        if frames.shape[0] >= 800:
            trials.append(frames)
        else:
            print("Did not add this file, not enough frames.")
    print(np.shape(trials))
    return trials
def get_all_processed_frames(lof):
    """Load the pre-processed frames of every trial video (no truncation)."""
    trials = []
    for video_file in lof:
        print("Getting frames: " + video_file)
        frames = get_processed_frames(video_file)
        print(np.shape(frames))
        trials.append(frames)
    print(np.shape(trials))
    return trials
def find_min_ref(lor):
    """Pick, from a list of candidate alignments, the one with the smallest
    total displacement.

    ``lor`` is a list of lists of Position objects (one inner list per
    candidate reference frame); the inner list whose summed ``.dd`` is
    smallest is returned.

    Bug fix: the running minimum used to start at the arbitrary value 100,
    which left the result variable unbound (NameError) whenever every
    candidate's total displacement was >= 100.  It now starts at +infinity,
    so a result is always returned for non-empty input.
    """
    curr_min = float("inf")
    curr_min_positions = None
    print(np.shape(lor))
    for positions in lor:
        # `total` replaces a local that shadowed the builtin `sum`.
        total = 0
        for position in positions:
            total += position.dd
        print(total)
        if total < curr_min:
            curr_min = total
            curr_min_positions = positions
    print(curr_min)
    return curr_min_positions
def get_distance_var_mp4(all_frames):
    """Estimate per-trial (dx, dy) offsets for a stack of mp4 trials.

    Each trial's frame ``frame_oi`` is vessel-filtered, every trial is
    registered against every other trial (chi2_shift), and the candidate
    set with the smallest total displacement (find_min_ref) is returned as
    a list of Position objects, one per trial.
    """
    print("Filtering the frame of interest for all trials...")
    filtered = [filter2_test(frames, frame_oi) for frames in all_frames]
    print("Getting all the distances..")
    # n x n registration: every trial serves once as the reference frame.
    candidates = []
    for reference in filtered:
        shifts = parmap.map(image_registration.chi2_shift, filtered, reference)
        # each entry of `shifts` is (dx, dy, edx, edy); keep only dx/dy
        candidates.append([Position(s[0], s[1]) for s in shifts])
    print("Finding the min...")
    return find_min_ref(candidates)
def get_distance_var(lof):
    """Same registration search as get_distance_var_mp4(), but starting
    from .raw video paths: the trials are loaded with get_video_frames()
    first.
    """
    trials = np.asarray(get_video_frames(lof), dtype=np.uint8)
    print("Filtering the frame of interest for all trials...")
    # Filter each trial's registration frame to make vessels obvious.
    filtered = [filter2_test(frames, frame_oi) for frames in trials]
    print("Getting all the distances..")
    # n x n registration: every trial serves once as the reference frame.
    candidates = []
    for reference in filtered:
        shifts = parmap.map(image_registration.chi2_shift, filtered, reference)
        # each entry of `shifts` is (dx, dy, edx, edy); keep only dx/dy
        candidates.append([Position(s[0], s[1]) for s in shifts])
    print("Finding the min...")
    return find_min_ref(candidates)
# Removed a stray "zz" token that was here: at module scope it raised a
# NameError as soon as the script was imported or run.
def get_distance_var_from_master_frame(all_frames, master_frames):
    """Register every trial's frame ``frame_oi`` against the same frame of
    a master recording, returning one Position (dx, dy) per trial.
    """
    print("Getting frame of interest for all frames.")
    filtered = [filter2_test(frames, frame_oi) for frames in all_frames]
    reference = master_frames[frame_oi]
    print("Getting distances relative to the master frame")
    shifts = parmap.map(image_registration.chi2_shift, filtered, reference)
    # each entry of `shifts` is (dx, dy, edx, edy); keep only dx/dy
    return [Position(s[0], s[1]) for s in shifts]
class MouseInfo:
    """Per-mouse registration summary: peak-to-peak and mean x/y offsets
    across ``n_trials`` trials."""
    def __init__(self, tag, p2p_x, p2p_y, avg_x, avg_y, n_trials):
        self.tag = tag  # RFID tag string
        # peak-to-peak and mean displacements along each axis
        self.p2p_x, self.p2p_y = p2p_x, p2p_y
        self.avg_x, self.avg_y = avg_x, avg_y
        self.n_trials = n_trials
def p2p(arr):
    """Peak-to-peak spread of a sequence: max(arr) - min(arr)."""
    lo = min(arr)
    hi = max(arr)
    return hi - lo
def do_it_all():
    """Summarise registration jitter for every mouse in ``mice`` and write
    the results to a tab-separated file.

    For each mouse: gather its videos, estimate per-trial (dx, dy)
    offsets, then record peak-to-peak and mean offsets plus trial count.

    NOTE(review): ``base_dir`` is a *list* of directories at module scope,
    but it is used here as a single path (passed to get_file_list(), which
    hands it to os.walk(), and concatenated in ``base_dir+"data.tsv"``).
    Both would raise TypeError -- confirm whether a single directory was
    intended here.
    """
    list_mouse_info = []
    for mouse in mice:
        lof, lofilenames = get_file_list(base_dir, mouse)
        print "Lof: ", lof
        lop = get_distance_var(lof)
        dx_trials = []
        dy_trials = []
        for position in lop:
            dx_trials.append(position.dx)
        for position in lop:
            dy_trials.append(position.dy)
        # Peak-to-peak and mean drift along each axis across trials.
        peak_x = p2p(dx_trials)
        peak_y = p2p(dy_trials)
        avg_x = np.mean(dx_trials)
        avg_y = np.mean(dy_trials)
        list_mouse_info.append(MouseInfo(mouse, peak_x, peak_y, avg_x, avg_y, len(lop)))
    # One tab-separated row per mouse.
    with open(base_dir+"data.tsv", "w") as file:
        file.write("Tag\tp2p_x\tavg_x\tp2p_y\tavg_y\tn_trials\n")
        for info in list_mouse_info:
            file.write(info.tag + "\t" + str(info.p2p_x) + "\t" + str(info.avg_x) + "\t" + str(info.p2p_y) + "\t" + str(info.avg_y) + "\t" + str(info.n_trials) + "\n")
    print "Done it all!"
def process_frames(frames, freq, mouse, dir):
    """Full processing pipeline for one trial's aligned frames.

    Steps: normalise illumination against a painted reference region,
    band-pass (Chebyshev) filter in time, convert to dF/F0, then apply
    masked global-signal regression.  Returns the processed frame stack.

    NOTE(review): relies on module-level ``width``/``height`` matching the
    frame geometry -- confirm for new cameras.
    """
    print "Fixing paint.."
    # Mask files are keyed by the last three digits of the RFID tag.
    mouse = mouse[-3:]
    mask = 0
    with open(dir+mouse+"_paint_mask.raw", "rb") as file:
        mask = np.fromfile(file, dtype=np.float32)
        # Mask files are stored byte-swapped; swap into native order.
        mask = mask.byteswap()
    # Pixel indices inside the painted reference region.
    indices = np.squeeze((mask > 0).nonzero())
    paint_frames = np.zeros((frames.shape[0], len(indices)))
    frames = np.reshape(frames, (frames.shape[0], width*height))
    for i in range(frames.shape[0]):
        paint_frames[i, :] = frames[i, indices]
    print np.shape(paint_frames)
    # Per-frame mean intensity of the painted region, normalised to 1;
    # dividing it out cancels global illumination fluctuations.
    mean_paint = np.mean(paint_frames, axis=1)
    mean_paint /= np.mean(mean_paint)
    print np.shape(mean_paint)
    frames = np.divide(frames.T, mean_paint)
    frames = frames.T
    frames = np.reshape(frames, (frames.shape[0], width, height))
    print "Calculating mean..."
    # The band-pass filter removes DC; the pre-filter mean is added back so
    # the subsequent dF/F0 baseline is meaningful.
    avg_pre_filt = calculate_avg(frames)
    print "Temporal filter... ", freq.low_limit, "-", freq.high_limit, "Hz"
    frames = cheby_filter(frames, freq.low_limit, freq.high_limit)
    frames += avg_pre_filt
    print "Calculating DF/F0..."
    frames = calculate_df_f0(frames)
    print "Applying MASKED GSR..."
    #frames = gsr(frames)
    frames = masked_gsr(frames, dir+mouse+"_mask.raw")
    #print "Getting SD map..."
    #sd = standard_deviation(frames)
    return frames
def shift_frames(frames, positions):
print positions.dx, positions.dy
print frames.shape
for i in range(len(frames)):
frames[i] = image_registration.fft_tools.shift2d(frames[i], positions.dx, positions.dy)
return frames
def align_frames(mouse, dir, freq):
    """Register, process, concatenate and save every trial for one mouse,
    then launch an interactive correlation-map display.

    Outputs (via save_to_file): FULL_conc.raw (processed, concatenated),
    conc_RAW.raw (aligned raw), FULL_SD.raw (per-pixel SD).

    NOTE(review): save_to_file() is called here with (name, data, dtype)
    but elsewhere in this file with (dir, name, data, dtype) -- confirm
    the intended signature.
    """
    lofiles, lofilenames = get_file_list(dir+"Videos/", mouse)
    print lofilenames
    # Per-trial (dx, dy) offsets from the all-pairs registration search.
    lop = get_distance_var(lofiles)
    all_frames = np.asarray(get_video_frames(lofiles), dtype=np.uint8)
    print "Alligning all video frames..."
    all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
##    for i in range(len(lop)):
##        for frame in all_frames[i]:
##            frame = image_registration.fft_tools.shift2d(frame, lop[i].dx, lop[i].dy)
    print np.shape(all_frames)
    count = 0
    # Run the full per-trial pipeline (paint normalisation, band-pass,
    # dF/F0, masked GSR) in parallel across trials.
    new_all_frames = parmap.map(process_frames, all_frames, freq, mouse, dir)
    '''
    for frames in all_frames:
        print np.shape(frames)
        save_to_file("Green/"+lofilenames[count][:-4]+"_aligned.raw", frames, np.float32)
        print "Calculating mean..."
        avg_pre_filt = calculate_avg(frames)
        print "Temporal filter..."
        frames = cheby_filter(frames)
        frames += avg_pre_filt
        save_to_file("Green/Cheby/"+lofilenames[count][:-4]+"_BPFilter_0.1-1Hz.raw", frames, np.float32)
        print "Calculating DF/F0..."
        frames = calculate_df_f0(frames)
        save_to_file("Green/DFF/"+lofilenames[count][:-4]+"_DFF.raw", frames, np.float32)
        print "Applying MASKED GSR..."
        #frames = gsr(frames)
        frames = masked_gsr(frames, save_dir+"202_mask.raw")
        save_to_file("Green/GSR/"+lofilenames[count][:-4]+"_GSR.raw", frames, np.float32)
        print "Getting SD map..."
        sd = standard_deviation(frames)
        save_to_file("Green/SD_maps/"+lofilenames[count][:-4]+"_SD.raw", frames, np.float32)
        new_all_frames.append(frames)
        count += 1
    '''
    print "Creating array..."
    new_all_frames = np.asarray(new_all_frames, dtype=np.float32)
    all_frames = np.asarray(all_frames, dtype=np.float32)
    print "Joining Files..."
    # Collapse (trials, frames, w, h) -> (trials*frames, w, h) in time.
    new_all_frames = np.reshape(new_all_frames,
                                (new_all_frames.shape[0]*new_all_frames.shape[1],
                                 new_all_frames.shape[2],
                                 new_all_frames.shape[3]))
    all_frames = np.reshape(all_frames,
                            (all_frames.shape[0]*all_frames.shape[1],
                             all_frames.shape[2],
                             all_frames.shape[3]))
    print "Shapes: "
    print np.shape(all_frames)
    print np.shape(new_all_frames)
    # Zero out NaNs introduced by the processing pipeline before saving.
    where_are_NaNs = np.isnan(new_all_frames)
    new_all_frames[where_are_NaNs] = 0
    save_to_file("FULL_conc.raw", new_all_frames, np.float32)
    save_to_file("conc_RAW.raw", all_frames, np.float32)
    sd = standard_deviation(new_all_frames)
    save_to_file("FULL_SD.raw", sd, np.float32)
    print "Displaying correlation map..."
    mapper = CorrelationMapDisplayer(new_all_frames)
    mapper.display('spectral', 0.0, 1.0)
def process_frames_evoked(frames, freq, mouse, dir):
    """Processing pipeline for evoked-response trials: band-pass filter,
    dF/F0, then global-signal regression.

    NOTE(review): unlike process_frames(), ``mean_paint`` is computed here
    but never divided out of ``frames``; both gsr() and masked_gsr() are
    applied back to back; and ``frames`` stays flattened to 2-D
    (time, width*height) on return.  Confirm all three are intentional.
    """
    print "Fixing paint.."
    # Mask files are keyed by the last three digits of the RFID tag.
    mouse = mouse[-3:]
    mask = 0
    with open(dir+mouse+"_paint_mask.raw", "rb") as file:
        mask = np.fromfile(file, dtype=np.float32)
        # Mask files are stored byte-swapped; swap into native order.
        mask = mask.byteswap()
    indices = np.squeeze((mask > 0).nonzero())
    paint_frames = np.zeros((frames.shape[0], len(indices)))
    frames = np.reshape(frames, (frames.shape[0], width*height))
    for i in range(frames.shape[0]):
        paint_frames[i, :] = frames[i, indices]
    print np.shape(paint_frames)
    mean_paint = np.mean(paint_frames, axis=1)
    mean_paint /= np.mean(mean_paint)
    print np.shape(mean_paint)
    print "Calculating mean..."
    # The band-pass filter removes DC; re-add the pre-filter mean so the
    # subsequent dF/F0 baseline is meaningful.
    avg_pre_filt = calculate_avg(frames)
    print "Temporal filter... ", freq.low_limit, "-", freq.high_limit, "Hz"
    frames = cheby_filter(frames, freq.low_limit, freq.high_limit)
    frames += avg_pre_filt
    print "Calculating DF/F0..."
    frames = calculate_df_f0(frames)
    print "Applying MASKED GSR..."
    frames = gsr(frames)
    frames = masked_gsr(frames, dir+mouse+"_mask.raw")
    return frames
def get_evoked_map(mouse, dir, master_frames_filename, freq):
    """Average all processed trials for one mouse into an evoked-response
    stack and save it together with its per-pixel SD map.

    NOTE(review): ``master_frames_filename`` feeds only the commented-out
    master-frame registration path below; currently the trials are
    registered against each other via get_distance_var().
    """
    lofiles, lofilenames = get_file_list(dir+"Videos/", mouse)
    print lofilenames
    #all_frames = np.asarray(get_video_frames(lofiles), dtype=np.float32)
    '''
    master_frames = get_frames(master_frames_filename)
    lop = get_distance_var_from_master_frame(all_frames, master_frames)
    print "Alligning all video frames..."
    all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
    all_frames = np.asarray(all_frames, dtype=np.float32)
    print np.shape(all_frames)
    '''
    # Per-trial (dx, dy) offsets from the all-pairs registration search.
    lop = get_distance_var(lofiles)
    all_frames = np.asarray(get_video_frames(lofiles), dtype=np.float32)
    print "Alligning all video frames..."
    all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
    print np.shape(all_frames)
    all_frames = np.asarray(all_frames, dtype=np.float32)
    #all_frames = np.reshape(all_frames,
    #                        (all_frames.shape[0]*all_frames.shape[1],
    #                         all_frames.shape[2],
    #                         all_frames.shape[3]))
    #save_to_file(dir,"raw_conc_"+mouse+".raw", all_frames, np.float32)
    # Per-trial evoked processing pipeline, parallel across trials.
    new_all_frames = parmap.map(process_frames_evoked, all_frames, freq, mouse, dir)
    all_frames = np.reshape(all_frames,
                            (all_frames.shape[0]*all_frames.shape[1],
                             all_frames.shape[2],
                             all_frames.shape[3]))
    print "Creating array.."
    new_all_frames = np.asarray(new_all_frames, dtype=np.float32)
    print "Averaging together..."
    # Average across trials -> the evoked response.
    new_all_frames = np.mean(new_all_frames, axis=0)
    print np.shape(new_all_frames)
    #count = 1
    #for frames in new_all_frames:
    #    print "Saving trial %s." % (str(count))
    #    save_to_file("/media/user/DataFB/AutoHeadFix_Data/0815/EL_noled/stim/", "trial_"+str(count)+".raw", frames, np.float32)
    #    count += 1
    #save_to_file(dir,"raw_conc_"+mouse+".raw", all_frames, np.float32)
    save_to_file(dir,"evoked_"+mouse+"_.raw", new_all_frames, np.float32)
    sd = standard_deviation(new_all_frames)
    save_to_file(dir,"all_frames_SD"+mouse+"_.raw", sd, np.float32)
def get_mp4_frames(filename):
    """Decode an mp4 with OpenCV and return frames 10..709 of its green
    channel as a float32 stack."""
    print(filename)
    vidcap = cv2.VideoCapture(filename)
    frames = []
    while True:
        ok, image = vidcap.read()
        if not ok:
            break
        # Keep only the green channel (index 1 of OpenCV's BGR layout).
        frames.append(np.asarray(image)[:, :, 1])
    frames = np.asarray(frames, dtype=np.float32)[10:710, :, :]
    print(np.shape(frames))
    return frames
def process_mp4_frames(frames, freq, mouse, dir):
    """Processing pipeline for mp4-derived frames: band-pass filter,
    dF/F0, then masked global-signal regression (no paint normalisation,
    unlike process_frames())."""
    # Mask files are keyed by the last three digits of the RFID tag.
    mouse = mouse[-3:]
    print "Calculating mean..."
    # The band-pass filter removes DC; re-add the pre-filter mean so the
    # subsequent dF/F0 baseline is meaningful.
    avg_pre_filt = calculate_avg(frames)
    print "Temporal filter... ", freq.low_limit, "-", freq.high_limit, "Hz"
    frames = cheby_filter(frames, freq.low_limit, freq.high_limit)
    frames += avg_pre_filt
    print "Calculating DF/F0..."
    frames = calculate_df_f0(frames)
    print "Applying MASKED GSR..."
    frames = masked_gsr(frames, dir+mouse+"_mask.raw")
    return frames
def get_corr_maps_mp4(mouse, dir, freq):
    """Build correlation maps for one mouse from its mp4 recordings:
    load, register, concatenate, process, save, then display an
    interactive correlation map.

    NOTE(review): the output filenames "115.raw"/"115_processed.raw" are
    hard-coded for mouse ...115 and ``str_freq`` is computed but unused --
    confirm before running on other mice.  Also assumes module-level
    ``width``/``height`` match the mp4 frame geometry.
    """
    str_freq = str(freq.low_limit) + "-" + str(freq.high_limit) + "Hz"
    lofiles, lofilenames = get_file_list(dir+"MP4/", mouse)
    print lofilenames
    all_frames = []
    for filename in lofiles:
        print "Opening " + filename
        all_frames.append(get_mp4_frames(filename))
    all_frames = np.asarray(all_frames, dtype=np.float32)
    # Per-trial (dx, dy) offsets from the all-pairs registration search.
    lop = get_distance_var_mp4(all_frames)
    print "Alligning all video frames..."
    all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
    all_frames = np.asarray(all_frames, dtype=np.float32)
    print np.shape(all_frames)
    print "Joining Files..."
    number_of_files = all_frames.shape[0]
    frames_per_file = all_frames.shape[1]
    # Concatenate trials in time for saving, then split back per trial for
    # the processing pipeline, then concatenate again for the final save.
    all_frames = np.reshape(all_frames,
                            (number_of_files*frames_per_file,
                             width,
                             height))
    print "Saving raw concatanated frames..."
    save_to_file(dir, "115.raw", all_frames, np.float32)
    all_frames = np.reshape(all_frames,
                            (number_of_files,
                             frames_per_file,
                             width,
                             height))
    all_frames = parmap.map(process_mp4_frames, all_frames, freq, mouse, dir)
    all_frames = np.asarray(all_frames, dtype=np.float32)
    all_frames = np.reshape(all_frames,
                            (number_of_files*frames_per_file,
                             width,
                             height))
    print "Saving processed frames..."
    save_to_file(dir, "115_processed.raw", all_frames, np.float32)
    print "Displaying correlation map..."
    mapper = CorrelationMapDisplayer(all_frames, dir, mouse)
    mapper.display('spectral', 0.0, 1.0)
    print "All done!! :))"
def get_corr_maps(mouse, dir, freq, coords, master_frames_filename):
    """Register, process, concatenate and save all .raw trials for one
    mouse in the given frequency band, then display an interactive
    correlation map.

    NOTE(review): ``coords`` and ``master_frames_filename`` feed only the
    commented-out code paths below and are currently unused.
    """
    str_freq = str(freq.low_limit) + "-" + str(freq.high_limit) + "Hz"
    lofiles, lofilenames = get_file_list(dir+"Videos/", mouse)
    print lofilenames
    #all_frames = np.asarray(get_video_frames(lofiles), dtype=np.float32)
    #master_frames = get_frames(master_frames_filename)
    #lop = get_distance_var_from_master_frame(all_frames, master_frames)
    #print "Alligning all video frames..."
    #all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
    # Per-trial (dx, dy) offsets from the all-pairs registration search.
    lop = get_distance_var(lofiles)
    all_frames = np.asarray(get_video_frames(lofiles), dtype=np.uint8)
    print "Alligning all video frames..."
    all_frames = parmap.starmap(shift_frames, zip(all_frames, lop))
    print np.shape(all_frames)
    count = 0
    # Full per-trial processing pipeline, parallel across trials.
    new_all_frames = parmap.map(process_frames, all_frames, freq, mouse, dir)
    print "Creating array..."
    new_all_frames = np.asarray(new_all_frames, dtype=np.float32)
    all_frames = np.asarray(all_frames, dtype=np.float32)
    print "Joining Files..."
    # Collapse (trials, frames, w, h) -> (trials*frames, w, h) in time.
    new_all_frames = np.reshape(new_all_frames,
                                (new_all_frames.shape[0]*new_all_frames.shape[1],
                                 new_all_frames.shape[2],
                                 new_all_frames.shape[3]))
    print "Shapes: "
    print np.shape(all_frames)
    print np.shape(new_all_frames)
    # Zero out NaNs introduced by the processing pipeline before saving.
    where_are_NaNs = np.isnan(new_all_frames)
    new_all_frames[where_are_NaNs] = 0
    save_to_file(dir,"raw_conc_"+mouse+"_"+str_freq+".raw", all_frames, np.float32)
    print "Saving the processed concatenated file..."
    save_to_file(dir,"processed_conc_"+mouse+"_"+str_freq+".raw", new_all_frames, np.float32)
    #sd = standard_deviation(new_all_frames)
    #save_to_file(dir,"all_frames_SD"+mouse+"_"+str_freq+".raw", sd, np.float32)
    #for coord in coords:
    #    corr_map = get_correlation_map(coord.x, coord.y, new_all_frames)
    #    save_to_file(dir, "All_Maps/"+mouse+"_map_"+str(coord.x)+","+str(coord.y)+"_"+str_freq+".raw", corr_map, dtype=np.float32)
    print "Displaying correlation map..."
    mapper = CorrelationMapDisplayer(new_all_frames, dir, mouse)
    mapper.display('spectral', 0.0, 1.0)
    print "All done!! :))"
class FrequencyLimit:
    """Band-pass corner frequencies (Hz) handed to cheby_filter()."""
    def __init__(self, low, high):
        # store the corners under the attribute names the pipeline reads
        self.low_limit, self.high_limit = low, high
class Coordinate:
    """An (x, y) seed-pixel location used for correlation maps."""
    def __init__(self, x, y):
        self.x, self.y = x, y
def get_correlation_map(seed_x, seed_y, frames):
seed_pixel = np.asarray(frames[:, seed_x, seed_y], dtype=np.float32)
print np.shape(seed_pixel)
# Reshape into time and space
frames = np.reshape(frames, (frames.shape[0], width*height))
print np.shape(frames)
print 'Getting correlation... x=', seed_x, ", y=", seed_y
correlation_map = parmap.map(corr, frames.T, seed_pixel)
correlation_map = np.asarray(correlation_map, dtype=np.float32)
correlation_map = np.reshape(correlation_map, (width, height))
print np.shape(correlation_map)
return correlation_map
#do_it_all()
# Frequency bands to analyse: a 0.3-3 Hz band and a wide 0.01-6 Hz band.
frequencies = [FrequencyLimit(0.3, 3.),
FrequencyLimit(0.01, 6.0)]
# Seed pixel(s) for correlation maps.
coords = [Coordinate(138, 192)]
##for i in range(3, len(mice)):
##    for freq in frequencies:
##        get_corr_maps(mice[i], base_dir[i], freq, coords)
# Entry point: build correlation maps for mice[4] from its mp4 recordings.
get_corr_maps_mp4(mice[4], base_dir[4], frequencies[0])
#get_corr_maps(mice[4], base_dir[4], frequencies[0], coords, master_filenames[3])
#get_evoked_map(mice[4], base_dir[4], master_filenames[3], frequencies[1])
#freq = FrequencyLimit(0.3, 3.0)
#align_frames(mice[0], base_dir[0], freq)
#get_evoked_map(mice[6])
##test_gcamp = get_frames("/media/user/DataFB/AutoHeadFix_Data/0731/EL_LRL/Videos/M1312000377_1438367187.563086.raw")
##
##avg_pre_filt = calculate_avg(test_gcamp)
##test_gcamp = cheby_filter(test_gcamp)
##test_gcamp += avg_pre_filt
##test_gcamp = calculate_df_f0(test_gcamp)
##test_gcamp = masked_gsr(test_gcamp, save_dir+"377_mask.raw")
#test_gfp = get_frames("/media/user/DataFB/AutoHeadFix_Data/0806/EL_LRL/Videos/M1312000300_1438887348.410214.raw")
#avg_pre_filt = calculate_avg(test_gfp)
#test_gfp = cheby_filter(test_gfp)
#test_gfp += avg_pre_filt
#test_gfp = calculate_df_f0(test_gfp)
#test_gfp = masked_gsr(test_gfp, "/media/user/DataFB/AutoHeadFix_Data/0806/EL_LRL/300_mask.raw")
#mapper= CorrelationMapDisplayer(test_gcamp)
#mapper.display('spectral', -0.5, 1.0)
#mapper2= CorrelationMapDisplayer(test_gfp)
#mapper2.display('spectral', -0.5, 1.0)
#lofiles, lofilenames = get_file_list("/media/user/DataFB/AutoHeadFix_Data/0731/EL_LRL/Green/GSR/", mice[0])
#all_frames = np.asarray(get_all_processed_frames(lofiles), dtype=np.float32)
#print "Joining Files..."
#all_frames = np.reshape(all_frames,
# (all_frames.shape[0]*all_frames.shape[1],
# all_frames.shape[2],
# all_frames.shape[3]))
#print np.shape(all_frames)
#print "Displaying correlation map..."
#mapper = CorrelationMapDisplayer(all_frames)
#mapper.display('spectral', -0.5, 1.0)
#lof = get_file_list(base_dir, mice[1])
# List of positions for all trials with the best reference point
#lop =get_distance_var(lof)
#dx_trials = []
#for position in lop:
# dx_trials.append(position.dx)
#plt.plot(dx_trials)
#plt.title("Change in X for all trials")
#plt.ylabel("dx in pixels")
#plt.xlabel("Trial number")
#plt.show()
#dy_trials = []
#for position in lop:
# dy_trials.append(position.dy)
#plt.plot(dy_trials)
#plt.title("Change in Y for all trials")
#plt.ylabel("dy in pixels")
#plt.xlabel("Trial number")
#plt.show()
| [
"federico@alumni.ubc.ca"
] | federico@alumni.ubc.ca |
819f916451d212969a294520210767ee7b4da40d | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Leetcode/Algorithm/python/3000/02079-Watering Plants.py | 4e375badaaf013ccb48f4140475ac47e3102f9c7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class Solution(object):
def wateringPlants(self, plants, capacity):
    """Return the total steps to water every plant in order.

    You start at the river (position -1) with a full can.  Watering the
    plant at index i takes one step forward; if the can holds less water
    than plant i needs, you first walk back to the river (i steps),
    refill, and walk out again (i steps) before that one forward step.
    """
    water = capacity
    steps = 0
    for idx, need in enumerate(plants):
        if need > water:
            # Walk back to the river, refill, return, and water: 2*idx + 1.
            steps += 2 * idx + 1
            water = capacity - need
        else:
            steps += 1
            water -= need
    return steps
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
afc45b39b235a097420051d68a0d442e7eea22a8 | 3a11fbea540aa1441256a685a18301df30d0b2c3 | /Steel/AISC Shapes Databases/aisc_steel_shapes.py | ec8cded1710a19ae8ec4cc65ad0c9e297d1a97e2 | [
"BSD-3-Clause"
] | permissive | Gia869/Structural-Engineering | 17af7991a6472b209e553574c34b9b24197e56f8 | 47b025d7482461f7ee55b036f60c16a937b8d203 | refs/heads/master | 2021-01-07T10:15:45.750745 | 2020-09-18T02:03:47 | 2020-09-18T02:03:47 | 241,659,981 | 0 | 0 | BSD-3-Clause | 2020-02-19T16:05:59 | 2020-02-19T16:05:58 | null | UTF-8 | Python | false | false | 29,738 | py | '''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Tkinter as tk
import tkFont
import tkMessageBox
class Master_window:
def __init__(self, master):
self.shape_types = ['C', 'MC', 'HSS','HSS - ROUND', 'HP', 'M', 'L', 'ST', 'PIPE', 'MT', 'S', 'WT', 'W', '2L']
self.values_list = ['T_F', 'W', 'A', 'd', 'ddet', 'Ht', 'h', 'OD', 'bf', 'bfdet', 'B', 'b', 'ID', 'tw', 'twdet', 'twdet/2', 'tf', 'tfdet', 't', 'tnom', 'tdes', 'kdes', 'kdet', 'k1', 'x', 'y', 'eo', 'xp', 'yp', 'bf/2tf', 'b/t', 'b/tdes', 'h/tw', 'h/tdes', 'D/t', 'Ix', 'Zx', 'Sx', 'rx', 'Iy', 'Zy', 'Sy', 'ry', 'Iz', 'rz', 'Sz', 'J', 'Cw', 'C', 'Wno', 'Sw1', 'Sw2', 'Sw3', 'Qf', 'Qw', 'ro', 'H', 'tan(alpha)', 'Qs', 'Iw', 'zA', 'zB', 'zC', 'wA', 'wB', 'wC', 'SwA', 'SwB', 'SwC', 'SzA', 'SzB', 'SzC', 'rts', 'ho', 'PA', 'PB']
self.values_units = ['', 'lbs/ft', 'in^2', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', 'in', '', '', '', '', '', '', 'in^4', 'in^3', 'in^3', 'in', 'in^4', 'in^3', 'in^3', 'in', 'in^4', 'in', 'in^3', 'in^4', 'in^6', 'in^3', 'in^2', 'in^4', 'in^4', 'in^4', 'in^3', 'in^3', 'in', '', '', '', 'in^4', 'in', 'in', 'in', 'in', 'in', 'in', 'in^3', 'in^3', 'in^3', 'in^3', 'in^3', 'in^3', 'in', 'in', 'in', 'in']
self.shape_special_note = ['', '', '', '','', 'Section has sloped flanges', '', '', '', 'Section has sloped flanges', '', 'Flange thickness greater than 2 in.', 'Flange thickness greater than 2 in.', '']
self.values_help = ['T_F - A true/false variable. A value of T (true) indicates that there is a special note for that shape (see below). A value of F (false) indicates that there is not a special note for that shape.', 'W - Nominal weight lb/ft (kg/m)', 'A - Cross-sectional area in^2 (mm2)', 'd - Overall depth of member or width of shorter leg for angles or width of the outstanding legs of long legs back-to-back double angles or the width of the back-to-back legs of short legs back-to-back double angles in. (mm)', 'ddet - Detailing value of member depth in. (mm)', 'Ht - Overall depth of square or rectangular HSS in. (mm)', 'h - Depth of the flat wall of square or rectangular HSS in. (mm)', 'OD - Outside diameter of round HSS or pipe in. (mm)', 'bf - Flange width in. (mm)', 'bfdet - Detailing value of flange width in. (mm)', 'B - Overall width of square or rectangular HSS (the same as B per the 2010 AISC Specification) in. (mm)', 'b - Width of the flat wall of square or rectangular HSS or width of the longer leg for angles or width of the back-to-back legs of long legs back-to-back double angles or width of the outstanding legs of short legs back-to-back double angles in. (mm)', 'ID - Inside diameter of round HSS or pipe in. (mm)', 'tw - Web thickness in. (mm)', 'twdet - Detailing value of web thickness in. (mm)', 'twdet/2 - Detailing value of tw/2 in. (mm)', 'tf - Flange thickness in. (mm)', 'tfdet - Detailing value of flange thickness in. (mm)', 't - Thickness of angle leg in. (mm)', 'tnom - HSS and pipe nominal wall thickness in. (mm)', 'tdes - HSS and pipe design wall thickness in. (mm)', 'kdes - Design distance from outer face of flange to web toe of fillet in. (mm)', 'kdet - Detailing distance from outer face of flange to web toe of fillet in. (mm)', 'k1 - Detailing distance from center of web to flange toe of fillet in. (mm)', 'x - Horizontal distance from designated member edge as defined in the AISC Steel Construction Manual to member centroidal axis in. 
(mm)', 'y - Vertical distance from designated member edge as defined in the AISC Steel Construction Manual to member centroidal axis in. (mm)', 'eo - Horizontal distance from designated member edge as defined in the AISC Steel Construction Manual to member shear center in. (mm)', 'xp - Horizontal distance from designated member edge as defined in the AISC Steel Construction Manual to member plastic neutral axis in. (mm)', 'yp - Vertical distance from designated member edge as defined in the AISC Steel Construction Manual to member plastic neutral axis in. (mm)', 'bf/2tf - Slenderness ratio', 'b/t - Slenderness ratio for single angles', 'b/tdes - Slenderness ratio for square or rectangular HSS', 'h/tw - Slenderness ratio', 'h/tdes - Slenderness ratio for square or rectangular HSS', 'D/t - Slenderness ratio for round HSS and pipe or tee shapes', 'Ix - Moment of inertia about the x-axis in^4 (mm^4 /10^6)', 'Zx - Plastic section modulus about the x-axis in^3 (mm^3 /10^3)', 'Sx - Elastic section modulus about the x-axis in^3 (mm^3 /10^3)', 'rx - Radius of gyration about the x-axis in. (mm)', 'Iy - Moment of inertia about the y-axis in^4 (mm^4 /10^6)', 'Zy - Plastic section modulus about the y-axis in^3 (mm^3 /10^3)', 'Sy - Elastic section modulus about the y-axis in^3 (mm^3 /10^3)', 'ry - Radius of gyration about the y-axis (with no separation for double angles back-to-back) in. (mm)', 'Iz - Moment of inertia about the z-axis in^4 (mm^3 /10^6)', 'rz - Radius of gyration about the z-axis in. 
(mm)', 'Sz - Elastic section modulus about the z-axis in^3 (mm^3 /10^3)', 'J - Torsional moment of inertia in^4 (mm^4 /10^3)', 'Cw - Warping constant in^6 (mm^6 /10^9)', 'C - HSS torsional constant in^3 (mm^3 /10^3)', 'Wno - Normalized warping function as used in Design Guide 9 in^2 (mm^2)', 'Sw1 - Warping statical moment at point 1 on cross section as used in Design Guide 9 and shown in Figures 1 and 2 in^4 (mm^4 /10^6)', 'Sw2 - Warping statical moment at point 2 on cross section as used in Design Guide 9 and shown in Figure 2 in^4 (mm^4 /10^6)', 'Sw3 - Warping statical moment at point 3 on cross section as used in Design Guide 9 and shown in Figure 2 in^4 (mm^4 /10^6)', 'Qf - Statical moment for a point in the flange directly above the vertical edge of the web as used in Design Guide 9 in^3 (mm^3 /10^3)', 'Qw - Statical moment for a point at mid-depth of the cross section as used in Design Guide 9 in^3 (mm^3 /10^3)', 'ro - Polar radius of gyration about the shear center in. (mm)', 'H - Flexural constant', 'tan(alpha) - Tangent of the angle between the y-y and z-z axes for single angles where alpga is shown in Figure 3', 'Qs - Reduction factor for slender unstiffened compression elements', 'Iw - Moment of inertia about the w-axis in^4 (mm^4 /10^6)', 'zA - Distance from point A to center of gravity along z-axis as shown in Figure 3 in. (mm)', 'zB - Distance from point B to center of gravity along z-axis as shown in Figure 3 in. (mm)', 'zC - Distance from point C to center of gravity along z-axis as shown in Figure 3 in. (mm)', 'wA - Distance from point A to center of gravity along w-axis as shown in Figure 3 in. (mm)', 'wB - Distance from point B to center of gravity along w-axis as shown in Figure 3 in. (mm)', 'wC - Distance from point C to center of gravity along w-axis as shown in Figure 3 in. 
(mm)', 'SwA - Elastic section modulus about the w-axis at point A on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'SwB - Elastic section modulus about the w-axis at point B on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'SwC - Elastic section modulus about the w-axis at point C on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'SzA - Elastic section modulus about the z-axis at point A on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'SzB - Elastic section modulus about the z-axis at point B on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'SzC - Elastic section modulus about the z-axis at point C on cross section as shown in Figure 3 in^3 (mm^3 /10^3)', 'rts - Effective radius of gyration in. (mm)', 'ho - Distance between the flange centroids in. (mm)', 'PA - Shape perimeter minus one flange surface as used in Design Guide 19 in. (mm)', 'PB - Shape perimeter as used in Design Guide 19 in. (mm)']
self.widgets = []
## Build master shapes dictionaries from CSV file
shapes_file = open('aisc_shapes.csv','r')
shape_data_raw = shapes_file.readlines()
shapes_file.close()
self.shape_sets = []
for i in range(0,len(self.shape_types)):
self.shape_sets.append({})
for line in shape_data_raw:
shape_data_split = line.split(',')
if shape_data_split[0] == 'Type' or shape_data_split[0]=='':
pass
else:
shape_set_index = self.shape_types.index(shape_data_split[0])
if shape_data_split[9] == "-" and shape_set_index == 2:
shape_set_index = 3
else:
pass
shape = shape_data_split[2]
shape_data_split[-1]=shape_data_split[-1].rstrip('\n')
shape_data_holder = shape_data_split[3:]
temp_shape_dict = {shape: shape_data_holder}
self.shape_sets[shape_set_index].update(temp_shape_dict)
self.master = master
self.f_size = 8
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.menubar = tk.Menu(self.master)
self.menu = tk.Menu(self.menubar, tearoff=0)
self.menu_props = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label = "File", menu=self.menu)
self.menu.add_command(label="Quit", command=self.quit_app)
self.menubar.add_cascade(label = "Window Properties", menu=self.menu_props)
self.menu_props.add_command(label="Increase Font Size", command=self.font_size_up)
self.menu_props.add_command(label="Decrease Font Size", command=self.font_size_down)
try:
self.master.config(menu=self.menubar)
except AttributeError:
self.master.tk.call(master, "config", "-menu", self.menubar)
#Main Frames
self.main_frame = tk.Frame(master, bd=2, relief='sunken', padx=5,pady=5)
self.main_frame.pack(anchor='c', padx= 5, pady= 5, fill=tk.BOTH, expand=1)
self.base_frame = tk.Frame(master, bd=2, relief='sunken', padx=5,pady=5)
self.base_frame.pack(side=tk.BOTTOM, padx= 5, pady= 5, fill=tk.X, expand=1)
#Picker Frame
self.picker_frame = tk.Frame(self.main_frame, padx=2, pady=2)
self.shape_type = tk.StringVar()
self.shape_type.set(self.shape_types[0])
self.shape_type_label = tk.Label(self.picker_frame, text="Steel Shape Type : ", font=helv)
self.widgets.append(self.shape_type_label)
self.shape_type_label.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_type_menu = tk.OptionMenu(self.picker_frame, self.shape_type, *self.shape_types, command=self.shape_change)
self.shape_type_menu.config(font=helv)
self.shape_type_menu.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_frame = tk.LabelFrame(self.picker_frame, text="Section:", bd=1, relief='sunken', padx=5, pady=5, font=helv)
self.widgets.append(self.shape_frame)
self.shape_menu = tk.Listbox(self.shape_frame, height = 20, width = 40, font=helv)
self.widgets.append(self.shape_menu)
for section in sorted(self.shape_sets[0].keys()):
self.shape_menu.insert(tk.END, section)
self.shape_menu.pack(side=tk.LEFT, fill=tk.Y, expand=True)
self.shape_scrollbar = tk.Scrollbar(self.shape_frame, orient="vertical")
self.shape_menu.config(yscrollcommand=self.shape_scrollbar.set)
self.shape_scrollbar.config(command=self.shape_menu.yview)
self.shape_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.shape_menu.bind("<<ListboxSelect>>",self.shape_click)
self.shape_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.picker_frame.pack(side=tk.LEFT)
self.data_frame = tk.LabelFrame(self.main_frame, text="Section Properties - AISC 14th Edition:", bd=1, relief='sunken', padx=5, pady=5, font=helv)
self.widgets.append(self.data_frame)
self.properties_labels = []
for i in range(1,len(self.values_list)):
self.properties_labels.append(tk.Label(self.data_frame, text='{0}({1}):\n--'.format(self.values_list[i],self.values_units[i]), font=helv, justify=tk.LEFT))
j=0
z=0
for i in range(0,len(self.properties_labels)):
self.widgets.append(self.properties_labels[i])
self.properties_labels[i].grid(row = j, column = z, padx=1, pady=1)
if z<10:
z+=1
else:
z=0
j+=1
self.data_frame.pack(side=tk.LEFT)
self.f_size_frame = tk.Frame(self.base_frame, padx=5,pady=5)
self.f_size_label = tk.Label(self.f_size_frame, text='Font Size ('+str(self.f_size)+'):', font=helv)
self.widgets.append(self.f_size_label)
self.f_size_label.grid(row=0,column=0)
self.b_f_size_minus = tk.Button(self.f_size_frame,text="-", command=self.font_size_down, font=helv)
self.widgets.append(self.b_f_size_minus)
self.b_f_size_minus.grid(row=0, column=1, padx=1, pady=1)
self.b_f_size_plus = tk.Button(self.f_size_frame,text="+", command=self.font_size_up, font=helv)
self.widgets.append(self.b_f_size_plus)
self.b_f_size_plus.grid(row=0, column=2, padx=1, pady=1)
self.f_size_frame.pack(side=tk.LEFT)
self.value_def_frame = tk.Frame(self.base_frame, padx=5,pady=5)
self.value_def = tk.StringVar()
self.value_def.set(self.values_list[1])
self.value_def_menu = tk.OptionMenu(self.value_def_frame, self.value_def, *self.values_list[1:], command=self.value_definitions)
self.value_def_menu.config(font=helv)
self.value_def_menu.grid(row=0, column=0, padx=1, pady=1)
self.value_def_label = tk.Label(self.value_def_frame, text=self.values_help[1], font=helv, wraplength=400, justify=tk.LEFT)
self.widgets.append(self.value_def_label)
self.value_def_label.grid(row=0, column=1, padx=10, pady=1)
filters = ['=','<','>','Between']
self.value_filter = tk.StringVar()
self.value_filter.set('=')
self.value_filter_menu = tk.OptionMenu(self.value_def_frame, self.value_filter, *filters, command=self.value_filter_menu_switch)
self.value_filter_menu.config(font=helv)
self.value_filter_menu.grid(row=1, column=0, padx=1, pady=1)
self.filter_a = tk.StringVar()
self.entry_filter_a = tk.Entry(self.value_def_frame,textvariable=self.filter_a, font = helv, width = 15)
self.widgets.append(self.entry_filter_a)
self.entry_filter_a.grid(row=1, column=1, padx=1, pady=1)
self.filter_b = tk.StringVar()
self.entry_filter_b = tk.Entry(self.value_def_frame,textvariable=self.filter_b, font = helv, width = 15)
self.widgets.append(self.entry_filter_b)
self.entry_filter_b.grid(row=2, column=1, padx=1, pady=1)
self.entry_filter_b.configure(state="disabled")
self.b_value_filter = tk.Button(self.value_def_frame,text="Filter By Selected Value", command=self.value_filter_function)
self.b_value_filter.grid(row=1, column=2, padx=1, pady=1)
self.widgets.append(self.b_value_filter)
self.b_reset_filter = tk.Button(self.value_def_frame,text="Reset Shape List", command=self.shape_change)
self.b_reset_filter.grid(row=1, column=3, padx=1, pady=1)
self.widgets.append(self.b_reset_filter)
self.b_export_csv = tk.Button(self.value_def_frame,text="Export Current List to CSV", command=self.export_to_csv)
self.b_export_csv.grid(row=2, column=2, padx=1, pady=1)
self.widgets.append(self.b_export_csv)
self.value_def_frame.pack(side=tk.LEFT, padx= 5, pady= 5)
self.b_quit = tk.Button(self.base_frame,text="Quit", command=self.quit_app, font=helv)
self.widgets.append(self.b_quit)
self.b_quit.pack(side=tk.RIGHT)
self.license_display()
def license_display(self, *event):
    """Pop up the BSD-style license notice in a modal dialog, then give
    keyboard focus back to the main window.

    Bound both to startup (called at the end of __init__) and usable as an
    event callback, hence the unused *event catch-all.
    """
    # NOTE: runtime text reproduced verbatim from the original.
    license_string = ("Copyright (c) 2019, Donald N. Bockoven III\n"
                      "All rights reserved.\n\n"
                      "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\""
                      " AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE"
                      " IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE"
                      " DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE"
                      " FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL"
                      " DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR"
                      " SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER"
                      " CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,"
                      " OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE"
                      " OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
                      "https://github.com/buddyd16/Structural-Engineering/blob/master/LICENSE"
                      )
    # showerror is used (rather than showinfo) purely for visual emphasis.
    tkMessageBox.showerror("License Information", license_string)
    # Dialogs can steal focus; force it back to the application window.
    self.master.focus_force()
def value_filter_menu_switch(self, *event):
    """Enable the second filter entry only for the 'Between' filter.

    'Between' needs both bounds (entry A and entry B); the '=', '<' and '>'
    filters only need entry A, so entry B is greyed out for them.
    """
    option = self.value_filter.get()
    if option == 'Between':
        self.entry_filter_b.configure(state="normal")
    else:
        self.entry_filter_b.configure(state="disabled")
def quit_app(self):
    """Tear down the Tk window and stop the mainloop (exits the app)."""
    self.master.destroy()
    self.master.quit()
def shape_change(self, *event):
    """Repopulate the section listbox for the newly selected shape type.

    Each shape family gets a different multi-key sort (by the CSV column
    indices into the per-shape property list); the chosen sort keys are
    echoed in the listbox frame title.  Also used as the 'Reset Shape List'
    command to clear any active filter.
    """
    self.shape_menu.delete(0, tk.END)
    new_shape_type = self.shape_type.get()
    new_shape_type_index = self.shape_types.index(new_shape_type)
    if new_shape_type_index == 2:
        d = self.shape_sets[new_shape_type_index]
        new_section_list = sorted(d, key=lambda k: (float(d[k][5]), (float(d[k][10])), (float(d[k][19]))))
        string = 'Section: - Sorted By: {0} then {1} then {2}'.format(self.values_list[5], self.values_list[10], self.values_list[19])
    elif new_shape_type_index == 3:
        d = self.shape_sets[new_shape_type_index]
        new_section_list = sorted(d, key=lambda k: (float(d[k][7]), (float(d[k][19]))))
        string = 'Section: - Sorted By: {0} then {1}'.format(self.values_list[7], self.values_list[19])
    elif new_shape_type_index == 6 or new_shape_type_index == 13:
        d = self.shape_sets[new_shape_type_index]
        new_section_list = sorted(d, key=lambda k: (float(d[k][11]), (float(d[k][3])), (float(d[k][18]))))
        string = 'Section: - Sorted By: {0} then {1} then {2}'.format(self.values_list[11], self.values_list[3], self.values_list[18])
    elif new_shape_type_index == 8:
        d = self.shape_sets[new_shape_type_index]
        new_section_list = sorted(d, key=lambda k: (float(d[k][1]), (float(d[k][12]))))
        string = 'Section: - Sorted By: {0} then {1}'.format(self.values_list[1], self.values_list[12])
    else:
        d = self.shape_sets[new_shape_type_index]
        new_section_list = sorted(d, key=lambda k: (float(d[k][3]), (float(d[k][1]))))
        string = 'Section: - Sorted By: {0} then {1}'.format(self.values_list[3], self.values_list[1])
    for section in new_section_list:
        self.shape_menu.insert(tk.END, section)
    # Select the first entry and refresh the property panel for it.
    self.shape_menu.selection_set(0)
    self.shape_click()
    self.shape_frame.configure(text=string)
def shape_click(self, *event):
    """Refresh the property panel for the section selected in the listbox.

    Properties whose CSV value is '-' are suppressed; the remaining ones
    are packed into the pre-built grid of labels.  The definitions
    drop-down is rebuilt so it only offers properties this shape has.
    """
    shape = self.shape_menu.get(self.shape_menu.curselection())
    shape_index = self.shape_types.index(self.shape_type.get())
    section_props = self.shape_sets[shape_index].get(shape)
    # Column 0 holds the special-note flag; 'F' means "no special note".
    if section_props[0] == 'F':
        self.data_frame.configure(text="Section Properties - AISC 14th Edition: -- Selected Shape: " + shape)
    else:
        note = self.shape_special_note[shape_index]
        self.data_frame.configure(text="Section Properties - AISC 14th Edition: -- Selected Shape: " + shape + " -- Note: " + note)
    # Blank every label, then refill only the populated properties.
    for labels in self.properties_labels:
        labels.configure(text=' ')
    props_counter = 0
    props_list = []
    for i in range(1, len(self.values_list)):
        if section_props[i] == '-':
            pass
        else:
            if self.values_units[i] == '':
                string = '{0}{1}:\n{2}'.format(self.values_list[i], self.values_units[i], section_props[i])
            else:
                string = '{0}({1}):\n{2}'.format(self.values_list[i], self.values_units[i], section_props[i])
            props_list.append(self.values_list[i])
            self.properties_labels[props_counter].configure(text=string)
            props_counter += 1
    # Rebuild the definitions OptionMenu with only the available properties.
    self.value_def_menu.destroy()
    self.value_def_menu = tk.OptionMenu(self.value_def_frame, self.value_def, *props_list, command=self.value_definitions)
    helv = tkFont.Font(family='Helvetica', size=self.f_size, weight='bold')
    self.value_def_menu.config(font=helv)
    self.value_def_menu.grid(row=0, column=0, padx=1, pady=1)
    self.value_def.set(props_list[0])
    self.value_definitions()
def font_size_up(self, *event):
    """Increase the UI font size by one point and re-apply it everywhere.

    OptionMenus are not in self.widgets (they get rebuilt), so they are
    re-configured explicitly before the generic widget sweep.
    """
    self.f_size = self.f_size + 1
    helv = tkFont.Font(family='Helvetica', size=self.f_size, weight='bold')
    self.f_size_label.configure(text='Font Size (' + str(self.f_size) + '):')
    self.value_def_menu.config(font=helv)
    self.shape_type_menu.config(font=helv)
    self.value_filter_menu.config(font=helv)
    for widget in self.widgets:
        widget.configure(font=helv)
def font_size_down(self, *event):
    """Decrease the UI font size by one point (floor of 6) and re-apply it.

    Mirrors font_size_up; the minimum of 6pt keeps labels legible.
    """
    if self.f_size - 1 < 6:
        self.f_size = 6
    else:
        self.f_size = self.f_size - 1
    helv = tkFont.Font(family='Helvetica', size=self.f_size, weight='bold')
    self.f_size_label.configure(text='Font Size (' + str(self.f_size) + '):')
    self.value_def_menu.config(font=helv)
    self.shape_type_menu.config(font=helv)
    self.value_filter_menu.config(font=helv)
    for widget in self.widgets:
        widget.configure(font=helv)
def value_definitions(self, *event):
    """Show the help text for the property currently picked in the
    definitions drop-down (values_help is parallel to values_list)."""
    index = self.values_list.index(self.value_def.get())
    self.value_def_label.configure(text=self.values_help[index])
def value_filter_function(self, *event):
    """Filter the section listbox by the selected property and filter type.

    Filter types: '=', '<', '>' use entry A only; 'Between' uses entries
    A and B (swapped automatically if A > B).  The listbox is repopulated
    with the matching sections, sorted ascending by the property value.
    """
    value_index = self.values_list.index(self.value_def.get())
    a = self.filter_a.get()
    b = self.filter_b.get()
    filter_type = self.value_filter.get()
    self.shape_menu.delete(0, tk.END)
    new_shape_type = self.shape_type.get()
    new_shape_type_index = self.shape_types.index(new_shape_type)
    filtered_section_list = []
    # BUG FIX: the original if/elif chain had five byte-identical branches
    # all performing this same sort; collapsed to a single statement.
    d = self.shape_sets[new_shape_type_index]
    new_section_list = sorted(d, key=lambda k: (float(d[k][value_index])))
    # BUG FIX: `string` was referenced at the bottom of this method even on
    # the no-filter paths (empty entry A, or 'Between' with empty entry B),
    # raising NameError.  Provide a neutral default header.
    string = 'Section:'
    if a == '':
        pass
    else:
        if filter_type == 'Between' and b == '':
            pass
        elif filter_type == 'Between':
            string = 'Section: - Sorted By: {0} Between {1} and {2}'.format(self.values_list[value_index], a, b)
            for key in new_section_list:
                # Swap bounds so a <= b (re-read from the entries, as original).
                if float(a) > float(b):
                    a = self.filter_b.get()
                    b = self.filter_a.get()
                else:
                    pass
                if float(d[key][value_index]) >= float(a) and float(d[key][value_index]) <= float(b):
                    filtered_section_list.append(key)
                else:
                    pass
        elif filter_type == '<':
            string = 'Section: - Sorted By: {0} < {1}'.format(self.values_list[value_index], a)
            for key in new_section_list:
                if float(d[key][value_index]) <= float(a):
                    filtered_section_list.append(key)
                else:
                    pass
        elif filter_type == '>':
            string = 'Section: - Sorted By: {0} > {1}'.format(self.values_list[value_index], a)
            for key in new_section_list:
                if float(d[key][value_index]) >= float(a):
                    filtered_section_list.append(key)
                else:
                    pass
        elif filter_type == '=':
            string = 'Section: - Sorted By: {0} = {1}'.format(self.values_list[value_index], a)
            for key in new_section_list:
                if float(d[key][value_index]) == float(a):
                    filtered_section_list.append(key)
                else:
                    pass
    if len(filtered_section_list) == 0:
        self.shape_menu.delete(0, tk.END)
    else:
        for section in filtered_section_list:
            self.shape_menu.insert(tk.END, section)
    self.shape_frame.configure(text=string)
def export_to_csv(self, *events):
    """Write the currently listed shapes and their populated properties to
    '<shape_type>_sorted.csv' in the working directory.

    The header row is built from the first listed shape; properties whose
    value is '-' are omitted (header and rows stay aligned because every
    shape of a family shares the same '-' columns).
    """
    shapes = self.shape_menu.get(0, tk.END)
    shape_index = self.shape_types.index(self.shape_type.get())
    section_props = self.shape_sets[shape_index].get(shapes[0])
    string = 'Section'
    for i in range(1, len(self.values_list)):
        if section_props[i] == '-':
            pass
        else:
            if self.values_units[i] == '':
                string = string + ',{0}{1}:'.format(self.values_list[i], self.values_units[i])
            else:
                string = string + ',{0}({1}):'.format(self.values_list[i], self.values_units[i])
    # BUG FIX: open the file with a context manager so the handle is closed
    # even if a write fails (original used open()/close() with no finally).
    with open(self.shape_type.get() + '_sorted.csv', 'w') as export_file:
        export_file.write(string)
        for shape in shapes:
            shape_string = '\n' + shape
            section_props = self.shape_sets[shape_index].get(shape)
            for i in range(1, len(self.values_list)):
                if section_props[i] == '-':
                    pass
                else:
                    # The original's unit/no-unit branches emitted identical
                    # text for the data rows; collapsed to one statement.
                    shape_string = shape_string + ',{0}'.format(section_props[i])
            export_file.write(shape_string)
def main():
    """Create the Tk root window, mount the application, and run the loop."""
    root = tk.Tk()
    root.title("AISC 14th Edition - Shape Database")
    app = Master_window(root)
    root.minsize(800, 600)
    root.mainloop()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Gia869.noreply@github.com |
20f3880290bd9cb7adef23dd1bfb678931f622cd | eb357693715fb01c8dfbea03e42e7f0a15dc669d | /tools/convolutionNN.py | e05a9e4b7dd68a1b45f691ee88a1166e59ddd853 | [] | no_license | josemariasilva/___ | 3733d9159ef6f39ea63be5e74f7b497d82354281 | 68bd5a72e93223a7daecb3dffa039fafdb997778 | refs/heads/master | 2022-04-12T19:30:13.039563 | 2020-03-29T04:32:58 | 2020-03-29T04:32:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,973 | py | import keras
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from keras.preprocessing import image
#transformando dados em categoricos
from keras.utils import to_categorical
from keras.preprocessing import image
#Inicializando a CNN
# Initialize the CNN
class CNN(Sequential):
    """Small Keras 1.x-style convolutional classifier built on Sequential.

    Usage: Number_conv() to build the layers, optm() to compile and wire up
    the directory-based image generators, run() to train, predict() to
    classify a single image file.
    """

    def Number_conv(self, number, n_classes):
        """Stack `number` conv+pool pairs, then flatten into two dense layers.

        NOTE(review): `input_shape` is passed to every conv layer as in the
        original; only the first layer actually needs it — confirm against
        the Keras version in use.
        """
        for _ in range(number):
            # Step 1 - Convolution
            self.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))
            # Step 2 - MaxPooling
            self.add(MaxPooling2D(pool_size=(2, 2)))
        # Step 3 - Flatten
        self.add(Flatten())
        # Step 4 - Fully connected head
        self.add(Dense(output_dim=128, activation='relu'))
        self.add(Dense(output_dim=n_classes, activation='softmax'))

    def optm(self):
        """Compile the model and create train/test generators from the
        hard-coded dataset directories (stored on self for run())."""
        self.compile(optimizer="Adam", loss='binary_crossentropy', metrics=['accuracy'])
        from keras.preprocessing.image import ImageDataGenerator
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        self.training_set = train_datagen.flow_from_directory(
            'C:/Users/ZZZZZZ/Desktop/TCC/new_project/images/train_',
            target_size=(64, 64),
            batch_size=32,
            class_mode='categorical')
        self.test_set = test_datagen.flow_from_directory(
            'C:/Users/ZZZZZZ/Desktop/TCC/new_project/images/test_',
            target_size=(64, 64),
            batch_size=32,
            class_mode='categorical')

    def run(self, step_epoch, epoch, valid_step):
        """Train with the generators created by optm(); return the history dict."""
        self.fit_generator(self.training_set,
                           steps_per_epoch=step_epoch,
                           epochs=epoch,
                           validation_data=self.test_set,
                           validation_steps=valid_step)
        return self.history.history

    def predict(self, predict_image):
        """Classify one image file; return (label, probability) pairs.

        BUG FIX: the original body ended in a truncated, syntax-invalid
        statement (`for proba, label in zip(self.predict_proba(test_image)[0])`
        — one iterable, two targets, no colon or body).  Reconstructed here
        to pair class probabilities with the generator's class labels;
        confirm the intended behavior with the author.
        """
        test_image = image.load_img(predict_image, target_size=(64, 64))
        test_image = image.img_to_array(test_image) / 255
        test_image = np.expand_dims(test_image, axis=0)
        probabilities = self.predict_proba(test_image)[0]
        # class_indices maps label -> index; sort labels by index so they
        # align positionally with the probability vector.
        labels = sorted(self.training_set.class_indices, key=self.training_set.class_indices.get)
        return list(zip(labels, probabilities))

    def clear_session(self):
        """Release the Keras/TensorFlow backend graph and state."""
        keras.backend.clear_session()
| [
"zze2008@hotmail.com"
] | zze2008@hotmail.com |
85cdbb90f2c2e11a33c2378de4b34eb304d3c469 | 807f77cf61ca43f9b7f1ffd9958a2554b8ebd811 | /dvc/repo/graph.py | 41142ae770693432c9b94a556d894ad134642470 | [
"Apache-2.0"
] | permissive | kgritesh/dvc | 5113f6c14e10ed8088233a27322e80cff2c5f6dd | 6237a665c355020bf9c083851c8a89e874d2d437 | refs/heads/master | 2023-01-19T08:00:09.935965 | 2020-11-20T14:34:47 | 2020-11-20T14:34:47 | 315,026,151 | 0 | 0 | Apache-2.0 | 2020-11-22T13:43:39 | 2020-11-22T11:55:55 | null | UTF-8 | Python | false | false | 3,517 | py | def check_acyclic(graph):
import networkx as nx
from dvc.exceptions import CyclicGraphError
try:
edges = nx.find_cycle(graph, orientation="original")
except nx.NetworkXNoCycle:
return
stages = set()
for from_node, to_node, _ in edges:
stages.add(from_node)
stages.add(to_node)
raise CyclicGraphError(list(stages))
def get_pipeline(pipelines, node):
    """Return the single pipeline (sub-graph) that contains `node`.

    Asserts that exactly one pipeline owns the node — pipelines are the
    weakly connected components of the stage graph, so a node can belong
    to at most one.
    """
    found = [i for i in pipelines if i.has_node(node)]
    assert len(found) == 1
    return found[0]
def get_pipelines(G):
    """Split the stage graph into independent pipelines.

    Each weakly connected component of the directed graph is returned as
    its own (copied) subgraph.
    """
    import networkx as nx

    return [G.subgraph(c).copy() for c in nx.weakly_connected_components(G)]
def build_graph(stages, outs_trie=None):
    """Generate a graph by using the given stages on the given directory

    The nodes of the graph are the stage's path relative to the root.

    Edges are created when the output of one stage is used as a
    dependency in other stage.

    The direction of the edges goes from the stage to its dependency:

    For example, running the following:

        $ dvc run -o A "echo A > A"
        $ dvc run -d A -o B "echo B > B"
        $ dvc run -d B -o C "echo C > C"

    Will create the following graph:

           ancestors <--
                       |
            C.dvc -> B.dvc -> A.dvc
            |          |
            |          --> descendants
            |
            ------- pipeline ------>
                       |
                       v
          (weakly connected components)

    Args:
        stages (list): used to build a graph from

    Raises:
        OutputDuplicationError: two outputs with the same path
        StagePathAsOutputError: stage inside an output directory
        OverlappingOutputPathsError: output inside output directory
        CyclicGraphError: resulting graph has cycles
    """
    import networkx as nx

    from dvc.exceptions import StagePathAsOutputError

    from ..path_info import PathInfo
    from .trie import build_outs_trie

    G = nx.DiGraph()

    # Use trie to efficiently find overlapping outs and deps
    outs_trie = outs_trie or build_outs_trie(stages)

    # Reject any stage whose file lives inside another stage's output dir.
    for stage in stages:
        out = outs_trie.shortest_prefix(PathInfo(stage.path).parts).value
        if out:
            raise StagePathAsOutputError(stage, str(out))

    # Building graph
    G.add_nodes_from(stages)
    for stage in stages:
        for dep in stage.deps:
            if dep.path_info is None:
                continue

            dep_key = dep.path_info.parts
            # Outs that are prefixes of the dep path (dep inside an out dir)
            # plus outs nested under the dep path (dep is a dir containing outs).
            overlapping = [n.value for n in outs_trie.prefixes(dep_key)]
            if outs_trie.has_subtrie(dep_key):
                overlapping.extend(outs_trie.values(prefix=dep_key))

            G.add_edges_from((stage, out.stage) for out in overlapping)
    check_acyclic(G)

    return G
# NOTE: using stage graph instead of just list of stages to make sure that it
# has already passed all the sanity checks like cycles/overlapping outputs and
# so on.
def build_outs_graph(graph, outs_trie):
    """Build a graph whose nodes are outputs (not stages).

    For every stage dependency, edges are added from each of the stage's
    outputs to every output that overlaps the dependency path (prefix
    outs and nested outs, found via the trie).
    """
    import networkx as nx

    G = nx.DiGraph()

    G.add_nodes_from(outs_trie.values())
    for stage in graph.nodes():
        for dep in stage.deps:
            dep_key = dep.path_info.parts
            overlapping = [n.value for n in outs_trie.prefixes(dep_key)]
            if outs_trie.has_subtrie(dep_key):
                overlapping.extend(outs_trie.values(prefix=dep_key))

            for from_out in stage.outs:
                G.add_edges_from((from_out, out) for out in overlapping)
    return G
| [
"noreply@github.com"
] | kgritesh.noreply@github.com |
c8c056feea76407baa71f0ffcabeac65336d474b | ca564f1873e4547ad41bd2342530cd790945a184 | /main.py | a1f4fd1439d7138b3f36c634817f4cca18fec464 | [
"Apache-2.0"
] | permissive | absolutarin/opnshft | 9af3854a2589d1d119f1c3c04170eb5bcf1e1bac | b76ba5956d6be176b1054a2c06aa339db5f35152 | refs/heads/master | 2021-01-23T08:20:13.231674 | 2017-03-28T19:03:38 | 2017-03-28T19:03:38 | 86,494,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #Test python program to generate hello world 4 times
def hey():
    """Print "Hello World" four times, one per line."""
    for _ in range(4):  # index unused; `_` makes that explicit
        print("Hello World")


if __name__ == '__main__':
    hey()
| [
"quattro@QuattroS4-Mac.local"
] | quattro@QuattroS4-Mac.local |
1dd5adc4a90e7b50ff0185e05af4fc3547f8b7ea | 9c95fb5c8c37d808f3fb926128786817a36f9aa1 | /app/controller/Cert.py | 69589fe7f175259bb26820b08a93f7a3a8619f17 | [] | no_license | Sprizs/name.sb | d53fd6f4e8a9ec0f08095d37a3f6639c52bd9287 | 2da05fd114f09cddd0a631329414bea670f78c7a | refs/heads/master | 2020-03-27T01:40:53.427732 | 2018-08-22T16:07:43 | 2018-08-22T16:07:43 | 145,732,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,871 | py | from flask import request, Blueprint, session, jsonify
from sqlalchemy import desc
from app.model import Challenges, Certificates, Domain
from app import db
from app.utils import json_resp_only, login_required
from datetime import datetime, timedelta
import base64, json, requests, OpenSSL, binascii, hashlib, time, idna
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.backends import default_backend
KEY1 = '-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDPgytXhv2mp4JA' \
'\nABX9imTsRpX7V/TeB3ttoQlJ6Zaik3CYZc0uJjnTQEENVQvaPRLFO3wuSWbLFc8Z\nY8fTGzxuYRcRc+4vB0' \
'+e2n13r6x5xLIAjVdzzQdDcJDqBbfC8gEMCXX5MJ2/IsVI\nMrtUnr6eSQH1BTUsE94eAO6IdzH3j75BMa2iS21nmUjoyR0bdy' \
'+8qrTVn0DF+xe4\nEH5qLNLeJdOzA/uJg9Lhtp62PvQZXaD5HP8RXxnTi81OiLatswFQP8Lw2cCAn+3c' \
'\nvsL2AXjw14HtboPglGVNArynInMCFwd2TXxv4j8ZipDiosVbnXr1yfKANFHhrRIU\nCfwPltCPAgMBAAECggEAN3LtWe4QST' \
'/pZgCf36fjX99cpFTUcZ++M4UcXku0nKyZ\nIZ/SO8qrGO/Kci2PhTlckqdaf2PNu+aP+FDZTGeytivrZhZ8RsTFWcU4UYr3o3IT' \
'\nvmIGREM89aBWmLH+cHEKJpVAmN2MyU4ZOTmVJP9mIBWSGE7T7ntAlvPYyU5QY73h\nriDYPnDo8+xmFMwkiMDxqFRw24ycs+BbCP' \
'/1Q6j3zv8503t7837M0athzuYOUPpp\nlQoAr0r1l4iEjzCqupPI5HBbhn+McrnfOXg328F3J+6V/rdp7M+lcW/8dMZ105TW' \
'\nm9y3Wrplj3Zv7atpzeeJeG4vWTHkKq/Uxon/YfGEQQKBgQD4FdTdsHXn0SCcoiCS\ndCW3QJD3q11XjG5cTXC5cZPbrBADvqXMZ' \
'/dur00JVzGPM1OjWgroaXaGeQg3A1sO\n5m23Ptmu3K+bzRcm5Ulf0nWiN30WTZHrhkPfPCZY7N4HqahDK5y7BCbqjnf5vHM' \
'/\nyOGN98VCtb7oFVY5ExxCCT/19QKBgQDWIfhCrnR2qihM1CImAaOmbUahNuzE2MJt' \
'\nZGHP6U2ZozA7CijdKkeBE4j948psZbxqoBIfLJd1fsuvNTuLUAFKpkF5X283fyp9\nAaQlrs' \
'/6I5NJocVQIkX3I5w3N7XSOuw4aUhGDGSq0u/7jDuTPKP14XzpsHpdGdFM\n7F0Xhp5V8wKBgAQmGVE7yjz' \
'+OlVkQLcySg8ufT4nF4CHULEqemAfjiF2Vy442fz5\nICIxvFATrTh/2z44G0aXvOuyynhhDfzJzbvqySkrd6RbYa' \
'+81eVMV7tGwkjFM1OF\nA02Qa/PAwlXOeInnCM/32c7CYy9B/4tpiJwfMKVU9MRc1vxNXYOdM/yhAoGADHOv' \
'\nPxlr9laQv176mWExBgWGvOs3u36rV7clpPR5Kbz+mgBOPgYuYEgliDYN2F3WJhEm' \
'\n3J84M4HrEEY1LzW4zYF7fzZYfk6rxtcol3Rh7bbR4s9AbReBIAz3EZLwxMfeYq1k\noYYo+HIJuIQAFuDI3Ax/ugskInPU4vc' \
'/tpWCcZcCgYEAzrrAL5Ch+5V82/FwXeyC\nIq2GI3CtdBv0fFPi6RFXFw33hgHoXHw2kJW5E3gIFSBEyy98oTzlY7zwHLV9PKAE' \
'\ncwUtKYWwlgC5iZ5Nk5Iv+pnDF4oITKDhAss3/6z7fmV6f8SO656nlONm5Rtc0j3p\njdY1t7opcJQWF+YYDasbMok=\n-----END ' \
'PRIVATE KEY-----\n '
DIRECTORY_URL = "https://acme-staging.api.letsencrypt.org/directory"
STAGING_DIRECTORY_URL = "https://acme-staging-v02.api.letsencrypt.org/directory"
PRODUCTION_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory"
BITS = 2048
bp = Blueprint('cert', __name__)
@bp.route('/get_certificate', methods=['POST'])
@login_required
@json_resp_only
def request_certificate():
    """Issue (or re-issue) a Let's Encrypt certificate for a user's domain.

    Form fields: `domain` (required, must belong to the logged-in user and
    be validated), `privkey` (optional PEM key; falls back to the latest
    certificate's key).  Re-issuance is rate-limited: refused while the
    newest certificate is younger than 30 days (i.e. still has > 60 days
    of its 90-day validity left).
    """
    try:
        domain = request.form.get('domain', None)
        if domain is not None:
            domain = domain.strip()
        is_ok = Domain.query.filter_by(name=domain).filter_by(belongs=session['username']).first()
        if is_ok is None:
            return jsonify({'error': 'domain non exist!'}), 404
        if not is_ok.validated:
            return jsonify({'error': 'domain not validated'}), 404
        latest_cert = Certificates.query.filter_by(domain=domain).order_by(desc(Certificates.create_time)).first()
        if latest_cert is not None and datetime.now() - latest_cert.create_time < timedelta(days=60):
            return jsonify({'error': 'no reason to issue new certificate, validity over 60 days'}), 429
        privkey = request.form.get('privkey', None)  # PEM encoded
        # Reuse the previous certificate key when the caller supplies none.
        if latest_cert is not None and privkey is None:
            privkey = latest_cert.certificate_key
        client = Client(Account(KEY1, 'hrx@bupt.moe'))
        client.account_register()
        # IDN encode (punycode) before handing the name to ACME.
        domain = idna.encode(domain).decode()
        cert, cert_key = client.obtain_certificate([domain, "www." + domain], privkey)
        cert_now = Certificates(domain, cert, cert_key)
        db.session.add(cert_now)
        db.session.commit()
        return jsonify({}), 200
    except ValueError as e:
        # ACME/idna failures surface as ValueError; report them to the caller.
        return jsonify({'error': str(e)}), 403
class Account(object):
    """ACME account credentials: a PEM private key and a contact email."""

    def __init__(self, key, email):
        self.key = key      # account private key, PEM string
        self.email = email  # contact address registered with the CA
class Client(object):
    """Minimal ACME v2 client (RFC 8555) for Let's Encrypt, dns-01 only.

    TXT challenge records are published by inserting `Challenges` rows,
    which the application's DNS front-end is expected to serve; the rows
    are removed again once the certificate is downloaded.
    """

    UA = "Cat.net/acme-client"                # User-Agent on every request
    DIRECTORY_URL = PRODUCTION_DIRECTORY_URL  # ACME directory endpoint
    TIMEOUT = 5                               # seconds per HTTP request
    DIGEST_METHOD = 'sha256'                  # JWS / CSR signature digest
    ACME_AUTH_STATUS_MAX_CHECK = 5            # max polls per authorization
    ACME_AUTH_STATUS_WAIT = 5                 # seconds between polls

    def __init__(self, account: Account):
        """Fetch the ACME directory and cache its endpoint URLs.

        NOTE(review): the directory is fetched *here* using the class-level
        DIRECTORY_URL, so assigning instance.DIRECTORY_URL after
        construction (as the __main__ demo does) has no effect.
        """
        if account is None:
            raise RuntimeError('Account can not be None')
        self.account = account
        try:
            resp = requests.get(self.DIRECTORY_URL, timeout=self.TIMEOUT, headers={'User-Agent': self.UA})
            if resp.status_code not in [200, 201]:
                raise ValueError('get endpoints error')
            endpoints = resp.json()
            self.ACME_GET_NONCE_URL = endpoints['newNonce']
            self.ACME_TOS_URL = endpoints['meta']['termsOfService']
            self.ACME_KEY_CHANGE_URL = endpoints['keyChange']
            self.ACME_NEW_ACCOUNT_URL = endpoints['newAccount']
            self.ACME_NEW_ORDER_URL = endpoints['newOrder']
            self.ACME_REVOKE_CERT_URL = endpoints['revokeCert']
            self.keyid = None  # account URL; filled in by account_register()
        except:
            # HACK: any failure (network, JSON, missing key) silently kills
            # the whole process; consider re-raising a specific error instead.
            exit(1)

    @staticmethod
    def _b64(data: bytes or str) -> str:
        """URL-safe base64 with '=' padding stripped (JOSE base64url)."""
        if isinstance(data, str):
            data = data.encode()
        return base64.urlsafe_b64encode(data).rstrip(b'=').decode()

    @staticmethod
    def stringfy_items(payload):
        """Decode any bytes keys/values of a dict to str, in place."""
        if isinstance(payload, str):
            return payload
        for k, v in payload.items():
            if isinstance(k, bytes):
                k = k.decode('utf-8')
            if isinstance(v, bytes):
                v = v.decode('utf-8')
            payload[k] = v
        return payload

    @staticmethod
    def create_key(key_type=OpenSSL.crypto.TYPE_RSA, size=2048) -> bytes:
        """Generate a fresh private key and return it PEM-encoded."""
        key = OpenSSL.crypto.PKey()
        key.generate_key(key_type, size)
        private_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
        return private_key

    def get_keyauthorization(self, dns_token):
        """Build the key authorization for a dns-01 challenge.

        Returns (keyauthorization, base64-sha256-of-it); the second value
        is what goes into the _acme-challenge TXT record.
        """
        acme_header_jwk_json = json.dumps(self.get_jws_protected_header('GET_THUMBPRINT')['jwk'], sort_keys=True
                                          , separators=(',', ':'))
        acme_thumbprint = self._b64(hashlib.sha256(acme_header_jwk_json.encode('utf-8')).digest())
        acme_keyauthorization = "%s.%s" % (dns_token, acme_thumbprint)
        acme_keyauthorization_base64 = self._b64(hashlib.sha256(acme_keyauthorization.encode("utf-8")).digest())
        return acme_keyauthorization, acme_keyauthorization_base64

    def get_nonce(self):
        """Fetch a fresh anti-replay nonce from the CA."""
        resp = requests.get(self.ACME_GET_NONCE_URL, timeout=self.TIMEOUT, headers={'User-Agent': self.UA})
        return resp.headers['Replay-Nonce']

    def get_jws_protected_header(self, url):
        """Build the JWS protected header for a request to `url`.

        New-account/revoke (and the thumbprint pseudo-URL) must embed the
        full JWK; every other request uses the registered account kid.
        """
        header = {"alg": "RS256", "nonce": self.get_nonce(), "url": url}
        if url in [self.ACME_NEW_ACCOUNT_URL, self.ACME_REVOKE_CERT_URL, 'GET_THUMBPRINT']:
            privkey = load_pem_private_key(self.account.key.encode(), password=None, backend=default_backend())
            public_key_public_numbers = privkey.public_key().public_numbers()
            exponent = "{0:x}".format(public_key_public_numbers.e)
            # Hex strings need an even digit count before unhexlify.
            exponent = "0{0}".format(exponent) if len(exponent) % 2 else exponent
            modulus = "{0:x}".format(public_key_public_numbers.n)
            jwk = {"kty": "RSA", "e": self._b64(binascii.unhexlify(exponent)),
                   "n": self._b64(binascii.unhexlify(modulus))}
            header["jwk"] = jwk
        else:
            header["kid"] = self.keyid
        return header

    def _sign_message(self, message):
        """RS256-sign `message` with the account key."""
        pk = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.account.key.encode())
        return OpenSSL.crypto.sign(pk, message.encode('utf-8'), self.DIGEST_METHOD)

    def _send_signed_request(self, url, payload):
        """POST a signed JWS to `url`, or plain GET for the two sentinel
        payloads ('GET_Z_CHALLENGE' / 'DOWNLOAD_Z_CERTIFICATE')."""
        headers = {'User-Agent': self.UA}
        if payload in ['GET_Z_CHALLENGE', 'DOWNLOAD_Z_CERTIFICATE']:
            resp = requests.get(url, timeout=self.TIMEOUT, headers=headers)
        else:
            payload_base64 = self._b64(json.dumps(payload).encode())
            protected_base64 = self._b64(json.dumps(self.get_jws_protected_header(url)).encode())
            signature_base64 = self._b64(self._sign_message("{0}.{1}".format(protected_base64, payload_base64)))
            data = json.dumps({"protected": protected_base64, "payload": payload_base64, "signature": signature_base64})
            headers.update({'Content-Type': 'application/jose+json'})
            resp = requests.post(url, data=data, timeout=self.TIMEOUT, headers=headers)
        return resp

    def account_register(self):
        """Create (or look up) the ACME account and remember its kid URL."""
        payload = {"termsOfServiceAgreed": True, "contact": ["mailto:%s" % self.account.email]}
        resp = self._send_signed_request(self.ACME_NEW_ACCOUNT_URL, payload)
        setattr(self, 'keyid', resp.headers['Location'])
        return resp

    def new_issuance(self, domains):
        """Create a new order for `domains`; return (finalize_url, authz_urls)."""
        ids = []
        for x in domains:
            ids.append({"type": "dns", "value": x})
        payload = {"identifiers": ids}
        url = self.ACME_NEW_ORDER_URL
        resp_raw = self._send_signed_request(url, payload)
        if resp_raw.status_code != 201:
            raise ValueError('error on create new_issuance')
        resp = resp_raw.json()
        return resp['finalize'], resp['authorizations']

    def get_ids_authorization(self, url):
        """Fetch an authorization and extract its dns-01 challenge details."""
        headers = {'User-Agent': self.UA}
        resp_raw = requests.get(url, timeout=self.TIMEOUT, headers=headers)
        if resp_raw.status_code not in [200, 201]:
            raise ValueError('get_ids_authorization error')
        resp = resp_raw.json()
        ret = {'domain': resp['identifier']['value']}
        for x in resp['challenges']:
            if x['type'] == "dns-01":
                dns_challenge = x
                ret['dns_token'] = dns_challenge['token']
                ret['dns_challenge_url'] = dns_challenge['url']
        ret['url'] = url
        return ret

    def check_authorization_status(self, authorization_url):
        """Poll an authorization until it is 'pending' or 'valid'.

        Raises StopIteration after ACME_AUTH_STATUS_MAX_CHECK polls.
        """
        time.sleep(self.ACME_AUTH_STATUS_WAIT)  # give DNS a head start
        check_t = 0
        while True:
            headers = {'User-Agent': self.UA}
            check_resp = requests.get(authorization_url, timeout=self.TIMEOUT, headers=headers)
            auth_status = check_resp.json()['status']
            check_t += 1
            if check_t == self.ACME_AUTH_STATUS_MAX_CHECK:
                raise StopIteration('max check reached')
            if auth_status in ["pending", "valid"]:
                break
            else:
                time.sleep(self.ACME_AUTH_STATUS_WAIT)

    def respond_to_challenge(self, acme_keyauth, dns_challenge_url):
        """Tell the CA the challenge record is in place."""
        payload = {'keyAuthorzation': "%s" % acme_keyauth}
        resp_raw = self._send_signed_request(dns_challenge_url, payload)
        return resp_raw

    def send_csr(self, finalize_url, domains, privkey_pem: str):
        """Build a SAN CSR for `domains`, finalize the order, return the
        certificate download URL."""
        x509_req = OpenSSL.crypto.X509Req()
        x509_req.get_subject().CN = domains[0]
        SAN = ', '.join('DNS:' + x for x in domains).encode('utf-8')
        x509_req.add_extensions([OpenSSL.crypto.X509Extension('subjectAltName'.encode('utf-8'),
                                                              critical=False, value=SAN)])
        privkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey_pem.encode())
        x509_req.set_pubkey(privkey)
        x509_req.set_version(2)
        x509_req.sign(privkey, self.DIGEST_METHOD)
        csr = OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_ASN1, x509_req)
        resp_raw = self._send_signed_request(finalize_url, {'csr': self._b64(csr)})
        if resp_raw.status_code not in [200, 201]:
            raise ValueError('error in sending csr, return code:%s body:%s' % (resp_raw.status_code, resp_raw.text))
        resp = resp_raw.json()
        return resp['certificate']

    def download_certificate(self, certificate_url):
        """Download the issued certificate chain as PEM text."""
        resp_raw = self._send_signed_request(certificate_url, payload='DOWNLOAD_Z_CERTIFICATE')
        if resp_raw.status_code not in [200, 201]:
            raise ValueError('error fetching certificate: code=%d resp=%s' % (resp_raw.status_code, resp_raw.content))
        pem_certificate = resp_raw.content.decode('utf-8')
        return pem_certificate

    def obtain_certificate(self, domains: list, certificate_privkey: str = None):
        """Run the full dns-01 flow for `domains`; return (cert_pem, key_pem).

        Publishes the TXT values as `Challenges` DB rows, answers each
        challenge, finalizes with a CSR (generating a key if none given),
        downloads the certificate, then deletes the challenge rows.
        """
        if len(domains) == 0:
            return
        print("Requested Domain Issue:%s" % str(domains))
        finalize_url, authorizations = self.new_issuance(domains)
        dns_delete = []   # TXT rows to clean up afterwards
        responders = []   # challenge responses to send after DNS is live
        for url in authorizations:
            ids_auth = self.get_ids_authorization(url)
            authorization_url = ids_auth['url']
            dns_name = ids_auth['domain']
            dns_name_idn = idna.encode(dns_name).decode()
            dns_token = ids_auth['dns_token']
            dns_challenge_url = ids_auth['dns_challenge_url']
            acme_keyauthorization, domain_dns_value = self.get_keyauthorization(dns_token)
            new_challenge = Challenges(dns_name_idn, domain_dns_value)
            db.session.add(new_challenge)
            dns_delete.append({'dns_name': dns_name_idn, 'value': domain_dns_value})
            responders.append({
                'authorization_url': authorization_url,
                'acme_keyauthorization': acme_keyauthorization,
                'dns_challenge_url': dns_challenge_url
            })
        db.session.commit()
        for x in responders:
            self.check_authorization_status(x['authorization_url'])
            self.respond_to_challenge(x['acme_keyauthorization'], x['dns_challenge_url'])
        certificate_key = self.create_key().decode() if certificate_privkey is None else certificate_privkey
        certificate_url = self.send_csr(finalize_url, domains, certificate_key)
        certificate = self.download_certificate(certificate_url)
        # Remove the published challenge TXT rows now that issuance is done.
        for x in dns_delete:
            records = Challenges.query.filter_by(domain=x['dns_name']).filter_by(txt_record=x['value']).all()
            if records is not None:
                for y in records:
                    db.session.delete(y)
        db.session.commit()
        return certificate, certificate_key
if __name__ == "__main__":
    # Manual smoke test only: registers an account against the staging
    # directory, issues a cert for the two test domains, and stores it.
    account = Account(KEY1, "hrx@bupt.moe")
    client = Client(account)
    client.DIRECTORY_URL = STAGING_DIRECTORY_URL
    client.account_register()
    pem_cert, pem_key = client.obtain_certificate(['www.osn.me', 'osn.me'])
    stored = Certificates("osn.me", pem_cert, pem_key)
    db.session.add(stored)
    db.session.commit()
| [
"42616156+Sprizs@users.noreply.github.com"
] | 42616156+Sprizs@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.