content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import torch
import torch.nn as nn
import numpy as np
from ..utils import export
from torch.nn import functional as F
@export
@export
@export
@export
@export
@export
I = Id()
@export
@export
@export
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
26791,
1330,
10784,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
198,
31,
39344,
198,
198,
31,
39344,
198,
198,
... | 2.759494 | 79 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.loadUserHome, name='loadUsersHome'),
url(r'^viewProfile', views.viewProfile, name='viewProfile'),
url(r'^createUser', views.createUser, name='createUser'),
url(r'^submitUserCreation', views.submitUserCreation, name='submitUserCreation'),
url(r'^modifyUsers',views.modifyUsers,name="modifyUsers"),
url(r'^fetchUserDetails',views.fetchUserDetails,name="fetchUserDetails"),
url(r'^saveModifications',views.saveModifications,name="saveModifications"),
url(r'^deleteUser',views.deleteUser,name="deleteUser"),
url(r'^submitUserDeletion',views.submitUserDeletion,name="submitUserDeletion"),
url(r'^createNewACL$',views.createNewACL,name="createNewACL"),
url(r'^createACLFunc$',views.createACLFunc,name="createACLFunc"),
url(r'^deleteACL$',views.deleteACL,name="deleteACL"),
url(r'^deleteACLFunc$',views.deleteACLFunc,name="deleteACLFunc"),
url(r'^modifyACL$',views.modifyACL,name="modifyACL"),
url(r'^fetchACLDetails$',views.fetchACLDetails,name="fetchACLDetails"),
url(r'^submitACLModifications$',views.submitACLModifications,name="submitACLModifications"),
url(r'^changeUserACL$',views.changeUserACL,name="changeUserACL"),
url(r'^changeACLFunc$',views.changeACLFunc,name="changeACLFunc"),
url(r'^resellerCenter$',views.resellerCenter,name="resellerCenter"),
url(r'^saveResellerChanges$',views.saveResellerChanges,name="saveResellerChanges"),
url(r'^apiAccess$', views.apiAccess, name="apiAccess"),
url(r'^saveChangesAPIAccess$', views.saveChangesAPIAccess, name="saveChangesAPIAccess"),
url(r'^listUsers$', views.listUsers, name="listUsers"),
url(r'^fetchTableUsers$', views.fetchTableUsers, name="fetchTableUsers"),
url(r'^controlUserState$', views.controlUserState, name="controlUserState"),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
628,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5009,
13,
2220,
12982,
16060,
11,
1438,
11639,
2220,
14490,... | 2.629888 | 716 |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import hashlib
import json
import logging
from userservice.user import get_original_user, get_override_user
logger = logging.getLogger(__name__)
| [
2,
15069,
33448,
33436,
12,
2043,
11,
2059,
286,
2669,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
2985,
712,
501,
13,
7220,
133... | 3.263889 | 72 |
import numpy as np
import random
import copy
from utils import *
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
4866,
198,
6738,
3384,
4487,
1330,
1635,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
... | 1.635135 | 74 |
import pytest
from Application import application
@pytest.fixture | [
11748,
12972,
9288,
198,
6738,
15678,
1330,
3586,
198,
198,
31,
9078,
9288,
13,
69,
9602
] | 4.125 | 16 |
#!/usr/bin/python
import numpy
from textblob import Word
from cogpheno.apps.assessments.models import BehavioralTrait
from cogpheno.apps.assessments.views import
for term in terms:
make_new_concept(term)
from textblob.wordnet import Synset
# Add parts of speech
for behavior in BehavioralTrait.objects.all():
if behavior.wordnet_synset:
synset = Synset(behavior.wordnet_synset)
behavior.pos = synset.pos()
behavior.save()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
299,
32152,
198,
6738,
2420,
2436,
672,
1330,
9678,
198,
198,
6738,
43072,
31024,
78,
13,
18211,
13,
562,
408,
902,
13,
27530,
1330,
38483,
51,
12907,
198,
6738,
43072,
31024,
78,
13,
... | 2.606557 | 183 |
# This problem was recently asked by Uber:
# Given a number of integers, combine them so it would create the largest number.
from itertools import permutations
print(largestNum([17, 7, 2, 45, 72]))
# 77245217
| [
2,
220,
770,
1917,
373,
2904,
1965,
416,
12024,
25,
198,
198,
2,
11259,
257,
1271,
286,
37014,
11,
12082,
606,
523,
340,
561,
2251,
262,
4387,
1271,
13,
198,
6738,
340,
861,
10141,
1330,
9943,
32855,
628,
198,
198,
4798,
7,
28209,
... | 3.396825 | 63 |
# -*- coding: utf-8 -*-
from . import database
from . import sitecfg
from . import sleeper
from . import loot_prices
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
1330,
6831,
198,
6738,
764,
1330,
2524,
37581,
198,
6738,
764,
1330,
46594,
198,
6738,
764,
1330,
16702,
62,
1050,
1063,
628,
628,
198
] | 2.97561 | 41 |
print('-' * 28)
print('{:^28}'.format('BANCO DANIMONEY'))
print('-' * 28)
valor = int(input('Quanto deseja sacar? R$'))
total = valor
ced50 = total // 50
total %= 50
ced20 = total // 20
total %= 20
ced10 = total // 10
total %= 10
ced1 = total // 1
total %= 1
if ced50 > 0:
print(f'Total de {ced50} cédulas de R$50')
if ced20 > 0:
print(f'Total de {ced20} cédulas de R$20')
if ced10 > 0:
print(f'Total de {ced10} cédulas de R$10')
if ced1 > 0:
print(f'Total de {ced1} cédulas de R$1')
| [
4798,
10786,
19355,
1635,
2579,
8,
201,
198,
4798,
10786,
90,
25,
61,
2078,
92,
4458,
18982,
10786,
33,
1565,
8220,
360,
1565,
3955,
48399,
6,
4008,
201,
198,
4798,
10786,
19355,
1635,
2579,
8,
201,
198,
2100,
273,
796,
493,
7,
1541... | 2.06746 | 252 |
from rest_framework.test import APITestCase
from api.models import Favourite, Category
from ..mocks import jellof_rice
| [
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2043,
395,
20448,
198,
6738,
40391,
13,
27530,
1330,
376,
29023,
578,
11,
21743,
198,
6738,
11485,
76,
3320,
1330,
474,
695,
1659,
62,
20970,
628
] | 3.529412 | 34 |
import os
import warnings
import torch
import pydicom
import numpy as np
from natsort import natsorted
from torch.utils.data import Dataset
class Patient:
"""
3D-IRCADb-01 patient.
Parameters
----------
path : str
Path to patient records.
tissue : str, optional
Type of tissue mask to load.
References
----------
https://www.ircad.fr/research/3d-ircadb-01/
"""
def _list_dicoms(self):
"""
Get dicom paths in proper order.
Returns
-------
dicoms : list
List of dicom paths for the patient.
"""
dicompath = os.path.join(self.path, 'PATIENT_DICOM')
dicoms = [os.path.join(dicompath, img) for img in os.listdir(dicompath)]
# os sorts things lexicographically
dicoms = natsorted(dicoms)
return dicoms
def _list_masks(self):
"""
Get mask paths for a particular tissue.
If no tissue is specified, these paths will not be set.
Returns
-------
masks : list
List of mask paths for a tissue.
"""
if self.tissue:
maskpath = os.path.join(self.path, 'MASKS_DICOM')
maskpath = os.path.join(maskpath, self.tissue)
else:
maskpath = os.path.join(self.path, 'LABELLED_DICOM')
masks = [os.path.join(maskpath, img) for img in os.listdir(maskpath)]
# os sorts things lexicographically
masks = natsorted(masks)
return masks
def load_3d(self):
"""
Load 3D pixel array for the patient.
Returns
-------
arry : np.ndarray
3D pixel array for patient's CT scan.
"""
imgs = [pydicom.read_file(dicom) for dicom in self.dicoms]
arry = np.stack([img.pixel_array for img in imgs])
tensor = torch.tensor(arry)
return tensor
def load_slices(self):
"""
Load patient CT scan slices.
Returns
-------
slices : list of np.arrays
All 2D CT scans for a patient.
"""
dicoms = [pydicom.read_file(dicom) for dicom in self.dicoms]
slices = [dicom.pixel_array for dicom in dicoms]
slices = torch.tensor(slices)
return slices
def load_masks(self):
"""
Load all masks for a patient.
Returns
-------
masks : list of np.arrays or torch.tensors
All 2D segmentation masks of a given tissue for a patient.
Note: If using the original masks, they remain in np.arrays.
If binary masks are used, they are returned as torch.tensors.
"""
masks = [pydicom.read_file(mask) for mask in self.masks]
masks = [mask.pixel_array for mask in masks]
masks = torch.tensor(masks)
if self.binarymask:
# Not all masks in IRCAD are binary
if not self.tissue:
raise ValueError(f'Binary masks are not supported for multiple masks!')
masks = [torch.tensor(mask) for mask in masks]
ones = torch.ones_like(masks[0])
masks = [torch.where(mask > 0, ones, mask) for mask in masks]
return masks
def load_masks3D(self):
"""
Load all masks for a patient.
Returns
-------
masks : list of np.arrays or torch.tensors
All 2D segmentation masks of a given tissue for a patient.
Note: If using the original masks, they remain in np.arrays.
If binary masks are used, they are returned as torch.tensors.
"""
masks = [pydicom.read_file(mask) for mask in self.masks]
masks = np.stack([mask.pixel_array for mask in masks])
masks = torch.tensor(masks)
if self.binarymask:
# Not all masks in IRCAD are binary
if not self.tissue:
raise ValueError(f'Binary masks are not supported for multiple masks!')
masks = [torch.tensor(mask) for mask in masks]
ones = torch.ones_like(masks[0])
masks = np.stack([torch.where(mask > 0, ones, mask) for mask in masks])
return masks
class IRCAD:
"""
3D-IRCADb-01 dataset.
Parameters
----------
path : str
Path to IRCAD dataset.
References
----------
https://www.ircad.fr/research/3d-ircadb-01/
"""
def _list_patients(self):
"""
Get patient paths in proper order.
Returns
-------
patients : list
List of patient paths for the dataset.
"""
patients = [os.path.join(self.path, patient) for patient in os.listdir(self.path)]
# os sorts things lexicographically
patients = natsorted(patients)
return patients
class IRCAD3D(Dataset):
"""
3D IRCAD dataset.
Parameters
----------
path : str
Path to IRCAD dataset.
References
----------
https://www.ircad.fr/research/3d-ircadb-01/
"""
class IRCAD2D(Dataset):
"""
2D IRCAD dataset.
Instance of all patients' 2D slices of dicom images. Labels are masks for
either a single tissue, or, if no tissue is specified, for all tissues.
Parameters
----------
path : str
Path to IRCAD dataset.
tissue : str, optional
Type of tissue to segment. Options found in IRCAD `/MASKS_DICOM`.
transform : Pytorch transforms, optional
Pytorch transfrom for images.
References
----------
https://www.ircad.fr/research/3d-ircadb-01/
"""
def _load_slices(self):
"""
Loads all 2D CT slices in memory.
Returns
-------
all_slices : list of np.ndarrays
All 2D CT scans in natural order for each patient.
"""
all_slices = []
for path in self.ircad.patients:
patient = Patient(path)
all_slices.extend(patient.load_slices())
return all_slices
def _load_masks(self):
"""
Loads all 2D segmentation masks in memory.
If a given tissue is specified, then only that tissue mask will
be loaded. If no tissue is specified, then all masks will be loaded.
Returns
-------
all_labels : list of np.ndarrays
All 2D segmentation masks of a given tissue for each patient.
"""
all_masks = []
for path in self.ircad.patients:
try:
patient = Patient(path, self.tissue, self.binarymask)
all_masks.extend(patient.load_masks())
except:
FileNotFoundError('Patient {path} does not have masks for {self.tissue}')
pass
return all_masks
| [
11748,
28686,
198,
11748,
14601,
198,
198,
11748,
28034,
198,
11748,
279,
5173,
291,
296,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
299,
1381,
419,
1330,
299,
1381,
9741,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292... | 2.212378 | 3,070 |
from jina.peapods.pods.factory import PodFactory
from jina.peapods.pods import Pod
from jina.peapods.pods.compoundpod import CompoundPod
from jina.parsers import set_pod_parser
| [
6738,
474,
1437,
13,
431,
499,
12978,
13,
79,
12978,
13,
69,
9548,
1330,
17437,
22810,
198,
6738,
474,
1437,
13,
431,
499,
12978,
13,
79,
12978,
1330,
17437,
198,
6738,
474,
1437,
13,
431,
499,
12978,
13,
79,
12978,
13,
5589,
633,
... | 2.825397 | 63 |
from pyapprox.nataf_transformation import *
from pyapprox.probability_measure_sampling import rejection_sampling
from scipy.stats import norm as normal_rv
from scipy.stats import beta as beta_rv
from scipy.stats import gamma as gamma_rv
from functools import partial
import unittest
from pyapprox.utilities import get_tensor_product_quadrature_rule
if __name__=='__main__':
nataf_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestNatafTransformation)
unittest.TextTestRunner(verbosity=2).run(nataf_test_suite)
| [
6738,
12972,
1324,
13907,
13,
77,
1045,
69,
62,
7645,
1161,
1330,
1635,
198,
6738,
12972,
1324,
13907,
13,
1676,
65,
1799,
62,
1326,
5015,
62,
37687,
11347,
1330,
17927,
62,
37687,
11347,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593... | 2.729064 | 203 |
'''
Web Class Helper Speed Testing Module 2.0.1
This source code file is under MIT License.
Copyright (c) 2022 Class Tools Develop Team
Contributors: jsh-jsh ren-yc
'''
import psutil
import sys
from tkinter import *
from tkinter.messagebox import showerror
from time import sleep
root = Tk()
root.geometry('200x100')
root.title('Speed Test')
root.resizable(width = False, height = False)
Label(width = 100, height = 50, name = 'speed_label', text = '-.- KB / S', font = ('Hack', 20, 'bold')).pack()
root.after(1000, lambda :ui_updata(speed_test))
root.mainloop()
| [
7061,
6,
198,
13908,
5016,
5053,
525,
8729,
23983,
19937,
362,
13,
15,
13,
16,
198,
1212,
2723,
2438,
2393,
318,
739,
17168,
13789,
13,
198,
15269,
357,
66,
8,
33160,
5016,
20003,
6013,
4816,
198,
37146,
669,
25,
474,
1477,
12,
73,
... | 3.048649 | 185 |
import logging
import random
from types import CodeType, new_class
import uuid
import os
from flask import Blueprint, jsonify, session, request, current_app
from datetime import datetime, timedelta
from decimal import Decimal
from sqlalchemy.sql.expression import null
from app.models.model import Class, StuCls, Student, Teacher, Log, User, ClsWd
from app.utils.core import db
from sqlalchemy import or_, and_
from app.api.tree import Tree
from app.api.api_stu_cls import add_stu_cls, delete_stu_cls
from app.api.api_log import add_log
from app.utils.code import ResponseCode
from app.utils.response import ResMsg
from app.utils.util import route, Redis, CaptchaTool, PhoneTool
from app.utils.auth import Auth, login_required
from app.api.report import excel_write, word_write, pdf_write
from app.api.wx_login_or_register import get_access_code, get_wx_user_info, wx_login_or_register
from app.api.phone_login_or_register import SendSms, phone_login_or_register
from app.celery import add, flask_app_context
bp = Blueprint("class", __name__, url_prefix='/class/')
logger = logging.getLogger(__name__)
@route(bp, '/list', methods=["GET"])
@login_required
def class_list():
"""
获取班级列表
:return:
"""
res = ResMsg()
obj = request.args
class_name = obj.get("name") or ''
# student = obj.get("student") or ''
teacher_id = obj.get("teacher_id") or None
weekday = obj.get("weekday") or None
status = obj.get("status") or None
page_index = int(obj.get("page"))
page_size = int(obj.get("count"))
# 找出符合星期的所有数据
n_cls_wd = db.session.query(ClsWd).filter(or_(ClsWd.weekday == weekday, weekday == None)).all()
all_cls_wd = db.session.query(ClsWd).all()
db_class_id = db.session.query(Class).all()
fit_ids = []
if weekday == None:
for cls in db_class_id:
fit_ids.append(cls.id)
else:
if len(n_cls_wd) > 0:
for cw in n_cls_wd:
fit_ids.append(cw.class_id)
filters = {
or_(Class.class_name.like('%' + class_name + '%'), class_name == None),
or_(Class.teacher_id == teacher_id, teacher_id == None),
or_(Class.id.in_(fit_ids), fit_ids == []),
or_(Class.status == status, status == None)
}
db_class = db.session.query(Class, Teacher).\
outerjoin(Teacher, Class.teacher_id == Teacher.id).\
filter(*filters).order_by(Class.id).\
limit(page_size).offset((page_index-1)*page_size).all()
total_count = db.session.query(Class, Teacher).\
outerjoin(Teacher, Class.teacher_id == Teacher.id).\
filter(*filters).count()
all_student = db.session.query(Student).all()
# filter_student = db.session.query(Student).filter(or_(Student.name.like('%' + student + '%'), student == None)).all()
# current_app.logger.debug(db.session.query(Class, Teacher).\
# outerjoin(Teacher, Class.teacher_id == Teacher.id).\
# filter(*filters))
# total_count = len(db_class)
class_list = []
for o in db_class:
# 处理班级下面的学员信息
student_id_arr = []
n_stu_cls = db.session.query(StuCls).filter(StuCls.class_id == o[0].id).all()
for stu in n_stu_cls:
student_id_arr.append(str(stu.student_id))
student_list = []
if len(student_id_arr) > 0:
for sid in student_id_arr:
for stu in all_student:
if str(sid) == str(stu.id):
student_list.append(stu)
# 处理班级的星期信息
weekdayArr = []
for cw in all_cls_wd:
if cw.class_id == o[0].id:
weekdayArr.append(cw.weekday)
class_list.append({
"id": o[0].id,
"class_name": o[0].class_name,
"min_num": o[0].min_num,
"max_num": o[0].max_num,
"now_num": len(student_list),
"teacher_name": o[1].name if o [1] != None else None,
"teacher_id": o[1].id if o [1] != None else None,
"total_hour": o[0].total_hour,
"weekday": weekdayArr,
"begin_time": o[0].begin_time,
"end_time": o[0].end_time,
"classroom": o[0].classroom,
"status": o[0].status,
"target": o[0].target,
"student_id": student_list
})
data = {
"classes": class_list,
"page": page_index,
"count": page_size,
"total": total_count
}
res.update(data=data)
return res.data
@route(bp, '/add', methods=["POST"])
@login_required
def class_add():
"""
新增班级信息
:return:
"""
res = ResMsg()
obj = request.json
n_class = Class()
n_class.class_name = obj["name"]
n_class.target = obj["target"]
# n_class.weekday = obj["weekday"] or None
n_class.begin_time = obj["begin_time"]
n_class.end_time = obj["end_time"]
n_class.min_num = obj["min_num"] or None
n_class.max_num = obj["max_num"] or None
n_class.classroom = obj["classroom"]
n_class.total_hour = obj["total_hour"] or None
n_class.teacher_id = obj["teacher_id"] or None
n_class.status = obj["status"]
n_class.create_time = datetime.now()
n_class.update_time = datetime.now()
try:
db.session.add(n_class)
db.session.flush()
# 处理班级,星期对应数据
if len(obj['weekday']) > 0:
for day in obj['weekday']:
n_cls_wd = ClsWd()
n_cls_wd.class_id = n_class.id
n_cls_wd.weekday = day
db.session.add(n_cls_wd)
db.session.commit()
# 处理班级,学员对应数据
if len(obj["studentArr"]) > 0:
for o in obj["studentArr"]:
add_stu_cls(o["id"], n_class.id)
db.session.commit()
except:
db.session.rollback()
return res.data
@route(bp, '/edit', methods=["POST"])
@login_required
def class_edit():
"""
编辑班级信息
:return:
"""
res = ResMsg()
obj = request.json
n_class = db.session.query(Class).filter(Class.id == obj["id"]).first()
n_class.class_name = obj["name"]
n_class.target = obj["target"]
# n_class.weekday = obj["weekday"] or None
n_class.begin_time = obj["begin_time"]
n_class.end_time = obj["end_time"]
n_class.min_num = obj["min_num"] or None
n_class.max_num = obj["max_num"] or None
n_class.classroom = obj["classroom"]
n_class.total_hour = obj["total_hour"] or None
n_class.teacher_id = obj["teacher_id"] or None
n_class.status = obj["status"]
n_class.update_time = datetime.now()
studentIdArr = []
if len(obj["studentArr"]) > 0:
for o in obj["studentArr"]:
studentIdArr.append(str(o["id"]))
stuClsIdArr = []
n_stu_cls = db.session.query(StuCls).filter(StuCls.class_id == n_class.id).all()
for stu in n_stu_cls:
stuClsIdArr.append(str(stu.student_id))
user = db.session.query(User).filter(User.name == session["user_name"]).first()
try:
db.session.add(n_class)
db.session.commit()
# 处理班级,星期对应数据
if len(obj['weekday']) > 0:
db.session.query(ClsWd).filter(ClsWd.class_id == n_class.id).delete(synchronize_session=False)
db.session.commit()
for day in obj['weekday']:
n_cls_wd = ClsWd()
n_cls_wd.class_id = n_class.id
n_cls_wd.weekday = day
db.session.add(n_cls_wd)
db.session.commit()
# 处理班级,学员对应数据
if len(studentIdArr) > 0:
for sid in studentIdArr:
if sid not in stuClsIdArr:
add_stu_cls(sid, n_class.id)
# 添加日志
add_log(2, user.id, sid, n_class.teacher_id, n_class.id, '将其添加到了班级:' + n_class.class_name + '中')
for ssid in stuClsIdArr:
if ssid not in studentIdArr:
delete_stu_cls(ssid, n_class.id)
# 添加日志
add_log(2, user.id, ssid, n_class.teacher_id, n_class.id, '将其从班级:' + n_class.class_name + '中移除')
except:
db.session.rollback()
return res.data
@route(bp, '/delete', methods=["POST"])
@login_required
def class_delete():
"""
删除班级信息
:return:
"""
res = ResMsg()
obj = request.json
n_class = db.session.query(Class).filter(Class.id == obj["id"]).first()
n_stu_cls = db.session.query(StuCls).filter(StuCls.class_id == obj["id"]).all()
try:
db.session.delete(n_class)
db.session.delete(n_stu_cls)
db.session.commit()
except:
db.session.rollback()
return res.data
@route(bp, '/verify', methods=["POST"])
@login_required
def class_verify():
"""
销课
:return:
"""
res = ResMsg()
obj = request.json
student_ids = obj['studentIds']
hour = int(obj['hour'])
remark = obj['remark']
class_id = obj['classId']
teacher_id = obj['teacherId']
n_class = db.session.query(Class).filter(Class.id == class_id).first()
for id in student_ids:
student = db.session.query(Student).filter(Student.id == id).first()
if student.left_hour < hour:
res.update(code=ResponseCode.Fail, msg='某学生剩余课时已不足以销课,请检查后再操作')
break
for id in student_ids:
student = db.session.query(Student).filter(Student.id == id).first()
student.used_hour += hour
student.left_hour -= hour
student.update_time = datetime.now()
user = db.session.query(User).filter(User.name == session["user_name"]).first()
try:
add_log(3, user.id, id, teacher_id, class_id, '在班级:' + n_class.class_name + ' 消耗课时:' + str(hour) + '课时,销课备注为:' + remark)
db.session.add(student)
db.session.commit()
except:
db.session.rollback()
return res.data
| [
11748,
18931,
198,
11748,
4738,
198,
6738,
3858,
1330,
6127,
6030,
11,
649,
62,
4871,
198,
11748,
334,
27112,
198,
11748,
28686,
198,
6738,
42903,
1330,
39932,
11,
33918,
1958,
11,
6246,
11,
2581,
11,
1459,
62,
1324,
198,
6738,
4818,
... | 1.950315 | 5,072 |
import numpy as np
import matplotlib.pyplot as plt
import gym
#discretize the spaces
pole_theta_space = np.linspace(-0.20943951, 0.20943951, 10)
pole_theta_velocity_space = np.linspace(-4, 4, 10)
cart_position_space = np.linspace(-2.4, 2.4, 10)
cart_velocity_space = np.linspace(-4, 4, 10)
if __name__ == '__main__':
env = gym.make('CartPole-v0')
# model hyperparamters
ALPHA = 0.1
GAMMA = 0.9
EPS = 1.0
#construct state space
states = []
for i in range(len(cart_position_space)+1):
for j in range(len(cart_velocity_space)+1):
for k in range(len(pole_theta_space)+1):
for l in range(len(pole_theta_velocity_space)+1):
states.append((i,j,k,l))
Q1, Q2 = {}, {}
for s in states:
for a in range(2):
Q1[s,a] = 0
Q2[s,a] = 0
number_of_games = 10000
total_rewards = np.zeros(number_of_games)
for i in range(number_of_games):
if i % 1000 == 0:
print('starting game', i)
# cart x position, cart velocity, pole theta, pole velocity
observation = env.reset()
done = False
episode_rewards = 0
while not done:
if i % 500 == 0:
env.render()
s = getState(observation)
rand = np.random.random()
a = maxAction(Q1,Q2,s) if rand < (1-EPS) else env.action_space.sample()
observation_, reward, done, info = env.step(a)
episode_rewards += reward
s_ = getState(observation_)
rand = np.random.random()
if rand <= 0.5:
a_ = maxAction(Q1,Q1,s)
Q1[s,a] = Q1[s,a] + ALPHA*(reward + GAMMA*Q2[s_,a_] - Q1[s,a])
elif rand > 0.5:
a_ = maxAction(Q2,Q2,s)
Q2[s,a] = Q2[s,a] + ALPHA*(reward + GAMMA*Q1[s_,a_] - Q2[s,a])
observation = observation_
EPS -= 2/(number_of_games) if EPS > 0 else 0
total_rewards[i] = episode_rewards
plot_running_avg(total_rewards)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
11550,
198,
198,
2,
15410,
1186,
1096,
262,
9029,
198,
36869,
62,
1169,
8326,
62,
13200,
796,
45941,
13,
21602,
10223,
32590,
15,
... | 1.920074 | 1,076 |
import angr
import sys
"""
Solving reserial - NOT WORKING
flag: EZAV
```
python solve.py
```
"""
if __name__ == "__main__":
main()
| [
11748,
281,
2164,
198,
11748,
25064,
198,
198,
37811,
198,
50,
10890,
581,
48499,
532,
5626,
30936,
2751,
198,
32109,
25,
412,
57,
10116,
198,
198,
15506,
63,
198,
29412,
8494,
13,
9078,
198,
15506,
63,
198,
37811,
628,
198,
198,
361,... | 2.413793 | 58 |
from tkinter import *
from misc.findrobots import findrobots
Application()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
12747,
13,
19796,
22609,
1747,
1330,
1064,
22609,
1747,
628,
198,
198,
23416,
3419,
198
] | 3.391304 | 23 |
#!/usr/bin/python
import subprocess
import bottle
from bottle import route, static_file, debug, run, get, redirect
from bottle import post, request, template, response
import os, inspect, json, time, sys
import random
from threading import Thread, RLock
#enable bottle debug
debug(True)
# WebApp route path
# get directory of WebApp (bottleJQuery.py's dir)
rootPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
wifis = []
wifis_mutex = RLock()
IMAGEPATH = '/mnt/pictures'
IMAGEURLPREFIX = '/image/'
image_regex = '\\w+_[0-9]+\\.((gif)|(jpg))'
## for w in wifis:
## print w
@route('/')
# return redirect('/setup')
@route('{}<name:re:{}>'.format(IMAGEURLPREFIX, image_regex))
@route('/assets/<filename:re:.*svg>')
@route('/assets/<filename:re:.*woff2>')
@route('/assets/<filename:re:.*>')
#@route('/<filename:re:.*>')
#def html_file(filename):
# print 'root=%s' % rootPath
# return static_file(filename, root=rootPath + '/assets/' )
@route('/setup')
@route('/api/v1/wifis')
@route('/api/v1/delete/<filename:re:{}>'.format(image_regex))
@post('/api/v1/setup')
if __name__ == "__main__":
if len(sys.argv)>1:
rootPath=sys.argv[1]
print bottle.TEMPLATE_PATH
bottle.TEMPLATE_PATH.append(rootPath + "/views")
print bottle.TEMPLATE_PATH
print "using %s as root path" % rootPath
#only start thread in child process
#if is_child():
# thread = Thread(target=wifi_update_thread)
# thread.start()
run(host='0.0.0.0', port=80, reloader=True)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
850,
14681,
198,
11748,
9294,
198,
6738,
9294,
1330,
6339,
11,
9037,
62,
7753,
11,
14257,
11,
1057,
11,
651,
11,
18941,
198,
6738,
9294,
1330,
1281,
11,
2581,
11,
11055,
11,
288... | 2.495082 | 610 |
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'Sebastian'
from pycloud.pycloud.mongo import Model, ObjectID
import os
################################################################################################################
# Represents a user.
################################################################################################################
| [
2,
509,
15996,
12,
3106,
29704,
540,
10130,
1616,
357,
42,
35,
12,
18839,
1616,
8,
220,
198,
2,
15069,
357,
66,
8,
1853,
33976,
49808,
2059,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
220,
198,
2,
12680,
47466,
3180,
36592,
2389,
... | 3.665094 | 424 |
#
# Copyright (c) 2008-2015 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_file.interfaces.archive module
This module provides a single helper interface to handle archives.
"""
from zope.interface import Interface
__docformat__ = 'restructuredtext'
class IArchiveExtractor(Interface):
"""Archive contents extractor"""
def get_contents(self, data, mode='r'):
"""Get iterator over archive contents
Each iteration is a tuple containing data and file name.
"""
| [
2,
198,
2,
15069,
357,
66,
8,
3648,
12,
4626,
536,
959,
563,
4432,
330,
1279,
83,
2704,
273,
330,
5161,
14856,
400,
283,
13,
3262,
29,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
2... | 3.239437 | 284 |
from inspect import isfunction, signature, ismethod
from types import MethodType
from typing import Callable, List
from bobocep.rules.events.bobo_event import BoboEvent
from bobocep.rules.events.histories.bobo_history import BoboHistory
from bobocep.rules.predicates.bobo_predicate import BoboPredicate
class BoboPredicateCallable(BoboPredicate):
"""A predicate that evaluates using a custom function or method.
:param call: A callable function that is used to evaluate the
predicate. It must have exactly 3 parameters in its
signature and return a bool response.
:type call: Callable
:raises RuntimeError: Callable is not a function or method.
:raises RuntimeError: Callable does not have exactly 3 parameters
in its signature.
"""
PARAMETERS = 3
| [
6738,
10104,
1330,
318,
8818,
11,
9877,
11,
318,
24396,
198,
6738,
3858,
1330,
11789,
6030,
198,
6738,
19720,
1330,
4889,
540,
11,
7343,
198,
198,
6738,
275,
20391,
344,
79,
13,
38785,
13,
31534,
13,
65,
20391,
62,
15596,
1330,
5811,
... | 2.996441 | 281 |
import rotation_matrix
x_apt = 30
y_apt = -7.6
z_apt = 22
ii = -0.5
jj = -0.6
kk = 0.33
last_b = 30.234
rotation_matrix.transf(x_apt, y_apt, z_apt, ii, jj, kk, last_b)
| [
11748,
13179,
62,
6759,
8609,
201,
198,
201,
198,
87,
62,
2373,
796,
1542,
201,
198,
88,
62,
2373,
796,
532,
22,
13,
21,
201,
198,
89,
62,
2373,
796,
2534,
201,
198,
4178,
796,
532,
15,
13,
20,
201,
198,
41098,
796,
532,
15,
1... | 1.759615 | 104 |
#!/usr/bin/python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR MIT
import re
import sys
if __name__ == "__main__":
filenames = sys.argv[1:]
checks = [copyright_check(fname) for fname in filenames]
for i in range(len(filenames)):
print(f'Copyright check - {filenames[i]}: ', end='')
print('PASS') if checks[i] else print('FAIL')
if all(checks):
sys.exit(0)
else:
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
6375,
17168,
198,
11748,
... | 2.331776 | 214 |
from random import randint, sample, choice
import string
from Crypto.Cipher import AES
import struct
import select
import logging
from socket_bytes_producer import SocketBytesProducer
ENCODING = 'utf-8'
LEN_COLUMN_LEN = 6
iv_param = 'This is an IV456'
CRYPTO_INPUT_LEN_UNIT = 16
CIPHER_LEN_UNIT = 16 # aes密文长度为16的倍数
MAX_SALT_LEN = CIPHER_LEN_UNIT * 100 - 1 # 必须为此形式,否则pack数据长度(对CIPHER_LEN_UNIT取模)分布将具备可检测特征
logging.basicConfig(level=logging.INFO)
# 要加密的明文数据,长度必须是16的倍数,在data后添加salt是指满足
if __name__ == '__main__':
sock = SecureSocket('', 'ThisIs SecretKey')
original_data = b'1234567890123456'
print('original_data = ' + str(original_data))
encoded_data = sock._encode_data(original_data)
print('encoded_data = ' + str(encoded_data))
decoded_data = sock._decode_data(encoded_data)
print('decoded_data = ' + str(decoded_data))
| [
198,
6738,
4738,
1330,
43720,
600,
11,
6291,
11,
3572,
198,
11748,
4731,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
198,
11748,
2878,
198,
11748,
2922,
198,
11748,
18931,
198,
6738,
17802,
62,
33661,
62,
18230,
2189,
1330,
47068,
459... | 1.928412 | 447 |
"""Local Api tests."""
import json
import os
import pytest # type: ignore
import requests_mock # type: ignore
from airzone.localapi import Machine, OperationMode, Speed
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
response_test_path = os.path.join(THIS_DIR, "data/response.json")
@pytest.fixture
def mock_machine():
"""Fixture localapi Machine init with the data/response.json file."""
with requests_mock.Mocker() as mock_resp:
f = open(response_test_path,)
data = json.load(f)
machine_ipaddr = "0.0.0.0"
mock_addr = f"http://{machine_ipaddr}:3000/api/v1/hvac"
mock_resp.post(mock_addr, json=data)
return Machine(machine_ipaddr)
def test_create_machine(mock_machine):
"""Test the creation of a machine and zones with a valid json."""
assert mock_machine.speed == Speed.AUTO
assert mock_machine.operation_mode == OperationMode.COOLING
| [
37811,
14565,
5949,
72,
5254,
526,
15931,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
11748,
12972,
9288,
220,
1303,
2099,
25,
8856,
198,
11748,
7007,
62,
76,
735,
220,
1303,
2099,
25,
8856,
198,
198,
6738,
1633,
11340,
13,
12001,
... | 2.651297 | 347 |
'''Spectral Modelling'''
from __future__ import print_function, division
import numpy as np
import numpy.lib.recfunctions as rf
from mla.spectral import *
from mla.timing import *
import scipy.stats
from mla import tools
class PSinjector(object):
r'''injector of point source'''
def __init__(self, spectrum, mc , signal_time_profile = None , background_time_profile = (0,1)):
r'''initial the injector with a spectum and signal_time_profile. background_time_profile can be generic_profile or the time range.
args:
Spectrum: object inherited from BaseSpectrum.
mc: Monte Carlo simulation set
signal_time_profile(optional):Object inherited from generic_profile.Default is the same as background_time_profile.
background_time_profile(optional):Object inherited from generic_profile.Default is a uniform_profile with time range from 0 to 1.
'''
self.spectrum = spectrum
self.mc = mc
if isinstance(background_time_profile,generic_profile):
self.background_time_profile = background_time_profile
else:
self.background_time_profile = uniform_profile(background_time_profile[0],background_time_profile[1])
if signal_time_profile == None:
self.signal_time_profile = self.background_time_profile
else:
self.signal_time_profile = signal_time_profile
return
def set_backround(self, background ,grl ,background_window = 14):
r'''Setting the background information which will later be used when drawing data as background
args:
background:Background data
grl:The good run list
background_window: The time window(days) that will be used to estimated the background rate and drawn sample from.Default is 14 days
'''
start_time = self.background_time_profile.get_range()[0]
fully_contained = (grl['start'] >= start_time-background_window) &\
(grl['stop'] < start_time)
start_contained = (grl['start'] < start_time-background_window) &\
(grl['stop'] > start_time-background_window)
background_runs = (fully_contained | start_contained)
if not np.any(background_runs):
print("ERROR: No runs found in GRL for calculation of "
"background rates!")
raise RuntimeError
background_grl = grl[background_runs]
# Get the number of events we see from these runs and scale
# it to the number we expect for our search livetime.
n_background = background_grl['events'].sum()
n_background /= background_grl['livetime'].sum()
n_background *= self.background_time_profile.effective_exposure()
self.n_background = n_background
self.background = background
return
def draw_data(self):
r'''Draw data sample
return:
background: background sample
'''
n_background_observed = np.random.poisson(self.n_background)
background = np.random.choice(self.background, n_background_observed).copy()
background['time'] = self.background_time_profile.random(len(background))
return background
def update_spectrum(self, spectrum):
r"""Updating the injection spectrum.
args:
spectrum: Object inherited from BaseSpectrum.
"""
self.spectrum = spectrum
return
def add_background(self, background ,grl):
r''' Add Background data into the injector such that it can also inject background
args:
background: background dataset.
grl: Good run list.
'''
self.background = background
self.background_rate = len(background)/np.sum(grl['livetime'])
def produce_background(self,time_window):
r''' Samples background given the time_window'''
n_background = self.background_rate*time_window
n_background_observed = scipy.stats.poisson.rvs(n_background)
background = np.random.choice(data, n_background_observed)
background['time'] = self.background_time_profile.random(len(background))
return background
def _select_and_weight(self, ra, dec ,sampling_width = np.radians(1)):
r'''Prune the simulation set to only events close to a given source and calculate the
weight for each event. Add the weights as a new column to the simulation set
args:
ra: Right accension in radians
dec: Declination in radians
sampling_width(optional):The width that would be included. Default is 1 degree.
return:
The reduced Monte Carlo set with only events within the sampling width.
'''
assert('ow' in self.mc.dtype.names)
# Pick out only those events that are close in
# declination. We only want to sample from those.
sindec_dist = np.abs(dec-self.mc['trueDec'])
close = sindec_dist < sampling_width
reduced_sim = self.mc[close].copy()
#rescale ow
omega = 2*np.pi * (np.min([np.sin(dec+sampling_width), 1]) -\
np.max([np.sin(dec-sampling_width), -1]))
reduced_sim['ow'] /= omega
#append weight field but only fill it with zero
if "weight" not in reduced_sim.dtype.names:
reduced_sim = rf.append_fields(reduced_sim.copy(),
'weight',
np.zeros(len(reduced_sim)),
dtypes=np.float32)
return reduced_sim
def set_source_location(self, ra, dec, sampling_width = np.radians(1)):
r'''set the source location and select events in that dec band
args:
ra: Right accension in radians
dec: Declination in radians
sampling_width(optional):The width that would be included. Default is 1 degree.
'''
self.ra = ra
self.dec = dec
self.reduce_mc = self._select_and_weight(ra, dec, sampling_width)
def sample_from_spectrum(self,seed=None,poisson=True):
r''' Samples events from spectrum
args:
seed(optional): Random seed
poisson(optional): Whether enable poisson fluctuation in number of events drawns.Default is True.
returns
Events drawn from the Monte Carlo following the spectrum.
'''
if seed != None: np.random.seed(seed)
self.reduce_mc['weight']=self.spectrum(self.reduce_mc['trueE'])*self.reduce_mc['ow']*self.signal_time_profile.effective_exposure() * 3600 * 24
total = self.reduce_mc['weight'].sum()
if poisson:
n_signal_observed = scipy.stats.poisson.rvs(total)
else:
n_signal_observed = int(round(total)) #round to nearest integer if no poisson fluctuation
signal = np.random.choice(self.reduce_mc, n_signal_observed,
p = self.reduce_mc['weight']/total,
replace = False).copy() #Sample events
n_signal_observed = len(signal)
if n_signal_observed > 0:
#Rotate events to source location
ones = np.ones_like(signal['trueRa'])
signal['ra'], signal['dec'] = tools.rotate(signal['trueRa'],
signal['trueDec'],
ones*self.ra,
ones*self.dec,
signal['ra'],
signal['dec'])
signal['trueRa'], signal['trueDec'] = tools.rotate(signal['trueRa'],
signal['trueDec'],
ones*self.ra,
ones*self.dec,
signal['trueRa'],
signal['trueDec'])
signal['time'] = self.signal_time_profile.random(len(signal))
bgrange = self.background_time_profile.get_range()
contained_in_background = ((signal['time'] >= bgrange[0]) &\
(signal['time'] < bgrange[1]))
signal = signal[contained_in_background]
return signal
def sample_nevents(self,n_signal_observed,seed=None):
r''' Samples events from spectrum
args:
n_signal_observed: Sample size
seed(optional): Random seed
returns
n_signal_observed of Events drawn from the Monte Carlo following the spectrum shape.
'''
if seed != None: np.random.seed(seed)
self.reduce_mc['weight']=self.spectrum(self.reduce_mc['trueE'])*self.reduce_mc['ow']
total = self.reduce_mc['weight'].sum()
signal = np.random.choice(self.reduce_mc, n_signal_observed,
p = self.reduce_mc['weight']/total,
replace = False).copy()
n_signal_observed = len(signal)
if n_signal_observed > 0:
ones = np.ones_like(signal['trueRa'])
signal['ra'], signal['dec'] = tools.rotate(signal['trueRa'],
signal['trueDec'],
ones*self.ra,
ones*self.dec,
signal['ra'],
signal['dec'])
signal['trueRa'], signal['trueDec'] = tools.rotate(signal['trueRa'],
signal['trueDec'],
ones*self.ra,
ones*self.dec,
signal['trueRa'],
signal['trueDec'])
signal['time'] = self.signal_time_profile.random(len(signal))
bgrange = self.background_time_profile.get_range()
contained_in_background = ((signal['time'] >= bgrange[0]) &\
(signal['time'] < bgrange[1]))
signal = signal[contained_in_background]
return signal
| [
7061,
6,
49738,
1373,
3401,
9417,
7061,
6,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
8019,
13,
8344,
12543,
2733,
355,
374,
69,
198,
6738,
285,
... | 2.018641 | 5,311 |
import os, sys
import numpy as np
import cv2
import torch
import torch.nn as nn
from .ppyolov2_base import PPYOLOv2Base
from .utils import Decode
from .pt_utils import yolo_box, matrix_nms
ALL_CLASSES_PubLayNet = [
'Text', 'Title', 'List', 'Table', 'Figure',
] # PubLayNet
ALL_CLASSES_TableBank = ['Table'] # TableBank
| [
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
764,
14097,
349,
709,
17,
62,
8692,
1330,
21082,
56,
3535,
46,
85,
17,
1... | 2.572519 | 131 |
import sys
from heuristica import *
from adapter import Adapter
from guloso import rotaMinima
if __name__=='__main__':
main(sys.argv) | [
11748,
25064,
198,
6738,
339,
27915,
64,
1330,
1635,
198,
6738,
21302,
1330,
43721,
198,
6738,
47161,
28213,
1330,
5724,
64,
9452,
8083,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
7,
17597,
1... | 3.044444 | 45 |
from ._fasttree2 import TreeBuilderFastTree2 | [
6738,
47540,
7217,
21048,
17,
1330,
12200,
32875,
22968,
27660,
17
] | 4 | 11 |
import requests
import pandas as pd
import numpy as np
import io
from datetime import date, timedelta
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import zipfile
gauth = GoogleAuth()
gauth.LocalWebserverAuth() # Creates local webserver and auto handles authentication.
drive = GoogleDrive(gauth)
# Download all CaBi Tracker files from our Google Drive
data_folder = '175Zhy6KRdgOwVhwqeZPANHCv6GvJJfvv'
query = "'{}' in parents and trashed=false".format(data_folder)
file_list = drive.ListFile({'q': query}).GetList()
for file_obj in file_list:
file_create = drive.CreateFile({'id': file_obj['id']})
file_content = file_create.GetContentFile(file_obj['title'])
print("{} has been downloaded".format(file_obj['title']))
# TODO: Unzip all files and load into dataframe
if ".zip" in file_obj['title']:
zf = zipfile.ZipFile(file_obj['title'])
# Assume that there is only one file per zip and has same name, load as dataframe
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
# Ensure that datatime fields are datetime, not strings
data_df = pd.read_csv(file_obj['title'], sep=',', quotechar='"', parse_dates=['Start', 'End'], date_parser=dateparse)
# Calculate duration as a float
data_df['duration_calc'] = ((data_df['End'] - data_df['Start']) / np.timedelta64(1, 'm')).astype(float)
print(data_df.head())
print(data_df.describe())
print(data_df.dtypes)
import sys
sys.exit() | [
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33245,
198,
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
198,
6738,
279,
5173,
11590,
13,
18439,
1330,
3012,
30515,
198,
6738,
279,
51... | 2.623064 | 581 |
task = [[i + 1, 0] for i in range(12)]
for i in range(int(input())):
task[int(input()) - 1][1] += 1
for i, t in sorted(filter(lambda t: t[1] != 0, task), key = lambda x: x[1], reverse = True):
print(i, t)
| [
35943,
796,
16410,
72,
1343,
352,
11,
657,
60,
329,
1312,
287,
2837,
7,
1065,
15437,
198,
198,
1640,
1312,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
4876,
58,
600,
7,
15414,
28955,
532,
352,
7131,
16,
60,
15853... | 2.336957 | 92 |
#!/usr/bin/env python3
#
# Copyright 2017 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Query charmstore for subordinate
# TODO: Query charmstore for openstack-origin vs source
# TODO: Ready yaml config file for VIPS and other HA config
# TODO: Feature: The RenderedBundle object could be easily generalized
# Creating an OSRenderedBundle(RenderedBundle) with openstack specific options
import logging
import os
import yaml
from os_charms_tools.charm import Charm
from os_charms_tools.tools_common import render_target_inheritance
from os_charms_tools.base_constants import (
BASE_CHARMS,
BASE_RELATIONS,
LOCATION_OVERRIDES,
)
__author__ = 'David Ames <david.ames@canonical.com>'
VALID_SOURCES = [
'stable',
'next',
'github',
]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
2177,
19507,
605,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 3.316062 | 386 |
import os, sys, random, warnings
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'utils'))
from sorting_visualise import vis
if __name__=='__main__':
n = 100
item=[None]*n
for i in range(n):
item[i] = random.random()*100
insertion_sort(item, begin=0, end=len(item)-1, visualise=True)
more = False
if more:
item2 = [(314, 214),
(2141, 4),
(1242, 124),
(421, 124),
(411, 4),
(4124, 414),
(24124, 4),]
print(item2)
insertion_sort(item2, begin=0, end=len(item2)-1, condition = lambda a, b: a[1] >= b[1])
print(item2)
| [
11748,
28686,
11,
25064,
11,
4738,
11,
14601,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
492,
3256,
705,
492,
3256,
705,
26791,
6,
4008,
198,
6738,... | 1.865435 | 379 |
"""HACS Sensor Test Suite."""
# pylint: disable=missing-docstring
import pytest
from custom_components.hacs.repositories import HacsIntegrationRepository
from custom_components.hacs.sensor import (
HACSSensor,
async_setup_entry,
async_setup_platform,
)
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
37811,
39,
2246,
50,
35367,
6208,
26264,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
198,
11748,
12972,
9288,
198,
198,
6738,
2183,
62,
5589,
3906,
13,
71,
16436,
13,
260,
1930,
270,
1749,
1330,
367,
1... | 2.653543 | 127 |
'''
Given a non-empty array of digits representing a
non-negative integer, plus one to the integer.
The digits are stored such that the most significant digit is at the
head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself.
'''
| [
7061,
6,
198,
15056,
257,
1729,
12,
28920,
7177,
286,
19561,
10200,
257,
220,
198,
13159,
12,
31591,
18253,
11,
5556,
530,
284,
262,
18253,
13,
198,
198,
464,
19561,
389,
8574,
884,
326,
262,
749,
2383,
16839,
318,
379,
262,
220,
19... | 4.059524 | 84 |
from typing import List
from sort.simple.helper import Helper
from sort.simple.sort import Sort
from util.generic_type import C
| [
6738,
19720,
1330,
7343,
201,
198,
201,
198,
6738,
3297,
13,
36439,
13,
2978,
525,
1330,
5053,
525,
201,
198,
6738,
3297,
13,
36439,
13,
30619,
1330,
33947,
201,
198,
6738,
7736,
13,
41357,
62,
4906,
1330,
327,
201,
198,
201,
198,
2... | 2.678571 | 56 |
import numpy as np
import matplotlib.pyplot as plt
import numba as nb
@nb.njit
def lumpvor2d(xcol, zcol, xvor, zvor, circvor=1):
"""
Compute the velocity at an arbitrary collocation point (xcol, zcol) due
to vortex element of circulation circvor, placed at (xvor, zvor).
:param xcol: x-coordinate of the collocation point
:param zcol: z-coordinate of the collocation point
:param xvor: x-coordinate of the vortex
:param zvor: z-coordinate of the vortex
:param circvor: circulation strength of the vortex (base units)
:return: 1D array containing the velocity vector (u, w) (x-comp., z-comp.)
:rtype: ndarray
"""
# transformation matrix for the x, z distance between two points
dcm = np.array([[0.0, 1.0],
[-1.0, 0.0]])
# magnitude of the distance between two points
r_vortex_sq = (xcol - xvor) ** 2 + (zcol - zvor) ** 2
if r_vortex_sq < 1e-9: # some arbitrary threshold
return np.array([0.0, 0.0])
# the distance in x, and z between two points
dist_vec = np.array([xcol - xvor, zcol - zvor])
norm_factor = circvor / (2.0 * np.pi * r_vortex_sq) # circulation at
# vortex element / circumferential distance
# induced velocity of vortex element on collocation point
vel_vor = norm_factor * dcm @ dist_vec
return vel_vor
def plot_circulatory_loads(alpha_circ, dalpha_circ, cl_circ, cl_ss, x_wake, y_wake, u_inf, c, time_arr, plot_wake=False):
"""
Plots the circulatory and non-circulatory CL as function of the angle of attack, alpha
:param alpha_circ: corresponding AoA for the circulatory CLs
:param dalpha_circ: diervative of alpha
:param cl_circ: Array of circulatory CLs
:param alpha_ss: corresponding AoA for the non-circulatory CLs
:param cl_ss: Array of non-circulatory (steady-state) CLs
"""
fig, ax = plt.subplots(1, 2, dpi=150, constrained_layout=True, sharey=True)
# compute quasi-steady CL
alpha_qs = (alpha_circ + c / (2 * u_inf) * dalpha_circ)
cl_qs = 2 * np.pi * alpha_qs
# only plot 1 period of unsteady CL and steady CL
idx = np.where(alpha_circ[1:] * alpha_circ[:-1] < 0)[0][2] + 1
idx = None
# unsteady CL
ax[0].plot(np.degrees(alpha_circ)[:idx], cl_circ[:idx], label=r"Unsteady $C_l$", linestyle='-.')
ax[1].plot(np.degrees(alpha_circ)[:idx], cl_circ[:idx], label=r"Unsteady $C_l$", linestyle='-.')
# steady CL
ax[0].plot(np.degrees(alpha_circ)[:idx], cl_ss[:idx], label=r"Steady $C_l$", c='r')
# quasi-steady CL
ax[1].plot(np.degrees(alpha_circ), cl_qs, label=r"Quasi-steady $C_l$", c='r', linestyle='--')
ax[0].grid()
ax[1].grid()
ax[0].legend(prop={"size": 14}, loc="lower right")
ax[1].legend(prop={"size": 14}, loc="lower right")
ax[0].set_ylabel(r"$C_l$ [-]", fontsize=14)
ax[0].set_xlabel(r"$\alpha$ $[\circ]$", fontsize=14)
ax[1].set_xlabel(r"$\alpha$ $[\circ]$", fontsize=14)
if plot_wake:
fig, ax = plt.subplots(1, 1, dpi=150, constrained_layout=True)
ax.scatter(x_wake, y_wake, label="Wake vortices")
ax.grid()
ax.set_xlabel("Horizontal distance [m]", fontsize=14)
ax.set_ylabel("Vertical distance [m]", fontsize=14)
ax.legend(prop={"size": 14}, loc=1)
# compute equivalent AoA
x_lag_old = 0
y_lag_old = 0
a1 = 0.165
a2 = 0.335
b1 = 0.045
b2 = 0.3
dt = 0.1
idx = np.where(alpha_circ[1:] * alpha_circ[:-1] < 0)[0][1] + 1
dalpha_qs = alpha_qs[1:] - alpha_qs[:-1]
alpha_e = []
for i, alpha_curr in enumerate(alpha_qs[1:]):
ds = 2 * dt * u_inf / c
x_lag = x_lag_old * np.exp(-b1 * ds) + dalpha_qs[i] * a1 * np.exp(-b1 * ds / 2)
y_lag = y_lag_old * np.exp(-b2 * ds) + dalpha_qs[i] * a2 * np.exp(-b2 * ds / 2)
alpha_e.append(alpha_curr - x_lag - y_lag)
x_lag_old = x_lag
y_lag_old = y_lag
# compute s
s = 2 * time_arr * u_inf / c
fig, ax = plt.subplots(1, 1, dpi=150, constrained_layout=True)
ax.plot(s[1:idx], np.degrees(alpha_e)[1:idx], label="Unsteady", linestyle='-.')
ax.plot(s[1:idx], np.degrees(alpha_qs)[1:idx], label="Quasi-steady", linestyle='--')
ax.plot(s[1:idx], np.degrees(alpha_circ)[1:idx], label="Steady", c='r')
ax.grid()
ax.legend(prop={"size": 14})
ax.set_xlabel("semi-chord s [-]", fontsize=14)
ax.set_ylabel(r"$\alpha$ [$\circ$]", fontsize=14)
@nb.njit()
@nb.njit() | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
997,
7012,
355,
299,
65,
628,
198,
31,
46803,
13,
77,
45051,
198,
4299,
23844,
20867,
17,
67,
7,
87,
4033,
11,
1976,
4033,
11,
... | 2.168326 | 2,109 |
from RestrictionTypeDetector import RestrictionTypeDetector
from RestrictionTypeDetector import TYPE_INT
from RestrictionTypeDetector import TYPE_FLOAT
from RestrictionTypeDetector import MEASURE_OCCURRENCE
class DisjointClassDetector(RestrictionTypeDetector):
"""
This class serves as interface for all Restriction Type Statistics of disjoint class expressions.
It defines the statistical metrics amount, average, median, min and max.
Subclasses of this class, should implement the compute method in which they should perform
their computation and call the set* methods of this class here.
""" | [
198,
6738,
37163,
295,
6030,
11242,
9250,
1330,
37163,
295,
6030,
11242,
9250,
198,
6738,
37163,
295,
6030,
11242,
9250,
1330,
41876,
62,
12394,
198,
6738,
37163,
295,
6030,
11242,
9250,
1330,
41876,
62,
3697,
46,
1404,
198,
6738,
37163,
... | 3.955414 | 157 |
"""
These are all of the signatures related to decoding
"""
from signatures.abstracts import Signature
| [
37811,
201,
198,
4711,
389,
477,
286,
262,
17239,
3519,
284,
39938,
201,
198,
37811,
201,
198,
6738,
17239,
13,
397,
8709,
82,
1330,
34894,
201,
198,
201,
198
] | 3.758621 | 29 |
# stdlib imports
import sys
import time
# FAPWS imports
import config
import errors
access_logfid = sys.stdout
error_logfid = sys.stdout
def setup():
'''Update some global variables based on command-line configuration.
'''
try:
if config.conf['error_log']:
error_logfid = open(config.conf['error_log'], 'a', 1)
else:
error_logfid = sys.stdout
except Exception, e:
errors.log_setup("can't log errors to file '%s': %s" %
(config.conf['error_log'], e))
try:
if config.conf['access_log']:
access_logfid = open(config.conf['access_log'], 'a', 1)
else:
access_logfid = sys.stdout
except Exception, e:
errors.log_setup("can't log access to file '%s': %s" %
(config.cong['access_log'], e))
| [
2,
14367,
8019,
17944,
201,
198,
11748,
25064,
201,
198,
11748,
640,
201,
198,
201,
198,
2,
376,
2969,
19416,
17944,
201,
198,
11748,
4566,
201,
198,
11748,
8563,
201,
198,
201,
198,
15526,
62,
6404,
69,
312,
796,
25064,
13,
19282,
... | 2.110565 | 407 |
"""
File: my_drawing.py
Name: Ethan Huang
----------------------
TODO:
This program is a drawing I create for the drawing competition of StanCode101
It builds an interface from the background to the front layer,
mimicking the "Kahoot" game to ask users to choose the real Karel.
"""
from campy.graphics.gobjects import GOval, GRect, GLabel, GPolygon, GLine
from campy.graphics.gwindow import GWindow
window = GWindow(800, 550, title='Which One IS Karel?')
def main():
"""
TODO:
This program builds an interface from the background to the front layer,
from background to blocks, karels and texts.
"""
build_background()
build_blocks()
build_karels()
build_labels()
def build_background():
"""
This function builds the background of the drawing with three parts.
"""
layer_1 = GRect(800, 550)
layer_1.filled = True
layer_1.color = 'silver'
layer_1.fill_color = 'silver'
window.add(layer_1)
layer_2 = GRect(800, 90)
layer_2.filled = True
layer_2.color = 'whitesmoke'
layer_2.fill_color = 'whitesmoke'
window.add(layer_2)
layer_3 = GRect(800, 40, x=0, y=510)
layer_3.filled = True
layer_3.color = 'whitesmoke'
layer_3.fill_color = 'whitesmoke'
window.add(layer_3)
def build_blocks():
"""
This function builds the blocks of the drawing
"""
block_1 = GRect(375, 80, x=20, y=330)
block_1.filled = True
block_1.color = 'firebrick'
block_1.fill_color = 'firebrick'
window.add(block_1)
block_2 = GRect(375, 80, x=405, y=330)
block_2.filled = True
block_2.color = 'steelblue'
block_2.fill_color = 'steelblue'
window.add(block_2)
block_3 = GRect(375, 80, x=20, y=420)
block_3.filled = True
block_3.color = 'goldenrod'
block_3.fill_color = 'goldenrod'
window.add(block_3)
block_4 = GRect(375, 80, x=405, y=420)
block_4.filled = True
block_4.color = 'forestgreen'
block_4.fill_color = 'forestgreen'
window.add(block_4)
block_5 = GRect(60, 40, x=720, y=120)
block_5.filled = True
block_5.color = 'dodgerblue'
block_5.fill_color = 'dodgerblue'
window.add(block_5)
circle_1 = GOval(90, 90, x=20, y=170)
circle_1.filled = True
circle_1.color = 'blueviolet'
circle_1.fill_color = 'blueviolet'
window.add(circle_1)
def build_karels():
"""
This function builds four Karels
"""
build_karel1()
build_karel2()
build_karel3()
build_karel4()
def build_karel1():
"""
This function builds the first karel
"""
head = GOval(80, 55, x=190, y=167)
head.filled = True
head.color = 'black'
head.fill_color = 'gray'
window.add(head)
r_eye = GRect(13, 13, x=212, y=189)
r_eye.filled = True
r_eye.color = 'black'
r_eye.fill_color = 'blue'
window.add(r_eye)
l_eye = GRect(13, 13, x=235, y=189)
l_eye.filled = True
l_eye.color = 'black'
l_eye.fill_color = 'blue'
window.add(l_eye)
r_eyeb = GLine(212, 185, 225, 185)
window.add(r_eyeb)
l_eyeb = GLine(235, 185, 248, 185)
window.add(l_eyeb)
hands = GRect(105, 45, x=177, y=237)
hands.filled = True
hands.color = 'black'
hands.fill_color = 'lime'
window.add(hands)
body_1 = GRect(60, 65, x=201, y=223)
body_1.filled = True
body_1.color = 'black'
body_1.fill_color = 'blue'
window.add(body_1)
body_2 = GRect(80, 60, x=190, y=230)
body_2.filled = True
body_2.color = 'black'
body_2.fill_color = 'blue'
window.add(body_2)
r_foot = GOval(29, 24, x=190, y=290)
r_foot.filled = True
r_foot.color = 'black'
r_foot.fill_color = 'red'
window.add(r_foot)
l_foot = GOval(29, 24, x=241, y=290)
l_foot.filled = True
l_foot.color = 'black'
l_foot.fill_color = 'red'
window.add(l_foot)
label = GPolygon()
label.add_vertex((230, 130))
label.add_vertex((218, 150))
label.add_vertex((242, 150))
label.filled = True
label.fill_color = 'firebrick'
label.color = 'firebrick'
window.add(label)
def build_karel2():
"""
This function builds the second karel
"""
add = 1
head = GOval(80, 55, x=190+120*add, y=167)
head.filled = True
head.color = 'black'
head.fill_color = 'gray'
window.add(head)
r_eye = GRect(13, 13, x=212+120*add, y=189)
r_eye.filled = True
r_eye.color = 'black'
r_eye.fill_color = 'blue'
window.add(r_eye)
l_eye = GRect(13, 13, x=235+120*add, y=189)
l_eye.filled = True
l_eye.color = 'black'
l_eye.fill_color = 'blue'
window.add(l_eye)
mouth = GPolygon()
mouth.filled = True
mouth.fill_color = 'red'
mouth.color = 'black'
mouth.add_vertex((350, 205))
mouth.add_vertex((353, 211))
mouth.add_vertex((347, 211))
window.add(mouth)
hands = GRect(105, 45, x=177+120*add, y=237)
hands.filled = True
hands.color = 'black'
hands.fill_color = 'lime'
window.add(hands)
body_1 = GRect(60, 65, x=201+120*add, y=223)
body_1.filled = True
body_1.color = 'black'
body_1.fill_color = 'blue'
window.add(body_1)
body_2 = GRect(80, 60, x=190+120*add, y=230)
body_2.filled = True
body_2.color = 'black'
body_2.fill_color = 'blue'
window.add(body_2)
r_foot = GOval(29, 24, x=190+120*add, y=290)
r_foot.filled = True
r_foot.color = 'black'
r_foot.fill_color = 'red'
window.add(r_foot)
l_foot = GOval(29, 24, x=241+120*add, y=290)
l_foot.filled = True
l_foot.color = 'black'
l_foot.fill_color = 'red'
window.add(l_foot)
label1 = GPolygon()
label1.add_vertex((230+120*add, 128))
label1.add_vertex((218+120*add, 140))
label1.add_vertex((242+120*add, 140))
label1.filled = True
label1.fill_color = 'steelblue'
label1.color = 'steelblue'
window.add(label1)
label2 = GPolygon()
label2.add_vertex((230 + 120 * add, 152))
label2.add_vertex((218 + 120 * add, 140))
label2.add_vertex((242 + 120 * add, 140))
label2.filled = True
label2.fill_color = 'steelblue'
label2.color = 'steelblue'
window.add(label2)
def build_karel3():
"""
This function builds the third karel
"""
add = 2
head = GOval(80, 55, x=190 + 120 * add, y=167)
head.filled = True
head.color = 'black'
head.fill_color = 'gray'
window.add(head)
r_eye = GRect(13, 13, x=212 + 120 * add, y=189)
r_eye.filled = True
r_eye.color = 'black'
r_eye.fill_color = 'blue'
window.add(r_eye)
l_eye = GRect(13, 13, x=235 + 120 * add, y=189)
l_eye.filled = True
l_eye.color = 'black'
l_eye.fill_color = 'blue'
window.add(l_eye)
hands = GRect(105, 45, x=177 + 120 * add, y=237)
hands.filled = True
hands.color = 'black'
hands.fill_color = 'lime'
window.add(hands)
body_1 = GRect(60, 65, x=201 + 120 * add, y=223)
body_1.filled = True
body_1.color = 'black'
body_1.fill_color = 'blue'
window.add(body_1)
body_2 = GRect(80, 60, x=190 + 120 * add, y=230)
body_2.filled = True
body_2.color = 'black'
body_2.fill_color = 'blue'
window.add(body_2)
r_foot = GOval(29, 24, x=190 + 120 * add, y=290)
r_foot.filled = True
r_foot.color = 'black'
r_foot.fill_color = 'red'
window.add(r_foot)
l_foot = GOval(29, 24, x=241 + 120 * add, y=290)
l_foot.filled = True
l_foot.color = 'black'
l_foot.fill_color = 'red'
window.add(l_foot)
label = GOval(22, 22, x=218+120*add, y=130)
label.filled = True
label.fill_color = 'goldenrod'
label.color = 'goldenrod'
window.add(label)
def build_karel4():
"""
This function builds the fourth karel
"""
add = 3
head = GOval(80, 55, x=190 + 120 * add, y=167)
head.filled = True
head.color = 'black'
head.fill_color = 'gray'
window.add(head)
hair1 = GLine(590, 167, 590, 161)
hair2 = GLine(588, 168, 585, 162)
hair3 = GLine(592, 168, 595, 162)
hair4 = GLine(585, 168, 582, 162)
hair5 = GLine(595, 168, 598, 162)
window.add(hair1)
window.add(hair2)
window.add(hair3)
window.add(hair4)
window.add(hair5)
r_eye = GOval(14, 14, x=212 + 120 * add, y=189)
r_eye.filled = True
r_eye.color = 'black'
r_eye.fill_color = 'blue'
window.add(r_eye)
l_eye = GOval(14, 14, x=235 + 120 * add, y=189)
l_eye.filled = True
l_eye.color = 'black'
l_eye.fill_color = 'blue'
window.add(l_eye)
hands = GRect(105, 45, x=177 + 120 * add, y=237)
hands.filled = True
hands.color = 'black'
hands.fill_color = 'lime'
window.add(hands)
body_1 = GRect(60, 65, x=201 + 120 * add, y=223)
body_1.filled = True
body_1.color = 'black'
body_1.fill_color ='blue'
window.add(body_1)
body_2 = GRect(80, 60, x=190 + 120 * add, y=230)
body_2.filled = True
body_2.color = 'black'
body_2.fill_color = 'blue'
window.add(body_2)
r_foot = GOval(29, 24, x=190 + 120 * add, y=290)
r_foot.filled = True
r_foot.color = 'black'
r_foot.fill_color = 'red'
window.add(r_foot)
l_foot = GOval(29, 24, x=241 + 120 * add, y=290)
l_foot.filled = True
l_foot.color = 'black'
l_foot.fill_color = 'red'
window.add(l_foot)
label = GRect(20, 20, x=218+120*add, y=130)
label.filled = True
label.fill_color = 'forestgreen'
label.color = 'forestgreen'
window.add(label)
def build_labels():
    """
    Draws every text caption on the quiz screen plus the four whitesmoke
    answer-shape markers (triangle, diamond, circle, square).
    """
    # (text, font, color, x, y) for each caption, in draw order
    captions = [
        ('Which one is Karel?', 'Courier-25', 'black', 260, 60),
        ('19', 'Courier-50', 'whitesmoke', 37, 242),
        ('skip', 'Courier-20', 'whitesmoke', 726, 152),
        ('Answers', 'Courier-20-italic', 'black', 698, 270),
        ('0', 'Courier-50-italic', 'black', 722, 252),
        ('Game PIN: SC101', 'Courier-20', 'black', 20, 540),
    ]
    for text, font, color, x, y in captions:
        caption = GLabel(text)
        caption.font = font
        caption.color = color
        window.add(caption, x=x, y=y)
    # answer marker 1: upward-pointing triangle
    triangle = GPolygon()
    for vertex in ((210, 360), (197, 380), (221, 380)):
        triangle.add_vertex(vertex)
    triangle.filled = True
    triangle.color = 'whitesmoke'
    triangle.fill_color = 'whitesmoke'
    window.add(triangle)
    # answer marker 2: diamond built from two opposing triangles
    for tip_y in (359, 381):
        half = GPolygon()
        for vertex in ((210 + 380, tip_y), (198 + 380, 370), (221 + 380, 370)):
            half.add_vertex(vertex)
        half.filled = True
        half.fill_color = 'whitesmoke'
        half.color = 'whitesmoke'
        window.add(half)
    # answer marker 3: circle
    circle = GOval(23, 23, x=198, y=450)
    circle.filled = True
    circle.fill_color = 'whitesmoke'
    circle.color = 'whitesmoke'
    window.add(circle)
    # answer marker 4: square
    square = GRect(20, 20, x=583, y=450)
    square.filled = True
    square.fill_color = 'whitesmoke'
    square.color = 'whitesmoke'
    window.add(square)
if __name__ == '__main__':
    # Entry-point guard: main() is defined elsewhere in this file --
    # presumably it builds the window and calls the build_* helpers; confirm.
    main()
| [
37811,
198,
8979,
25,
616,
62,
19334,
278,
13,
9078,
198,
5376,
25,
28926,
31663,
198,
19351,
438,
198,
51,
3727,
46,
25,
198,
1212,
1430,
318,
257,
8263,
314,
2251,
329,
262,
8263,
5449,
286,
7299,
10669,
8784,
198,
1026,
12188,
28... | 2.108268 | 5,394 |
import sys
import numpy as np
sys.path.append('..')
from Game import Game
from .DotsAndBoxesLogic import Board
| [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
492,
11537,
198,
6738,
3776,
1330,
3776,
198,
6738,
764,
35,
1747,
1870,
14253,
274,
11187,
291,
1330,
5926,
198
] | 3.111111 | 36 |
###############################################################################
# Name: Siebren Kazemier
# Student number: 12516597
# School: Uva
# Project: Assignment week 4, Converting CSV file to JSON
###############################################################################
import pandas as pd
# csv_file = read_csv("voorraad_woningen.csv", sep=";")
if __name__ == "__main__":
    # Convert the housing-stock CSV into JSON when run as a script;
    # main() is defined elsewhere in this file.
    main("voorraad_woningen.csv", "voorraad_woningen.json")
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
6530,
25,
48931,
65,
918,
16385,
368,
959,
198,
2,
13613,
1271,
25,
13151,
1433,
43239,
198,
2,
3961,
25,
471,
6862,
198,
2,
4935,
25,
50144,
1285,
604,
11,
35602,
889,
44189,
2393,
284,
1944... | 3.554688 | 128 |
from Posicao import Posicao
from AEstrela import AEstrela
from QuebraCabeca import QuebraCabeca
from QuebraCabecaImp import QuebraCabecaImp
import math
import queue
import heapq
from random import gammavariate, shuffle | [
6738,
18574,
3970,
78,
1330,
18574,
3970,
78,
198,
6738,
317,
22362,
2411,
64,
1330,
317,
22362,
2411,
64,
198,
6738,
4670,
16057,
34,
397,
31047,
1330,
4670,
16057,
34,
397,
31047,
198,
6738,
4670,
16057,
34,
397,
31047,
26950,
1330,
... | 3.205882 | 68 |
# Challenge 11
# The program asks the user to input two numbers.
# It will then output the larger of these two numbers.

# Read two whole numbers from the user (int() raises ValueError on bad input).
num1 = int(input("please enter a number: "))
num2 = int(input("please enter a second number: "))

if num1 > num2:
    print("Num1 is bigger")
elif num2 > num1:
    print("Num2 is bigger")
else:
    # Fixed user-facing message: was the ungrammatical "there the same".
    print("They are the same")
| [
2,
41812,
3540,
1367,
201,
198,
2,
464,
1430,
7893,
262,
2836,
284,
5128,
734,
3146,
13,
201,
198,
2,
1026,
481,
788,
5072,
1169,
4025,
286,
777,
734,
3146,
13,
201,
198,
201,
198,
22510,
16,
796,
493,
7,
15414,
7203,
29688,
3802,... | 2.811475 | 122 |
from relogic.textkit.semparse.sql.crawled_sql.sql_helper import get_query_tables, get_query_columns, get_query_tokens, generalize_sql
from relogic.textkit.semparse.sql.crawled_sql.verify_sequence import verify
import sqlparse
import copy
from moz_sql_parser import format, parse
import re
import json
if __name__ == "__main__":
    # Ad-hoc smoke test: feed a grab-bag of real-world SQL statements through
    # process() and show either the parsed result (as JSON) or the raw SQL.
    # process() is defined elsewhere in this module; it appears to return a
    # dict on success and None on failure (inferred from use -- confirm).
    sqls = []
    sql = "SELECT feature_id AS glycine_trna_primary_transcript_id, feature.* FROM feature INNER JOIN cvterm ON (feature.type_id = cvterm.cvterm_id) WHERE cvterm.name = 'glycine_tRNA_primary_transcript'"
    sqls.append(sql)
    sql = "select * from students except all (select StudentID, StudentName, GroupID from students natural join marks natural join courses where (courses.coursename = 'Bazy dannykh'))"
    sqls.append(sql)
    sql = "SELECT feature_id AS glycine_trna_primary_transcript_id, feature.* FROM feature INNER JOIN cvterm ON (feature.type_id = cvterm.cvterm_id) WHERE cvterm.name = 'glycine_tRNA_primary_transcript'"
    sqls.append(sql)
    sql = "SELECT film.film_id ,film.title ,inventory.inventory_id FROM film LEFT OUTER JOIN inventory ON film.film_id = inventory.film_id"
    sqls.append(sql)
    sql = "SELECT first_name, replace(phone_number, '.', '-') from employees"
    sqls.append(sql)
    sql = "select v_EntGid, v_ModelGid, v_TaskGid_T4, '**CreateGid**', '**CreateCode**', '@@', 1 from dual"
    sqls.append(sql)
    sql = "select deleteXML(V_RESOURCE_XML,'/r Resource/r Parents',XDB_NAMESPACES.RESOURCE_PREFIX_R) into V_RESOURCE_XML from dual"
    sqls.append(sql)
    sql = "select * from table(DBMS_XPLAN.DISPLAY_CURSOR('4suk9kmn1wjh5', null, 'SERIAL'))"
    sqls.append(sql)
    sql = "SELECT B.YWLSH INTO V_YWLSH FROM UTB_YH_FUNDTRADE_DETAIL B WHERE B.PLLSH = V_D.PLLSH AND ROWNUM = 1"
    sqls.append(sql)
    sql = "SELECT _tenantId,id,'ALun ','50','50'/*Pai Xu */, '1', SYSDATE(), 1, '0' FROM dictionary WHERE value ='dic_project_round' AND foo IN ('880987','882618','708228','522330')"
    sqls.append(sql)
    sql = "select firstname, lastname, Description, salary from job join employee on employee.jobid = job.id where description = @JobDescription"
    sqls.append(sql)
    sql = "select sum(gets) \"Gets\", avg(getmisses) \"Get Misses\", (1-(sum(getmisses)/sum(gets))) * 100 \"Hit Ratio\" from v$rowcache"
    sqls.append(sql)
    sql = "SELECT * FROM json_test WHERE JSON_LENGTH( JSON_KEYS( col_jsonvalue ) ) = 4 LIMIT 0 /* QNO 417 CON_ID 14 */"
    sqls.append(sql)
    sql = "SELECT DATE( ( SUBDATE( col_varchar_64_key , col_varchar_1_key ) ) ) AS field1, -6184005238333112320 / 'owpqdtjcxesnizzfscpdejljmtjjobtqvwgjsqfuhsxzqyeimorouyryszsaheqttgayltcuslluunjvtfaz' AS field2 FROM table1000_int_autoinc WHERE ADDDATE( col_time , '2026-11-16 14 43 00.008148' ) ORDER BY field1, field2 LIMIT 3 /* QNO 915 CON_ID 164 */"
    sqls.append(sql)
    sql = "select count(p1.line#) as lines_present from plsql_profiler_lines_cross_run p1 where (p1.unit_type in ( 'PACKAGE BODY', 'TYPE BODY', 'PROCEDURE', 'FUNCTION' ) )"
    sqls.append(sql)
    # We will ignore all sql that contains #.
    sql = "SELECT @AUDIT_LOG_TRANSACTION_ID, convert(nvarchar(1500), IsNull('Cust_ID='+CONVERT(nvarchar(4000), NEW.Cust_ID, 0), 'Cust_ID Is Null')), 'City', CONVERT(nvarchar(4000), NEW.City, 0), 'A' , CONVERT(nvarchar(500), CONVERT(nvarchar(4000), NEW.Cust_ID, 0)) FROM inserted NEW WHERE NEW.City Is Not Null"
    sqls.append(sql)
    sql = "SELECT id,(SELECT app_roles.id FROM toasthub_client1.app_roles WHERE role_name='user' AND domain='toasthub-social') as roleid from toasthub_client1.app_users where username='freddy.jones@gmail.com'"
    sqls.append(sql)
    sql = "SELECT l.AD_Language,t.AD_Column_ID, t.Name, 'N',t.AD_Client_ID,t.AD_Org_ID,t.Created,t.Createdby,t.Updated,t.UpdatedBy FROM AD_Language l, AD_Column t WHERE l.IsActive='Y' AND l.IsSystemLanguage='Y' AND l.IsBaseLanguage='N' AND t.AD_Column_ID=1120193 AND NOT EXISTS (SELECT * FROM AD_Column_Trl tt WHERE tt.AD_Language=l.AD_Language AND tt.AD_Column_ID=t.AD_Column_ID)"
    sqls.append(sql)
    sql = "SELECT patients_tested.gender AS \"Gender\", patients_tested.patients_count AS \"TB Patients Tested for HIV\"\nFROM\n(SELECT person_gender.gender, COUNT(DISTINCT person.person_id) AS patients_count\nFROM visit\nINNER JOIN person ON visit.patient_id = person.person_id\nAND DATE(visit.date_started) BETWEEN @start_date AND @end_date\nINNER JOIN encounter ON visit.visit_id = encounter.visit_id\nINNER JOIN coded_obs_view ON coded_obs_view.person_id = person.person_id\nAND coded_obs_view.concept_full_name = 'Coded Diagnosis'\nAND coded_obs_view.value_concept_full_name IN ('Tuberculosis','Multi Drug Resistant Tuberculosis', 'Extremely Drug Resistant Tuberculosis')\nAND coded_obs_view.obs_datetime BETWEEN @start_date AND @end_date\nINNER JOIN coded_obs_view AS certainty_obs ON coded_obs_view.obs_group_id = certainty_obs.obs_group_id\nAND certainty_obs.concept_full_name = 'Diagnosis Certainty'\nAND certainty_obs.value_concept_full_name = 'Confirmed'\nINNER JOIN orders ON orders.patient_id = person.person_id\nAND orders.order_type_id = 3\nAND orders.order_action IN ('NEW', 'REVISED')\nAND orders.date_created BETWEEN @start_date AND @end_date\nINNER JOIN concept_view ON orders.concept_id = concept_view.concept_id\nAND concept_view.concept_full_name IN ('HIV (Blood)', 'HIV (Serum)')\nRIGHT OUTER JOIN (SELECT DISTINCT gender FROM person WHERE gender != '' ) AS person_gender ON person_gender.gender = person.gender\nGROUP BY person_gender.gender) AS patients_tested"
    sqls.append(sql)
    sql = "SELECT\nd.id id,\nCONCAT(se.name, '_', sr.name, '_', g.name, '_', r.name) name,\nd.description description,\nse.name satellite,\nsr.name sensor,\ng.name geometric_processing,\nr.name radiometric_processing\nFROM\n_dataset d,\nsatellite se,\nsensor sr,\ngeometric_processing g,\nradiometric_processing r\nWHERE\nd.satellite_id = se.id\nAND d.sensor_id = sr.id\nAND d.geometric_processing_id = g.id\nAND d.radiometric_processing_id = r.id\nORDER BY d.id"
    sqls.append(sql)
    sql = "SELECT title, AVG(stars) AS average\nFROM Movie\nINNER JOIN Rating USING(mId)\nGROUP BY mId\nHAVING average = (\nSELECT MAX(average_stars)\nFROM (\nSELECT title, AVG(stars) AS average_stars\nFROM Movie\nINNER JOIN Rating USING(mId)\nGROUP BY mId\n)\n)"
    sqls.append(sql)
    sql = 'SELECT (abs(case when 11 not between t1.d and case coalesce((select case c+case when b in (case when a<=t1.b then (t1.a) when f not in (d,t1.f, -(a)) then (t1.a) else c end,t1.a,b) then 11 when (t1.c not between 17 and -d) then t1.d else e end+e*t1.a when 19 then b else t1.e end from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where -(19)=d))),t1.d) when c then a else -17 end or a>=19 then 17 else b end+ -13)/abs(b)) FROM t1 WHERE not exists(select 1 from t1 where not e+case 13 when 11 then +t1.d else t1.f+11 end*a-19+t1.c+a in (~e | coalesce((select ~a*+(abs( -case case b*e when f then e else -13 end when t1.d then t1.b else t1.b end)/abs(17))+t1.b from t1 where t1.a=e),d),e,e))'
    sqls.append(sql)
    for sql in sqls:
        # Collapse all whitespace runs (incl. embedded newlines) to single spaces.
        sql = " ".join(sql.split())
        output = process({"sql": sql})
        if output is not None:
            print(json.dumps(output, indent=2))
        else:
            # Parsing failed / unsupported statement: echo the raw SQL instead.
            print(sql)
        print("----")
| [
6738,
823,
519,
291,
13,
5239,
15813,
13,
43616,
29572,
13,
25410,
13,
66,
49263,
62,
25410,
13,
25410,
62,
2978,
525,
1330,
651,
62,
22766,
62,
83,
2977,
11,
651,
62,
22766,
62,
28665,
82,
11,
651,
62,
22766,
62,
83,
482,
641,
... | 2.62715 | 2,733 |
import os
# from extune.settings import EXPERIMENTS_DIR, MODULE_DIR
from extune.sacred.utils import config_logger
# from extune.model import model_fn, train_fn, input_fn
from extune.sacred.ingredients import data_ingredient
from network.train_network import train_network
# from network.config import Config
from tensorflow.python.keras.callbacks import Callback
from sacred import Experiment
# from run_hyperband import cfg
# class trainingConfig(Config):
# # GPUs and IMAGES_PER_GPU needs to be configured here!
# GPUs = ''
# IMAGES_PER_GPU = 1
#
#
# cfg = trainingConfig()
# Sacred experiment for the kidney-microscopy run; interactive=True allows use
# from a REPL/notebook, and data_ingredient contributes the data configuration.
ex = Experiment('kidney_microscopy', interactive=True, ingredients=[data_ingredient])
# Configurable parameters would come from the JSON config file:
# ex.add_config(os.path.join(MODULE_DIR, 'model', 'config.json'))
@ex.config
@ex.capture
def metrics(_run, _config, logs):
    '''
    Push one epoch's keras metrics into sacred's scalar log: the train/val
    loss plus the train/val values of the configured monitored metric.
    NOTE(review): @ex.config stacked directly on a captured function looks
    like a leftover decorator -- confirm against the original module.

    Arguments:
        _run (sacred _run): the current run
        _config (sacred _config): the current configuration
        logs (keras.logs): the logs from a keras model
    Returns: None
    '''
    # Name of the monitored metric (looked up through the config tables).
    monitored = _config['METRIC'][_config['MONITORED_METRIC']]
    for name in ('loss', 'val_loss', monitored, 'val_' + monitored):
        _run.log_scalar(name, float(logs.get(name)))
class LogMetrics(Callback):
    '''
    Keras Callback that bridges keras's training logs into sacred: per the
    original note it wraps the captured `metrics` function so each epoch's
    logs are recorded on the sacred run.  (The callback hook methods are not
    visible in this chunk -- confirm which epoch/batch hook invokes metrics.)
    '''
@ex.automain
def main(_log, _run, _config, reporter, cfg):
    '''
    Train the network for one sacred run and return the final monitored metric.

    Notes:
        variables starting with _ are automatically passed via sacred due to
        the wrapper.
        Large artifacts (weights, the model itself) should be attached with
        'add_artifact' rather than returned, since the return value is stored
        by the Observer (file or mongo) and would be inefficient there.
    Arguments:
        _log (sacred _log): the current logger
        _run (sacred _run): the current run
        _config (sacred _config): the current configuration file
        reporter (keras callback function): the callback function to report to ray tune
        cfg: mutable training-config object whose attribute names mirror the
            config keys one-for-one
    Returns:
        result (float): last-epoch value of the monitored metric (accuracy if
        classification, otherwise mean_squared_error)
    '''
    # Copy every hyperparameter from the sacred config onto the config object.
    # Attribute names equal the config keys, so a single loop replaces the
    # previous 24 copy-pasted assignments.
    for key in (
        'MODEL_PATH', 'SEGMENTATION_TASK', 'NUM_OUTPUT_CH', 'UNET_FILTERS',
        'UNET_LAYERS', 'DROPOUT_ENC_DEC', 'DROPOUT_BOTTOM', 'UNET_SKIP',
        'DROPOUT_SKIP', 'BATCH_NORM', 'LOSS', 'METRIC', 'MONITORED_METRIC',
        'LEARNING_RATE', 'WEIGHTING', 'OVERSAMPLING', 'OPTIMIZER', 'LR_DECAY',
        'TRAINING_DATA_PERCENTAGE', 'IMG_GENERATOR_SEED', 'SAVE_WEIGHTS',
        'EPOCHS', 'PRETRAINED', 'PRETRAINED_PATH',
    ):
        setattr(cfg, key, _config[key])
    # the subdirectory for this particular experiment
    run_dir = os.path.join(cfg.MODEL_PATH, str(_run._id))
    # inform the logger to dump to run_dir
    config_logger(run_dir)
    hist = train_network(cfg, _config['data_path'], LogMetrics(), reporter)
    # The run's result is the last epoch's value of the monitored metric.
    result = hist.history[_config['METRIC'][_config['MONITORED_METRIC']]][-1]
    return result
| [
11748,
28686,
198,
198,
2,
422,
1070,
1726,
13,
33692,
1330,
7788,
18973,
3955,
15365,
62,
34720,
11,
33893,
62,
34720,
198,
6738,
1070,
1726,
13,
30584,
445,
13,
26791,
1330,
4566,
62,
6404,
1362,
198,
198,
2,
422,
1070,
1726,
13,
... | 2.508653 | 1,618 |
#!/usr/bin/env python
# coding:utf-8
"""
Name : check_db_connection.py
Author : Dmitry Kruchinin
Date : 7/1/2021
Desc:
"""
from fixture.orm import ORMFixture
from model.group import Group
# Smoke-check the ORM layer: fetch groups and contacts and dump them to stdout.
db = ORMFixture(host="localhost", database="addressbook", user="root", password="")

try:
    # Each section: (header, rows).  Queries run in the same order as before.
    sections = [
        ("####### Groups", db.get_groups_list()),
        ("####### Contacts", db.get_contacts_list()),
        ("####### Contacts in group", db.get_contacts_in_group(Group(id="248"))),
        ("####### Contacts NOT in group", db.get_contacts_not_in_group(Group(id="248"))),
    ]
    for title, items in sections:
        print(title)
        for item in items:
            print(item)
        print(len(items))
finally:
    pass  # db.destroy()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
40477,
12,
23,
198,
37811,
198,
5376,
220,
220,
220,
1058,
2198,
62,
9945,
62,
38659,
13,
9078,
198,
13838,
220,
1058,
45181,
33909,
24658,
259,
198,
10430,
220,
220,
2... | 2.530952 | 420 |
# coding: utf-8
import io
import magic
from typing import List
from deprecated import deprecated
import requests
import urllib3
import datetime
import dateutil.parser
import json
import logging
from pycti.api.opencti_api_connector import OpenCTIApiConnector
from pycti.api.opencti_api_job import OpenCTIApiJob
from pycti.utils.constants import ObservableTypes
from pycti.utils.opencti_stix2 import OpenCTIStix2
from pycti.entities.opencti_tag import Tag
from pycti.entities.opencti_marking_definition import MarkingDefinition
from pycti.entities.opencti_external_reference import ExternalReference
from pycti.entities.opencti_kill_chain_phase import KillChainPhase
from pycti.entities.opencti_stix_entity import StixEntity
from pycti.entities.opencti_stix_domain_entity import StixDomainEntity
from pycti.entities.opencti_stix_observable import StixObservable
from pycti.entities.opencti_stix_relation import StixRelation
from pycti.entities.opencti_stix_observable_relation import StixObservableRelation
from pycti.entities.opencti_identity import Identity
from pycti.entities.opencti_threat_actor import ThreatActor
from pycti.entities.opencti_intrusion_set import IntrusionSet
from pycti.entities.opencti_campaign import Campaign
from pycti.entities.opencti_incident import Incident
from pycti.entities.opencti_malware import Malware
from pycti.entities.opencti_tool import Tool
from pycti.entities.opencti_vulnerability import Vulnerability
from pycti.entities.opencti_attack_pattern import AttackPattern
from pycti.entities.opencti_course_of_action import CourseOfAction
from pycti.entities.opencti_report import Report
from pycti.entities.opencti_indicator import Indicator
# Silence urllib3's InsecureRequestWarning; presumably the client allows
# unverified TLS connections to OpenCTI -- confirm against the request calls.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class OpenCTIApiClient:
"""
Python API for OpenCTI
:param url: OpenCTI URL
:param token: The API key
"""
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
# TODO Move to StixObservable
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the StixDomainEntity class in pycti"
)
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixRelation class in pycti")
@deprecated(
version="2.1.0", reason="Replaced by the MarkingDefinition class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the MarkingDefinition class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the MarkingDefinition class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the MarkingDefinition class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the MarkingDefinition class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the ExternalReference class in pycti"
)
# TODO Move to ExternalReference
@deprecated(
version="2.1.0", reason="Replaced by the ExternalReference class in pycti"
)
@deprecated(
version="2.1.0", reason="Replaced by the ExternalReference class in pycti"
)
@deprecated(version="2.1.0", reason="Replaced by the KillChainPhase class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the KillChainPhase class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the KillChainPhase class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Identity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Identity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Identity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Identity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the ThreatActor class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the ThreatActor class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Threat-Actor class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Threat-Actor class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the IntrusionSet class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the IntrusionSet class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the IntrusionSet class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the IntrusionSet class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Campaign class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Campaign class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Campaign class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Campaign class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Incident class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Incident class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Incident class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Incident class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Malware class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Malware class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Malware class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Malware class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Tool class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Tool class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Tool class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Tool class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Vulnerability class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Vulnerability class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Vulnerability class in pycti")
# TODO Move to Vulnerability
@deprecated(version="2.1.0", reason="Replaced by the AttackPattern class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the AttackPattern class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the AttackPattern class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the AttackPattern class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the CourseOfAction class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the CourseOfAction class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the CourseOfAction class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the CourseOfAction class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixObservable class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixObservable class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixObservable class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixObservable class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixObservable class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixEntity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixEntity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixEntity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixEntity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the StixEntity class in pycti")
@deprecated(version="2.1.0", reason="Replaced by the Report class in pycti")
@deprecated(
version="2.1.0",
reason="Replaced by the same method in class OpenCTIStix2 in pycti",
)
@deprecated(
version="2.1.0",
reason="Replaced by the same method in class OpenCTIStix2 in pycti",
)
@deprecated(
version="2.1.0",
reason="Replaced by the same method in class OpenCTIStix2 in pycti",
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
33245,
198,
11748,
5536,
198,
6738,
19720,
1330,
7343,
198,
6738,
39224,
1330,
39224,
198,
198,
11748,
7007,
198,
11748,
2956,
297,
571,
18,
198,
11748,
4818,
8079,
198,
11748,
3128,
22... | 2.731197 | 3,776 |
from __future__ import print_function, unicode_literals
import uuid
from itertools import chain
from numbers import Number
from antelope import CatalogRef, BaseEntity, PropertyExists
from synonym_dict import LowerDict
# Entity type tags recognized across the LC model.
entity_types = ('process', 'flow', 'quantity', 'fragment')
# Maps each entity type to the kind of object used as its reference
# (e.g. a flow's reference is a quantity, a quantity's reference is a unit).
entity_refs = {
    'process': 'exchange',
    'flow': 'quantity',
    'quantity': 'unit',
    'fragment': 'fragment'
}
class LcEntity(BaseEntity):
"""
All LC entities behave like dicts, but they all have some common properties, defined here.
"""
_pre_fields = ['Name']
_new_fields = []
_ref_field = ''
_post_fields = ['Comment']
_origin = None
@property
@property
@property
@property
    def is_entity(self):
        """
        Distinguish true entities from catalog refs: CatalogRef answers False,
        every LcEntity subclass answers True.
        :return: True
        """
        return True
def map_origin(self, omap, fallback=None):
"""
This is used to propagate a change in origin semantics. Provide a dict that maps old origins to new origins.
External ref should remain the same with respect to the new origin.
:param omap: dict mapping old origin to new origin
:param fallback: if present, use in cases where old origin not found
:return:
"""
if self._origin in omap:
self._origin = omap[self._origin]
elif fallback is not None:
self._origin = fallback
@origin.setter
@property
@property
@property
@uuid.setter
    def _set_reference(self, ref_entity):
        """
        Set the entity's reference value after validating it.  Subclasses may
        override this to customize reference handling.
        :param ref_entity: candidate reference entity; checked by
            _validate_reference (defined outside this view -- presumably it
            raises on an invalid reference; confirm)
        :return: None
        """
        self._validate_reference(ref_entity)
        self._reference_entity = ref_entity
def get_properties(self):
"""
dict of properties and values for a given entity
:return:
"""
d = dict()
for i in self.properties():
d[i] = self._d[i]
return d
@property
    def __hash__(self):
        """
        Hash on self.link, which is stable because the external ref is set by
        the end of __init__ and is immutable (except for fragments, which per
        the original note hash on uuid instead).  The link property itself is
        not visible in this chunk -- presumably it combines origin and
        external ref; confirm.
        :return: hash of self.link
        :raises AttributeError: if no origin was ever assigned
        """
        # A link-based hash is meaningless without a concrete origin.
        if self._origin is None:
            raise AttributeError('Origin not set!')
        return hash(self.link)
def __eq__(self, other):
"""
two entities are equal if their types, origins, and external references are the same.
internal refs do not need to be equal; reference entities do not need to be equal
:return:
"""
if other is None:
return False
# if not isinstance(other, LcEntity): # taking this out so that CatalogRefs and entities can be compared
# return False
try:
is_eq = (self.external_ref == other.external_ref
and self.origin == other.origin
and self.entity_type == other.entity_type)
except AttributeError:
is_eq = False
return is_eq
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
334,
27112,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
6738,
3146,
1330,
7913,
198,
6738,
1885,
47329,
1330,
44515,
8134,
11,
7308,
32398... | 2.448576 | 1,264 |
import time
import datetime
import copy
import json
from collections import defaultdict
import numpy as np
from pycocotools.cocoeval import COCOeval
class myCOCOeval(COCOeval):
'''
Make COCOeval more flexible
'''
    def _prepare(self):
        '''
        Prepare ._gts and ._dts for evaluation based on params.
        Unlike stock COCOeval, ground truths come from the raw self.gt_json
        dict and detections from the self.dt_json list instead of COCO API
        objects (both set elsewhere -- confirm where they are assigned).
        :return: None
        '''
        p = self.params
        if p.useCats:
            # Sets give O(1) membership tests for the id filters below.
            imgids = set(p.imgIds)
            catids = set(p.catIds)
            gts = [gt for gt in self.gt_json['annotations'] if \
                 (gt['image_id'] in imgids and gt['category_id'] in catids)]
            dts = [dt for dt in self.dt_json if \
                 (dt['image_id'] in imgids and dt['category_id'] in catids)]
        else:
            # Category-agnostic evaluation is not supported by this subclass.
            raise NotImplementedError()
        # gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
        # dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
        # convert ground truth to mask if iouType == 'segm'
        if p.iouType == 'segm':
            # Segmentation IoU would require mask conversion; not supported here.
            raise NotImplementedError()
            # _toMask(gts, self.cocoGt)
            # _toMask(dts, self.cocoDt)
        # set ignore flag
        for gt in gts:
            gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
            # NOTE(review): this unconditionally overwrites the line above,
            # same as upstream pycocotools -- confirm that is intended.
            gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
            if p.iouType == 'keypoints':
                gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
        self._gts = defaultdict(list)       # gt for evaluation, keyed (image_id, category_id)
        self._dts = defaultdict(list)       # dt for evaluation, keyed (image_id, category_id)
        for gt in gts:
            self._gts[gt['image_id'], gt['category_id']].append(gt)
        for dt in dts:
            self._dts[dt['image_id'], dt['category_id']].append(dt)
        self.evalImgs = defaultdict(list)   # per-image per-category evaluation results
        self.eval = {}                  # accumulated evaluation results
    def evaluate(self):
        '''
        Run per image evaluation on given images and store results (a list of
        dict) in self.evalImgs.  This subclass's "flexibility" over stock
        COCOeval is the self.verbose flag gating all console output
        (self.verbose is set elsewhere -- confirm its default).
        :return: None
        '''
        tic = time.time()
        if self.verbose:
            print('Running per image evaluation...')
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if not p.useSegm is None:
            p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
            if self.verbose:
                print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
        if self.verbose:
            print('Evaluate annotation type *{}*'.format(p.iouType))
        # De-duplicate (and sort) the requested image/category ids.
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params=p
        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]
        # Pick the IoU routine for the annotation type (keypoints use OKS).
        if p.iouType == 'segm' or p.iouType == 'bbox':
            computeIoU = self.computeIoU
        elif p.iouType == 'keypoints':
            computeIoU = self.computeOks
        # Cache per-(image, category) IoU matrices for evaluateImg below.
        self.ious = {(imgId, catId): computeIoU(imgId, catId) \
                        for imgId in p.imgIds
                        for catId in catIds}
        evaluateImg = self.evaluateImg
        maxDet = p.maxDets[-1]
        self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
                 for catId in catIds
                 for areaRng in p.areaRng
                 for imgId in p.imgIds
             ]
        # Snapshot the params actually used, for accumulate() to index against.
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        if self.verbose:
            print('DONE (t={:0.2f}s).'.format(toc-tic))
def accumulate(self, p = None):
    '''
    Accumulate per image evaluation results and store the result in self.eval
    :param p: input params for evaluation (defaults to self.params)
    :return: None
    '''
    if self.verbose:
        print('Accumulating evaluation results...')
    tic = time.time()
    if not self.evalImgs and self.verbose:
        print('Please run evaluate() first')
    # allows input customized parameters
    if p is None:
        p = self.params
    p.catIds = p.catIds if p.useCats == 1 else [-1]
    T = len(p.iouThrs)
    R = len(p.recThrs)
    K = len(p.catIds) if p.useCats else 1
    A = len(p.areaRng)
    M = len(p.maxDets)
    precision = -np.ones((T,R,K,A,M))  # -1 for the precision of absent categories
    recall = -np.ones((T,K,A,M))
    scores = -np.ones((T,R,K,A,M))

    # create dictionary for future indexing
    _pe = self._paramsEval
    catIds = _pe.catIds if _pe.useCats else [-1]
    setK = set(catIds)
    setA = set(map(tuple, _pe.areaRng))
    setM = set(_pe.maxDets)
    setI = set(_pe.imgIds)
    # get inds to evaluate; evalImgs was laid out by evaluate() using
    # _paramsEval ordering, so indices are computed against that layout
    k_list = [n for n, k in enumerate(p.catIds) if k in setK]
    m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
    a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
    i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
    I0 = len(_pe.imgIds)
    A0 = len(_pe.areaRng)
    # retrieve E at each category, area range, and max number of detections
    for k, k0 in enumerate(k_list):
        Nk = k0*A0*I0
        for a, a0 in enumerate(a_list):
            Na = a0*I0
            for m, maxDet in enumerate(m_list):
                E = [self.evalImgs[Nk + Na + i] for i in i_list]
                E = [e for e in E if e is not None]
                if len(E) == 0:
                    continue
                dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
                # different sorting method generates slightly different results.
                # mergesort is used to be consistent as Matlab implementation.
                inds = np.argsort(-dtScores, kind='mergesort')
                dtScoresSorted = dtScores[inds]
                dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
                dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
                gtIg = np.concatenate([e['gtIgnore'] for e in E])
                npig = np.count_nonzero(gtIg==0)
                if npig == 0:
                    continue
                tps = np.logical_and(dtm, np.logical_not(dtIg))
                fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
                # NOTE: the np.float alias was removed in NumPy 1.24;
                # np.float64 is the equivalent explicit dtype.
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
                for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                    tp = np.array(tp)
                    fp = np.array(fp)
                    nd = len(tp)
                    rc = tp / npig
                    pr = tp / (fp+tp+np.spacing(1))
                    q = np.zeros((R,))
                    ss = np.zeros((R,))

                    if nd:
                        recall[t,k,a,m] = rc[-1]
                    else:
                        recall[t,k,a,m] = 0

                    # numpy is slow without cython optimization for accessing elements
                    # use python array gets significant speed improvement
                    pr = pr.tolist(); q = q.tolist()

                    # make precision monotonically non-increasing from the right
                    for i in range(nd-1, 0, -1):
                        if pr[i] > pr[i-1]:
                            pr[i-1] = pr[i]

                    inds = np.searchsorted(rc, p.recThrs, side='left')
                    try:
                        for ri, pi in enumerate(inds):
                            q[ri] = pr[pi]
                            ss[ri] = dtScoresSorted[pi]
                    except IndexError:
                        # recall never reaches some thresholds: searchsorted
                        # returns nd (past the end); the remaining entries of
                        # q/ss deliberately stay 0.  (Was a bare except.)
                        pass
                    precision[t,:,k,a,m] = np.array(q)
                    scores[t,:,k,a,m] = np.array(ss)
    self.eval = {
        'params': p,
        'counts': [T, R, K, A, M],
        'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'precision': precision,
        'recall': recall,
        'scores': scores,
    }
    toc = time.time()
    if self.verbose:
        print('DONE (t={:0.2f}s).'.format(toc-tic))
def summarize(self):
    '''
    Compute and display summary metrics for evaluation results.
    Note this function can *only* be applied on the default parameter setting.

    :raises Exception: if accumulate() has not been run yet
    :raises ValueError: if params.iouType is not a known annotation type
    '''
    self.summary = ''
    if not self.eval:
        raise Exception('Please run accumulate() first')
    iouType = self.params.iouType
    # Pick the summarization routine matching the annotation type.
    if iouType == 'segm' or iouType == 'bbox':
        summarize = _summarizeDets
    elif iouType == 'keypoints':
        summarize = _summarizeKps
    else:
        # Previously an unknown iouType fell through to an opaque
        # UnboundLocalError; fail with an explicit message instead.
        raise ValueError('unknown iouType: {}'.format(iouType))
    self.stats = summarize()
class Params:
'''
Params for coco evaluation api
'''
| [
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
4866,
198,
11748,
33918,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
66,
420,
313,
10141,
13,
66,
25634,
18206,
1330,
327,
4503,
46,
18206,
6... | 1.79715 | 5,122 |
import numpy as np
from numba import njit
import utility
# consav
from consav import linear_interp # for linear interpolation
@njit | [
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
198,
11748,
10361,
198,
198,
2,
762,
615,
198,
6738,
762,
615,
1330,
14174,
62,
3849,
79,
1303,
329,
14174,
39555,
341,
198,
198,
31,
77,
45051
] | 3.35 | 40 |
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
import cinder.volume.drivers.datera.datera_api2 as api2
import cinder.volume.drivers.datera.datera_api21 as api21
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
d_opts = [
cfg.StrOpt('datera_api_port',
default='7717',
help='Datera API port.'),
cfg.StrOpt('datera_api_version',
default='2',
deprecated_for_removal=True,
help='Datera API version.'),
cfg.IntOpt('datera_503_timeout',
default='120',
help='Timeout for HTTP 503 retry messages'),
cfg.IntOpt('datera_503_interval',
default='5',
help='Interval between 503 retries'),
cfg.BoolOpt('datera_debug',
default=False,
help="True to set function arg and return logging"),
cfg.BoolOpt('datera_debug_replica_count_override',
default=False,
help="ONLY FOR DEBUG/TESTING PURPOSES\n"
"True to set replica_count to 1"),
cfg.StrOpt('datera_tenant_id',
default=None,
help="If set to 'Map' --> OpenStack project ID will be mapped "
"implicitly to Datera tenant ID\n"
"If set to 'None' --> Datera tenant ID will not be used "
"during volume provisioning\n"
"If set to anything else --> Datera tenant ID will be the "
"provided value")
]
CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
2.1 - Multipath, ACL and reorg
2.2 - Capabilites List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
2.3 - Templates, Tenants, Snapshot Polling,
2.1 Api Version Support, Restructure
"""
VERSION = '2.3'
CI_WIKI_NAME = "datera-ci"
HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)}
# =================
# =================
# = Create Volume =
# =================
@datc._api_lookup
def create_volume(self, volume):
"""Create a logical volume."""
pass
# =================
# = Extend Volume =
# =================
@datc._api_lookup
# =================
# =================
# = Cloned Volume =
# =================
@datc._api_lookup
# =================
# = Delete Volume =
# =================
@datc._api_lookup
# =================
# = Ensure Export =
# =================
@datc._api_lookup
def ensure_export(self, context, volume, connector):
"""Gets the associated account, retrieves CHAP info and updates."""
# =========================
# = Initialize Connection =
# =========================
@datc._api_lookup
# =================
# = Create Export =
# =================
@datc._api_lookup
# =================
# = Detach Volume =
# =================
@datc._api_lookup
# ===================
# = Create Snapshot =
# ===================
@datc._api_lookup
# ===================
# = Delete Snapshot =
# ===================
@datc._api_lookup
# ========================
# = Volume From Snapshot =
# ========================
@datc._api_lookup
# ==========
# = Manage =
# ==========
@datc._api_lookup
def manage_existing(self, volume, existing_ref):
"""Manage an existing volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
(existing_ref['source-name'] ==
tenant:app_inst_name:storage_inst_name:vol_name)
if using Datera 2.1 API
or
(existing_ref['source-name'] ==
app_inst_name:storage_inst_name:vol_name)
if using 2.0 API
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
pass
# ===================
# = Manage Get Size =
# ===================
@datc._api_lookup
def manage_existing_get_size(self, volume, existing_ref):
"""Get the size of an unmanaged volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
existing_ref == app_inst_name:storage_inst_name:vol_name
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume on the Datera backend
"""
pass
# =========================
# = Get Manageable Volume =
# =========================
@datc._api_lookup
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder.
Returns a list of dictionaries, each specifying a volume in the host,
with the following keys:
- reference (dictionary): The reference for a volume, which can be
passed to "manage_existing".
- size (int): The size of the volume according to the storage
backend, rounded up to the nearest GB.
- safe_to_manage (boolean): Whether or not this volume is safe to
manage according to the storage backend. For example, is the volume
in use or invalid for any reason.
- reason_not_safe (string): If safe_to_manage is False, the reason why.
- cinder_id (string): If already managed, provide the Cinder ID.
- extra_info (string): Any extra information to return to the user
:param cinder_volumes: A list of volumes in this host that Cinder
currently manages, used to determine if
a volume is manageable or not.
:param marker: The last item of the previous page; we return the
next results after this value (after sorting)
:param limit: Maximum number of items to return
:param offset: Number of items to skip after marker
:param sort_keys: List of keys to sort results by (valid keys are
'identifier' and 'size')
:param sort_dirs: List of directions to sort by, corresponding to
sort_keys (valid directions are 'asc' and 'desc')
"""
pass
# ============
# = Unmanage =
# ============
@datc._api_lookup
def unmanage(self, volume):
"""Unmanage a currently managed volume in Cinder
:param volume: Cinder volume to unmanage
"""
pass
# ================
# = Volume Stats =
# ================
@datc._api_lookup
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
pass
# =========
# = Login =
# =========
@datc._api_lookup
# =======
# = QoS =
# =======
# ============================
# = Volume-Types/Extra-Specs =
# ============================
def _init_vendor_properties(self):
    """Create a dictionary of vendor unique properties.
    This method creates a dictionary of vendor unique properties
    and returns both created dictionary and vendor name.
    Returned vendor name is used to check for name of vendor
    unique properties.
    - Vendor name shouldn't include colon(:) because of the separator
    and it is automatically replaced by underscore(_).
    ex. abc:d -> abc_d
    - Vendor prefix is equal to vendor name.
    ex. abcd
    - Vendor unique properties must start with vendor prefix + ':'.
    ex. abcd:maxIOPS
    Each backend driver needs to override this method to expose
    its own properties using _set_property() like this:
    self._set_property(
        properties,
        "vendorPrefix:specific_property",
        "Title of property",
        _("Description of property"),
        "type")
    : return dictionary of vendor unique properties
    : return vendor name
    prefix: DF --> Datera Fabric
    """
    properties = {}

    # Replica count of 1 is for debug/testing only (no redundancy).
    if self.configuration.get('datera_debug_replica_count_override'):
        replica_count = 1
    else:
        replica_count = 3
    self._set_property(
        properties,
        "DF:replica_count",
        "Datera Volume Replica Count",
        _("Specifies number of replicas for each volume. Can only be "
          "increased once volume is created"),
        "integer",
        minimum=1,
        default=replica_count)

    self._set_property(
        properties,
        "DF:acl_allow_all",
        "Datera ACL Allow All",
        _("True to set acl 'allow_all' on volumes created. Cannot be "
          "changed on volume once set"),
        "boolean",
        default=False)

    self._set_property(
        properties,
        "DF:ip_pool",
        "Datera IP Pool",
        _("Specifies IP pool to use for volume"),
        "string",
        default="default")

    self._set_property(
        properties,
        "DF:template",
        "Datera Template",
        _("Specifies Template to use for volume provisioning"),
        "string",
        default="")

    # ###### QoS Settings ###### #
    self._set_property(
        properties,
        "DF:read_bandwidth_max",
        "Datera QoS Max Bandwidth Read",
        _("Max read bandwidth setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)

    # NOTE: the two default_*_name properties below are naming settings, not
    # QoS settings, despite sitting inside this section; they are kept here
    # so the existing property insertion order is unchanged.
    self._set_property(
        properties,
        "DF:default_storage_name",
        "Datera Default Storage Instance Name",
        _("The name to use for storage instances created"),
        "string",
        default="storage-1")

    self._set_property(
        properties,
        "DF:default_volume_name",
        "Datera Default Volume Name",
        _("The name to use for volumes created"),
        "string",
        default="volume-1")

    self._set_property(
        properties,
        "DF:write_bandwidth_max",
        "Datera QoS Max Bandwidth Write",
        _("Max write bandwidth setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)

    self._set_property(
        properties,
        "DF:total_bandwidth_max",
        "Datera QoS Max Bandwidth Total",
        _("Max total bandwidth setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)

    self._set_property(
        properties,
        "DF:read_iops_max",
        # Consistency fix: was "Max iops Read" while the write/total
        # counterparts below use "Max IOPS ...".
        "Datera QoS Max IOPS Read",
        _("Max read iops setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)

    self._set_property(
        properties,
        "DF:write_iops_max",
        "Datera QoS Max IOPS Write",
        _("Max write iops setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)

    self._set_property(
        properties,
        "DF:total_iops_max",
        "Datera QoS Max IOPS Total",
        _("Max total iops setting for volume qos, "
          "use 0 for unlimited"),
        "integer",
        minimum=0,
        default=0)
    # ###### End QoS Settings ###### #

    return properties, 'DF'
def _get_policies_for_resource(self, resource):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
volume_type = self._get_volume_type_obj(resource)
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs')
else:
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in self._init_vendor_properties()[0].items()}
if volume_type:
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
policies.update(qos_kvs)
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
# ================
# = API Requests =
# ================
@datc._authenticated
def _issue_api_request(self, resource_url, method='get', body=None,
                       sensitive=False, conflict_ok=False,
                       api_version='2', tenant=None):
    """All API requests to Datera cluster go through this method.
    :param resource_url: the url of the resource
    :param method: the request verb
    :param body: a dict with options for the action_type
    :param sensitive: Bool, whether request should be obscured from logs
        (not read in this body -- presumably consumed by the decorator or
        request/trace logging; confirm)
    :param conflict_ok: Bool, True to suppress ConflictError exceptions
        during this request
    :param api_version: The Datera api version for the request
    :param tenant: The tenant header value for the request (only applicable
        to 2.1 product versions and later)
    :returns: a dict of the response from the Datera cluster
    """
    host = self.configuration.san_ip
    port = self.configuration.datera_api_port
    api_token = self.datera_api_token
    payload = json.dumps(body, ensure_ascii=False)
    # NOTE(review): the return value of encode() is discarded, so this line
    # is a no-op and ``payload`` is still a ``str`` when handed to
    # self._request().  Presumably the intent was
    # ``payload = payload.encode('utf-8')`` -- confirm before changing.
    payload.encode('utf-8')
    header = {'Content-Type': 'application/json; charset=utf-8'}
    header.update(self.HEADER_DATA)
    protocol = 'http'
    if self.configuration.driver_use_ssl:
        protocol = 'https'
    if api_token:
        header['Auth-Token'] = api_token
    # Tenant header precedence: an explicit ``tenant`` argument wins over the
    # driver-level ``self.tenant_id``; bare tenant names are rooted under
    # "/root/", already-rooted names and the special "all" pass through.
    if tenant == "all":
        header['tenant'] = tenant
    elif tenant and '/root' not in tenant:
        header['tenant'] = "".join(("/root/", tenant))
    elif tenant and '/root' in tenant:
        header['tenant'] = tenant
    elif self.tenant_id and self.tenant_id.lower() != "map":
        header['tenant'] = self.tenant_id
    client_cert = self.configuration.driver_client_cert
    client_cert_key = self.configuration.driver_client_cert_key
    cert_data = None
    if client_cert:
        # A client certificate forces TLS regardless of driver_use_ssl.
        protocol = 'https'
        cert_data = (client_cert, client_cert_key)
    connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
                                               api_version, resource_url)
    response = self._request(connection_string,
                             method,
                             payload,
                             header,
                             cert_data)
    # NOTE(review): the body is parsed before the status check, so a
    # non-JSON error body would raise here; _handle_bad_status() may also
    # raise, in which case the parsed ``data`` is never returned.
    data = response.json()
    if not response.ok:
        self._handle_bad_status(response,
                                connection_string,
                                method,
                                payload,
                                header,
                                cert_data,
                                conflict_ok=conflict_ok)
    return data
| [
2,
15069,
1584,
360,
729,
64,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
239... | 2.217428 | 8,389 |
# main.py
# Demonstrates that ``import`` consults ``sys.modules`` first: importing
# module1 is expected to register an entry named "test" there as a side
# effect (TODO: confirm in module1 -- not visible from this file).
import module1

# even though test was added to sys.modules
# in module1, we can still access it from here: Python finds the existing
# ``sys.modules["test"]`` entry before ever searching the filesystem.
import test

# NOTE(review): calling ``test()`` implies the registered object is callable
# (a function, not a real module object); verify against module1.
print(test())

# don't do this! It's a bad hack to illustrate how import looks
# in sys.modules for the symbol we are importing
| [
2,
1388,
13,
9078,
198,
11748,
8265,
16,
198,
198,
2,
772,
996,
1332,
373,
2087,
284,
25064,
13,
18170,
198,
2,
287,
8265,
16,
11,
356,
460,
991,
1895,
340,
422,
994,
198,
11748,
1332,
198,
198,
4798,
7,
9288,
28955,
198,
198,
2... | 3.513514 | 74 |
from __future__ import absolute_import, print_function, unicode_literals
import contextlib
import six
from django import template
from django.template.base import token_kwargs
from django.utils.module_loading import import_string
import django_ftl
register = template.Library()
MODE_SERVER = 'server'
MODES = [
MODE_SERVER
]
MODE_VAR_NAME = '__ftl_mode'
BUNDLE_VAR_NAME = '__ftl_bundle'
@register.simple_tag(takes_context=True)
@register.simple_tag(takes_context=True)
@register.tag('withftl')
@contextlib.contextmanager
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4732,
8019,
198,
198,
11748,
2237,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
28243,
13,
8692,... | 2.837696 | 191 |
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import scrolledtext
from datetime import datetime
import logging
import queue
import time
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
11748,
256,
74,
3849,
13,
926,
74,
355,
256,
30488,
198,
198,
6738,
256,
74,
3849,
1330,
629,
8375,
5239,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
18931,
198,
11748,
16834,
1... | 3.270833 | 48 |
# Demo of common str methods.  The value deliberately carries leading and
# trailing whitespace so lstrip()/rstrip() below have something to strip.
course = ' Python for Beginners '
print(len(course)) # General-purpose built-in; counts every character, including padding spaces
print(course.upper())  # new upper-cased copy; strings are immutable
print(course)          # the original is unchanged by the calls above
print(course.lower())
print(course.title())  # capitalizes the first letter of each word
print(course.lstrip()) # removes leading whitespace only
print(course.rstrip()) # removes trailing whitespace only
# Returns the index of the first occurrence of the character (or substring);
# matching is case-sensitive and -1 means "not found".
print(course.find('P'))
print(course.find('B'))
print(course.find('o'))
print(course.find('O'))  # -1: the string contains no upper-case 'O'
print(course.find('Beginners'))
# str.replace also returns a new string; the original stays intact.
print(course.replace("Beginners", "Absolute Beginners"))
print(course.replace("P", "C"))
# Check existence of a character or a sequence of characters (substring);
# the ``in`` operator is case-sensitive too.
print("Python" in course) # True
print("python" in course) # False: only capitalized "Python" appears
print("python" not in course) # True
| [
17319,
796,
705,
220,
11361,
329,
16623,
2741,
220,
220,
220,
705,
198,
4798,
7,
11925,
7,
17319,
4008,
220,
1303,
5215,
567,
282,
583,
79,
516,
2163,
198,
4798,
7,
17319,
13,
45828,
28955,
198,
4798,
7,
17319,
8,
198,
4798,
7,
17... | 3.093023 | 215 |
import pickle
import torch
import numpy as np
import shutil
import os
import torch
import cv2
from torchvision.ops.boxes import box_iou
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import json
'''
This script can be used to convert dataset to the following form
- a folder containing scene images (labeled as 0.png, 1.png and so on)
- a file instances.json containing COCO-format annotations (structure below)
'''
## Config ##
dataset_dir = '../input/robotmanipulation/complete_dataset'
mode = 'val' # dataset_dir/mode+'_images' will be looked up
scene_no = 0 # 0==input scene, 1==final_scene
get_masks = False # whether to get mask in annotations
###############
'''
Annotations needed:
instances_json
{
'images': [{
'id':,
'file_name':,
'width':,
'height':
}],
'categories': [{
'id':,
'name':
}],
'annotations': [{
'image_id':,
'bbox':,
'category_id':,
'mask':
}]
}
'''
images_dir = mode + '_images'
if os.path.isdir(images_dir):
shutil.rmtree(images_dir)
os.mkdir(images_dir)
instances_file = 'instances_' + mode +'.json'
data = load_pickle(os.path.join(dataset_dir, mode+'.pkl'))
images = []
categories = []
annotations = []
colors = ['blue','green','red','cyan','yellow','magenta','white']
types = ['small', 'lego']
category_id = 0
for typ in types:
for color in colors:
categories.append({
'id': category_id,
'name': typ + '_' + color
})
category_id += 1
for i, scene in enumerate(sorted(os.listdir(os.path.join(dataset_dir, mode)))):
if i%20==0:
print(i, scene)
image_path = os.path.join(dataset_dir, mode, scene, 'S0'+str(scene_no), 'rgba.png')
h,w,_ = cv2.imread(image_path).shape
shutil.copyfile(image_path, os.path.join(images_dir, f'{i}.png'))
images.append({
'id':i,
'file_name':f'{i}.png',
'width':w,
'height':h
})
if get_masks:
mask_path = os.path.join(dataset_dir, mode, scene, 'S0'+str(scene_no), 'mask.png')
mask = cv2.imread(mask_path)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
obj_ids = np.unique(mask)
obj_masks = []
for o_id in obj_ids:
if o_id==0:
continue
unit_mask = np.where(mask == o_id, 1, 0).astype(np.int8)
py, px = np.where(mask == o_id)
bbox = [np.min(px), np.min(py), np.max(px), np.max(py)]
unit_mask = unit_mask[bbox[1]:bbox[3], bbox[0]:bbox[2]]
bbox = torch.Tensor([bbox])
obj_masks.append((bbox, unit_mask))
masks_chosen = set()
for j in range(6):
bbox = data['objects'][i][scene_no][j][:4].tolist()
x1,y1,x2,y2 = h * bbox[0], w * bbox[1], h * bbox[2], w * bbox[3]
bbox = [y1,x1,y2-y1,x2-x1]
category = data['object_color'][i][j]
if data['object_type'][i][j] != 'small':
category = category + 7
if get_masks:
max_iou = 0
best_index = 0
for k, (bb, _) in enumerate(obj_masks):
iou = box_iou(bb, torch.Tensor([[y1,x1,y2,x2]]))
if iou > max_iou:
best_index = k
max_iou = iou
# sanity check
assert best_index not in masks_chosen
masks_chosen.add(best_index)
annotations.append({
'image_id': i,
'bbox': bbox,
'category_id': category,
'mask': obj_masks[best_index][1].tolist()
})
else:
annotations.append({
'image_id': i,
'bbox': bbox,
'category_id': category
})
instances = {
'images': images,
'categories': categories,
'annotations': annotations
}
import json
with open(instances_file, "w") as f:
json.dump(instances, f)
# mask = cv2.imread('../input/robotmanipulation/complete_dataset/val/0007/S00/mask.png')
# mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
# obj_ids = np.unique(mask)
# py, px = np.where(mask == 3)
# bbox = torch.Tensor([np.min(px), np.min(py), np.max(px), np.max(py)])
# bbox = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]
# img = cv2.imread('../input/robotmanipulation/complete_dataset/val/0007/S00/rgba.png')
# # h,w = img.shape[:2]
# # bbox = data['objects'][0][0][0][:4].tolist()
# # x1,y1,x2,y2 = h * bbox[0], w * bbox[1], h * bbox[2], w * bbox[3]
# # bbox = [y1,x1,y2-y1,x2-x1]
# print(bbox)
# show_template(img, bbox) | [
11748,
2298,
293,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4423,
346,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
269,
85,
17,
198,
6738,
28034,
10178,
13,
2840,
13,
29305,
1330,
3091,
62,
72,
280,
198,
... | 1.923203 | 2,448 |
import datetime | [
11748,
4818,
8079
] | 5 | 3 |
"""
Author: Hamza
Dated: 20.04.2019
Project: texttomap
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
from util.word_encoding import getklass
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
reload = False
if reload:
enc_dict, klasses, wordarray = getklass()
with open("util/dl_logs/pytorch_data_00", 'wb') as f:
pickle.dump([enc_dict, klasses, wordarray], f)
else:
with open("Dataset_processing/split/pytorch_data_00", 'rb') as f:
[enc_dict, klasses, wordarray] = pickle.load(f)
klasses2 = [0]
index = 0
for i in range(1, len(klasses)):
if not klasses[i - 1] == klasses[i]:
index += 1
klasses2.append(index)
print("\n\nData Loaded\n\n")
x = [enc_dict[word] for word in enc_dict.keys()]
training_set = Dataset(x, klasses2)
no_classes = max(klasses2) + 1
# Network.load_state_dict(torch.load("util/dl_logs/03testingdict.pt",map_location=device))
Network = torch.load("util/dl_logs/04testingcomplete.pt", map_location=device)
with open("util/dl_logs/04log.pickle", "rb") as F:
[train_loss, train_accuracy] = pickle.load(F)
plt.figure(1)
plt.plot(train_loss)
plt.figure(2)
plt.plot(np.divide(train_accuracy, len(klasses2)))
# activation = {}
# def get_activation(name):
# def hook(model, input, output):
# activation[name] = output.detach()
# return hook
# Network.fc2.register_forward_hook(get_activation('fc2'))
# print(activation['fc2'].size())
| [
37811,
198,
13838,
25,
4345,
4496,
198,
35,
515,
25,
1160,
13,
3023,
13,
23344,
198,
16775,
25,
2420,
39532,
499,
198,
198,
37811,
198,
198,
11748,
2298,
293,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
1... | 2.450617 | 648 |
""" Integration Test File for service_framework.connection_utils.py """
import logging
import pytest
from service_framework.utils import connection_utils
from service_framework.connections.out.requester import Requester
LOG = logging.getLogger(__name__)
def test_connection_utils__base_connection__valid_validate_addresses_case():
"""
Make sure the addresses provided to a connection are validated in the base
connector.
"""
BasicAddressArgsTestingConnection(
{'connection_type': 'It honestly does not matter'},
BASIC_ADDRESSES
)
def test_connection_utils__base_connection__invalid_validate_addresses_case():
"""
Make sure the addresses provided to a connection are validated in the base
connector.
"""
not_all_required_addresses = {**BASIC_ADDRESSES}
del not_all_required_addresses['required_connection_1']
with pytest.raises(ValueError):
BasicAddressArgsTestingConnection({}, not_all_required_addresses)
def test_connection_utils__base_connection__valid_validate_creation_args_case():
"""
Make sure the creation arguments provided to a connection are validated in
the base connector.
"""
BasicCreationArgsTestingConnection(BASIC_MODEL, {})
def test_connection_utils__base_connection__invalid_validate_creation_args_case():
"""
Make sure the creation arguments provided to a connection are validated in
the base connector.
"""
no_required_args = {**BASIC_MODEL}
del no_required_args['required_creation_arguments']
with pytest.raises(ValueError):
BasicCreationArgsTestingConnection(no_required_args, {})
def test_connection_utils__base_connection__normal_send_function_throws_error():
"""
Make sure that calling the "send" function without overwriting it will throw
a Runtime error.
"""
connector = BasicCreationArgsTestingConnection(BASIC_MODEL, {})
with pytest.raises(RuntimeError):
connector.send({})
def test_connection_utils__get_connection__valid_get_replyer_case():
"""
Test if the get_connection function can make a replyer and a requester
connection that succeeds.
"""
requester_address = ADDRESSES['out']['requester_connection']
requester_model = CONNECTION_MODELS['out']['requester_connection']
requester = connection_utils.get_connection(
requester_model,
'out',
requester_address
)
assert isinstance(requester, Requester)
def test_connection_utils__setup_connections__valid_get_replyers_case():
"""
Test if the setup_connections function can make both a replyer and a requester
connection at the same time.
"""
connections = connection_utils.setup_connections(CONNECTION_MODELS, ADDRESSES)
conn_1 = connections['out']['requester_connection']
conn_2 = connections['out']['requester_connection_2']
assert isinstance(conn_1, Requester)
assert isinstance(conn_2, Requester)
class BasicAddressArgsTestingConnection(connection_utils.BaseConnection):
"""
This connection is strictly used for testing in this file...
"""
@staticmethod
def get_addresses_model():
"""
This is needed so the BaseConnector can validate the
provided addresses and throw an error if any are missing.
As well as automatically generate documentation.
NOTE: types must always be "str"
return = {
'required_addresses': {
'req_address_name_1': str,
'req_address_name_2': str,
},
'optional_addresses': {
'opt_address_name_1': str,
'opt_address_name_2': str,
},
}
"""
return {
'required_addresses': {
'required_connection_1': str,
'required_connection_2': str,
},
'optional_addresses': {
'optional_connection_1': str,
'optional_connection_2': str,
},
}
@staticmethod
def get_compatable_connection_types():
"""
This is needed so the build system knows which
connection types this connection is compatable.
return::['str'] A list of the compatable socket types.
"""
return []
@staticmethod
def get_connection_arguments_model():
"""
This is needed so the BaseConnection can validate the provided
model explicitly states the arguments to be passed on each
send message.
return = {
'required_connection_arguments': {
'required_connection_arg_1': type,
'required_connection_arg_2': type,
},
'optional_connection_arguments': {
'optional_connection_arg_1': type,
'optional_connection_arg_2': type,
},
}
"""
return {
'required_connection_arguments': {},
'optional_connection_arguments': {},
}
@staticmethod
def get_connection_type():
"""
This is needed so the build system knows what
connection type this connection is considered.
return::str The socket type of this connection.
"""
return 'basic'
@staticmethod
def get_creation_arguments_model():
"""
This is needed so the BaseConnection can validate the provided
creation arguments as well as for auto documentation.
return = {
'required_creation_arguments': {
'required_creation_arg_1': type,
'required_creation_arg_2': type,
},
'optional_creation_arguments': {
'optional_creation_arg_1': type,
'optional_creation_arg_2': type,
},
}
"""
return {
'required_creation_arguments': {},
'optional_creation_arguments': {},
}
def get_inbound_sockets_and_triggered_functions(self):
"""
Method needed so the service framework knows which sockets to listen
for new messages and what functions to call when a message appears.
return [{
'inbound_socket': zmq.Context.Socket,
'arg_validator': def(args),
'connection_function': def(args) -> args or None,
'model_function': def(args, to_send, states, config) -> return_args or None,
'return_validator': def(return_args)
'return_function': def(return_args),
}]
"""
return []
def runtime_setup(self):
"""
This method is used for the state to do any setup that must occur during
runtime. I.E. setting up a zmq.Context.
"""
class BasicCreationArgsTestingConnection(connection_utils.BaseConnection):
"""
This connection is strictly used for testing in this file...
"""
@staticmethod
def get_addresses_model():
"""
This is needed so the BaseConnector can validate the
provided addresses and throw an error if any are missing.
As well as automatically generate documentation.
NOTE: types must always be "str"
return = {
'required_addresses': {
'req_address_name_1': str,
'req_address_name_2': str,
},
'optional_addresses': {
'opt_address_name_1': str,
'opt_address_name_2': str,
},
}
"""
return {
'required_addresses': {},
'optional_addresses': {},
}
@staticmethod
def get_compatable_connection_types():
"""
This is needed so the build system knows which
connection types this connection is compatable.
return::['str'] A list of the compatable socket types.
"""
return []
@staticmethod
def get_connection_arguments_model():
"""
This is needed so the BaseConnection can validate the provided
model explicitly states the arguments to be passed on each
send message.
return = {
'required_connection_arguments': {
'required_connection_arg_1': type,
'required_connection_arg_2': type,
},
'optional_connection_arguments': {
'optional_connection_arg_1': type,
'optional_connection_arg_2': type,
},
}
"""
return {
'required_connection_arguments': {},
'optional_connection_arguments': {},
}
@staticmethod
def get_connection_type():
"""
This is needed so the build system knows what
connection type this connection is considered.
return::str The socket type of this connection.
"""
return 'basic'
@staticmethod
def get_creation_arguments_model():
"""
This is needed so the BaseConnection can validate the provided
creation arguments as well as for auto documentation.
return = {
'required': {
'required_creation_arg_1': type,
'required_creation_arg_2': type,
},
'optional': {
'optional_creation_arg_1': type,
'optional_creation_arg_2': type,
},
}
"""
return {
'required_creation_arguments': {
'required_creation_argument_1': str,
'required_creation_argument_2': int,
},
'optional_creation_arguments': {
'optional_creation_argument_1': str,
'optional_creation_argument_2': int,
},
}
def get_inbound_sockets_and_triggered_functions(self):
"""
Method needed so the service framework knows which sockets to listen
for new messages and what functions to call when a message appears.
return [{
'inbound_socket': zmq.Context.Socket,
'arg_validator': def(args),
'connection_function': def(args) -> args or None,
'model_function': def(args, to_send, states, config) -> return_args or None,
'return_validator': def(return_args)
'return_function': def(return_args),
}]
"""
return []
def runtime_setup(self):
"""
This method is used for the state to do any setup that must occur during
runtime. I.E. setting up a zmq.Context.
"""
ADDRESSES = {
'out': {
'requester_connection': {
'requester': '127.0.0.1:8877',
},
'requester_connection_2': {
'requester': '127.0.0.1:8877',
},
}
}
CONNECTION_MODELS = {
'out': {
'requester_connection': {
'connection_type': 'requester',
},
'requester_connection_2': {
'connection_type': 'requester',
},
},
}
BASIC_ADDRESSES = {
'required_connection_1': 'req_string_address_1',
'required_connection_2': 'req_string_address_2',
'optional_connection_1': 'opt_string_address_1',
'optional_connection_2': 'opt_string_address_2',
}
BASIC_MODEL = {
'connection_type': 'basic',
'required_creation_arguments': {
'required_creation_argument_1': 'foo',
'required_creation_argument_2': 7888,
},
'optional_creation_arguments': {
'optional_creation_argument_1': 'bar',
'optional_creation_argument_2': 1337,
},
}
| [
37811,
38410,
6208,
9220,
329,
2139,
62,
30604,
13,
38659,
62,
26791,
13,
9078,
37227,
198,
198,
11748,
18931,
198,
11748,
12972,
9288,
198,
6738,
2139,
62,
30604,
13,
26791,
1330,
4637,
62,
26791,
198,
6738,
2139,
62,
30604,
13,
8443,
... | 2.345183 | 4,951 |
"""
Created on 16/07/2014
@author: victor
"""
import traceback
from pyproct.tools.plugins import PluginHandler | [
37811,
198,
41972,
319,
1467,
14,
2998,
14,
4967,
198,
198,
31,
9800,
25,
2210,
273,
198,
37811,
198,
11748,
12854,
1891,
198,
6738,
12972,
1676,
310,
13,
31391,
13,
37390,
1330,
42636,
25060
] | 3.264706 | 34 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/contrib/tensorboard/plugins/trace/trace_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/tensorboard/plugins/trace/trace_info.proto',
package='tensorflow.contrib.tensorboard',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n=tensorflow/contrib/tensorboard/plugins/trace/trace_info.proto\x12\x1etensorflow.contrib.tensorboard\"y\n\tTraceInfo\x12\x33\n\x03ops\x18\x01 \x03(\x0b\x32&.tensorflow.contrib.tensorboard.OpInfo\x12\x37\n\x05\x66iles\x18\x02 \x03(\x0b\x32(.tensorflow.contrib.tensorboard.FileInfo\"\xee\x01\n\x06OpInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07op_type\x18\x02 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x03 \x01(\t\x12<\n\ttraceback\x18\x04 \x03(\x0b\x32).tensorflow.contrib.tensorboard.LineTrace\x12:\n\x06inputs\x18\x05 \x03(\x0b\x32*.tensorflow.contrib.tensorboard.TensorInfo\x12;\n\x07outputs\x18\x06 \x03(\x0b\x32*.tensorflow.contrib.tensorboard.TensorInfo\"3\n\tLineTrace\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x13\n\x0bline_number\x18\x02 \x01(\r\"Y\n\nTensorInfo\x12\r\n\x05shape\x18\x01 \x03(\x05\x12\r\n\x05\x64type\x18\x02 \x01(\t\x12\x1a\n\x12num_bytes_per_elem\x18\x03 \x01(\r\x12\x11\n\tconsumers\x18\x04 \x03(\t\"\xcf\x01\n\x08\x46ileInfo\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x13\n\x0bsource_code\x18\x02 \x01(\t\x12_\n\x14multiline_statements\x18\x03 \x03(\x0b\x32\x41.tensorflow.contrib.tensorboard.FileInfo.MultilineStatementsEntry\x1a:\n\x18MultilineStatementsEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\x62\x06proto3')
)
_TRACEINFO = _descriptor.Descriptor(
name='TraceInfo',
full_name='tensorflow.contrib.tensorboard.TraceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ops', full_name='tensorflow.contrib.tensorboard.TraceInfo.ops', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='files', full_name='tensorflow.contrib.tensorboard.TraceInfo.files', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=97,
serialized_end=218,
)
_OPINFO = _descriptor.Descriptor(
name='OpInfo',
full_name='tensorflow.contrib.tensorboard.OpInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.contrib.tensorboard.OpInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='op_type', full_name='tensorflow.contrib.tensorboard.OpInfo.op_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device', full_name='tensorflow.contrib.tensorboard.OpInfo.device', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='traceback', full_name='tensorflow.contrib.tensorboard.OpInfo.traceback', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='tensorflow.contrib.tensorboard.OpInfo.inputs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='tensorflow.contrib.tensorboard.OpInfo.outputs', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=221,
serialized_end=459,
)
_LINETRACE = _descriptor.Descriptor(
name='LineTrace',
full_name='tensorflow.contrib.tensorboard.LineTrace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_path', full_name='tensorflow.contrib.tensorboard.LineTrace.file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='line_number', full_name='tensorflow.contrib.tensorboard.LineTrace.line_number', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=461,
serialized_end=512,
)
_TENSORINFO = _descriptor.Descriptor(
name='TensorInfo',
full_name='tensorflow.contrib.tensorboard.TensorInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='tensorflow.contrib.tensorboard.TensorInfo.shape', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.contrib.tensorboard.TensorInfo.dtype', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_bytes_per_elem', full_name='tensorflow.contrib.tensorboard.TensorInfo.num_bytes_per_elem', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='consumers', full_name='tensorflow.contrib.tensorboard.TensorInfo.consumers', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=514,
serialized_end=603,
)
_FILEINFO_MULTILINESTATEMENTSENTRY = _descriptor.Descriptor(
name='MultilineStatementsEntry',
full_name='tensorflow.contrib.tensorboard.FileInfo.MultilineStatementsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.contrib.tensorboard.FileInfo.MultilineStatementsEntry.key', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.contrib.tensorboard.FileInfo.MultilineStatementsEntry.value', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=755,
serialized_end=813,
)
_FILEINFO = _descriptor.Descriptor(
name='FileInfo',
full_name='tensorflow.contrib.tensorboard.FileInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_path', full_name='tensorflow.contrib.tensorboard.FileInfo.file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_code', full_name='tensorflow.contrib.tensorboard.FileInfo.source_code', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='multiline_statements', full_name='tensorflow.contrib.tensorboard.FileInfo.multiline_statements', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FILEINFO_MULTILINESTATEMENTSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=606,
serialized_end=813,
)
_TRACEINFO.fields_by_name['ops'].message_type = _OPINFO
_TRACEINFO.fields_by_name['files'].message_type = _FILEINFO
_OPINFO.fields_by_name['traceback'].message_type = _LINETRACE
_OPINFO.fields_by_name['inputs'].message_type = _TENSORINFO
_OPINFO.fields_by_name['outputs'].message_type = _TENSORINFO
_FILEINFO_MULTILINESTATEMENTSENTRY.containing_type = _FILEINFO
_FILEINFO.fields_by_name['multiline_statements'].message_type = _FILEINFO_MULTILINESTATEMENTSENTRY
DESCRIPTOR.message_types_by_name['TraceInfo'] = _TRACEINFO
DESCRIPTOR.message_types_by_name['OpInfo'] = _OPINFO
DESCRIPTOR.message_types_by_name['LineTrace'] = _LINETRACE
DESCRIPTOR.message_types_by_name['TensorInfo'] = _TENSORINFO
DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TraceInfo = _reflection.GeneratedProtocolMessageType('TraceInfo', (_message.Message,), dict(
DESCRIPTOR = _TRACEINFO,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.TraceInfo)
))
_sym_db.RegisterMessage(TraceInfo)
OpInfo = _reflection.GeneratedProtocolMessageType('OpInfo', (_message.Message,), dict(
DESCRIPTOR = _OPINFO,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.OpInfo)
))
_sym_db.RegisterMessage(OpInfo)
LineTrace = _reflection.GeneratedProtocolMessageType('LineTrace', (_message.Message,), dict(
DESCRIPTOR = _LINETRACE,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.LineTrace)
))
_sym_db.RegisterMessage(LineTrace)
TensorInfo = _reflection.GeneratedProtocolMessageType('TensorInfo', (_message.Message,), dict(
DESCRIPTOR = _TENSORINFO,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.TensorInfo)
))
_sym_db.RegisterMessage(TensorInfo)
FileInfo = _reflection.GeneratedProtocolMessageType('FileInfo', (_message.Message,), dict(
MultilineStatementsEntry = _reflection.GeneratedProtocolMessageType('MultilineStatementsEntry', (_message.Message,), dict(
DESCRIPTOR = _FILEINFO_MULTILINESTATEMENTSENTRY,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.FileInfo.MultilineStatementsEntry)
))
,
DESCRIPTOR = _FILEINFO,
__module__ = 'tensorflow.contrib.tensorboard.plugins.trace.trace_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.tensorboard.FileInfo)
))
_sym_db.RegisterMessage(FileInfo)
_sym_db.RegisterMessage(FileInfo.MultilineStatementsEntry)
_FILEINFO_MULTILINESTATEMENTSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
11192,
273,
11125,
14,
3642,
822,
14,
83,
22854,
3526,
14,
37390,
14,
40546,
14,
40546,
62,
10951,
13,
1676,
1462,
198,
198,
11748,
25064,
... | 2.436387 | 6,123 |
# -*- coding: utf-8 -*-
# Python 2 and 3
from __future__ import unicode_literals
import datetime
import sys
from unittest import TestCase
from mock import patch
from gapipy.client import Client
from gapipy.query import Query
from gapipy.models import DATE_FORMAT, AccommodationRoom
from gapipy.resources import (
Departure,
Promotion,
Tour,
TourDossier,
ActivityDossier,
)
from gapipy.resources.base import Resource
from .fixtures import DUMMY_DEPARTURE, PPP_TOUR_DATA, PPP_DOSSIER_DATA
class DepartureAddonTestCase(TestCase):
"""
Test that the departures.addons.product instances get appropriate resource
types.
This is a regression test for some erroneous behaviour of the AddOn model.
The AddOn._resource_fields list was defined at the class level, and as-such
it was shared among intances of that class. The effect is that if you have
three addons, they will all be using the same model for their "product"
even when those products are different type (e.g. "accommodations" versus
"activities" versus "transports" versus "single_supplements" etc.)
Desired behaviour is that each AddOn.product is represented by a model
appropriate for its type.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11361,
362,
290,
513,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4818,
8079,
198,
11748,
25064,
198,
6738,
555,
715,
395,
1330,... | 3.213542 | 384 |
import cmd
import sys
if __name__ == '__main__':
try:
uppermethod().cmdloop()
except KeyboardInterrupt:
sys.exit() | [
11748,
23991,
198,
11748,
25064,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
6727,
24396,
22446,
28758,
26268,
3419,
198,
220,
220,
220,
2845,
31... | 2.421053 | 57 |
#! /usr/bin/env /usr/bin/python3
import subprocess
from time import sleep
import shutil
import os
import numpy
import json
if __name__ == '__main__':
data = map(
lambda s: {"interval":s[0], "primeTime":s[1], "accessTime":s[2]},
[# interval, primeTime, accessTime
(2000000, 800000, 800000),
(1000000, 400000, 400000)
][0:2])
base_dir = os.path.dirname(os.path.abspath(__file__))
print(base_dir)
data_dir = os.path.join(base_dir , "data")
result_dir = os.path.join(base_dir, "llc-results")
sender_bin = os.path.join(base_dir, "pp-llc-send")
reader_bin = os.path.join(base_dir, "pp-llc-recv")
if not os.path.isfile(sender_bin) or not os.path.isfile(reader_bin):
print("Please have this script in the same folder as the executables")
exit()
env = os.environ.copy()
try :
env["LD_LIBRARY_PATH"] += ":" + base_dir
except KeyError:
env["LD_LIBRARY_PATH"] = base_dir
channel = channel_benchmark(data
,runsPerTest=1
,readerCore=0
,senderCore=2)
channel.benchmark();
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
850,
14681,
198,
6738,
640,
1330,
3993,
198,
11748,
4423,
346,
198,
11748,
28686,
198,
11748,
299,
32152,
198,
11748,
33918,
628,
198,
361,... | 2.075044 | 573 |
#!/opt/anaconda3/bin/python
import requests
import ftfy
import glob
import argparse
import os
import jsonlines
from tqdm import tqdm
from datetime import datetime
from slugify import slugify
import json
if __name__ == "__main__":
args = parse_args()
main(args)
| [
2,
48443,
8738,
14,
272,
330,
13533,
18,
14,
8800,
14,
29412,
198,
198,
11748,
7007,
198,
11748,
10117,
24928,
198,
11748,
15095,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
33918,
6615,
198,
6738,
256,
80,
36020,
1330,
256,... | 2.967391 | 92 |
import tensorflow as tf
tf.flags.DEFINE_string('name', 'exp', '')
tf.flags.DEFINE_integer('restore_epoch', -1, '')
tf.flags.DEFINE_integer('n_jobs', 16, '')
tf.flags.DEFINE_integer('epochs', 100, '')
tf.flags.DEFINE_integer('batch_size', 1, '')
tf.flags.DEFINE_integer('node_num', 517, '') # 517 for global max, 433 for train max
tf.flags.DEFINE_integer('node_feature_size', 1108, '')
tf.flags.DEFINE_integer('edge_feature_size', 1216, '')
tf.flags.DEFINE_integer('label_num', 27, '')
tf.flags.DEFINE_integer('message_passing_iterations', 3, '')
tf.flags.DEFINE_bool('debug', False, '')
tf.flags.DEFINE_float('lr', 1e-3, '')
tf.flags.DEFINE_float('beta1', 0.99, '')
tf.flags.DEFINE_float('beta2', 0.999, '')
tf.flags.DEFINE_float('dropout', 0.5, 'Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.')
tf.flags.DEFINE_bool('negative_suppression', False, '')
tf.flags.DEFINE_string('part_weight', 'central', '')
tf.flags.DEFINE_integer('log_interval', 10, '')
tf.flags.DEFINE_string('dataset', 'vcoco', '')
flags = tf.flags.FLAGS
assert flags.part_weight in ['central', 'edge', 'uniform']
assert flags.dataset in ['vcoco', 'hico'] | [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
27110,
13,
33152,
13,
7206,
29940,
62,
8841,
10786,
3672,
3256,
705,
11201,
3256,
10148,
8,
198,
27110,
13,
33152,
13,
7206,
29940,
62,
41433,
10786,
2118,
382,
62,
538,
5374,
3256,
532,
... | 2.60307 | 456 |
from __future__ import division
from ._bracket import bracket
from ._brent import brent
inf = float("inf")
_eps = 1.4902e-08
def minimize(
f, x0=None, x1=None, a=-inf, b=+inf, gfactor=2, rtol=_eps, atol=_eps, maxiter=500
):
r"""Function minimization.
Applies :func:`brent_search.bracket` to find a bracketing interval, to which
:func:`brent_search.brent` is subsequently applied to find a local minimum.
Parameters
----------
f : callable
Function of interest.
x0 : float, optional
First point.
x1 : float, optional
Second point.
a : float, optional
Interval's lower limit. Defaults to ``-inf``.
b : float, optional
Interval's upper limit. Defaults to ``+inf``.
gfactor : float, optional
Growing factor.
rtol : float, optional
Relative tolerance. Defaults to ``1.4902e-08``.
atol : float, optional
Absolute tolerance. Defaults to ``1.4902e-08``.
maxiter : int, optional
Maximum number of iterations. Defaults to ``500``.
Returns
-------
float
Found solution (if any).
float
Function evaluation at that point.
int
The number of function evaluations.
"""
func.nfev = 0
r, _ = bracket(
func,
x0=x0,
x1=x1,
a=a,
b=b,
gfactor=gfactor,
rtol=rtol,
atol=atol,
maxiter=maxiter,
)
x0, x1, x2, f0, f1 = r[0], r[1], r[2], r[3], r[4]
x0, f0 = brent(func, x0, x2, x1, f1, rtol, atol, maxiter)[0:2]
return x0, f0, func.nfev
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
47540,
1671,
8317,
1330,
19096,
198,
6738,
47540,
65,
1156,
1330,
275,
1156,
198,
198,
10745,
796,
12178,
7203,
10745,
4943,
198,
198,
62,
25386,
796,
352,
13,
2920,
2999,
68,
12,
2... | 2.195323 | 727 |
import mysql.connector
from os import getenv as env
import urllib.parse as urlparse
import logging
from utils import setup_logging
logger = logging.getLogger()
OPENCAST_DB_URL = env("OPENCAST_DB_URL")
OPENCAST_RUNNING_JOB_STATUS = 2
@setup_logging
| [
11748,
48761,
13,
8443,
273,
198,
6738,
28686,
1330,
651,
24330,
355,
17365,
198,
11748,
2956,
297,
571,
13,
29572,
355,
19016,
29572,
198,
198,
11748,
18931,
198,
6738,
3384,
4487,
1330,
9058,
62,
6404,
2667,
198,
198,
6404,
1362,
796,... | 2.76087 | 92 |
from django import forms
| [
6738,
42625,
14208,
1330,
5107,
628
] | 4.333333 | 6 |
import pandas as pd
data = {
'ages': [14, 18, 24, 42],
'heights': [165, 180, 176, 184]
}
print(data)
df = pd.DataFrame(data, index=['ahmet',"akan","burak","duman"])
print(df)
print(df.loc["ahmet"])
print()
print(df.iloc[0])
print("------------------ READING DATA FROM CSV------------------------------")
df2 = pd.read_csv("https://www.sololearn.com/uploads/ca-covid.csv")
print(df2.head(10)) # head() ilk 5 , tail() son 5 veriyi verir
print(df2.info)
df2.drop('state',axis=1,inplace=True) # axis = 1 for column , axis = 0 for row
df2['month'] = pd.to_datetime(df2['date'], format="%d.%m.%y").dt.month_name()
df2.set_index('date', inplace=True) # inplace anında geçerli olmasını sağlar
print(df2['month'].value_counts())
print(df2.groupby('month')['cases'].sum())
print(df2.head())
print(df2.describe) | [
11748,
19798,
292,
355,
279,
67,
198,
7890,
796,
1391,
198,
220,
220,
705,
1095,
10354,
685,
1415,
11,
1248,
11,
1987,
11,
5433,
4357,
198,
220,
220,
705,
258,
2337,
10354,
685,
20986,
11,
11546,
11,
26937,
11,
28598,
60,
198,
92,
... | 2.411243 | 338 |
# author: Mukund Iyer
# date: 12/29/21
"""
This script will carry out cross-validation and hyperparameter optimization for different models
of the data.
Usage: model_tuning.py --processed_data_path=<processed_data_path> --results_folder_path=<results_path>
Options:
--processed_data_path=<processed_data_path> The path to the processed data folder
--results_path=<results_path> The path where the results of the preprocessing are saved
"""
from pandas.io.parsers import read_csv
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import make_scorer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import (
cross_validate,
)
import pickle
from docopt import docopt
import os
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
opt = docopt(__doc__)
# function to load preprocessor
# function to define MAPE score
# function adapted from Varada Kolhatkar
def mean_std_cross_val_scores(model, X_train, y_train, **kwargs):
"""
Returns mean and std of cross validation
Parameters
----------
model :
scikit-learn model
X_train : numpy array or pandas DataFrame
X in the training data
y_train :
y in the training data
Returns
----------
pandas Series with mean scores from cross_validation
"""
scores = cross_validate(model, X_train, y_train, **kwargs)
mean_scores = pd.DataFrame(scores).mean()
std_scores = pd.DataFrame(scores).std()
out_col = []
for i in range(len(mean_scores)):
out_col.append((f"%0.3f (+/- %0.3f)" % (mean_scores[i], std_scores[i])))
return pd.Series(data=out_col, index=mean_scores.index)
# baseline model - dummy regressor
# ridge tuning
# random forest tuning
if __name__ == "__main__":
main(opt["--processed_data_path"], opt["--results_folder_path"])
| [
2,
1772,
25,
31509,
917,
314,
9860,
198,
2,
3128,
25,
1105,
14,
1959,
14,
2481,
198,
198,
37811,
198,
1212,
4226,
481,
3283,
503,
3272,
12,
12102,
341,
290,
8718,
17143,
2357,
23989,
329,
1180,
4981,
198,
1659,
262,
1366,
13,
198,
... | 2.967548 | 832 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 18 22:56:28 2014
@author: space_000
"""
from scipy.io import loadmat
import numpy as np
import pymongo as mg
client=mg.MongoClient()
db=client['MKD']
colMKInit=db['marketInit']
#%% Create market trading days
d=loadmat('E:\\Code Laboratory\\MFpy\\MongoPy\\MarketData\\wtdays')
tdays=d['c']
daa=[int(t[0][0]) for t in tdays]
colMKInit.insert({'_id':'tdays','tdays':daa})
#%% Create 2014 stock code list
d=loadmat('D:\dbField1')
Field=[int(s) for s in d['Field']]
colMKInit.insert({'_id':'2014intStockCode','intStockCode':Field})
Field=[str(s) for s in d['Field']]
colMKInit.insert({'_id':'2014strStockCode','strStockCode':Field})
Field=np.array(Field)
mField=[]
for i in xrange(Field.shape[0]):
lf=6-len(str(Field[i]))
mField.append('0'*lf+str(Field[i]))
field=[]
for i in mField:
if i[0]=='6':
field.append(i+'.SH')
else:
field.append(i+'.SZ')
colMKInit.insert({'_id':'2014shszStockCode','shszStockCode':field})
#%% 生成是否下载了当天、对应的股票集、五个行情数据的矩阵。暂包括日数据、分钟数据
tdays=colMKInit.find({'_id':'tdays'},{'_id':0}).next()
mark={'min':0,'day':0}
query={}
for i in tdays['tdays']:
query[str(i)]=mark
colMKInit.insert(dict({'_id':'2014DateMark'},**query)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
5267,
1248,
2534,
25,
3980,
25,
2078,
1946,
198,
198,
31,
9800,
25,
2272,
62,
830,
198,
37811,
198,
6738,
629,
541,
88,
13,
952,
1330,
... | 2.055369 | 596 |
from onegov.form import _
| [
6738,
530,
9567,
13,
687,
1330,
4808,
628
] | 3.375 | 8 |
"""
Post analysis module
"""
from typing import Tuple
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
from crosswalk import CWData, CWModel
from crosswalk.scorelator import Scorelator
| [
37811,
198,
6307,
3781,
8265,
198,
37811,
198,
6738,
19720,
1330,
309,
29291,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
... | 3.5 | 68 |
from .crossover import NullCrossover, SBX, SP
from .mutation import NullMutation, BitFlip, Polynomial, IntegerPolynomial, Uniform, SimpleRandom
from .selection import BestSolutionSelection, BinaryTournamentSelection, BinaryTournament2Selection, \
RandomSolutionSelection, NaryRandomSolutionSelection, RankingAndCrowdingDistanceSelection
__all__ = [
'NullCrossover', 'SBX', 'SP',
'NullMutation', 'BitFlip', 'Polynomial', 'IntegerPolynomial', 'Uniform', 'SimpleRandom',
'BestSolutionSelection', 'BinaryTournamentSelection', 'BinaryTournament2Selection', 'RandomSolutionSelection',
'NaryRandomSolutionSelection', 'RankingAndCrowdingDistanceSelection'
]
| [
6738,
764,
66,
23954,
1330,
35886,
34,
23954,
11,
18056,
55,
11,
6226,
198,
6738,
764,
76,
7094,
1330,
35886,
44,
7094,
11,
4722,
7414,
541,
11,
12280,
26601,
498,
11,
34142,
34220,
26601,
498,
11,
35712,
11,
17427,
29531,
198,
6738,
... | 3.305419 | 203 |
# Packages
import numpy as np
import sys
sys.path.append('../')
import filters # Filters function used for topology optimization
import grcwa # Python RCWA Library. See downloading instructions at https://github.com/weiliangjinca/grcwa
args = {}
params = {}
## Define variables
args['nG'] = 51 # Always check convergence wrt nG
# Lattice vector (unit length is 1 um)
args['Lx'] = 0.430
args['Ly'] = args['Lx']
args['L1'] = [args['Lx'], 0.]
args['L2'] = [0., args['Ly']]
# Wavelength vector
wl_vec = np.linspace(0.4,0.8,400)
# Array of z-bound for volume integral (measured depths)
# meas_depth_vec = np.array([100.,200.,300.,400.,500.,600.,700.,800.,900.,1000.])*1e-3 # Coarse sweep
meas_depth_vec = np.linspace(0.01,1.,20) # Thin sweep
# discretization for patterned layers
# Has to be even integers with current C4v implementation
args['Nx'] = 100
args['Ny'] = args['Nx']
## Multilayer setting. Bottom/Top are half spaces, middle is for unpatterned middle layers.
eps_SiO2 = 2.13353 +1e-5*1j#
eps_vacuum = 1.
# Geometry
args['thick_max'] = 0.5 # Thickness of silicon top layer
args['etch_depth'] = 25*1e-3 # To be updated for various hole depths
thick_SiO2 = 1.0 # Thickness of silica layer
rad_etch = 0.130 # Radius of hole
# Angular parameters
theta_max = 17.5*np.pi/180. # Max. angular aperture
theta_res = 20 # Angular resolution
theta_vec = np.linspace(0, theta_max, theta_res) # Array of angles
# Two types of polarization
pol_dict = {"s", "p"}
# Loads silicon refractive index (from refractiveindex.info)
eps_Si_data = np.loadtxt(open('permittivity/Si (Silicon) - Palik.txt', "rb"), delimiter=",", skiprows=0)
def eps_Si_func(wl):
''' Permittivity of silicon '''
# wl wavelength in microns
ind = np.where(np.abs(eps_Si_data[:,0]-wl) == np.min(np.abs(eps_Si_data[:,0]-wl)))
return (eps_Si_data[ind,1] + 1j * eps_Si_data[ind,2])[0][0]
def rescaled(dof, dof_min, dof_max):
''' Rescales DOF from [0,1] to [dof_min, dof_max'''
return dof_min + dof*(dof_max-dof_min)
def Veff(freqangs, pol, meas_depth, etch_depth):
''' Calculates Veff from experimental structure (Figure 2)
Given incoming frequency, solid angle (freqangs)
and polarization (pol)
Integrates over measurement depth (meas_depth)
Circular hole with depth etch_depth
'''
# Loads frequency and angles from freqangs
freq = freqangs[0]
theta = freqangs[1]
phi = freqangs[2]
# Silicon permittivity
eps_Si = eps_Si_func(1/freq)
# Creates RCWA Object
obj = grcwa.obj(args['nG'], args['L1'], args['L2'], freq, theta, phi, verbose = 0)
# Creates multi-layer structure
obj.Add_LayerUniform(0., eps_vacuum)
obj.Add_LayerGrid(args['etch_depth'], args['Nx'], args['Ny']) # Layer with circular hole
obj.Add_LayerUniform(args['thick_max']-etch_depth, eps_Si)
obj.Add_LayerUniform(meas_depth, eps_SiO2) # Layer of interest
obj.Add_LayerUniform(thick_SiO2 - meas_depth, eps_SiO2) # Rest of silica (not measured here)
obj.Add_LayerUniform(1000., eps_Si)
obj.Add_LayerUniform(0., eps_vacuum)
obj.Init_Setup(Gmethod = 0)
# Updates DOF to make circular hole
flattened_dof = 1-filters.dof_to_pillar(rad_etch, args['Nx'], args['Ny'], args['Lx'], args['Ly'], binary_flag = True)
flattened_dof = np.array(flattened_dof).flatten()*(eps_Si - eps_vacuum) + eps_vacuum # Turns 0/1 DOF into epsilon values
obj.GridLayer_geteps(flattened_dof) # Flattens DOFs for RCWA
# Set incoming polarization
if pol == "s":
planewave = {'p_amp': 0, 's_amp': 1, 'p_phase': 0, 's_phase': 0}
if pol == "p":
planewave = {'p_amp': 1, 's_amp': 0, 'p_phase': 0, 's_phase': 0}
# Define incoming plane wave
obj.MakeExcitationPlanewave(planewave['p_amp'], planewave['p_phase'], planewave['s_amp'], planewave['s_phase'], order = 0)
SiO2_layer = 3 # index of silica layer of interest
dN = 1/args['Nx']/args['Ny'] # Integral discretization
M0 = grcwa.fft_funs.get_conv(dN, np.ones((args['Nx'], args['Ny'])), obj.G) # Defines integration kernel in Fourier space
# To be consistent with Poynting vector convention, absorbed power is vol1*omega or vol1/lambda
# See here: https://github.com/weiliangjinca/grcwa/blob/master/grcwa/rcwa.py
vol1 = obj.Volume_integral(SiO2_layer, M0, M0, M0, normalize=1) # Calculate volume integral
res = np.abs(vol1)
return res
folder_name = 'sweep' # folder to save data in /res/folder_name
# Loop to calculate over all possible parameters. Can be parallelized with multiprocessing.pool.map()
data_mat = np.zeros((len(wl_vec), len(theta_vec), len(theta_vec), len(pol_dict), len(meas_depth_vec))) # Data array
theta_mat = np.zeros((len(theta_vec), len(theta_vec))) # Theta array (for angular integration)
phi_mat = np.zeros((len(theta_vec), len(theta_vec))) # Phi array (for angular integration)
for (meas_depth_ind, meas_depth) in zip(range(len(meas_depth_vec)), meas_depth_vec):
args['meas_depth'] = meas_depth
for (polind, pol) in zip(range(len(pol_dict)), pol_dict):
for itx in range(len(theta_vec)):
for ity in range(itx+1):
# Updates theta and phi
# Triangular domain sweep within (circular) numerical aperture
thetax = theta_vec[itx]
thetay = theta_vec[ity]
theta = np.sqrt(thetax**2.+thetay**2.)
phi = np.arctan(thetay/thetax) if thetax != 0 else np.sign(thetax)*np.pi/2.
theta_mat[itx, ity] = theta
phi_mat[itx, ity] = phi
if theta <= theta_max:
# If in angular aperture, calculates RCWA
print("Calculating up to depth = {0} nm, pol = {1}, theta = {2}, phi = {3}".format(meas_depth*1e3, pol, theta*180/np.pi, phi*180/np.pi))
for (wlind, wl) in zip(range(len(wl_vec)), wl_vec):
# Calculates RCWA for a given set of parameters
freqangs0 = [1/wl, theta, phi]
res = Veff(freqangs0, pol, meas_depth, args['etch_depth'])/wl
data_mat[wlind, itx, ity, polind, meas_depth_ind] = res
# Saves data in big dictionary
data = {"data_mat":data_mat, "wl_vec": wl_vec, "pol": pol, "theta_vec": theta_vec, "theta_mat": theta_mat, "phi_mat": phi_mat, "meas_depth_vec":meas_depth_vec, "thick_SiO2": thick_SiO2, "rad_etch": rad_etch, "args": args}
# np.save("res/"+folder_name+"/rcwa_expt_analysis_etchdepth_{0}nm".format(args['etch_depth']*1e3), data, allow_pickle = True) | [
2,
6400,
1095,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
11537,
198,
11748,
16628,
1303,
7066,
1010,
2163,
973,
329,
1353,
1435,
23989,
220,
198,
11748,
1036,
66,
10247,
1... | 2.271798 | 2,936 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch of environments inside the TensorFlow graph."""
# The code was based on Danijar Hafner's code from tf.agents:
# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class InGraphBatchEnv(object):
"""Abstract class for batch of environments inside the TensorFlow graph.
"""
def __getattr__(self, name):
"""Forward unimplemented attributes to one of the original environments.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in one of the original environments.
"""
return getattr(self._batch_env, name)
def __len__(self):
"""Number of combined environments."""
return len(self._batch_env)
def __getitem__(self, index):
"""Access an underlying environment by index."""
return self._batch_env[index]
def simulate(self, action):
"""Step the batch of environments.
The results of the step can be accessed from the variables defined below.
Args:
action: Tensor holding the batch of actions to apply.
Returns:
Operation.
"""
raise NotImplementedError
def reset(self, indices=None):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset.
Returns:
Batch tensor of the new observations.
"""
raise NotImplementedError
@property
def observ(self):
"""Access the variable holding the current observation."""
return self._observ
def close(self):
"""Send close messages to the external process and join them."""
self._batch_env.close()
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
309,
22854,
17,
51,
22854,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2... | 3.337644 | 696 |
"""Relational algebra operators.
Used to represent a query plan before it is converted into a series of
iterators.
References:
* https://www.cs.rochester.edu/~nelson/courses/csc_173/relations/algebra.html
* http://www.databasteknik.se/webbkursen/relalg-lecture/
* https://en.wikipedia.org/wiki/Relational_algebra
* http://www.cs.toronto.edu/~faye/343/f07/lectures/wk3/03_RAlgebra.pdf
"""
import autopep8
import logging
import re
from matchpy import (
Arity,
Operation,
Symbol,
)
class Select(Operation, Relation):
"""
Returns a relation that has had some rows filtered (AKA WHERE clause)
"""
name = 'σ'
arity = Arity.binary
one_identity = False
class Cross(Operation, Relation):
"""
Returns a relation that is a result of the cartesian product of more than
one relations.
"""
name = 'X'
arity = Arity.polyadic
associative = True
commutative = True
infix = True
one_identity = True
class Unique(Operation):
"""
Returns a relation with no duplicates
"""
name = 'Unique'
arity = Arity.unary
# class Difference(Operation, Relation):
# name = '-'
# arity = Arity.binary
# # associative = True
# # commutative = True
# infix = True
# one_identity = True
| [
37811,
6892,
864,
37139,
12879,
13,
198,
198,
38052,
284,
2380,
257,
12405,
1410,
878,
340,
318,
11513,
656,
257,
2168,
286,
198,
2676,
2024,
13,
198,
198,
19927,
25,
198,
220,
220,
220,
1635,
3740,
1378,
2503,
13,
6359,
13,
305,
35... | 2.601547 | 517 |
'''
PLPCOPallet.py
Pulls fresh data from PLPCO SDE database for dissolved roads and photos.
Creates optimized data for sherlock widget by combining Class B &
D dissolved data into a single feature class for each county.
'''
from os.path import basename, join
import arcpy
from forklift.models import Pallet
counties = ['Beaver',
'BoxElder',
'Carbon',
'Daggett',
'Duchesne',
'Emery',
'Garfield',
'Grand',
'Iron',
'Juab',
'Kane',
'Millard',
'Piute',
'Rich',
'SanJuan',
'Sanpete',
'Sevier',
'Tooele',
'Uintah',
'Utah',
'Washington',
'Wayne']
fldROAD_CLASS = 'ROAD_CLASS'
fldName = 'Name'
fldYoutube_URL = 'Youtube_URL'
fldDateTimeS = 'DateTimeS'
fldCOUNTY = 'COUNTY'
shape_token = 'SHAPE@XY'
photos_name = 'Litigation_RoadPhotos'
video_log_name = 'Video_Log'
video_routes_dataset = 'VideoRoute'
| [
7061,
6,
198,
6489,
5662,
3185,
282,
1616,
13,
9078,
198,
198,
42940,
82,
4713,
1366,
422,
9297,
5662,
46,
311,
7206,
6831,
329,
26306,
9725,
290,
5205,
13,
198,
16719,
274,
23392,
1366,
329,
15059,
5354,
26295,
416,
19771,
5016,
347,... | 1.918182 | 550 |
from pcloud import pcloud
import cmdw
if __name__ == '__main__':
p = uploadtransfer()
sendermail = "todut001@gmail.com"
receivermails = "cumulus13@gmail.com"
print p.uploadtransfer(sendermail, receivermails) | [
6738,
279,
17721,
1330,
279,
17721,
198,
11748,
23991,
86,
198,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
279,
796,
9516,
39437,
3419,
198,
220,
220,
220,
29788,
4529,
796,
... | 2.682353 | 85 |
from .reacher import Reacher3DEnv
from .pusher import PusherEnv
from collections import OrderedDict
import gym
import numpy as np
from gym import Wrapper
from gym.envs.registration import EnvSpec
| [
6738,
764,
260,
3493,
1330,
797,
3493,
18,
35,
4834,
85,
198,
6738,
764,
79,
34055,
1330,
350,
34055,
4834,
85,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
133... | 3.396552 | 58 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import cherrypy
import sickbeard
MESSAGE = 'notice'
ERROR = 'error'
class Notifications(object):
"""
A queue of Notification objects.
"""
def message(self, title, message=''):
"""
Add a regular notification to the queue
title: The title of the notification
message: The message portion of the notification
"""
self._messages.append(Notification(title, message, MESSAGE))
def error(self, title, message=''):
"""
Add an error notification to the queue
title: The title of the notification
message: The message portion of the notification
"""
self._errors.append(Notification(title, message, ERROR))
def get_notifications(self):
"""
Return all the available notifications in a list. Marks them all as seen
as it returns them. Also removes timed out Notifications from the queue.
Returns: A list of Notification objects
"""
# filter out expired notifications
self._errors = [x for x in self._errors if not x.is_expired()]
self._messages = [x for x in self._messages if not x.is_expired()]
# return any notifications that haven't been shown to the client already
return [x.see() for x in self._errors + self._messages if x.is_new()]
# static notification queue object
notifications = Notifications()
class Notification(object):
"""
Represents a single notification. Tracks its own timeout and a list of which clients have
seen it before.
"""
def is_new(self):
"""
Returns True if the notification hasn't been displayed to the current client (aka IP address).
"""
return cherrypy.request.remote.ip not in self._seen
def is_expired(self):
"""
Returns True if the notification is older than the specified timeout value.
"""
return datetime.datetime.now() - self._when > self._timeout
def see(self):
"""
Returns this notification object and marks it as seen by the client ip
"""
self._seen.append(cherrypy.request.remote.ip)
return self
class QueueProgressIndicator():
"""
A class used by the UI to show the progress of the queue or a part of it.
"""
| [
2,
6434,
25,
8377,
37013,
1279,
6988,
31,
18829,
31829,
13,
6888,
29,
201,
198,
2,
10289,
25,
2638,
1378,
8189,
13,
13297,
13,
785,
14,
79,
14,
82,
624,
39433,
14,
201,
198,
2,
201,
198,
2,
770,
2393,
318,
636,
286,
32181,
41698... | 2.69141 | 1,199 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm
from .base_model import BaseModel
from .msa_embeddings import MSAEmbeddings
from .attention import ZBlock, YBlock, YAggregator, ZRefiner
from .distance_predictor import DistancePredictor
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
11748,
10688,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
256,
80,
360... | 3.424528 | 106 |
import numpy as np
import math
import random
import os
import multiprocessing
from colorama import init, Fore, Back, Style
class SudokuGenerator(object) :
""" Generate unique sudoku solutions everytime for n*n grids. """
def generate_grid(self):
""" Generate a grid of n*n numbers. """
grid = np.zeros((self.num,self.num), dtype=np.int)
return grid
def generate_check_lists(self):
""" Returning a dict of n number of lists for row, column and sub-matrix.
Each list will contain numbers from 1 to n.
These lists will be used by sudoku_generator function for
tracking available possibilities of number to fill a particular cell
following the basic sudoku rules.
"""
checker= {}
for i in range(1,self.num+1):
checker['row'+str(i)]=list(range(1,self.num+1))
checker['col'+str(i)]=list(range(1,self.num+1))
checker['box'+str(i)]=list(range(1,self.num+1))
return checker
def get_submatrix_num(self, row_n, col_n, root_n):
""" Getting the num of sub-matrix using the row and coloumn number. """
if row_n % root_n == 0: # root_n is square root of n
row_t = int(row_n/root_n)
else:
row_t = int(row_n/root_n) + 1
if col_n % root_n == 0:
col_t = int(col_n/root_n)
else:
col_t = int(col_n/root_n) + 1
box_n = col_t + (row_t-1)*root_n # formula for calculating which submatrix box, a (row,column) belongs
return box_n
def sudoku_gen(self, state=None):
""" Pushing number for each cell of the generated grid, following sudoku rules.
Each number is picked randomly from the list of elements obtained by the
intersection of checker lists for that particular row, col and submatrix
"""
count = 0
while True:
if state != None and state.value == 1:
# print ('Solver',os.getpid(),'quitting')
break
else:
m = self.generate_check_lists()
sudoku = self.generate_grid()
count+=1 #to get number of attempts tried to get the solution.
try:
for row_n in range(1, self.num+1):
for col_n in range(1, self.num+1):
box_n = self.get_submatrix_num(row_n, col_n, int(math.sqrt(self.num)))
row = 'row' + str(row_n)
col = 'col' + str(col_n)
box = 'box' + str(box_n)
# print('target row, column, box => ' + row, col, box)
common_list = list(set(m[row]).intersection(m[col],m[box])) # creating commom list.
# print(common_list)
rand_num = random.choice(common_list) # picking a number from common list.
sudoku[row_n-1][col_n-1] = rand_num
m[row].remove(rand_num)
m[col].remove(rand_num)
m[box].remove(rand_num)
if sudoku[self.num-1][self.num-1]>0: # checking if solution is ready, then break out.
print('Total Number of attempts: ' + str(count))
self.display(sudoku)
if state != None:
state.value = 1 # signalling other processes to quit solving
# print ('Solver '+ str(os.getpid()), + ' solved the problem!')
break
except IndexError: # Handling Out of Index Error
continue
def cprint(self, msg, foreground = "black", background = "white"):
"""This function is used to provide color to the sudoku cells."""
fground = foreground.upper()
bground = background.upper()
style = getattr(Fore, fground) + getattr(Back, bground)
print(style + " " + msg + " " + Style.RESET_ALL, end="", flush=True)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(description='It takes number as optional argument.')
parser.add_argument('-c',dest='concurrent', action="store_true", help='For implementing concurrency')
parser.add_argument('-n', dest='gridnum', required=True, help='Grid number for generating sudoku')
parser.add_argument('-p', dest='procs',default = multiprocessing.cpu_count(),type = int, help='No. of processes to use for concurrent solution.')
args = parser.parse_args()
grid_number = int(args.gridnum)
proc = int(args.procs)
init() #initialising the colorama
if args.concurrent:
instance = SudokuConcurrentSolver(grid_number)
solution = instance.solve(proc)
else:
instance = SudokuGenerator(grid_number)
solution = instance.sudoku_gen()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
18540,
305,
919,
278,
198,
6738,
3124,
1689,
1330,
2315,
11,
4558,
11,
5157,
11,
17738,
628,
198,
4871,
14818,
11601,
8645,
1352,
7,
1525... | 2.123729 | 2,360 |
import Linarg
import numpy as np
from SimPEG import Utils | [
11748,
5164,
853,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3184,
47,
7156,
1330,
7273,
4487
] | 3.352941 | 17 |
#!/usr/bin/env python
# Imports
import os
import random
from helpers import InsertQuery
from faker import Faker
# Quantity
N_DRIVERS = 75
N_AUTOBUS = 100
N_CUSTOMERS = 7500
N_SUBSCRIPTIONS = 100000
N_RIDES = 450 * 365
N_LINES = 15
N_SERVED = 90
N_STOPS = 225
N_USED = 12000
# Fixed seed
random.seed(0)
Faker.seed(0)
# Generator and output file
fake = Faker(['it-IT'])
sql_file = '../sql/05_populate.sql'
# Remove file if exists and re-create it
if os.path.exists(sql_file):
os.unlink(sql_file)
# -------------------------------------------------------------------------------------------------- #
autobuses = InsertQuery('Autobus', 'Targa', 'InPiedi', 'Seduti')
drivers = InsertQuery('Autista', 'CodiceFiscale', 'Nome', 'Cognome', 'LuogoResidenza', 'DataNascita', 'NumeroPatente')
telephone_numbers = InsertQuery('Telefono', 'Numero', 'Autista')
#-------------#
# - Autobus - #
#-------------#
for _ in range(N_AUTOBUS):
plate = fake.unique.license_plate()
autobuses.append(plate, random.randint(10, 30), random.randint(10, 30))
#--------------------------------------#
# ------------- Autisti -------------- #
# - Numeri di telefono degli autisti - #
#--------------------------------------#
for _ in range(N_DRIVERS):
fiscal = fake.unique.ssn()
name = fake.first_name()
surname = fake.last_name()
address = fake.address().replace('\n', ' - ').replace("'", "''")
birth = fake.date_of_birth()
driver_license = random.randint(1000000000, 9999999999)
# Driver may have 0 to 3 phone numbers registered
number_of_telephone_numbers = random.randint(0, 3)
for _ in range(number_of_telephone_numbers):
telephone_number = random.randint(1000000000, 9999999999) # 10 digits phone number
telephone_numbers.append(telephone_number, fiscal)
drivers.append(fiscal, name, surname, address, birth, driver_license)
with open(sql_file, 'a') as f:
f.write('SET search_path TO SistemaTrasportoUrbano;\n')
f.write(str(drivers))
f.write(str(telephone_numbers))
f.write(str(autobuses))
# -------------------------------------------------------------------------------------------------- #
#-------------#
# - Tessere - #
#-------------#
cards = InsertQuery('Tessera', 'NumeroTessera')
for _ in range(N_CUSTOMERS):
cards.append(fake.unique.credit_card_number())
#-------------#
# - Clienti - #
#-------------#
customers = InsertQuery('Cliente', 'CodiceFiscale', 'Nome', 'Cognome', 'LuogoResidenza', 'DataNascita', 'NumeroTelefono', 'Tessera')
for i in range(N_CUSTOMERS):
fiscal = fake.unique.ssn()
name = fake.first_name()
surname = fake.last_name()
address = fake.address().replace('\n', ' - ').replace("'", "''")
birth = fake.date_of_birth()
telephone_number = random.randint(1000000000, 9999999999) # 10 digits phone number
card = cards.get('NumeroTessera', i)
customers.append(fiscal, name, surname, address, birth, telephone_number, card)
#-----------------#
# - Abbonamenti - #
# --- Validi ---- #
# --- Scaduti --- #
#-----------------#
subs = InsertQuery('Abbonamento', 'Tessera', 'DataInizio', 'TipoAbbonamento')
valids = InsertQuery('Valido', 'Tessera', 'DataInizio')
expireds = InsertQuery('Scaduto', 'Tessera', 'DataInizio')
subs_check = {}
sub_types = ['mensile', 'trimestrale', 'annuale']
for i in range(N_SUBSCRIPTIONS):
card = cards.get('NumeroTessera', i % N_CUSTOMERS)
sub_type = random.choice(sub_types)
if i < 7500: # Valid
if sub_type == 'mensile':
start_date = fake.date_between(start_date='-28d', end_date='today')
elif sub_type == 'trimestrale':
start_date = fake.date_between(start_date='-84d', end_date='today')
else:
start_date = fake.date_between(start_date='-365d', end_date='today')
valids.append(card, start_date)
else: # Expired
while True:
if sub_type == 'mensile':
start_date = fake.date_between(start_date='-100y', end_date='-30d')
elif sub_type == 'trimestrale':
start_date = fake.date_between(start_date='-100y', end_date='-85d')
else:
start_date = fake.date_between(start_date='-100y', end_date='-366d')
if start_date not in subs_check[card]:
break
expireds.append(card, start_date)
if card not in subs_check.keys():
subs_check[card] = []
subs_check[card].append(start_date)
subs.append(card, start_date, sub_type)
with open(sql_file, 'a') as f:
f.write(str(cards))
f.write(str(customers))
f.write(str(subs))
f.write(str(valids))
f.write(str(expireds))
# -------------------------------------------------------------------------------------------------- #
#----------------#
# - HaEseguito - #
#----------------#
executed = InsertQuery('HaEseguito', 'Id', 'DataOra', 'AutoBus', 'Autista')
for i in range(N_RIDES):
date = fake.date_time()
autobus = random.choice(autobuses.get_col('Targa'))
driver = random.choice(drivers.get_col('CodiceFiscale'))
executed.append(i, date, autobus, driver)
#--------------------------#
# - LineaTrasportoUrbano - #
#--------------------------#
lines = InsertQuery('LineaTrasportoUrbano', 'Numero', 'NumeroFermate')
for i in range(N_LINES):
number = str(i)
number_of_stops = random.randint(10, 30) # mean = 20
lines.append(number, number_of_stops)
#-----------#
# - Corsa - #
#-----------#
rides = InsertQuery('Corsa', 'NumeroLinea', 'EseguitoId')
for i in range(N_RIDES):
line = i % N_LINES
rides.append(line, i)
#---------------#
# - ServitaDa - #
#---------------#
served_by = InsertQuery('ServitaDa', 'Targa', 'NumeroLinea')
check = {i:[] for i in range(N_LINES)}
for i in range(N_SERVED):
line = i % N_LINES
while True:
autobus = random.choice(autobuses.get_col('Targa'))
if autobus not in check[line]:
break
check[line].append(autobus)
served_by.append(autobus, line)
#-------------#
# - Fermata - #
#-------------#
busstops = InsertQuery('Fermata', 'Nome', 'Indirizzo')
for i in range(N_STOPS):
name = fake.unique.md5()
address = fake.address().replace('\n', ' - ').replace("'", "''")
busstops.append(name, address)
#--------------#
# - Composto - #
#--------------#
composto = InsertQuery('Composto', 'NomeFermata', 'NumeroLinea', 'Posizione')
j = 0
for line, nstops in lines.get_zipped('Numero', 'NumeroFermate'):
busstops_names = busstops.get_col('Nome')
for i in range(int(nstops)):
stopname = busstops_names[j % len(busstops_names)] # random.choice(busstops.get_col('Nome'))
composto.append(stopname, line, i+1) # from 1 to nstops
j += 1
#-----------------#
# - HaUsufruito - #
#-----------------#
used = InsertQuery('HaUsufruito', 'Cliente', 'EseguitoId', 'NumeroLinea')
for _ in range(N_USED):
execd, line = random.choice(rides.get_zipped('EseguitoId', 'NumeroLinea'))
user = random.choice(customers.get_col('CodiceFiscale'))
used.append(user, execd, line)
with open(sql_file, 'a') as f:
f.write('START TRANSACTION;\n')
f.write('SET CONSTRAINTS ALL DEFERRED;\n')
f.write(str(served_by))
f.write(str(composto))
f.write(str(busstops))
f.write(str(executed))
f.write(str(rides))
f.write(str(lines))
f.write(str(used))
f.write('COMMIT;\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
1846,
3742,
198,
11748,
28686,
198,
11748,
4738,
198,
198,
6738,
49385,
1330,
35835,
20746,
198,
6738,
277,
3110,
1330,
376,
3110,
198,
198,
2,
39789,
198,
45,
62,
7707,
30194,... | 2.503894 | 2,953 |
from flask import jsonify
from identity import identity
from identity.models.user import User
@identity.route('/user')
| [
6738,
42903,
1330,
33918,
1958,
198,
6738,
5369,
1330,
5369,
198,
6738,
5369,
13,
27530,
13,
7220,
1330,
11787,
628,
198,
31,
738,
414,
13,
38629,
10786,
14,
7220,
11537,
198
] | 3.903226 | 31 |
"""
test configuration settings
"""
import rez.vendor.unittest2 as unittest
from rez.tests.util import TestBase
from rez.exceptions import ConfigurationError
from rez.config import Config, get_module_root_config, _replace_config
from rez.system import system
from rez.utils.data_utils import RO_AttrDictWrapper
from rez.packages_ import get_developer_package
import os
import os.path
if __name__ == '__main__':
unittest.main()
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| [
37811,
198,
9288,
8398,
6460,
198,
37811,
198,
11748,
302,
89,
13,
85,
18738,
13,
403,
715,
395,
17,
355,
555,
715,
395,
198,
6738,
302,
89,
13,
41989,
13,
22602,
1330,
6208,
14881,
198,
6738,
302,
89,
13,
1069,
11755,
1330,
28373,
... | 3.55836 | 317 |
from __future__ import division, print_function, absolute_import
import cv2
from base_camera import BaseCamera
import warnings
import numpy as np
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort.detection import Detection
from deep_sort.detection_yolo import Detection_YOLO
from importlib import import_module
from collections import Counter
import datetime
warnings.filterwarnings('ignore')
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
198,
11748,
269,
85,
17,
198,
6738,
2779,
62,
25695,
1330,
7308,
35632,
198,
198,
11748,
14601,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
414... | 3.810345 | 116 |
#!/usr/bin/env python
# coding: utf-8
# <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
# <br></br>
# <br></br>
#
# # Major Neural Network Architectures Challenge
# ## *Data Science Unit 4 Sprint 3 Challenge*
#
# In this sprint challenge, you'll explore some of the cutting edge of Data Science. This week we studied several famous neural network architectures:
# recurrent neural networks (RNNs), long short-term memory (LSTMs), convolutional neural networks (CNNs), and Generative Adverserial Networks (GANs). In this sprint challenge, you will revisit these models. Remember, we are testing your knowledge of these architectures not your ability to fit a model with high accuracy.
#
# __*Caution:*__ these approaches can be pretty heavy computationally. All problems were designed so that you should be able to achieve results within at most 5-10 minutes of runtime on Colab or a comparable environment. If something is running longer, doublecheck your approach!
#
# ## Challenge Objectives
# *You should be able to:*
# * <a href="#p1">Part 1</a>: Train a RNN classification model
# * <a href="#p2">Part 2</a>: Utilize a pre-trained CNN for objective detection
# * <a href="#p3">Part 3</a>: Describe the difference between a discriminator and generator in a GAN
# * <a href="#p4">Part 4</a>: Describe yourself as a Data Science and elucidate your vision of AI
# <a id="p1"></a>
# ## Part 1 - RNNs
#
# Use an RNN to fit a multi-class classification model on reuters news articles to distinguish topics of articles. The data is already encoded properly for use in an RNN model.
#
# Your Tasks:
# - Use Keras to fit a predictive model, classifying news articles into topics.
# - Report your overall score and accuracy
#
# For reference, the [Keras IMDB sentiment classification example](https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py) will be useful, as well the RNN code we used in class.
#
# __*Note:*__ Focus on getting a running model, not on maxing accuracy with extreme data size or epoch numbers. Only revisit and push accuracy if you get everything else done!
# In[20]:
# Load the Reuters newswire topic-classification dataset: each sample is a
# list of word indices, each label an integer topic id.  num_words=None keeps
# the full vocabulary; a fixed seed makes the train/test shuffle reproducible.
from tensorflow.keras.datasets import reuters
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=None,
                                                         skip_top=0,
                                                         maxlen=None,
                                                         test_split=0.2,
                                                         seed=723812,
                                                         start_char=1,
                                                         oov_char=2,
                                                         index_from=3)
# In[21]:
# Demo of encoding
# we got the indices before now we get the word index from reuters word index.json
# word_index maps word -> integer id, the encoding used in x_train/x_test above.
word_index = reuters.get_word_index(path="reuters_word_index.json")
print(f"Iran is encoded as {word_index['iran']} in the data")
print(f"London is encoded as {word_index['london']} in the data")
print("Words are encoded as numbers in our dataset.")
# In[22]:
# Dataset sizes; labels run 0..max, so the class count is max label + 1.
print('# of training samples: {}'.format(len(x_train)))
print('# of text samples: {}'.format(len(x_test)))
num_classes = max(y_train) + 1
print('# of classes: {}'.format(num_classes))
# In[23]:
# Peek at one encoded article (list of word ids) and its topic label.
print(x_train[0])
print(y_train[0])
# In[24]:
# let's see which words are the most common:
print('pak:',word_index['pakistan'])
#print('us:', word_index['united states'])
print('peace:', word_index['peace'])
print('computer: ', word_index['computer'])
print('const:', word_index['constitution'])
# In[27]:
from keras.preprocessing.text import Tokenizer
# Vectorize the variable-length index sequences into fixed-width binary
# bag-of-words rows: one column per word id, capped at the max_words most
# frequent words.
max_words = 10000
tokenizer = Tokenizer(num_words =max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
# In[28]:
# let's do the same with y_test
import keras
# One-hot encode the integer topic labels for categorical_crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# In[29]:
# Sanity-check the transformed shapes and one example row.
print(x_train.shape)
print(x_train[0])
print(y_train.shape)
print(y_train[0])
# In[35]:
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Activation, Dropout
from keras.layers import LSTM
# NOTE(review): despite the "RNN" heading, the model below is a dense
# feed-forward classifier over the binary bag-of-words features; the LSTM,
# Embedding and sequence imports are never used.  Confirm whether an actual
# recurrent architecture was intended.
print('Build model...')
model = Sequential()
model.add(Dense(512, input_shape=(max_words, )))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# Multi-class setup: categorical_crossentropy with softmax output.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
batch_size = 64
epochs = 20
# Hold out 10% of the training data for validation during fitting.
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1)
# score[0] is the loss, score[1] the accuracy (per the metrics list above).
score = model.evaluate(x_test,y_test,batch_size=batch_size, verbose=1)
print('test loss: {}'.format(score[0]))
print('Test accuracy: {}'.format(score[1]))
# Conclusion - RNN runs, and gives pretty decent improvement over a naive model. To *really* improve the model, more playing with parameters would help. Also - RNN may well not be the best approach here, but it is at least a valid one.
# <a id="p2"></a>
# ## Part 2- CNNs
#
# ### Find the Frog
#
# Time to play "find the frog!" Use Keras and ResNet50 (pre-trained) to detect which of the following images contain frogs:
#
# <img align="left" src="https://d3i6fh83elv35t.cloudfront.net/newshour/app/uploads/2017/03/GettyImages-654745934-1024x687.jpg" width=400>
#
# In[36]:
# Install and use google_images_download to fetch sample images for the
# ResNet50 object-detection exercise.
get_ipython().system('pip install google_images_download')
# In[42]:
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
# Download up to 50 images matching "animal pond"; returns local file paths.
arguments = {"keywords": "animal pond", "limit": 50, "print_urls": True}
absolute_image_paths = response.download(arguments)
# At time of writing at least a few do, but since the Internet changes - it is possible your 5 won't. You can easily verify yourself, and (once you have working code) increase the number of images you pull to be more sure of getting a frog. Your goal is to validly run ResNet50 on the input images - don't worry about tuning or improving the model.
#
# *Hint* - ResNet 50 doesn't just return "frog". The three labels it has for frogs are: `bullfrog, tree frog, tailed frog`
#
# *Stretch goal* - also check for fish.
# In[43]:
# TODO - your code!
import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from IPython.display import Image
# response.download returns (paths_dict, errors); index [0] is the dict keyed
# by the search phrase.
images = absolute_image_paths[0]['animal pond']
# NOTE(review): img_list_predict is not defined anywhere in this chunk --
# this raises NameError unless it is defined elsewhere in the notebook.
img_list_predict(images)
# <a id="p3"></a>
# ## Part 3 - Generative Adversarial Networks (GANs)
#
# Describe the difference between a discriminator and generator in a GAN in your own words.
#
# __*Your Answer:*__
# #### Generator ####
# A generator creates an image from noise and tries to fool the discriminator in thinking that it is a real image.
#
# #### Discriminator ####
# The job of a discriminator is to look at the image and be able to tell it from real to fake
#
# How GAN works is the generator creates image by taking in random numbers/vectors. The image is then fed into the discrimnator along with other images taken from the actual dataset. It the nreturns probability between 0 and 1 of whether it is fake or not with 0 representing a fake and 1 representing an authentic image. Generator tries to get better at creating images and discriminator is trying to get better at identifying them as authentic or fake. The goal of the model is to get to the point where the generator can create images/input/features that cannot be identified as fake by the discrminator i.e. they look as real as belonging to the dataset that it is trained on.
#
# <a id="p4"></a>
# ## Part 4 - More...
# Answer the following questions, with a target audience of a fellow Data Scientist:
#
# - What do you consider your strongest area, as a Data Scientist?
# - What area of Data Science would you most like to learn more about, and why?
# - Where do you think Data Science will be in 5 years?
# - What are the treats posed by AI to our society?
# - How do you think we can counteract those threats?
# - Do you think achieving General Artifical Intelligence is ever possible?
#
# A few sentences per answer is fine - only elaborate if time allows.
# So far, I have had the most fun with neural networks and all the remarkable things you can do with them. I wouldn't say that is my strongest area but it is definitely the area I want to advance in and master. I would love to learn more about object detection and computer vision, partially because I want a Knight Rider like car that takes me places and partially because I can envision many practical applications of it from helping in surgeries to self-driving cars. I believe data science can really change the world and society for the better if put to good use. Right now, there is a lot of hype around the field and a lot of companies have to figure out how to use it in the best way possible. But if it is not used for malicious intent, it could produce great solution to a lot of problems that exist in our society like poverty, inequality, injustice, poor health, and many more.
#
# The question of whether AI poses threats to the society can only be answered if we know who controls it. If it is facial recognition software to help police officers identify past convicts in a robbery, then it could be very beneficial. But if it is used to give them drones to spy on poor, crime-ridden neighborhoods like they do in Baltimore, then it can be seen as a threat to personal space, freedom and privacy. Same goes for all the projects that big companies like Google, Facebook, Microsoft are working on. One of the biggest threats that you can envision is the development of Skynet-like programs. We already have DARPA funding research on autonomous weapons and despite the outcry by activists demanding more regulation, it has been going on.
#
# The only way to counteract these and other unseen or unfathomable threats is to regulate the field by an overseeing authority. I would not recommend stopping the research or advances in the field of AI because as mentioned above, it could potentially benefit humanity in a lot of ways. But there need to be an overseeing committee that ensures that all the research that is being done is for the greater good. A committee that has authority to oversee projects going on anywhere in the world would be the best solution so there is no prisoner's dilemma for one country vs. another to use AI research for malicious intents.
#
# While I would love to have a Terminator like companion, I don't think it is entirely possible to the point where a machine can mimic every human emotion. If we want to think of ourselves as robots, which we are in many ways i.e. being told to conform to the society, going to school, getting a job, getting married, growing old. In a lot of ways, we are already like robots and sure, we can make any robots to do that. But having a machine that can perform all that plus mimic all the human emotions/subconscious is going to be complicated. While the machines can be great at one specific task like AlphaGo is great at Go and has surpassed humans in intelligence, having one specific machine to be great at everything has a long way to go.
# ## Congratulations!
#
# Thank you for your hard work, and congratulations! You've learned a lot, and you should proudly call yourself a Data Scientist.
#
# In[44]:
from IPython.display import HTML
# Render a celebratory GIF inline in the notebook output.
HTML("""<iframe src="https://giphy.com/embed/26xivLqkv86uJzqWk" width="480" height="270" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><p><a href="https://giphy.com/gifs/mumm-champagne-saber-26xivLqkv86uJzqWk">via GIPHY</a></p>""")
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1279,
9600,
10548,
2625,
9464,
1,
12351,
2625,
5450,
1378,
293,
332,
12,
16366,
12,
6404,
418,
13,
82,
18,
13,
33103,
8356,
13,
785,
... | 3.217322 | 3,741 |
# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2019
# All rights reserved
# @Author: 'Wu Dong <wudong@eastwu.cn>'
# @Time: '2020-03-17 15:37'
from pre_request.exception import ParamsValueError
from pre_request.filters.base import BaseFilter
class LengthFilter(BaseFilter):
    """Filter that validates the length of a string (or list) request value
    against the rule's gt/gte/lt/lte bounds.
    """

    # Error codes reported when the corresponding length bound is violated.
    length_code_gt = 574
    length_code_gte = 575
    length_code_lt = 576
    length_code_lte = 577

    illegal_code = 580

    def fmt_error_message(self, code):
        """Format a human-readable error message for the given error code.

        Compares against the class-level code constants instead of repeating
        the magic numbers 574-577, so the messages cannot drift out of sync
        with the declared codes.
        """
        if code == self.length_code_gt:
            return "%s field content length must be greater than %s" % (self.key, str(self.rule.gt))

        if code == self.length_code_gte:
            return "%s field content length must be greater than or equal to %s" % (self.key, str(self.rule.gte))

        if code == self.length_code_lt:
            return "%s field content length must be less than %s" % (self.key, str(self.rule.lt))

        if code == self.length_code_lte:
            return "%s field content length must be less than or equal to %s" % (self.key, str(self.rule.lte))

        return "%s field fails the 'LengthFilter' filter check" % self.key

    def filter_required(self):
        """Return True if this filter must run for the current rule/value."""
        # Optional fields with no value never need a length check.
        if not self.rule.required and self.value is None:
            return False

        # Length checks only apply to sized direct types.
        if self.rule.direct_type not in [str, list]:
            return False

        # Run only when at least one length bound is configured on the rule.
        if self.rule.gt is not None:
            return True

        if self.rule.gte is not None:
            return True

        if self.rule.lt is not None:
            return True

        if self.rule.lte is not None:
            return True

        return False
| [
2,
5145,
14,
14629,
14,
12001,
14,
29412,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
357,
34,
8,
18027,
28831,
11,
13130,
198,
2,
1439,
2489,
10395,
198,
2,
2488,
13838,
25,
705,
... | 2.105943 | 774 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""command.py: Interface to command line executables."""
from optparse import OptionParser
import logging
from os import path
from subprocess import PIPE, Popen
from time import time
__author__ = "Rami Al-Rfou"
__email__ = "rmyeid gmail"
LOG_FORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"
class Error(Exception):
    """Basic exception type to be extended for specific module exceptions."""


class ExecutionError(Error):
    """Raised if the command fails to execute."""
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="Input file")
parser.add_option("-l", "--log", dest="log", help="log verbosity level",
default="INFO")
(options, args) = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
logging.basicConfig(level=numeric_level, format=LOG_FORMAT)
main(options, args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
21812,
13,
9078,
25,
26491,
284,
3141,
1627,
3121,
2977,
526,
15931,
198,
198,
6738,
2172,
29572,
1330,
160... | 2.878698 | 338 |
import numpy as np
from perfect_information_game.games import Chess
from perfect_information_game.utils import iter_product
from perfect_information_game.tablebases import get_verified_chess_subclass
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2818,
62,
17018,
62,
6057,
13,
19966,
1330,
25774,
198,
6738,
2818,
62,
17018,
62,
6057,
13,
26791,
1330,
11629,
62,
11167,
198,
6738,
2818,
62,
17018,
62,
6057,
13,
11487,
65,
1386,
1330,
651... | 3.941176 | 51 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.io.pwscf import PWInput, PWInputError, PWOutput
from pymatgen.util.testing import PymatgenTest
from pymatgen.core import Lattice, Structure
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
357,
66,
8,
350,
4948,
265,
5235,
7712,
4816,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
... | 2.932203 | 118 |
from django.contrib import admin
from .models import Contact
admin.site.register(Contact, ContactAdmin) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
14039,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
17829,
11,
14039,
46787,
8
] | 3.785714 | 28 |
# Generated by Django 2.0 on 2019-05-26 02:08
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
13130,
12,
2713,
12,
2075,
7816,
25,
2919,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import networkx as nx
import pandas as pd
from pandapower.topology.create_graph import create_nxgraph
def connected_component(mg, bus, notravbuses=()):
    """
    Finds all buses in a NetworkX graph that are connected to a certain bus.

    INPUT:
        **mg** (NetworkX graph) - NetworkX Graph or MultiGraph that represents a pandapower network.

        **bus** (integer) - Index of the bus at which the search for connected components originates

    OPTIONAL:
     **notravbuses** (iterable) - Indeces of notravbuses: lines connected to these buses are
                                  not being considered in the graph

    RETURN:
        **cc** (generator) - Returns a generator that yields all buses connected to the input bus

    EXAMPLE:
         import pandapower.topology as top

         mg = top.create_nx_graph(net)

         cc = top.connected_component(mg, 5)
    """
    # Iterative depth-first search: an explicit stack avoids hitting Python's
    # recursion limit on large networks.  The default was changed from a
    # mutable list to an immutable tuple (same membership semantics).
    yield bus
    visited = {bus}
    stack = [(bus, iter(mg[bus]))]
    while stack:
        # Only the neighbor iterator on top of the stack is consumed here.
        _, children = stack[-1]
        try:
            child = next(children)
            if child not in visited:
                yield child
                visited.add(child)
                # notravbuses are yielded but never traversed *through*.
                if child not in notravbuses:
                    stack.append((child, iter(mg[child])))
        except StopIteration:
            stack.pop()
def connected_components(mg, notravbuses=frozenset()):
    """
    Clusters all buses in a NetworkX graph that are connected to each other.

    INPUT:
        **mg** (NetworkX graph) - NetworkX Graph or MultiGraph that represents a pandapower network.

    OPTIONAL:
     **notravbuses** (set) - Indeces of notravbuses: lines connected to these buses are
                             not being considered in the graph

    RETURN:
        **cc** (generator) - Returns a generator that yields all clusters of buses connected
                             to each other.

    EXAMPLE:
         import pandapower.topology as top

         mg = top.create_nx_graph(net)

         cc = top.connected_components(net, 5)
    """
    # Default changed from the shared mutable `set()` to an immutable
    # frozenset (same set-difference and membership semantics).
    # notravbuses are never used as search seeds, only reached from others.
    nodes = set(mg.nodes()) - notravbuses
    while nodes:
        cc = set(connected_component(mg, nodes.pop(), notravbuses=notravbuses))
        yield cc
        nodes -= cc
    # the above does not work if two notravbuses are directly connected
    for f, t in mg.edges():
        if f in notravbuses and t in notravbuses:
            yield {f, t}
def calc_distance_to_bus(net, bus, respect_switches=True, nogobuses=None,
                         notravbuses=None):
    """
    Calculates the shortest distance between a source bus and all buses connected to it.

    INPUT:
        **net** (PandapowerNet) - Variable that contains a pandapower network.

        **bus** (integer) - Index of the source bus.

    OPTIONAL:
        **respect_switches** (boolean, True) - True: open line switches are being considered
                                               (no edge between nodes)
                                               False: open line switches are being ignored

        **nogobuses** (integer/list, None) - nogobuses are not being considered

        **notravbuses** (integer/list, None) - lines connected to these buses are not being
                                               considered

    RETURN:
        **dist** - Returns a pandas series with containing all distances to the source bus
                   in km.

    EXAMPLE:
         import pandapower.topology as top

         dist = top.calc_distance_to_bus(net, 5)
    """
    # Bug fix: the caller-supplied notravbuses was previously hard-coded to
    # None in this call, silently ignoring the parameter.
    g = create_nxgraph(net, respect_switches=respect_switches,
                       nogobuses=nogobuses, notravbuses=notravbuses)
    # Edge weights are line lengths, so the Dijkstra path lengths are in km.
    return pd.Series(nx.single_source_dijkstra_path_length(g, bus))
def unsupplied_buses(net, mg=None, in_service_only=False, slacks=None):
    """
    Finds buses, that are not connected to an external grid.

    INPUT:
        **net** (PandapowerNet) - variable that contains a pandapower network

    OPTIONAL:
        **mg** (NetworkX graph) - NetworkX Graph or MultiGraph that represents a pandapower network.

    RETURN:
        **ub** (set) - unsupplied buses

    EXAMPLE:
         import pandapower.topology as top

         top.unsupplied_buses(net)
    """
    # Use explicit `is None` checks: the previous `mg or ...` / `slacks or ...`
    # treated an empty (falsy) graph or an intentionally empty slack set as
    # "not given" and silently recomputed them from net.
    if mg is None:
        mg = create_nxgraph(net)
    if slacks is None:
        slacks = set(net.ext_grid[net.ext_grid.in_service == True].bus.values)
    not_supplied = set()
    # Any connected component that contains no slack (ext_grid) bus is
    # unsupplied in its entirety.
    for cc in nx.connected_components(mg):
        if not set(cc) & slacks:
            not_supplied.update(set(cc))

    return not_supplied
def determine_stubs(net, roots=None, mg=None):
    """
    Finds stubs in a network. Open switches are being ignored. Results are being written in a new
    column in the bus table ("on_stub") and line table ("is_stub") as True/False value.

    INPUT:
        **net** (PandapowerNet) - Variable that contains a pandapower network.

    OPTIONAL:
        **roots** (integer/list, None) - Indeces of buses that should be excluded (by default, the
                                         ext_grid buses will be set as roots)

    RETURN:
        **stubs** (set) - Indices of buses that are not two-connected to the roots
                          (the docstring previously said None, but the code returns this set).

    EXAMPLE:
         import pandapower.topology as top

         top.determine_stubs(net, roots = [0, 1])
    """
    if mg is None:
        mg = create_nxgraph(net, respect_switches=False)
    # remove buses with degree lower 2 until none left
    # NOTE(review): `roots or ...` treats an *empty* roots collection as "not
    # given" and falls back to the ext_grid buses -- confirm that is intended.
    roots = roots or set(net.ext_grid.bus)
#    mg.add_edges_from((a, b) for a, b in zip(list(roots)[:-1], list(roots)[1:]))
#    while True:
#        dgo = {g for g, d in list(mg.degree().items()) if d < 2} #- roots
#        if not dgo:
#            break
#        mg.remove_nodes_from(dgo)
#    n1_buses = mg.nodes()
    # get_2connected_buses is not defined in this chunk -- presumably a helper
    # elsewhere in this module returning the buses reachable from the roots
    # without passing over a stub; confirm before relying on this description.
    n1_buses = get_2connected_buses(mg, roots)
    # Mark every bus as a stub, then clear the flag for the well-connected ones.
    net.bus["on_stub"] = True
    net.bus.loc[n1_buses, "on_stub"] = False
    # A line is a stub unless BOTH of its end buses are well-connected.
    net.line["is_stub"] = ~((net.line.from_bus.isin(n1_buses)) & (net.line.to_bus.isin(n1_buses)))
    stubs = set(net.bus.index) - set(n1_buses)
    return stubs
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
15069,
357,
66,
8,
1584,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
3086,
6682,
290,
6682,
201,
198,
2,
4482,
8987,
357,
40... | 1.975084 | 3,572 |