id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
# 42765 | <reponame>ravi-ojha/py-unique-names-generator
# <filename>unique_names_generator/data/animals.py
# Alphabetically sorted vocabulary of animal names used for generating
# unique names. Stored as one whitespace-delimited literal and split at
# import time; the resulting list is identical to the explicit list form.
ANIMALS = """
aardvark aardwolf albatross alligator alpaca amphibian anaconda angelfish
anglerfish ant anteater antelope antlion ape aphid armadillo
asp baboon badger bandicoot barnacle barracuda basilisk bass
bat bear beaver bedbug bee beetle bird bison
blackbird boa boar bobcat bobolink bonobo booby bovid
bug butterfly buzzard camel canid canidae capybara cardinal
caribou carp cat caterpillar catfish catshark cattle centipede
cephalopod chameleon cheetah chickadee chicken chimpanzee chinchilla chipmunk
cicada clam clownfish cobra cockroach cod condor constrictor
coral cougar cow coyote crab crane crawdad crayfish
cricket crocodile crow cuckoo damselfly deer dingo dinosaur
dog dolphin dormouse dove dragon dragonfly duck eagle
earthworm earwig echidna eel egret elephant elk emu
ermine falcon felidae ferret finch firefly fish flamingo
flea fly flyingfish fowl fox frog galliform gamefowl
gayal gazelle gecko gerbil gibbon giraffe goat goldfish
goose gopher gorilla grasshopper grouse guan guanaco guineafowl
gull guppy haddock halibut hamster hare harrier hawk
hedgehog heron herring hippopotamus hookworm hornet horse hoverfly
hummingbird hyena iguana impala jackal jaguar jay jellyfish
junglefowl kangaroo kingfisher kite kiwi koala koi krill
ladybug lamprey landfowl lark leech lemming lemur leopard
leopon limpet lion lizard llama lobster locust loon
louse lungfish lynx macaw mackerel magpie mammal manatee
mandrill marlin marmoset marmot marsupial marten mastodon meadowlark
meerkat mink minnow mite mockingbird mole mollusk mongoose
moose mosquito moth mouse mule muskox narwhal newt
nightingale ocelot octopus opossum orangutan orca ostrich otter
owl ox panda panther parakeet parrot parrotfish partridge
peacock peafowl pelican penguin perch pheasant pigeon pike
pinniped piranha planarian platypus pony porcupine porpoise possum
prawn primate ptarmigan puffin puma python quail quelea
quokka rabbit raccoon rat rattlesnake raven reindeer reptile
rhinoceros roadrunner rodent rook rooster roundworm sailfish salamander
salmon sawfish scallop scorpion seahorse shark sheep shrew
shrimp silkworm silverfish skink skunk sloth slug smelt
snail snake snipe sole sparrow spider spoonbill squid
squirrel starfish stingray stoat stork sturgeon swallow swan
swift swordfish swordtail tahr takin tapir tarantula tarsier
termite tern thrush tick tiger tiglon toad tortoise
toucan trout tuna turkey turtle tyrannosaurus unicorn urial
vicuna viper vole vulture wallaby walrus warbler wasp
weasel whale whippet whitefish wildcat wildebeest wildfowl wolf
wolverine wombat woodpecker worm wren xerinae yak zebra
""".split()
# | StarcoderdataPython |
# 2515 |
import socketserver
import socket
import sys
import threading
import json
import queue
import time
import datetime
import traceback
class TCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server that allows quick rebinding of a recently used address.

    The original code overrode ``server_bind`` just to set SO_REUSEADDR, which
    duplicates what the base class already does when ``allow_reuse_address`` is
    true, and it forgot to refresh ``self.server_address`` from
    ``getsockname()`` afterwards (needed e.g. when binding to port 0).
    Setting the class flag lets the base-class ``server_bind`` do both.
    """
    allow_reuse_address = True
class Listener(threading.Thread):
    """Thread that accepts inbound TCP connections and hands each to a handler.

    Expected kwargs (passed via ``Listener(kwargs={...})``): ``host``, ``port``,
    ``handler`` (a callable such as a Handler subclass) and ``server`` (the
    owning Server instance).
    """
    def run(self):
        # NOTE(review): reads threading.Thread's private ``_kwargs`` attribute
        # instead of capturing the kwargs in an __init__ override — fragile
        # across Python versions.
        kwargs = self._kwargs
        print("Listener: Started: %s" % kwargs)
        Handler = self._kwargs["handler"]
        server = self._kwargs["server"]
        class Server(socketserver.BaseRequestHandler):
            def handle(self):
                print("Listener: Connection request received: %s" % kwargs)
                # Each accepted connection is wrapped by the configured handler.
                Handler(server, self.request)
        # The TCP server is only created (and the port bound) once the thread
        # runs, so calling stop() before run() raises AttributeError.
        self.server = TCPServer((kwargs["host"], kwargs["port"]), Server)
        self.server.serve_forever()
    def stop(self):
        self.server.shutdown()
        self.server.server_close()
class Connector(threading.Thread):
    """Thread that keeps an outbound TCP connection alive, reconnecting on failure.

    Expected kwargs: ``host``, ``port``, ``handler`` and ``server`` (same
    contract as Listener). Retries roughly once per second until stop().
    """
    def __init__(self, *arg, **kw):
        # Cooperative shutdown flag checked by the reconnect loop.
        self.is_stopping = False
        threading.Thread.__init__(self, *arg, **kw)
    def run(self):
        print("Connector: Started: %s" % self._kwargs)
        while not self.is_stopping:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                try:
                    sock.connect((self._kwargs["host"], self._kwargs["port"]))
                    print("Connector: Connected: %s" % self._kwargs)
                    # The handler call blocks for the lifetime of the connection.
                    self._kwargs["handler"](self._kwargs["server"], sock)
                except Exception as e:
                    # Broad catch is deliberate best-effort: log and retry.
                    print(e)
                    traceback.print_exc()
            finally:
                sock.close()
            # Throttle reconnect attempts.
            time.sleep(1)
    def stop(self):
        # NOTE(review): only sets the flag; a blocked connect()/handler call
        # keeps the thread alive until it returns.
        self.is_stopping = True
class Handler(object):
    """Base class for per-connection handlers.

    Subclasses override ``handle`` (and optionally the class attributes) to
    implement the protocol; ``__init__`` wraps the socket in a file object
    and immediately dispatches to ``handle``.
    """
    # Defaults, overridable per subclass.
    encoding = "utf-8"
    binary = False
    filemode = "r"
    def __init__(self, server, conn):
        self.server = server
        self.conn = conn
        self.makefile()
        self.handle()
    def makefile(self):
        # Binary mode appends "b"; text mode gets an explicit encoding.
        if self.binary:
            self.file = self.conn.makefile(mode=self.filemode + "b")
        else:
            self.file = self.conn.makefile(mode=self.filemode,
                                           encoding=self.encoding)
    def handle(self):
        """self.conn is a socket object, self.file a file wrapper for that
        socket"""
    def __hash__(self):
        return id(self)
class ReceiveHandler(Handler):
    """Handler whose socket file is opened for reading (incoming data)."""
    filemode = "r"
class SendHandler(Handler):
    """Handler whose socket file is opened for writing (outgoing data)."""
    filemode = "w"
class Server(object):
    """Owns a set of Listener/Connector threads described by a config dict.

    ``handlers`` maps handler names to Handler subclasses. ``configure`` may
    be called repeatedly; it starts and stops connection threads so that the
    running set matches ``config["connections"]``.
    """
    def __init__(self, handlers):
        self.handlers = handlers
        self.config = None
        # Maps connection_key(conn) -> running Listener/Connector thread.
        self.servers = {}
    def configure(self, config):
        self.config = config
        wanted = {}
        for conn in config["connections"]:
            wanted[self.connection_key(conn)] = conn
        # Diff the desired set against what is currently running:
        # start missing connections first, then stop the obsolete ones.
        for key in wanted.keys() - self.servers.keys():
            thread = self.start_connection(wanted[key])
            thread.start()
            self.servers[key] = thread
        for key in self.servers.keys() - wanted.keys():
            self.servers.pop(key).stop()
    def connection_key(self, connection):
        # Canonical JSON text of the connection dict, usable as a dict key.
        return json.dumps(connection, sort_keys=True, separators=(',', ':'))
    def start_connection(self, connection):
        handler = self.handlers[connection["handler"]]
        # Address format: "tcp[:host]:port"; host defaults to 0.0.0.0 and
        # port to 1024 when omitted.
        parts = connection["address"].split(":")
        assert parts[0] == "tcp"
        host, port = "0.0.0.0", 1024
        if len(parts) == 2:
            port = parts[1]
        elif len(parts) == 3:
            host, port = parts[1:]
        thread_cls = {"listen": Listener, "connect": Connector}[connection["type"]]
        return thread_cls(kwargs={"server": self, "host": host,
                                  "port": int(port), "handler": handler})
def run(config, handlers):
    """Create a Server for *handlers*, apply *config*, and return it."""
    server = Server(handlers)
    server.configure(config)
    return server
# | StarcoderdataPython |
# 3343381 |
'''
Embedded traffic light Flask server.
This module contains a Flask server containing handlers for the following paths:
- GET /store/live
Returns a stream from a .jpg generator using the Raspberry camera.
- GET /store/status
Returns the current status of the traffic light.
- POST /traffic-light/animation
Stores a new submitted animation json file.
- POST /traffic-light/change_lights
Sets the traffic lights to display certain passed animations.
Module tree:
.
├── store
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ └── views.cpython-36.pyc
│ └── views.py
├── __init__.py
├── models
│ ├── camera.py
│ ├── __init__.py
│ ├── light_controller.py
│ └── __pycache__
│ ├── camera.cpython-36.pyc
│ ├── __init__.cpython-36.pyc
│ └── light_controller.cpython-36.pyc
├── __pycache__
│ └── __init__.cpython-36.pyc
├── static
│ └── animations
│ ├── 3sec.json
│ ├── empty.json
│ └── filled.json
├── templates
└── traffic_light
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-36.pyc
│ └── views.cpython-36.pyc
└── views.py
Usage:
from server import create_app
app = create_app()
app.run(host='0.0.0.0', debug=True)
'''
from flask import Flask
from server.store.views import store_blueprint
def create_app(name: str = __name__, config: str = 'flask.cfg'):
    '''
    Build and configure the Flask application.

    Parameters:
    -----------
    name : str (default: __name__)
        Name of the server.
    config : str (default: 'flask.cfg')
        Configuration file to be loaded (relative to the instance folder).

    Returns:
    --------
    A Flask server app with the store blueprint registered.
    '''
    flask_app = Flask(name,
                      template_folder='static/templates',
                      instance_relative_config=True)
    flask_app.config.from_pyfile(config)
    flask_app.register_blueprint(store_blueprint)
    return flask_app
# | StarcoderdataPython |
# 197569 | <gh_stars>0
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
ExtractorError,
clean_html,
get_element_by_id,
)
class VeeHDIE(InfoExtractor):
    """Extractor for veehd.com video pages."""
    _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)'

    # Seems VeeHD videos have multiple copies on several servers, all of
    # whom have different MD5 checksums, so omit md5 field in all tests
    _TESTS = [{
        'url': 'http://veehd.com/video/4639434_Solar-Sinter',
        'info_dict': {
            'id': '4639434',
            'ext': 'mp4',
            'title': 'Solar Sinter',
            'uploader_id': 'VideoEyes',
            'description': 'md5:46a840e8692ddbaffb5f81d9885cb457',
        },
        'skip': 'Video deleted',
    }, {
        'url': 'http://veehd.com/video/4905758_Elysian-Fields-Channeling',
        'info_dict': {
            'id': '4905758',
            'ext': 'mp4',
            'title': 'Elysian Fields - Channeling',
            'description': 'md5:360e4e95fdab58aefbea0f2a19e5604b',
            'uploader_id': 'spotted',
        }
    }, {
        'url': 'http://veehd.com/video/2046729_2012-2009-DivX-Trailer',
        'info_dict': {
            'id': '2046729',
            'ext': 'avi',
            'title': '2012 (2009) DivX Trailer',
            'description': 'md5:75435ee95255e6a9838ac6f6f3a2396b',
            'uploader_id': 'Movie_Trailers',
        }
    }]

    def _real_extract(self, url):
        """Resolve *url* to an info dict, trying three sources for the video
        URL in order: flash player config JSON, a divx <embed> tag, then a
        fallback iframe player page."""
        video_id = self._match_id(url)

        # VeeHD seems to send garbage on the first request.
        # See https://github.com/rg3/youtube-dl/issues/2102
        self._download_webpage(url, video_id, 'Requesting webpage')
        webpage = self._download_webpage(url, video_id)

        if 'This video has been removed<' in webpage:
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

        player_path = self._search_regex(
            r'\$\("#playeriframe"\).attr\({src : "(.+?)"',
            webpage, 'player path')
        player_url = compat_urlparse.urljoin(url, player_path)

        self._download_webpage(player_url, video_id, 'Requesting player page')
        player_page = self._download_webpage(
            player_url, video_id, 'Downloading player page')

        video_url = None

        # Source 1: the flash player's config JSON.
        config_json = self._search_regex(
            r'value=\'config=({.+?})\'', player_page, 'config json', default=None)

        if config_json:
            config = json.loads(config_json)
            video_url = compat_urllib_parse_unquote(config['clip']['url'])

        # Source 2: a direct divx <embed> tag.
        if not video_url:
            video_url = self._html_search_regex(
                r'<embed[^>]+type="video/divx"[^>]+src="([^"]+)"',
                player_page, 'video url', default=None)

        # Source 3: a nested iframe player page.
        if not video_url:
            iframe_src = self._search_regex(
                r'<iframe[^>]+src="/?([^"]+)"', player_page, 'iframe url')
            iframe_url = 'http://veehd.com/%s' % iframe_src
            self._download_webpage(iframe_url, video_id, 'Requesting iframe page')
            iframe_page = self._download_webpage(
                iframe_url, video_id, 'Downloading iframe page')

            video_url = self._search_regex(
                r"file\s*:\s*'([^']+)'", iframe_page, 'video url')

        # Metadata comes from the original watch page, not the player page.
        title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
        uploader_id = self._html_search_regex(
            r'<a href="/profile/\d+">(.+?)</a>',
            webpage, 'uploader')
        thumbnail = self._search_regex(
            r'<img id="veehdpreview" src="(.+?)"',
            webpage, 'thumbnail')
        description = self._html_search_regex(
            r'<td class="infodropdown".*?<div>(.*?)<ul',
            webpage, 'description', flags=re.DOTALL)

        return {
            '_type': 'video',
            'id': video_id,
            'title': title,
            'url': video_url,
            'uploader_id': uploader_id,
            'thumbnail': thumbnail,
            'description': description,
        }
# | StarcoderdataPython |
# 3245297 |
import requests
import base64
class API_REST:
    """Thin client for the gate-control REST service at a hard-coded host.

    NOTE(review): both methods PUT to the same ``/output/{card_id}`` endpoint;
    inputGate presumably should target an input-specific route — confirm
    against the service API. TLS verification is disabled (verify=False),
    which would be unsafe if the scheme ever changed to https.
    """
    def inputGate(self,card_id,gate):
        url = f'http://18.213.76.34/output/{card_id}'
        print("RESPONSE API INPUT")
        print(f'INPUT[] URL {url} gate={gate} card_id={card_id}')
        # PUT the gate number and return the decoded JSON body.
        response = requests.put(url,json={"gate":gate},verify=False).json()
        return response
    def outputGate(self,card_id,gate):
        url = f'http://18.213.76.34/output/{card_id}'
        print(f'OUTPUT:[] URL {url} gate={gate} card_id={card_id}')
        # PUT the gate number and return the decoded JSON body.
        response = requests.put(url,json={"gate":gate},verify=False).json()
        print("RESPONSE API")
        print(response)
        return response
#api = API_REST()
#api.inputGate("0008949774",2)
# | StarcoderdataPython |
# 66053 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
gyroid.util
===========
"""
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from matplotlib import colors
from mayavi import mlab
from .unitcell import UnitCell
from .group import Group
from .grid import Grid
from .basis import Basis
__all__ = [
"render_structure_1d",
"render_structure_2d",
"render_structure_3d",
"prepare_scft_input"]
def prepare_scft_input(dim,grid_num_vec,cryst_system,
                       cryst_param_vec,sym_group,basis_grid_vec,basis_c,
                       data_file="field_in.mat",show_img=False,
                       save_img=False,img_file="field_in.png",
                       **kwargs):
    ''' Build a SABF basis for the given crystal system / space group and
    render the structure defined by *basis_c* into an SCFT input data file
    (and optionally an image), dispatching on *dim* (1, 2 or 3).
    '''
    b = "Bravais"
    uc = UnitCell(dim,cryst_system,cryst_param_vec);
    g = Group(dim,b,uc.shape,sym_group)
    gd = Grid(basis_grid_vec,g)
    bs = Basis(g,gd)
    # Pad (or truncate) the supplied coefficients to the basis size bs.N.
    c = np.zeros(bs.N)
    N = basis_c.size
    if N < bs.N:
        c[0:N] = basis_c
    else:
        c = basis_c[0:bs.N]
    if dim == 1:
        render_structure_1d(bs,gd,grid_num_vec[0],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
    if dim == 2:
        render_structure_2d(bs,gd,grid_num_vec[0],grid_num_vec[1],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
    if dim == 3:
        render_structure_3d(bs,gd,grid_num_vec[0],grid_num_vec[1],
                            grid_num_vec[2],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
def render_structure_1d(basis,grid,Na,c,
                        save_data=True,data_name="struct1d.mat",
                        save_img=True,show_img=True,
                        img_name="struct1d.png",
                        **kwargs):
    ''' Calculate and render 1D structure for given SABF and unit cell.
    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: the FFT grid used to reconstruct the structure
    :type grid: :class:`Grid`
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save data in file with Matlab mat format
    :type save_data: bool
    :param data_name: the file name of the data file
    :type data_name: string
    :param save_img: if True, save image in file, the format is determined by the extension of the image file name
    :type save_img: bool
    :param img_name: the file name of the image file
    :type img_name: string
    :param show_img: if True, show image on the screen
    :type show_img: bool
    :param kwargs: any extra key words arguments will be passed to plot functions
    '''
    #struct = basis.generate_structure(Na,c)
    struct = basis.generate_structure_by_fft((Na,),c,grid)
    # For debug only
    #print basis.fft2sabf(np.fft.fftn(struct),grid)
    # Physical coordinates along the a-axis of the unit cell.
    a = 1.0 * basis.shape.h[0,0]
    rx = np.array([a*i/Na for i in np.arange(Na)])
    if save_data:
        scipy.io.savemat(data_name,{"rx":rx,"struct":struct})
    if save_img or show_img:
        plt.plot(rx,struct,**kwargs)
        if save_img:
            plt.savefig(img_name)
        if show_img:
            plt.show()
    return rx,struct
def render_structure_2d(basis,grid,Na,Nb,c,
                        save_data=True,data_name="struct2d.mat",
                        save_img=True,show_img=True,
                        img_name="struct2d.png",
                        levels=None,cmap=None,
                        **kwargs):
    ''' Calculate and render 2D structure for given SABF and unit cell.
    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: the FFT grid used to reconstruct the structure
    :type grid: :class:`Grid`
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param Nb: number of grids in **b** of the unit cell.
    :type Nb: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save data in file with Matlab mat format
    :type save_data: bool
    :param data_name: the file name of the data file
    :type data_name: string
    :param save_img: if True, save image in file, the format is determined by the extension of the image file name
    :type save_img: bool
    :param img_name: the file name of the image file
    :type img_name: string
    :param show_img: if True, show image on the screen
    :type show_img: bool
    :param kwargs: any extra key words arguments will be passed to plot functions
    '''
    # If generate_structure_by_fft failed
    # Give generate_structure a try.
    #struct = basis.generate_structure((Na,Nb),c)
    struct = basis.generate_structure_by_fft((Na,Nb),c,grid)
    # For debug only.
    # Fix: these were Python 2 print statements, which are a SyntaxError
    # under Python 3 and broke the whole module on import.
    print("Input c: ", c)
    print("c from constructed structure: ")
    print(basis.fft2sabf(np.fft.fftn(struct), grid))
    # Physical (x, y) coordinates of each grid point in the unit cell.
    rx = np.zeros((Na,Nb))
    ry = np.zeros((Na,Nb))
    for (i,j) in np.ndindex(Na,Nb):
        x = (1.0*np.array([i,j])) / (Na,Nb)
        rx[i,j],ry[i,j] = np.dot(x,basis.shape.h)
    if save_data:
        scipy.io.savemat(data_name,{"rx":rx,"ry":ry,"struct":struct})
    if save_img or show_img:
        dx = rx.max() - rx.min()
        dy = ry.max() - ry.min()
        w,h = plt.figaspect(float(dy/dx)) # float is must
        # No frame, white background, w/h aspect ratio figure
        fig = plt.figure(figsize=(w,h),frameon=False,
                         dpi=80,facecolor='w')
        # full figure subplot, no border, no axes
        ax = fig.add_axes([0,0,1,1],frameon=False,axisbg='w')
        # no ticks
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Default: there are 256 contour levels
        if levels is None:
            step = (struct.max() - struct.min()) / 256
            levels = np.arange(struct.min(),struct.max()+step,step)
        # Default: colormap is monochromatic red
        if cmap is None:
            clr = np.zeros((256,3))
            for i in np.arange(256):
                clr[i,0] = i / 255.0
            cmap = colors.ListedColormap(clr)
        # actual plot
        ax.contourf(rx,ry,struct,levels=levels,
                    cmap=cmap,antialiased=False,**kwargs)
        #ax.contourf(rx,ry,struct)
        if save_img:
            plt.savefig(img_name)
        if show_img:
            plt.show()
    return rx,ry,struct
def render_structure_3d(basis,grid,Na,Nb,Nc,c,
                        save_data=True,data_name="struct3d.mat",
                        save_img=True,show_img=True,
                        img_name="struct3d.png",
                        levels=None,cmap=None,
                        **kwargs):
    ''' Calculate and render 3D structure for given SABF and unit cell.
    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: the FFT grid used to reconstruct the structure
    :type grid: :class:`Grid`
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param Nb: number of grids in **b** of the unit cell.
    :type Nb: integer
    :param Nc: number of grids in **c** of the unit cell.
    :type Nc: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save data in file with Matlab mat format
    :type save_data: bool
    :param data_name: the file name of the data file
    :type data_name: string
    :param save_img: if True, save image in file, the format is determined by the extension of the image file name
    :type save_img: bool
    :param img_name: the file name of the image file
    :type img_name: string
    :param show_img: if True, show image on the screen
    :type show_img: bool
    :param kwargs: any extra key words arguments will be passed to plot functions
    '''
    #struct = basis.generate_structure((Na,Nb,Nc),c)
    struct = basis.generate_structure_by_fft((Na,Nb,Nc),c,grid)
    # For debug only
    #print basis.fft2sabf(np.fft.fftn(struct),grid)
    # Physical (x, y, z) coordinates of each grid point in the unit cell.
    rx = np.zeros((Na,Nb,Nc))
    ry = np.zeros((Na,Nb,Nc))
    rz = np.zeros((Na,Nb,Nc))
    for (i,j,k) in np.ndindex(Na,Nb,Nc):
        x = (1.0*np.array([i,j,k])) / (Na,Nb,Nc)
        rx[i,j,k],ry[i,j,k],rz[i,j,k] = np.dot(x,basis.shape.h)
    if save_data:
        scipy.io.savemat(data_name,
                         {"rx":rx,"ry":ry,"rz":rz,"struct":struct})
    if save_img or show_img:
        mlab.contour3d(rx,ry,rz,struct,**kwargs)
        if save_img:
            mlab.savefig(img_name)
        if show_img:
            # NOTE(review): the plot is drawn with mayavi but shown with
            # matplotlib; mlab.show() looks intended here — confirm.
            plt.show()
    return rx,ry,rz,struct
# | StarcoderdataPython |
# 77744 | <reponame>yusharon/sagemaker-xgboost-container
# <filename>test/unit/test_encoder.py
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from mock import Mock, patch
import mock
import os
from pathlib import Path
import pytest
import tempfile
from sagemaker_containers import _content_types, _errors
import xgboost as xgb
from sagemaker_xgboost_container import encoder
@pytest.mark.parametrize('target', ('42,6,9', '42.0,6.0,9.0', '42\n6\n9\n'))
def test_csv_to_dmatrix(target):
    """CSV payloads in int, float, and newline-separated form parse to a DMatrix."""
    actual = encoder.csv_to_dmatrix(target)
    assert type(actual) is xgb.DMatrix
@pytest.mark.parametrize(
    'target', ('1,2,3,12:12:12',
               '1,2,3,2019-1-1',
               '1,2,3,2019-1-1 12:12:12',
               '1,2,3,2019-1-1 12:12:12+00',
               '1,2,3,-14 days',
               '1,2,3\n1,2,c'))
def test_csv_to_dmatrix_error(target):
    """Unparseable CSV values (timestamps, intervals, letters) raise ValueError."""
    # pytest.raises replaces the try / assert False / except anti-pattern:
    # it fails cleanly both when no exception and when the wrong exception
    # type is raised.
    with pytest.raises(ValueError):
        encoder.csv_to_dmatrix(target)
@pytest.mark.parametrize('target', (b'0 0:1 5:1', b'0:1 5:1'))
def test_libsvm_to_dmatrix(target):
    """Libsvm payloads (with and without a label) parse to a DMatrix, and the
    temporary file used during parsing is removed afterwards."""
    temp_libsvm_file = tempfile.NamedTemporaryFile(delete=False)
    temp_libsvm_file_name = temp_libsvm_file.name
    assert os.path.exists(temp_libsvm_file_name)
    # Force the encoder to use our temp file so its cleanup can be observed.
    with mock.patch('sagemaker_xgboost_container.encoder.tempfile') as mock_tempfile:
        mock_tempfile.NamedTemporaryFile.return_value = temp_libsvm_file
        actual = encoder.libsvm_to_dmatrix(target)
    assert type(actual) is xgb.DMatrix
    assert not os.path.exists(temp_libsvm_file_name)
assert not os.path.exists(temp_libsvm_file_name)
@pytest.mark.parametrize(
    'target', (b'\n#\xd7\xce\x13\x00\x00\x00\n\x11\n\x06values\x12\x07:\x05\n\x03*\x06\t\x00',  # 42,6,9
               b'\n#\xd7\xce(\x00\x00\x00\n&\n\x06values\x12\x1c\x1a\x1a\n\x18\x00\x00\x00'  # 42.0,6.0,9.0
               b'\x00\x00\x00E@\x00\x00\x00\x00\x00\x00\x18@\x00\x00\x00\x00\x00\x00"@',
               b'\n#\xd7\xce\x19\x00\x00\x00\n\x17\n\x06values\x12\r:\x0b\n\x02\x01\x01\x12'  # 0:1 5:1
               b'\x02\x00\x05\x1a\x01\x06\x00\x00\x00'))
def test_recordio_protobuf_to_dmatrix(target):
    """Int, float, and sparse recordio-protobuf payloads parse to a DMatrix."""
    actual = encoder.recordio_protobuf_to_dmatrix(target)
    assert type(actual) is xgb.DMatrix
def test_sparse_recordio_protobuf_to_dmatrix():
    """Every sparse edge-case fixture under test/resources parses to a DMatrix."""
    current_path = Path(os.path.abspath(__file__))
    data_path = os.path.join(str(current_path.parent.parent), 'resources', 'data')
    files_path = os.path.join(data_path, 'recordio_protobuf', 'sparse_edge_cases')
    for filename in os.listdir(files_path):
        file_path = os.path.join(files_path, filename)
        with open(file_path, 'rb') as f:
            target = f.read()
            actual = encoder.recordio_protobuf_to_dmatrix(target)
            assert type(actual) is xgb.DMatrix
def test_decode_error():
    """Decoding an unsupported content type raises UnsupportedFormatError."""
    with pytest.raises(_errors.UnsupportedFormatError):
        encoder.decode(42, _content_types.OCTET_STREAM)
@pytest.mark.parametrize('content_type', [_content_types.JSON, _content_types.CSV])
def test_decode(content_type):
    """decode() dispatches the payload to the decoder registered for the content type."""
    decoder = Mock()
    with patch.dict(encoder._dmatrix_decoders_map, {content_type: decoder}, clear=True):
        encoder.decode(42, content_type)
        decoder.assert_called_once_with(42)
@pytest.mark.parametrize('content_type', ['text/csv; charset=UTF-8'])
def test_decode_with_complex_csv_content_type(content_type):
    """A CSV content type carrying a charset parameter is still decoded as CSV."""
    dmatrix_result = encoder.decode("42.0,6.0,9.0\n42.0,6.0,9.0", content_type)
    assert type(dmatrix_result) is xgb.DMatrix
def test_encoder_jsonlines_from_json():
    """A JSON predictions document converts to one JSON line per prediction."""
    json_response = json.dumps({'predictions': [{"predicted_label": 1, "probabilities": [0.4, 0.6]},
                                                {"predicted_label": 0, "probabilities": [0.9, 0.1]}]})
    expected_jsonlines = b'{"predicted_label": 1, "probabilities": [0.4, 0.6]}\n' \
                         b'{"predicted_label": 0, "probabilities": [0.9, 0.1]}\n'
    jsonlines_response = encoder.json_to_jsonlines(json_response)
    assert expected_jsonlines == jsonlines_response
def test_encoder_jsonlines_from_json_error():
    """A JSON document with keys other than 'predictions' is rejected."""
    bad_json_response = json.dumps({'predictions': [], 'metadata': []})
    with pytest.raises(ValueError):
        encoder.json_to_jsonlines(bad_json_response)
# | StarcoderdataPython |
# 123604 |
from output.models.nist_data.atomic.id.schema_instance.nistschema_sv_iv_atomic_id_enumeration_1_xsd.nistschema_sv_iv_atomic_id_enumeration_1 import (
NistschemaSvIvAtomicIdEnumeration1,
NistschemaSvIvAtomicIdEnumeration1Type,
Out,
)
__all__ = [
"NistschemaSvIvAtomicIdEnumeration1",
"NistschemaSvIvAtomicIdEnumeration1Type",
"Out",
]
# | StarcoderdataPython |
# 1653665 | <reponame>xMestas/pyMARS
# <filename>pymars/tests/test_readInConditions.py
""" Tests the reduction methods implemented by pyMARS """
import sys
import os
import pytest
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import cantera as ct
import readin_initial_conditions
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def relative_location(file):
    """Return the absolute path of *file* relative to the repository root."""
    file_path = os.path.join(ROOT_DIR, file)
    return file_path
# NOTE(review): decorator line mangled to "<EMAIL>" by dataset
# anonymization; likely a pytest marker — restore before running.
def testGoodInput1():
    """Read example_input_file.txt and check both parsed condition objects."""
    #test reading in example_input_file
    conditions = relative_location("example_files/example_input_file.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    print("---------------------------------------------------------------");
    print("condObject[1].pressure: " + condObject[1].pressure)
    print("condObject[1].temperature: " + condObject[1].temperature)
    print("condObject[1].species: ", condObject[1].species)
    print("condObject[1].fuel: " + condObject[1].fuel)
    print("condObject[1].oxid: " + condObject[1].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[0].fuel == "CH4:1.0"
    assert condObject[0].oxid == 'O2:1.0,N2:3.76'
    assert condObject[1].pressure == " 1.0\n"
    assert condObject[1].temperature == " 1200\n"
    assert condObject[1].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[1].fuel == "CH4:1.0"
    assert condObject[1].oxid == 'O2:1.0,N2:3.76'
# NOTE(review): decorator line mangled to "<EMAIL>" by dataset
# anonymization; likely a pytest marker — restore before running.
def testGoodInput2():
    """Read the artificial H/O2 input file and check the single condition."""
    #test reading in example_input_artificial
    conditions = relative_location("pymars/tests/example_input_artificial.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 500\n"
    assert condObject[0].species == {'H': '1.0', 'O2': '1.0'}
    assert condObject[0].fuel == "H:1.0"
    assert condObject[0].oxid == 'O2:1.0'
# NOTE(review): decorator line mangled to "<EMAIL>" by dataset
# anonymization; likely a pytest marker — restore before running.
def testRearrangedInput():
    """Re-ordered input file keys should parse to the same conditions."""
    #test reading in re-arranged example_input_file
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_new_order.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    print("---------------------------------------------------------------");
    print("condObject[1].pressure: " + condObject[1].pressure)
    print("condObject[1].temperature: " + condObject[1].temperature)
    print("condObject[1].species: ", condObject[1].species)
    print("condObject[1].fuel: " + condObject[1].fuel)
    print("condObject[1].oxid: " + condObject[1].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[0].fuel == "CH4:1.0"
    assert condObject[0].oxid == 'O2:1.0,N2:3.76'
    assert condObject[1].pressure == " 1.0\n"
    assert condObject[1].temperature == " 1200\n"
    assert condObject[1].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[1].fuel == "CH4:1.0"
    assert condObject[1].oxid == 'O2:1.0,N2:3.76'
# NOTE(review): decorator line mangled to "<EMAIL>" by dataset
# anonymization; likely a pytest marker — restore before running.
def testIllogicalSpeciesNames():
    """Well-formed but nonsensical values should be passed through verbatim."""
    #test reading file in correct order and format, but illogical values
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_illogical.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " hello\n"
    assert condObject[0].temperature == " I\n"
    assert condObject[0].species == {'everthing': '5.0', 'oxidizer': '1.0', 'peanutbutter': '3.7'}
    assert condObject[0].fuel == "everthing:5.0"
    assert condObject[0].oxid == 'oxidizer:1.0,peanutbutter:3.7'
@pytest.mark.xfail
def testStringForFloats():
    """Expected failure: string values where floats are expected."""
    #test string values when expecting float
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_bad_float.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': 'youtube', 'N2': '4.5.6', 'O2': 'gello'}
    assert condObject[0].fuel == "CH4:youtube"
    assert condObject[0].oxid == 'O2:gello,N2:4.5.6'
@pytest.mark.xfail
def testNoValuesAfterSpecies():
    """Expected failure: input file where species have no numeric values.

    Note: prints condObject[0] but asserts against condObject[1], and compares
    the species dict to a set — both consistent with the xfail marker.
    """
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_no_species_value.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[1].pressure == " 1.0\n"
    assert condObject[1].temperature == " 1000\n"
    assert condObject[1].species == {'CH4', 'N2', 'O2'}
    assert condObject[1].fuel == "CH4"
    assert condObject[1].oxid == 'O2,N2'
def testNegativeSpeciesValues():
    """Negative species mole values should be passed through verbatim."""
    #test reading in negative species values
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_negative_species_values.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': '-1.0', 'N2': '-3.0', 'O2': '-2.0'}
    assert condObject[0].fuel == "CH4:-1.0"
    assert condObject[0].oxid == 'O2:-2.0,N2:-3.0'
@pytest.mark.xfail
def testNoOpeningKeyword():
    """Expected failure: input file missing the opening CONV keyword."""
    #test file with no "CONV"
    #should not read anything.
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_no_open_keyword.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[0].fuel == "CH4:1.0"
    assert condObject[0].oxid == 'O2:1.0,N2:3.76'
@pytest.mark.xfail
def testNoClosingKeyword():
    """An input file without the closing "END" keyword never finishes a block.

    Marked xfail: reading starts but the condition object is never closed
    out, so the asserts are expected to fail.
    """
    #test file with no "END"
    #Will read, but will never close.
    conditions = relative_location("pymars/tests/inputfiles/example_input_file_no_close_keyword.txt")
    condObject = readin_initial_conditions.readin_conditions(conditions)
    print("condObject[0].pressure: " + condObject[0].pressure)
    print("condObject[0].temperature: " + condObject[0].temperature)
    print("condObject[0].species: ", condObject[0].species)
    print("condObject[0].fuel: " + condObject[0].fuel)
    print("condObject[0].oxid: " + condObject[0].oxid)
    assert condObject[0].pressure == " 1.0\n"
    assert condObject[0].temperature == " 1000\n"
    assert condObject[0].species == {'CH4': '1.0', 'N2': '3.76', 'O2': '1.0'}
    assert condObject[0].fuel == "CH4:1.0"
    assert condObject[0].oxid == 'O2:1.0,N2:3.76'
#testNoClosingKeyword()
| StarcoderdataPython |
25618 | <gh_stars>1-10
import numpy as np
import heapq
def cosine(x, y):
    """Cosine similarity of vectors *x* and *y*; eps guards a zero denominator."""
    eps = 1e-10
    denominator = np.sqrt(np.dot(x, x) * np.dot(y, y) + eps)
    return np.dot(x, y) / denominator
def get_nearest_k(word, vocab, vocab_matrix, k=4, return_score=False):
    """Return the k vocabulary words most cosine-similar to *word*.

    *vocab* maps word -> row index into *vocab_matrix*; the query word
    itself is excluded.  With return_score=True the internal heap of
    (score, word) tuples is returned (heap order, not sorted).
    """
    def _sim(u, v):
        # Cosine similarity; the epsilon keeps the denominator non-zero.
        return np.dot(u, v) / np.sqrt(np.dot(u, u) * np.dot(v, v) + 1e-10)

    target = vocab_matrix[vocab[word]]
    best = []  # min-heap of (score, word): least-similar kept entry on top
    for candidate in vocab:
        if candidate == word:
            continue
        score = _sim(target, vocab_matrix[vocab[candidate]])
        if len(best) < k:
            heapq.heappush(best, (score, candidate))
        elif best[0][0] < score:
            heapq.heapreplace(best, (score, candidate))
    return best if return_score else [w for _, w in best]
def get_nearest_k_with_matrix(vector, word2vec, k, return_score):
    """Return the k entries of *word2vec* (word -> vector) closest to *vector*.

    Unlike get_nearest_k, nothing is excluded: if the query word is present
    in *word2vec* it can appear in the result.  With return_score True the
    (score, word) heap is returned instead of bare words.
    """
    def _sim(u, v):
        # Cosine similarity with an epsilon guarding division by zero.
        return np.dot(u, v) / np.sqrt(np.dot(u, u) * np.dot(v, v) + 1e-10)

    best = []  # min-heap of (score, word)
    for candidate, vec in word2vec.items():
        score = _sim(vector, vec)
        if len(best) < k:
            heapq.heappush(best, (score, candidate))
        elif best[0][0] < score:
            heapq.heapreplace(best, (score, candidate))
    return best if return_score else [w for _, w in best]
def get_furthest_k(word, vocab, vocab_matrix, k=4, return_score=False):
    """Return the k vocabulary words LEAST cosine-similar to *word*.

    Implemented by heaping on the negated similarity, so with
    return_score=True the tuples carry -cosine, exactly as before.
    """
    def _sim(u, v):
        # Cosine similarity with an epsilon guarding division by zero.
        return np.dot(u, v) / np.sqrt(np.dot(u, u) * np.dot(v, v) + 1e-10)

    target = vocab_matrix[vocab[word]]
    worst = []  # min-heap on -similarity: keeps the k most dissimilar words
    for candidate in vocab:
        if candidate == word:
            continue
        score = -_sim(target, vocab_matrix[vocab[candidate]])
        if len(worst) < k:
            heapq.heappush(worst, (score, candidate))
        elif worst[0][0] < score:
            heapq.heapreplace(worst, (score, candidate))
    return worst if return_score else [w for _, w in worst]
| StarcoderdataPython |
3241753 | <filename>src/practice_4.py
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
import tensorflow as tf
'''run() 的输入参数'''
# Demo of Session.run() inputs (TensorFlow 1.x graph/session API).
a = tf.constant([5, 3], name='input_a')
b = tf.reduce_sum(a, name='add_b')
c = tf.reduce_prod(a, name='mul_c')
d = tf.add(b, c, name='add_d')
sess = tf.Session()
# The `fetches` argument accepts Op or Tensor objects; a Tensor yields a
# NumPy array while an Op yields None.
# Fetching Tensor objects:
print('cal d : ', sess.run(d))
print('cal b, c, d : ', sess.run([b, c, d]))
# # Fetching an Op handle:
# print('Op', sess.run(tf.initialize_all_variables()))
# The `feed_dict` argument overrides Tensor values in the dataflow graph.
replace_dict = {b: 15}
print('cal d afer replaced : ', sess.run(d, feed_dict=replace_dict))
# writer = tf.summary.FileWriter('./my_graph', sess.graph)
# # Run `tensorboard --logdir=.\src\my_graph` in the matching virtualenv,
# # then open port 6006 in a browser to inspect the dataflow graph.
# writer.close()
sess.close()
| StarcoderdataPython |
10002 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: x.huang
# @date:17-8-4
import logging
from pony.orm import db_session
from handlers.base.base import BaseRequestHandler
class LoginRequireError(Exception):
    """Raised when an endpoint that requires a logged-in user is hit anonymously."""
class AuthBaseHandler(BaseRequestHandler):
    """Base class for handlers that require an authenticated user (401 otherwise)."""
    def prepare(self):
        # CORS preflight (OPTIONS) requests are allowed through without auth.
        if not self.current_user and self.request.method.lower() != 'options':
            self.render_error('Auth Error.', status_code=401)
        super(AuthBaseHandler, self).prepare()
class Authentication(object):
    """Credential checks against the admin, app and company user tables.

    Each method returns a plain dict of user fields on success and None on
    failure; any database error is logged and also yields None.  The handler
    supplies the Pony ORM entity classes (m_useradmin, m_appuser, m_comuser).
    """
    def __init__(self, handler):
        # Keep a reference to the request handler that owns the ORM models.
        self.handler = handler
    def admin_auth(self, username, password):
        """Authenticate a backend admin by username/password.

        On success the password column is excluded from the returned dict
        and the role's permission set is attached under 'permission'.
        """
        try:
            with db_session:
                user_obj = self.handler.m_useradmin.get(username=username, is_delete=False)
                if user_obj:
                    is_auth = user_obj.check_password(password)
                    if is_auth:
                        user_dict = user_obj.to_dict(exclude=self.handler.m_useradmin.password.column)
                        user_dict['permission'] = user_obj.role_id.permission if user_obj.role_id else None
                        return user_dict
                else:
                    return None
        except Exception as e:
            logging.error(str(e))
            return None
    def api_auth(self, phone, password, sc_auth=False):
        """Authenticate an app user by phone/password.

        sc_auth=True bypasses the password check (e.g. SMS-code login path).
        """
        try:
            with db_session:
                user_obj = self.handler.m_appuser.get(phone=phone, is_delete=False)
                if user_obj:
                    is_auth = False
                    if password:
                        is_auth = user_obj.check_password(password)
                    if sc_auth or is_auth:
                        user_dict = user_obj.to_dict()
                        return user_dict
                else:
                    return None
        except Exception as e:
            logging.error(str(e))
            return None
    def web_auth(self, username, password):
        """Authenticate a company web user by company username/password."""
        try:
            with db_session:
                user_obj = self.handler.m_comuser.get(com_username=username, is_delete=False)
                if user_obj:
                    is_auth = False
                    if password:
                        is_auth = user_obj.check_password(password)
                    if is_auth:
                        user_dict = user_obj.to_dict()
                        return user_dict
                else:
                    return None
        except Exception as e:
            logging.error(str(e))
            return None
| StarcoderdataPython |
3391294 | # -*- coding: utf-8 -*-
from .app import TenDaysWeb
from .response import Response
from .exceptions import HttpException | StarcoderdataPython |
def link_exists(url, links):
    """Return True if *url* appears as a 'link' value in links['links'].

    Args:
        url: The link to look for.
        links: Mapping whose 'links' key holds dicts each carrying a
            'link' entry.

    Returns:
        bool: True when the url is present, False otherwise.
    """
    # The original built a full intermediate list and ran an immediately
    # invoked lambda for the membership test; any() short-circuits instead.
    return any(item['link'] == url for item in links['links'])
| StarcoderdataPython |
1679172 | #
# Copyright (c) 2019 MagicStack Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import edgedb
import random
from . import queries
INSERT_PREFIX = 'insert_test__'
def connect(ctx):
    """Create a blocking EdgeDB client with up to 10 retry attempts.

    *ctx* is accepted for benchmark-driver API symmetry; it is unused here.
    """
    return edgedb.create_client().with_retry_options(
        edgedb.RetryOptions(attempts=10),
    )
def close(ctx, conn):
    """Close the EdgeDB client created by connect()."""
    conn.close()
def load_ids(ctx, conn):
    """Sample random object ids and build the per-query benchmark inputs.

    Returns a dict keyed by benchmark function name; read queries get id
    lists, insert benchmarks get one stub per concurrent worker.
    """
    d = conn.query_single('''
        WITH
            U := User {id, r := random()},
            M := Movie {id, r := random()},
            P := Person {id, r := random()}
        SELECT (
            users := array_agg((SELECT U ORDER BY U.r LIMIT <int64>$lim).id),
            movies := array_agg((SELECT M ORDER BY M.r LIMIT <int64>$lim).id),
            people := array_agg((SELECT P ORDER BY P.r LIMIT <int64>$lim).id),
        );
    ''', lim=ctx.number_of_ids)
    movies = list(d.movies)
    people = list(d.people)
    return dict(
        get_user=list(d.users),
        get_movie=movies,
        get_person=people,
        # re-use movie IDs for update tests
        update_movie=movies[:],
        # generate as many insert stubs as "concurrency" to
        # accommodate concurrent inserts
        insert_user=[INSERT_PREFIX] * ctx.concurrency,
        insert_movie=[{
            'prefix': INSERT_PREFIX,
            'people': people[:4],
        }] * ctx.concurrency,
        insert_movie_plus=[INSERT_PREFIX] * ctx.concurrency,
    )
def get_user(conn, id):
    """Fetch one user as a JSON string by UUID (GET_USER query)."""
    return conn.query_single_json(queries.GET_USER, id=id)
def get_movie(conn, id):
    """Fetch one movie as a JSON string by UUID (GET_MOVIE query)."""
    return conn.query_single_json(queries.GET_MOVIE, id=id)
def get_person(conn, id):
    """Fetch one person as a JSON string by UUID (GET_PERSON query)."""
    return conn.query_single_json(queries.GET_PERSON, id=id)
def update_movie(conn, id):
    """Append the first 8 chars of the id to the movie title (UPDATE_MOVIE)."""
    return conn.query_single_json(
        queries.UPDATE_MOVIE, id=id, suffix=str(id)[:8])
def insert_user(conn, val):
    """Insert a user named '<val><random>' so concurrent inserts don't collide."""
    num = random.randrange(1_000_000)
    return conn.query_single_json(
        queries.INSERT_USER, name=f'{val}{num}', image=f'image_{val}{num}')
def insert_movie(conn, val):
    """Insert a movie linking the first sampled person as director and the
    next three as cast; a random suffix keeps titles unique."""
    num = random.randrange(1_000_000)
    return conn.query_single_json(
        queries.INSERT_MOVIE,
        title=f'{val["prefix"]}{num}',
        image=f'{val["prefix"]}image{num}.jpeg',
        description=f'{val["prefix"]}description{num}',
        year=num,
        d_id=val["people"][0],
        cast=val["people"][1:4],
    )
def insert_movie_plus(conn, val):
    """Insert a movie AND its director/cast people in one query.

    Unlike insert_movie, the related Person rows are created inline
    (first/last name + image for the director and two actors).
    """
    num = random.randrange(1_000_000)
    return conn.query_single_json(
        queries.INSERT_MOVIE_PLUS,
        title=f'{val}{num}',
        image=f'{val}image{num}.jpeg',
        description=f'{val}description{num}',
        year=num,
        dfn=f'{val}Alice',
        dln=f'{val}Director',
        dimg=f'{val}image{num}.jpeg',
        cfn0=f'{val}Billie',
        cln0=f'{val}Actor',
        cimg0=f'{val}image{num+1}.jpeg',
        cfn1=f'{val}Cameron',
        cln1=f'{val}Actor',
        cimg1=f'{val}image{num+2}.jpeg',
    )
def setup(ctx, conn, queryname):
    """Reset database state before a mutation benchmark.

    update_movie: strip any '---' suffix left on titles by previous runs;
    insert_*: delete rows created by previous runs (matched by the
    INSERT_PREFIX marker in name/image).
    """
    if queryname == 'update_movie':
        conn.execute('''
            update Movie
            filter contains(.title, '---')
            set {
                title := str_split(.title, '---')[0]
            };
        ''')
    elif queryname == 'insert_user':
        conn.query('''
            delete User
            filter .name LIKE <str>$prefix
        ''', prefix=f'{INSERT_PREFIX}%')
    elif queryname == 'insert_movie':
        conn.query('''
            delete Movie
            filter .image LIKE <str>$prefix
        ''', prefix=f'{INSERT_PREFIX}image%')
    elif queryname == 'insert_movie_plus':
        conn.query('''
            delete Movie
            filter .image LIKE <str>$prefix
        ''', prefix=f'{INSERT_PREFIX}image%')
        conn.query('''
            delete Person
            filter .image LIKE <str>$prefix
        ''', prefix=f'{INSERT_PREFIX}image%')
def cleanup(ctx, conn, queryname):
    """Undo a mutation benchmark's changes after it finishes."""
    if queryname in {'update_movie', 'insert_user', 'insert_movie',
                     'insert_movie_plus'}:
        # The clean up is the same as setup for mutation benchmarks
        setup(ctx, conn, queryname)
| StarcoderdataPython |
108913 | #! /usr/bin/env python3
import os
from datetime import timedelta
import flask
from module.Interface import *
# Serve page templates from ./static/html; sessions expire after 30 minutes.
app = flask.Flask(__name__, template_folder="./static/html")
# NOTE: the secret key is random per process, so sessions reset on restart.
app.config['SECRET_KEY'] = os.urandom(24)
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=30)
@app.route('/', methods=["GET", "POST"])
def index() :
    """Redirect the site root to the static landing page."""
    return flask.redirect('/static/html/index.html')
@app.route('/test', methods=["GET", "POST"])
def test() :
    """Redirect to the static test page."""
    return flask.redirect('/static/test/index.html')
# Blog data endpoints.
# Each interface list pairs a URL with its handler; the loops below register
# every pair as a POST-only route, using the URL itself as the endpoint name.
blogInterfaceList = [
    ['/blog/is_login', BlogInterface.isLogin],
    ['/blog/login', BlogInterface.login],
    ['/blog/username', BlogInterface.getUsername],
    ['/blog/avatar', BlogInterface.getAvatar],
    ['/blog/running_days', BlogInterface.getRunDays],
    ['/blog/visiting_count', BlogInterface.getVisitingCount],
    ['/blog/visiting_modify', BlogInterface.addVisitingCount],
]
for route in blogInterfaceList :
    app.add_url_rule(route[0], endpoint=route[0], view_func=route[1], methods=['POST'])
# Article data endpoints.
articleInterfaceList = [
    ['/article/count', ArticleInterface.count],
    ['/article/get_id_by_order', ArticleInterface.getIdByOrder],
    ['/article/title', ArticleInterface.title],
    ['/article/date', ArticleInterface.date],
    ['/article/reading_count', ArticleInterface.readingCount],
    ['/article/markdown', ArticleInterface.markdown],
    ['/article/html', ArticleInterface.html],
    ['/article/total', ArticleInterface.total],
    ['/article/aside', ArticleInterface.aside],
    ['/article/list', ArticleInterface.list],
    ['/article/pages', ArticleInterface.pages],
    ['/article/latest', ArticleInterface.latest],
    ['/article/modify/reading_count', ArticleInterface.modifyReadingCount],
    ['/article/save', ArticleInterface.save],
    ['/article/delete', ArticleInterface.delete],
    ['/article/add_reading', ArticleInterface.addReading],
]
for route in articleInterfaceList :
    app.add_url_rule(route[0], endpoint=route[0], view_func=route[1], methods=['POST'])
# Message-board (guestbook) data endpoints.
messageInterfaceList = [
    ['/message/count', MessageInterface.count],
    ['/message/get_id_by_order', MessageInterface.getIdByOrder],
    ['/message/visitor_name', MessageInterface.visitorName],
    ['/message/date', MessageInterface.date],
    ['/message/markdown', MessageInterface.markdown],
    ['/message/html', MessageInterface.html],
    ['/message/total', MessageInterface.total],
    ['/message/pages', MessageInterface.pages],
    ['/message/list', MessageInterface.getList],
    ['/message/aside', MessageInterface.getAside],
    ['/message/save', MessageInterface.save],
    ['/message/delete', MessageInterface.delete],
]
for route in messageInterfaceList :
    app.add_url_rule(route[0], endpoint=route[0], view_func=route[1], methods=['POST'])
# Markdown rendering endpoint.
markdownInterfaceList = [
    ['/markdown/render', MarkdownInterface.render],
]
for route in markdownInterfaceList :
    app.add_url_rule(route[0], endpoint=route[0], view_func=route[1], methods=['POST'])
if __name__ == "__main__" :
    app.run(port=8102)
4811170 | <reponame>suryanarayana007/python-iot-raspberry-pi-1486806556316
from flask import Flask,redirect
from flask import render_template
from flask import request
import os, json
import time
import ibmiotf.application
from twilio.rest import TwilioRestClient
# Read Twilio credentials from the Cloud Foundry VCAP_SERVICES binding.
vcap = json.loads(os.getenv("VCAP_SERVICES"))
twilioAccount = vcap["user-provided"][0]["credentials"]["accountSID"]
twilioToken = vcap["user-provided"][0]["credentials"]["authToken"]
twilioClient = TwilioRestClient(twilioAccount, twilioToken)
# Mutable app state: the IoT client plus SMS target/text set via the web form.
client = None
phoneNumberTo = ""
textMessage = "Button Pushed"
phoneNumberFrom = os.getenv("PHONE_NUMBER_FROM")
deviceId = os.getenv("DEVICE_ID")
def myCommandCallback(cmd):
    """IoT event callback: decode the payload and send the configured SMS."""
    global phoneNumberTo
    global textMessage
    payload = json.loads(cmd.payload)
    buttonPushed = payload["buttonPushed"]
    # NOTE(review): the SMS is sent on every event regardless of the
    # buttonPushed flag -- confirm whether it should be gated on it.
    message = twilioClient.messages.create(to=phoneNumberTo, from_=phoneNumberFrom, body=textMessage)
    print buttonPushed
# Connect to the IBM Watson IoT Platform using the bound service credentials
# and subscribe to the device's "input" events; connection failures are
# printed but do not stop the web app from starting.
try:
    options = {
        "org": vcap["iotf-service"][0]["credentials"]["org"],
        "id": vcap["iotf-service"][0]["credentials"]["iotCredentialsIdentifier"],
        "auth-method": "apikey",
        "auth-key": vcap["iotf-service"][0]["credentials"]["apiKey"],
        "auth-token": vcap["iotf-service"][0]["credentials"]["apiToken"]
    }
    client = ibmiotf.application.Client(options)
    client.connect()
    client.deviceEventCallback = myCommandCallback
    client.subscribeToDeviceEvents(event="input")
except ibmiotf.ConnectionException as e:
    print e
app = Flask(__name__)
# Honor the platform-assigned PORT; fall back to 8080 for local runs.
if os.getenv("PORT"):
    port = int(os.getenv("PORT"))
else:
    port = 8080
@app.route('/')
def hello():
    """Serve the main control page."""
    return render_template('index.html')
@app.route('/light/<command>', methods=['GET', 'POST'])
def light_route(command):
    """Publish a "light" command event to the Raspberry Pi, then go home."""
    print command
    myData = {'command' : command}
    client.publishEvent("raspberrypi", deviceId, "light", "json", myData)
    return redirect("/", code=302)
@app.route('/phoneNumber', methods=['POST'])
def phone_number_route():
    """Store the SMS target number and message from the web form.

    Numbers without a leading '+' are assumed to be US/Canada (+1).
    """
    global phoneNumberTo
    global textMessage
    phoneNumber = request.form['phoneNumber']
    textMessage = request.form['message']
    if phoneNumber.startswith('+'):
        phoneNumberTo = phoneNumber
    else:
        phoneNumberTo = "+1" + phoneNumber
    return redirect("/", code=302)
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable inside the container.
    app.run(host='0.0.0.0', port=port)
| StarcoderdataPython |
3243901 | <reponame>Franky1/speech-emotion-webapp
from datetime import datetime
import cv2
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.models import load_model
# constants
starttime = datetime.now()  # app start time (used for uptime-style display)
# Emotion label sets for the 6-, 7- and 3-class model outputs (index-aligned
# with the corresponding model prediction vectors).
CAT6 = ['fear', 'angry', 'neutral', 'happy', 'sad', 'surprise']
CAT7 = ['fear', 'disgust', 'neutral', 'happy', 'sad', 'surprise', 'angry']
CAT3 = ["positive", "neutral", "negative"]
# Plot color per emotion/sentiment label.
COLOR_DICT = {"neutral": "grey",
              "positive": "green",
              "happy": "green",
              "surprise": "orange",
              "fear": "purple",
              "negative": "red",
              "angry": "red",
              "sad": "lightblue",
              "disgust":"brown"}
# Fixed sample categories/predictions for trying out the plotting code.
TEST_CAT = ['fear', 'disgust', 'neutral', 'happy', 'sad', 'surprise', 'angry']
TEST_PRED = np.array([.3,.3,.4,.1,.6,.9,.1])
# page settings
# st.set_page_config(page_title="SER web-app", page_icon=":speech_balloon:", layout="wide")
def get_melspec(audio):
    """Load an audio file and return (rgbImage, Xdb).

    rgbImage: 224x224x3 uint8 spectrogram image suitable for the CNN input.
    Xdb: the raw amplitude-to-dB STFT matrix.
    NOTE: despite the name, this is a plain STFT magnitude spectrogram, not
    a mel spectrogram (no mel filterbank is applied).
    """
    y, sr = librosa.load(audio, sr=44100)
    X = librosa.stft(y)
    Xdb = librosa.amplitude_to_db(abs(X))
    # Triplicate to 3 channels, cast, collapse to gray, resize, re-expand:
    # ends up as a 224x224 grayscale image repeated across 3 channels.
    img = np.stack((Xdb,) * 3,-1)
    img = img.astype(np.uint8)
    grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    grayImage = cv2.resize(grayImage, (224, 224))
    rgbImage = np.repeat(grayImage[..., np.newaxis], 3, -1)
    return (rgbImage, Xdb)
def get_title(predictions, categories, first_line=''):
    """Build a plot title like '<first_line>\\nDetected emotion: happy - 75.00%'.

    Args:
        predictions: 1-D array of per-category scores (argmax picks the winner).
        categories: labels aligned with *predictions*.
        first_line: optional text placed before the newline.

    Returns:
        str: the formatted title.
    """
    # BUGFIX: the original used a backslash line-continuation *inside* the
    # f-string, which embedded the next source line's leading indentation in
    # the title ("Detected emotion:     happy ...").  Adjacent string
    # literals keep the text clean.
    best = predictions.argmax()
    return (f"{first_line}\nDetected emotion: "
            f"{categories[best]} - {predictions.max() * 100:.2f}%")
def plot_colored_polar(fig, predictions, categories,
                       title="", colors=COLOR_DICT):
    """Draw a polar "radar" chart of *predictions* onto *fig*.

    Each category gets a translucent colored sector; the prediction polygon
    is drawn in the winning category's color.
    """
    N = len(predictions)
    ind = predictions.argmax()
    # Color of the winning category (bound to two names by the original).
    COLOR = color_sector = colors[categories[ind]]
    sector_colors = [colors[i] for i in categories]
    fig.set_facecolor("#d1d1e0")
    # NOTE(review): polar="True" passes a *string*; matplotlib treats any
    # truthy value as polar, but the literal True was probably intended.
    ax = plt.subplot(111, polar="True")
    theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
    # One translucent bar per sector, radius scaled by its prediction.
    for sector in range(predictions.shape[0]):
        radii = np.zeros_like(predictions)
        radii[sector] = predictions[sector] * 10
        width = np.pi / 1.8 * predictions
        c = sector_colors[sector]
        ax.bar(theta, radii, width=width, bottom=0.0, color=c, alpha=0.25)
    # Close the prediction polygon by repeating the first point.
    angles = [i / float(N) * 2 * np.pi for i in range(N)]
    angles += angles[:1]
    data = list(predictions)
    data += data[:1]
    plt.polar(angles, data, color=COLOR, linewidth=2)
    plt.fill(angles, data, facecolor=COLOR, alpha=0.25)
    ax.spines['polar'].set_color('lightgrey')
    ax.set_theta_offset(np.pi / 3)
    ax.set_theta_direction(-1)
    plt.xticks(angles[:-1], categories)
    ax.set_rlabel_position(0)
    plt.yticks([0, .25, .5, .75, 1], color="grey", size=8)
    plt.suptitle(title, color="darkblue", size=10)
    plt.title(f"BIG {N}\n", color=COLOR)
    plt.ylim(0, 1)
    plt.subplots_adjust(top=0.75)
def plot_melspec(path, tmodel=None, three=False,
                 CAT3=CAT3, CAT6=CAT6):
    """Run the spectrogram model on *path* and plot a polar emotion chart.

    Returns (fig, tpred): the matplotlib figure and the prediction vector
    (6-class by default, collapsed to 3 sentiment classes when three=True).
    """
    # load model if it is not loaded
    if tmodel is None:
        tmodel = load_model("tmodel_all.h5")
    # mel-spec model results
    mel = get_melspec(path)[0]
    mel = mel.reshape(1, *mel.shape)
    tpred = tmodel.predict(mel)[0]
    cat = CAT6
    if three:
        # Collapse CAT6 scores into positive/neutral/negative; 'surprise'
        # and 'sad' are each split 50/50 between two sentiment buckets.
        pos = tpred[3] + tpred[5] * .5
        neu = tpred[2] + tpred[5] * .5 + tpred[4] * .5
        neg = tpred[0] + tpred[1] + tpred[4] * .5
        tpred = np.array([pos, neu, neg])
        cat = CAT3
    txt = get_title(tpred, cat)
    fig = plt.figure(figsize=(6, 4))
    plot_colored_polar(fig, predictions=tpred, categories=cat, title=txt)
    return (fig, tpred)
if __name__ == "__main__":
plot_melspec("audio/morris3.wav")
| StarcoderdataPython |
1651772 | # coding=utf8
# Heap-layout constants; the names and values mirror glibc's 64-bit malloc
# macros (TODO confirm against the targeted glibc version).
OS = 1                               # target platform selector used by consumers of this module
SIZE_SZ = 8                          # sizeof(size_t) on a 64-bit platform
MALLOC_ALIGNMENT = 2 * SIZE_SZ       # chunks are aligned to 16 bytes
MALLOC_ALIGN_MASK = 2 * SIZE_SZ - 1  # mask for rounding sizes up to the alignment
MINSIZE = 32                         # smallest possible chunk size
FASTBIN_MAX_SIZE = 0x80              # largest request served from fastbins
SMALLBIN_MAX_SIZE = 0x3f0            # largest request served from small bins
# Chunk classification tags:
FASTBIN_CHUNK = 1
SMALLBIN_CHUNK = 2
UNSORTEDBIN_CHUNK = 3
LARGEBIN_CHUNK = 4
UNDEFINED = 0                        # chunk not (yet) classified
1752657 | <filename>footballdatawrapper/Fixture.py
class Fixture:
    """One football match ("fixture") parsed from an API payload.

    Example payload:

        "id": 149461,
        "soccerseasonId": 406,
        "date": "2014-07-08T20:00:00Z",
        "matchday": 6,
        "homeTeamName": "Brazil",
        "homeTeamId": 764,
        "awayTeamName": "Germany",
        "awayTeamId": 759,
        "result":
            {
                "goalsHomeTeam": 1,
                "goalsAwayTeam": 7
            }
    """

    def __init__(self, *args, **kwargs):
        # Pull each known field out of the payload; -1 marks "absent".
        read = kwargs.get
        self.__id = read("id", -1)
        self.__soccer_season = read("soccerseasonId", -1)
        self.__matchday = read("matchday", -1)
        self.__home_team = read("homeTeamId", -1)
        self.__away_team = read("awayTeamId", -1)
        self.__result = read("result", -1)

    @property
    def result(self):
        """The raw "result" payload (goals mapping), or -1 when absent."""
        return self.__result

    @property
    def home_team(self):
        """The home team's numeric id, or -1 when absent."""
        return self.__home_team
| StarcoderdataPython |
3352715 | <filename>src/model/training/train.py
# https://colab.research.google.com/github/pytorch/vision/blob/temp-tutorial/tutorials/torchvision_finetuning_instance_segmentation.ipynb
from azureml.core import Run
import os
import sys
sys.path += ['.']
import logging
from shutil import copy
import numpy as np
import torch
import torch.utils.data
from training.engine import train_one_epoch, evaluate
import dataProcessing.utils as utils
from dataProcessing.coco_utils import CocoDetection
from training.model_utils import rt, get_transform, get_instance_segmentation_model, save_snapshot
import argparse
from torch.optim import SGD, Adam
from sklearn.model_selection import KFold
import random
logging.basicConfig(level=logging.INFO)
# TODO: output path wired up to blob storage.
# TODO: command-line parameters for these folders
datapath = rt('data/dataset')        # images
labelpath = rt('data/labeldata')     # COCO annotation JSON
outputpath = rt('outputData')        # checkpoints / logs / uploaded artifacts
logging.info(f'Using pytorch version {torch.__version__}')
logging.info(f'Using numpy version {np.__version__}')
# TODO: enable more arguments
parser = argparse.ArgumentParser()
parser.add_argument('--cv', type=int, default=1, help='cross-validation slices (<=1 for a single 90/10 split)')
parser.add_argument('--optimizer', default='sgd', help='Optimizer: sgd or adam for now')
# When running inside AzureML, pull the image and label datastores down to
# the local data folders before training.
if "RUNINAZURE" in os.environ:
    from azureml.core import Workspace, Datastore, Dataset
    logging.info('Downloading datasets')
    ws = Workspace.from_config()
    imagestore = Datastore.get(ws, datastore_name='images')
    labeldata = Datastore.get(ws, datastore_name='labeldata')
    imagestore_paths = [(imagestore, '/**')]
    images_ds = Dataset.File.from_files(path=imagestore_paths)
    images_ds.download(target_path=datapath, overwrite=True)
    labeldata_paths = [(labeldata, '/**')]
    labeldata_ds = Dataset.File.from_files(path=labeldata_paths)
    labeldata_ds.download(target_path=labelpath, overwrite=True)
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
if __name__ == '__main__':
    args = parser.parse_args()
    # use our dataset and defined transformations
    label_fn = 'coco_all_annotations_xformed.json' # coco_xformed.json
    # Two dataset views over the same data: augmenting transforms for
    # training, deterministic transforms for evaluation.
    dataset_base = CocoDetection(datapath, os.path.join(labelpath, label_fn), get_transform(train=True))
    dataset_test_base = CocoDetection(datapath, os.path.join(labelpath, label_fn), get_transform(train=False))
    # Fix seeds so splits and shuffling are reproducible.
    seed = 42
    torch.manual_seed(seed)
    random.seed(seed)
    # epoch -> metric name -> list of values (one per fold).
    all_stats = {}
    def log_summary(summary, fold, epoch, best_score_key=None):
        '''
        After each epoch in each fold, compiles statistics organized by epoch
        for future collation.
        Logs current epoch result to the current fold for per-fold plotting.
        best_score_key indicates the key metric to check if we've reached the best overall score.
        Returns True when best_score_key is given, present in summary, and its
        value is the best seen across all epochs/folds so far.
        '''
        run = Run.get_context()
        for k in summary:
            if epoch not in all_stats:
                all_stats[epoch] = {}
            if k not in all_stats[epoch]:
                all_stats[epoch][k] = []
            all_stats[epoch][k].append(summary[k])
            # We'll just log the raw values to a single metric to avoid clutter (they will reset on each fold)
            run.log(k, summary[k])
        return best_score_key is not None and best_score_key in summary and \
            summary[best_score_key] >= max([max(all_stats[e][best_score_key]) for e in all_stats])
    def log_final_stats():
        """Log per-epoch mean/std across folds for every collected metric."""
        run = Run.get_context()
        for epoch in all_stats:
            for k in all_stats[epoch]:
                run.log(f'mean {k}', np.mean(all_stats[epoch][k]))
                run.log(f'std {k}', np.std(all_stats[epoch][k]))
    def train(fold, dataset, dataset_test, save_best=False):
        """Train a Mask R-CNN instance-segmentation model for one fold.

        Runs 10 epochs, evaluating after each, writing losses to
        loss_<fold>.txt and checkpointing every epoch (flagging the best
        'segm F1' snapshot when save_best is True).
        """
        # define training and validation data loaders
        data_loader = torch.utils.data.DataLoader(
            dataset, batch_size=2, shuffle=True, num_workers=0,
            collate_fn=utils.collate_fn)
        print(f'data loader has {len(data_loader)} batches')
        data_loader_test = torch.utils.data.DataLoader(
            dataset_test, batch_size=1, shuffle=False, num_workers=0,
            collate_fn=utils.collate_fn)
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # our dataset has three classes: background, casing, and primer
        num_classes = 3
        # get the model using our helper function
        model = get_instance_segmentation_model(num_classes)
        # move model to the right device
        model.to(device)
        # construct an optimizer
        params = [p for p in model.parameters() if p.requires_grad]
        optimizer = None
        init_lr = 0.005
        if args.optimizer == 'sgd':
            optimizer = SGD(params, lr=init_lr,
                            momentum=0.9, weight_decay=0.0005)
        elif args.optimizer == 'adam':
            optimizer = Adam(params, lr=init_lr)  # weight_decay = ?
        # and a learning rate scheduler which decreases the learning rate by
        # 10x every 3 epochs
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       step_size=3,
                                                       gamma=0.1)
        num_epochs = 10
        folder = outputpath
        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        with open(os.path.join(folder, f'loss_{fold}.txt'), 'w', encoding='utf-8') as outLoss:
            for epoch in range(num_epochs):
                # train for one epoch, printing every 10 iterations
                train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10, outLog=outLoss)
                # update the learning rate
                lr_scheduler.step()
                # evaluate on the test dataset
                summary = evaluate(model, data_loader_test, device=device)
                is_best = log_summary(summary, fold, epoch, best_score_key='segm F1' if save_best else None)
                checkpoint = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    # 'args': args
                }
                save_snapshot(checkpoint, folder, fold, epoch, is_best)
    # Run the main training part, maybe with cross-validation
    if args.cv <= 1:
        # split the dataset in train and test set
        # (a single 90/10 split; the test slice is at least 10 samples)
        indices = torch.randperm(len(dataset_base)).tolist()
        cutoff = max(10, int(0.1 * len(dataset_base)))
        dataset = torch.utils.data.Subset(dataset_base, indices[:-cutoff])
        dataset_test = torch.utils.data.Subset(dataset_test_base, indices[-cutoff:])
        # we only set save_best for single 90/10 split runs, not cross-validation
        train(0, dataset, dataset_test, save_best=True)
    else:
        kfold = KFold(n_splits=args.cv, shuffle=True)
        #
        # K-fold Cross Validation model evaluation
        for fold, (train_ids, test_ids) in enumerate(kfold.split(dataset_base)):
            logging.info(f'Cross-Validation fold {fold}')
            dataset = torch.utils.data.Subset(dataset_base, train_ids)
            dataset_test = torch.utils.data.Subset(dataset_test_base, test_ids)
            train(fold, dataset, dataset_test)
    log_final_stats()
if "RUNINAZURE" in os.environ:
folder = outputpath
copy('training/model_utils.py', folder)
copy('training/engine.py', folder)
copy('dataProcessing/coco_utils.py', folder)
copy('dataProcessing/utils.py', folder)
copy('dataProcessing/transforms.py', folder)
from azureml.core.model import Model
logging.info("Registering Model")
model = Model.register(model_name="APImodel",
model_path=outputpath,
description="",
workspace=ws)
targetpath = Run.get_context().display_name
logging.info(f"uploading results to {targetpath}")
files = [os.path.join(outputpath, f) for f in os.listdir(outputpath)]
modeldata = Datastore.get(ws, datastore_name='models')
modeldata.upload_files(files, target_path=targetpath)
| StarcoderdataPython |
3385265 | import compas_ags
from compas_ags.diagrams import FormDiagram
from compas_ags.diagrams import ForceDiagram
from compas_ags.ags import form_update_q_from_qind
from compas_ags.ags import force_update_from_form
from compas_ags.ags import form_update_from_force
from compas_ags.viewers import Viewer
def view_form_force(form, force, forcescale=0.5, edge_label=True):
    """Show the form and force diagrams side by side.

    Reads the module-global force_edge_labels when edge_label is True;
    fixed vertices are drawn in black.
    """
    if edge_label:
        form_edge_label = {uv: index for index, uv in enumerate(form.edges())}
        force_edge_label = force_edge_labels
    else:
        form_edge_label = None
        force_edge_label = None
    viewer = Viewer(form, force, delay_setup=False)
    viewer.draw_form(edgelabel=form_edge_label,
                     forces_on=True,
                     forcescale=forcescale,
                     vertexcolor={key: '#000000' for key in form.vertices_where({'is_fixed': True})})
    viewer.draw_force(edgelabel=force_edge_label)
    viewer.show()
def view_with_initial_stage(form, force, forcescale=0.5, edge_label=True):
    """Show form and force diagrams overlaid on their initial geometry.

    Reads the module-globals form_lines/force_lines (dashed snapshots from
    store_initial_lines) and force_edge_labels.
    """
    if edge_label:
        form_edge_label = {uv: index for index, uv in enumerate(form.edges())}
        force_edge_label = force_edge_labels
    else:
        form_edge_label = None
        force_edge_label = None
    viewer = Viewer(form, force, delay_setup=False)
    viewer.draw_form(lines=form_lines,
                     forces_on=True,
                     external_on=True,
                     forcescale=forcescale,
                     edgelabel=form_edge_label,
                     vertexcolor={key: '#000000' for key in form.vertices_where({'is_fixed': True})})
    viewer.draw_force(lines=force_lines,
                      edgelabel=force_edge_label
                      )
    viewer.show()
def store_initial_lines(form, force):
    """Snapshot the edges of both diagrams as dashed grey guide lines.

    Returns (form_lines, force_lines): one style dict per edge, suitable
    for the viewer's `lines=` argument.
    """
    def _dashed(diagram, u, v):
        # Uniform dashed-grey style shared by every snapshot line.
        return {
            'start': diagram.vertex_coordinates(u, 'xy'),
            'end': diagram.vertex_coordinates(v, 'xy'),
            'width': 1.0,
            'color': '#cccccc',
            'style': '--'
        }
    form_lines = [_dashed(form, u, v) for u, v in form.edges()]
    force_lines = [_dashed(force, u, v) for u, v in force.edges()]
    return form_lines, force_lines
# ------------------------------------------------------------------------------
# 2. Dragging the force diagram and updating form diagram
#    - Find a deeper form diagram
#    - Invert compression/tension
# ------------------------------------------------------------------------------
input_file = compas_ags.get('paper/exB_arch-output.json')
form = FormDiagram.from_json(input_file)
force = ForceDiagram.from_formdiagram(form)
# create label for plots (map both edge orientations to the same index)
force_edges = force.ordered_edges(form)
force_edge_labels = {(u, v): index for index, (u, v) in enumerate(force_edges)}
force_edge_labels.update({(v, u): index for index, (u, v) in enumerate(force_edges)})
# update the diagrams
form_update_q_from_qind(form)
force_update_from_form(force, form)
# visualise initial solution
view_form_force(form, force, forcescale=2.0)
# Identify auto constraints
form.identify_constraints()
# Snapshot the current geometry so later views can show it as dashed lines.
form_lines, force_lines = store_initial_lines(form, force)
# ---------------------------------------------
# Move to change sag (deeper form diagram):
# translate three force-diagram vertices in +x and recompute the form.
move_vertices = [0, 9, 8]
translation = +1.81
for key in move_vertices:
    x0 = force.vertex_attribute(key, 'x')
    force.vertex_attribute(key, 'x', x0 + translation)
form_update_from_force(form, force)
view_with_initial_stage(form, force, forcescale=2.0, edge_label=False)
# ---------------------------------------------
# Move to invert compression to tension:
# a larger translation of the same vertices flips the force signs.
move_vertices = [0, 9, 8]
translation = +4.00
for key in move_vertices:
    x0 = force.vertex_attribute(key, 'x')
    force.vertex_attribute(key, 'x', x0 + translation)
form_update_from_force(form, force)
view_with_initial_stage(form, force, forcescale=2.0, edge_label=False)
| StarcoderdataPython |
1655148 | <reponame>fochoao/cpython
from .log_widget import LogMonitorWidget, LogMonitorDockWidget, LogMonitorDropdown
from .log_database_handler import DatabaseHandler
import logging
from qtstrap import OPTIONS
from pathlib import Path
# Make sure the log database directory exists
Path(OPTIONS.config_dir).mkdir(parents=True, exist_ok=True)
db_path = OPTIONS.config_dir + '/log.db'
def install(database_name=db_path):
    """Attach a DatabaseHandler for *database_name* to the root logger.

    The default path is captured at import time; level 1 (below DEBUG=10)
    lets every record through to the handler.
    """
    logger = logging.getLogger()
    logger.setLevel(1)
    logger.addHandler(DatabaseHandler(database_name))
3211237 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
import chesslib
# Shortcut: `python setup.py publish` uploads an sdist to PyPI and exits.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit(0)
# Trove classifiers, one per line (the trailing backslash strips the
# leading newline before splitlines()).
classifiers = '''\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Operating System :: OS Independent
Topic :: Software Development :: Libraries :: Python Modules
'''.splitlines()
with open('README.rst') as fp:
    long_description = fp.read()
# Single-module distribution; version and description come from the module.
setup(
    name='chesslib',
    version=chesslib.get_version(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/dakrauth/chesslib',
    license='MIT',
    platforms=['any'],
    py_modules=['chesslib'],
    description=chesslib.__doc__,
    classifiers=classifiers,
    long_description=long_description
)
21972 | <reponame>WWWCourses/PythonCourseNetIT-Slides
"""ЗАДАЧА:
Разгледайте дадения по-долу код и направете необходимите промени,
така че след приключването на двата процеса променливата x да има стойност 20.
Използвайте multiprocessing.Queue() за да обмените текущата стойност на x между процесите.
"""
import multiprocessing as mp
def increment(r):
global x
for _ in r:
x+=1
print(f"x in {mp.current_process().name}: {x}")
if __name__ == "__main__":
x = 0
incr_count = 10
# create and start 2 process which should increment a variable:
pr1 = mp.Process(target=increment, args=(range(incr_count),))
pr2 = mp.Process(target=increment, args=(range(incr_count),))
pr1.start(); pr2.start()
# wait processes to finish
pr1.join();pr2.join()
print(f"x in {mp.current_process().name}: {x}")
#Очакван изход
# x in Main Process: 20 | StarcoderdataPython |
1618661 | <reponame>ToddG/hypermodern-python-seed
"""{{cookiecutter.project_name}} tests."""
| StarcoderdataPython |
1629244 | #!/usr/bin/env python
# encoding: utf-8
from .converter import Csv2Weka # noqa
from .version import __version__ # noqa
__all__ = ['Csv2Weka', '__version__']
| StarcoderdataPython |
165301 | '''set_degrees_counted(degrees_counted)
Sets the "number of degrees counted" to the desired value.
Parameters
degrees_counted
The value to which the number of degrees counted should be set.
Type:integer (a positive or negative whole number, including 0)
Values:any number
Default:no default value
Errors
TypeError
degrees_counted is not an integer.
RuntimeError
The motor has been disconnected from the Port
'''
from spike import Motor

# Motor attached to hub port A.
motor = Motor('A')
# Reset the motor's "degrees counted" tally to 10 (see the API notes above).
motor.set_degrees_counted(10)
#motor.run_for_rotations(2)
# The program will never proceed past a blocking motor call if the motor is stalled
3276368 | # V0
# V1
# http://bookshadow.com/weblog/2018/06/17/leetcode-exam-room/
# https://blog.csdn.net/fuxuemingzhu/article/details/83141523
# IDEA : bisect.insort : https://www.cnblogs.com/skydesign/archive/2011/09/02/2163592.html
class ExamRoom(object):
    """Seat students in row 0..N-1 so that each new student maximises the
    distance to the closest occupied seat (LeetCode 855).

    Occupied seats are kept in the sorted list ``self.L``; ``seat`` scans
    adjacent pairs for the widest gap and ``leave`` removes a seat.
    """

    def __init__(self, N):
        """
        :type N: int
        """
        self.N, self.L = N, list()

    def seat(self):
        """Seat a student at the best position and return that position.

        :rtype: int
        """
        import bisect  # local import: the original snippet used bisect without importing it

        N, L = self.N, self.L
        if not L:
            # Empty room: seat 0 is always optimal.
            res = 0
        else:
            # d is the best clearance found so far, res the matching seat.
            # Seat 0 offers clearance L[0] (distance to the first occupied seat).
            d, res = L[0], 0
            for a, b in zip(L, L[1:]):
                # Midpoint of the gap (a, b); integer division keeps seats
                # integral (the original `/` produced floats on Python 3).
                if (b - a) // 2 > d:
                    d = (b - a) // 2
                    res = (b + a) // 2
            # Seat N-1 offers clearance to the right-most occupied seat.
            if N - 1 - L[-1] > d:
                res = N - 1
        bisect.insort(L, res)
        return res

    def leave(self, p):
        """
        :type p: int
        :rtype: void
        """
        self.L.remove(p)
# Your ExamRoom object will be instantiated and called as such:
# obj = ExamRoom(N)
# param_1 = obj.seat()
# obj.leave(p)
# V2
# Time: seat: O(logn), amortized
# leave: O(logn)
# Space: O(n)
import heapq
class ExamRoom(object):
    """Heap-based ExamRoom (LeetCode 855).

    Empty gaps between occupied seats are kept in a max-heap keyed by the
    clearance a student seated inside the gap would get.  ``leave`` merges
    gaps without purging stale heap entries; ``seat`` discards stale entries
    lazily by cross-checking them against the ``self.__seats`` neighbour map.
    Sentinels -1 and N stand for virtual seats just beyond both row ends.

    (This rewrite also removes a corrupted trailing token that had been
    appended to the last line of the original snippet.)
    """

    def __init__(self, N):
        """
        :type N: int
        """
        self.__num = N
        # seat -> [left neighbour, right neighbour]; sentinels map to the full row.
        self.__seats = {-1: [-1, self.__num], self.__num: [-1, self.__num]}
        # Entries are (-distance, left, right); negation turns heapq into a max-heap.
        self.__max_heap = [(-self.__distance((-1, self.__num)), -1, self.__num)]

    def seat(self):
        """
        :rtype: int
        """
        # Lazy deletion: drop heap entries whose endpoints are gone or whose
        # neighbour links no longer agree with the seat map.
        while self.__max_heap[0][1] not in self.__seats or \
              self.__max_heap[0][2] not in self.__seats or \
              self.__seats[self.__max_heap[0][1]][1] != self.__max_heap[0][2] or \
              self.__seats[self.__max_heap[0][2]][0] != self.__max_heap[0][1]:
            heapq.heappop(self.__max_heap)  # lazy deletion
        _, left, right = heapq.heappop(self.__max_heap)
        # Boundary gaps seat at the row ends, interior gaps at the midpoint.
        mid = 0 if left == -1 \
              else self.__num - 1 if right == self.__num \
              else (left + right) // 2
        self.__seats[mid] = [left, right]
        heapq.heappush(self.__max_heap, (-self.__distance((left, mid)), left, mid))
        heapq.heappush(self.__max_heap, (-self.__distance((mid, right)), mid, right))
        self.__seats[left][1] = mid
        self.__seats[right][0] = mid
        return mid

    def leave(self, p):
        """
        :type p: int
        :rtype: void
        """
        left, right = self.__seats[p]
        self.__seats.pop(p)
        self.__seats[left][1] = right
        self.__seats[right][0] = left
        heapq.heappush(self.__max_heap, (-self.__distance((left, right)), left, right))

    def __distance(self, segment):
        # Boundary gaps yield their full length (a student sits at the very
        # end); interior gaps yield half, i.e. the midpoint's clearance.
        return segment[1] - segment[0] - 1 if segment[0] == -1 or segment[1] == self.__num \
               else (segment[1] - segment[0]) // 2
1651015 | import os
import re
import magic
import fnmatch
import ctypes
import ctypes.util
import binwalk.smartstrings
from binwalk.compat import *
from binwalk.common import strings
from binwalk.prettyprint import PrettyPrint
class HashResult(object):
    '''
    Lightweight record of one libfuzzy hashing pass over a file.
    For internal use only.
    '''
    def __init__(self, name, hash=None, strings=None):
        # File name plus the (optional) cached fuzzy hash and extracted strings.
        self.name, self.hash, self.strings = name, hash, strings
class HashMatch(object):
    '''
    Class for fuzzy hash matching of files and directories via libfuzzy (ssdeep).
    '''

    # Requires libfuzzy.so
    LIBRARY_NAME = "fuzzy"

    # Max result is 148 (http://ssdeep.sourceforge.net/api/html/fuzzy_8h.html)
    FUZZY_MAX_RESULT = 150
    # Files smaller than this won't produce meaningful fuzzy results (from ssdeep.h)
    FUZZY_MIN_FILE_SIZE = 4096

    DEFAULT_CUTOFF = 0
    CONSERVATIVE_CUTOFF = 90

    def __init__(self, cutoff=None, strings=False, same=False, symlinks=False, name=False, max_results=None, display=False, log=None, csv=False, quiet=False, format_to_screen=False, abspath=False, matches=None, types=None):
        '''
        Class constructor.

        @cutoff           - The fuzzy cutoff which determines if files are different or not.
        @strings          - Only hash strings inside of the file, not the entire file itself.
        @same             - Set to True to show files that are the same, False to show files that are different.
        @symlinks         - Set to True to include symbolic link files.
        @name             - Set to True to only compare files whose base names match.
        @max_results      - Stop searching after x number of matches.
        @display          - Set to True to display results to stdout, or pass an instance of binwalk.prettyprint.PrettyPrint.
        @log              - Specify a log file to log results to.
        @csv              - Set to True to log data in CSV format.
        @quiet            - Set to True to suppress output to stdout.
        @format_to_screen - Set to True to format the output to the terminal window width.
        @abspath          - Set to True to display absolute file paths.
        @matches          - A dictionary of file names to diff.
        @types            - A dictionary of file types to diff.

        Returns None.
        '''
        self.cutoff = cutoff
        self.strings = strings
        self.show_same = same
        self.symlinks = symlinks
        # None defaults (instead of {}): self.types is mutated in-place below,
        # so a shared module-level default dictionary must never be used.
        self.matches = matches if matches is not None else {}
        self.name = name
        self.types = types if types is not None else {}
        self.abspath = abspath
        self.max_results = max_results

        if display:
            if isinstance(display, PrettyPrint):
                self.pretty_print = display
            else:
                self.pretty_print = PrettyPrint(log=log, csv=csv, format_to_screen=format_to_screen, quiet=quiet)
            self.pretty_print.header(header="PERCENTAGE\t\t\tFILE", csv=True)
        else:
            self.pretty_print = None

        self.total = 0
        # Cache of the most recently hashed file on each side of a comparison.
        self.last_file1 = HashResult(None)
        self.last_file2 = HashResult(None)

        self.magic = magic.open(0)
        self.magic.load()

        lib_path = ctypes.util.find_library(self.LIBRARY_NAME)
        if lib_path is None:
            raise Exception('Could not find the hash matching library. Please install libfuzzy from ssdeep.')
        self.lib = ctypes.cdll.LoadLibrary(lib_path)

        if self.cutoff is None:
            self.cutoff = self.DEFAULT_CUTOFF

        # Pre-compile the type filter regexes (keys are include/exclude booleans,
        # values are lists of regex strings).
        for k in get_keys(self.types):
            for i in range(0, len(self.types[k])):
                self.types[k][i] = re.compile(self.types[k][i])

    def _get_strings(self, fname):
        # Concatenate every printable string (>= 10 chars) found in the file.
        return ''.join(list(binwalk.common.strings(fname, minimum=10)))

    def _print(self, match, fname):
        # Emit one result row through the pretty printer, if one is configured.
        if self.pretty_print:
            if self.abspath:
                fname = os.path.abspath(fname)
            self.pretty_print._pprint('%4d\t\t\t\t%s\n' % (match, self.pretty_print._format(fname)))

    def _print_footer(self):
        if self.pretty_print:
            self.pretty_print.footer()

    def _compare_files(self, file1, file2):
        '''
        Fuzzy diff two files.

        @file1 - The first file to diff.
        @file2 - The second file to diff.

        Returns the match percentage.
        Returns None on error.
        '''
        status = 0
        file1_dup = False
        file2_dup = False

        if not self.name or os.path.basename(file1) == os.path.basename(file2):
            if os.path.exists(file1) and os.path.exists(file2):

                hash1 = ctypes.create_string_buffer(self.FUZZY_MAX_RESULT)
                hash2 = ctypes.create_string_buffer(self.FUZZY_MAX_RESULT)

                # Check if the last file1 or file2 matches this file1 or file2; no need to re-hash if they match.
                if file1 == self.last_file1.name and self.last_file1.hash:
                    file1_dup = True
                else:
                    self.last_file1.name = file1

                if file2 == self.last_file2.name and self.last_file2.hash:
                    file2_dup = True
                else:
                    self.last_file2.name = file2

                try:
                    if self.strings:
                        # Hash only the printable strings extracted from each file.
                        if file1_dup:
                            file1_strings = self.last_file1.strings
                        else:
                            self.last_file1.strings = file1_strings = self._get_strings(file1)

                        if file2_dup:
                            file2_strings = self.last_file2.strings
                        else:
                            self.last_file2.strings = file2_strings = self._get_strings(file2)

                        if file1_strings == file2_strings:
                            return 100
                        else:
                            if file1_dup:
                                hash1 = self.last_file1.hash
                            else:
                                status |= self.lib.fuzzy_hash_buf(str2bytes(file1_strings), len(file1_strings), hash1)

                            if file2_dup:
                                hash2 = self.last_file2.hash
                            else:
                                status |= self.lib.fuzzy_hash_buf(str2bytes(file2_strings), len(file2_strings), hash2)
                    else:
                        # Hash the raw file contents.
                        if file1_dup:
                            hash1 = self.last_file1.hash
                        else:
                            status |= self.lib.fuzzy_hash_filename(str2bytes(file1), hash1)

                        if file2_dup:
                            hash2 = self.last_file2.hash
                        else:
                            status |= self.lib.fuzzy_hash_filename(str2bytes(file2), hash2)

                    if status == 0:
                        # Cache freshly computed hashes for the next comparison.
                        if not file1_dup:
                            self.last_file1.hash = hash1
                        if not file2_dup:
                            self.last_file2.hash = hash2

                        if hash1.raw == hash2.raw:
                            return 100
                        else:
                            return self.lib.fuzzy_compare(hash1, hash2)
                except Exception as e:
                    print ("WARNING: Exception while doing fuzzy hash: %s" % e)

        return None

    def is_match(self, match):
        '''
        Returns True if this is a good match.
        Returns False if his is not a good match.
        '''
        return (match is not None and ((match >= self.cutoff and self.show_same) or (match < self.cutoff and not self.show_same)))

    def _get_file_list(self, directory):
        '''
        Generates a directory tree, including/excluding files as specified in self.matches and self.types.

        @directory - The root directory to start from.

        Returns a set of file paths, excluding the root directory.
        '''
        file_list = []

        # Normalize directory path so that we can exclude it from each individual file path
        directory = os.path.abspath(directory) + os.path.sep

        for (root, dirs, files) in os.walk(directory):
            # Don't include the root directory in the file paths
            root = ''.join(root.split(directory, 1)[1:])

            # Get a list of files, with or without symlinks as specified during __init__.
            # Fix: test the full walked path for symlink-ness; the bare file name
            # would be resolved relative to the current working directory instead.
            files = [os.path.join(root, f) for f in files if self.symlinks or not os.path.islink(os.path.join(directory, root, f))]

            # If no filters were specified, return all files
            if not self.types and not self.matches:
                file_list += files
            else:
                # Filter based on the file type, as reported by libmagic
                if self.types:
                    for f in files:
                        for (include, regex_list) in iterator(self.types):
                            for regex in regex_list:
                                try:
                                    magic_result = self.magic.file(os.path.join(directory, f)).lower()
                                except Exception as e:
                                    magic_result = ''

                                match = regex.match(magic_result)

                                # If this matched an include filter, or didn't match an exclude filter
                                if (match and include) or (not match and not include):
                                    file_list.append(f)

                # Filter based on file name
                if self.matches:
                    for (include, file_filter_list) in iterator(self.matches):
                        for file_filter in file_filter_list:
                            matching_files = fnmatch.filter(files, file_filter)

                            # If this is an include filter, add all matching files to the list
                            if include:
                                file_list += matching_files
                            # Else, this add all files except those that matched to the list
                            else:
                                file_list += list(set(files) - set(matching_files))

        return set(file_list)

    def files(self, needle, haystack):
        '''
        Compare one file against a list of other files.

        @needle   - File to match against.
        @haystack - A list of haystack files.

        Returns a list of tuple results.
        '''
        results = []
        self.total = 0

        for f in haystack:
            m = self._compare_files(needle, f)
            if m is not None and self.is_match(m):
                self._print(m, f)
                results.append((m, f))

                self.total += 1
                if self.max_results and self.total >= self.max_results:
                    break

        self._print_footer()
        return results

    def file(self, needle, haystack):
        '''
        Search for one file inside one or more directories.

        @needle   - File to search for.
        @haystack - List of directories to search in.

        Returns a list of tuple results.
        '''
        matching_files = []
        self.total = 0
        done = False

        for directory in haystack:
            for f in self._get_file_list(directory):
                f = os.path.join(directory, f)
                m = self._compare_files(needle, f)
                if m is not None and self.is_match(m):
                    self._print(m, f)
                    matching_files.append((m, f))

                    self.total += 1
                    if self.max_results and self.total >= self.max_results:
                        done = True
                        break
            if done:
                break

        self._print_footer()
        return matching_files

    def directories(self, needle, haystack):
        '''
        Compare the contents of one directory with the contents of other directories.

        @needle   - Source directory to compare everything to.
        @haystack - Compare files in the source to files in these directories.

        Returns a list of tuple results.
        '''
        done = False
        results = []
        self.total = 0

        source_files = self._get_file_list(needle)

        for directory in haystack:
            dir_files = self._get_file_list(directory)

            for f in source_files:
                # Only compare files that exist (by relative path) in both trees.
                if f in dir_files:
                    file1 = os.path.join(needle, f)
                    file2 = os.path.join(directory, f)

                    m = self._compare_files(file1, file2)
                    if m is not None and self.is_match(m):
                        self._print(m, file2)
                        results.append((m, file2))

                        self.total += 1
                        if self.max_results and self.total >= self.max_results:
                            done = True
                            break
            if done:
                break

        self._print_footer()
        return results
if __name__ == '__main__':
    import sys

    # Ad-hoc smoke test: fuzzy-string-match argv[1] against the remaining
    # directory arguments, keeping only files libmagic reports as "elf*".
    # NOTE(review): __init__ indexes and reassigns types values as lists,
    # so this should presumably be types={True: ["^elf"]} — verify.
    hmatch = HashMatch(strings=True, name=False, types={True:"^elf"})
    print (hmatch.file(sys.argv[1], sys.argv[2:]))
    #for (match, fname) in hmatch.directories(sys.argv[1], sys.argv[2]):
    #for (match, fname) in hmatch.find_file(sys.argv[1], sys.argv[2:]):
    #    print match, fname
| StarcoderdataPython |
2982 | <filename>reservation_management/migrations/0021_delete_greenpass.py
# Generated by Django 3.2.7 on 2021-10-22 14:23
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the GreenPass model that was
    # introduced by migration 0020_greenpass.

    dependencies = [
        ('reservation_management', '0020_greenpass'),
    ]

    operations = [
        migrations.DeleteModel(
            name='GreenPass',
        ),
    ]
| StarcoderdataPython |
3392250 | from carla.data import DataCatalog
from carla.models import load_model, predict_negative_instances
if __name__ == "__main__":
data_name = "adult"
data_catalog = "adult_catalog.yaml"
data = DataCatalog(data_name, data_catalog)
model = load_model("ann", data_name)
print(f"Using model: {model.__class__.__module__}")
print(data.target)
print(predict_negative_instances(model, data).head(100))
| StarcoderdataPython |
1770628 | """
atmospheric.py, <NAME> (2016-10-26)
Atmospheric water vapour, ozone and AOT from GEE
Usage
H2O = Atmospheric.water(geom,date)
O3 = Atmospheric.ozone(geom,date)
AOT = Atmospheric.aerosol(geom,date)
"""
import ee
import geemap
from Py6S import *
import os, sys, time, math, datetime
class Atmospheric():
    """Namespace of static helpers that fetch atmospheric constituents
    from Google Earth Engine for a target geometry and date.

    Values are returned in the units expected by Py6S: water vapour in
    g/cm^2, ozone in atm-cm, and aerosol optical thickness at 550 nm.
    All methods are stateless; they were originally defined without `self`
    and are now marked @staticmethod to make that usage explicit (calls of
    the form Atmospheric.water(geom, date) are unchanged).
    """

    @staticmethod
    def round_date(date, xhour):
        """
        rounds a date of to the closest 'x' hours
        """
        y = date.get('year')
        m = date.get('month')
        d = date.get('day')
        H = date.get('hour')
        HH = H.divide(xhour).round().multiply(xhour)
        return date.fromYMD(y, m, d).advance(HH, 'hour')

    @staticmethod
    def round_month(date):
        """
        round date to closest month
        """
        # start of THIS month
        m1 = date.fromYMD(date.get('year'), date.get('month'), ee.Number(1))
        # start of NEXT month
        m2 = m1.advance(1, 'month')
        # difference from date
        d1 = ee.Number(date.difference(m1, 'day')).abs()
        d2 = ee.Number(date.difference(m2, 'day')).abs()
        # return closest start of month
        return ee.Date(ee.Algorithms.If(d2.gt(d1), m1, m2))

    @staticmethod
    def water(geom, date):
        """
        Water vapour column above target at time of image aquisition.

        (Kalnay et al., 1996, The NCEP/NCAR 40-Year Reanalysis Project. Bull.
        Amer. Meteor. Soc., 77, 437-471)
        """
        # Point geometry required
        centroid = geom.centroid()
        # H2O datetime is in 6 hour intervals
        H2O_date = Atmospheric.round_date(date, 6)
        # filtered water collection
        # NOTE(review): the filter window is a whole month even though the
        # timestamp is rounded to 6 hours; .first() picks the earliest image.
        water_ic = ee.ImageCollection('NCEP_RE/surface_wv').filterDate(H2O_date, H2O_date.advance(1, 'month'))
        # water image
        water_img = ee.Image(water_ic.first())
        # water_vapour at target
        water = water_img.reduceRegion(reducer=ee.Reducer.mean(), geometry=centroid).get('pr_wtr')
        # convert to Py6S units (Google = kg/m^2, Py6S = g/cm^2)
        water_Py6S_units = ee.Number(water).divide(10)
        return water_Py6S_units

    @staticmethod
    def ozone(geom, date):
        """
        returns ozone measurement from merged TOMS/OMI dataset

        OR

        uses our fill value (which is mean value for that latlon and day-of-year)
        """
        # Point geometry required
        centroid = geom.centroid()

        def ozone_measurement(centroid, O3_date):
            # filtered ozone collection
            ozone_ic = ee.ImageCollection('TOMS/MERGED').filterDate(O3_date, O3_date.advance(1, 'month'))
            # ozone image
            ozone_img = ee.Image(ozone_ic.first())
            # ozone value IF TOMS/OMI image exists ELSE use fill value
            ozone = ee.Algorithms.If(ozone_img,\
                ozone_img.reduceRegion(reducer=ee.Reducer.mean(), geometry=centroid).get('ozone'),\
                ozone_fill(centroid, O3_date))
            return ozone

        def ozone_fill(centroid, O3_date):
            """
            Gets our ozone fill value (i.e. mean value for that doy and latlon)

            you can see it
            1) compared to LEDAPS: https://code.earthengine.google.com/8e62a5a66e4920e701813e43c0ecb83e
            2) as a video: https://www.youtube.com/watch?v=rgqwvMRVguI&feature=youtu.be
            """
            # ozone fills (i.e. one band per doy)
            ozone_fills = ee.ImageCollection('users/samsammurphy/public/ozone_fill').toList(366)
            # day of year index
            jan01 = ee.Date.fromYMD(O3_date.get('year'), 1, 1)
            doy_index = date.difference(jan01, 'day').toInt()  # (NB. index is one less than doy, so no need to +1)
            # day of year image
            fill_image = ee.Image(ozone_fills.get(doy_index))
            # return scalar fill value
            return fill_image.reduceRegion(reducer=ee.Reducer.mean(), geometry=centroid).get('ozone')

        # O3 datetime in 24 hour intervals
        O3_date = Atmospheric.round_date(date, 24)
        # TOMS temporal gap
        TOMS_gap = ee.DateRange('1994-11-01', '1996-08-01')
        # avoid TOMS gap entirely
        ozone = ee.Algorithms.If(TOMS_gap.contains(O3_date), ozone_fill(centroid, O3_date), ozone_measurement(centroid, O3_date))
        # fix other data gaps (e.g. spatial, missing images, etc..)
        ozone = ee.Algorithms.If(ozone, ozone, ozone_fill(centroid, O3_date))
        # convert to Py6S units
        ozone_Py6S_units = ee.Number(ozone).divide(1000)  # (i.e. Dobson units are milli-atm-cm )
        return ozone_Py6S_units

    @staticmethod
    def aerosol(geom, date):
        """
        Aerosol Optical Thickness.

        try:
            MODIS Aerosol Product (monthly)
        except:
            fill value
        """

        def aerosol_fill(date):
            """
            MODIS AOT fill value for this month (i.e. no data gaps)
            """
            return ee.Image('users/samsammurphy/public/AOT_stack')\
                .select([ee.String('AOT_').cat(date.format('M'))])\
                .rename(['AOT_550'])

        def aerosol_this_month(date):
            """
            MODIS AOT original data product for this month (i.e. some data gaps)
            """
            # image for this month
            img = ee.Image(\
                ee.ImageCollection('MODIS/006/MOD08_M3')\
                    .filterDate(Atmospheric.round_month(date))\
                    .first()\
                )
            # fill missing month (?)
            img = ee.Algorithms.If(img,\
                # all good
                img\
                    .select(['Aerosol_Optical_Depth_Land_Mean_Mean_550'])\
                    .divide(1000)\
                    .rename(['AOT_550']),\
                # missing month
                aerosol_fill(date))
            return img

        def get_AOT(AOT_band, geom):
            """
            AOT scalar value for target
            """
            return ee.Image(AOT_band).reduceRegion(reducer=ee.Reducer.mean(),\
                                                   geometry=geom.centroid())\
                                     .get('AOT_550')

        after_modis_start = date.difference(ee.Date('2000-03-01'), 'month').gt(0)
        AOT_band = ee.Algorithms.If(after_modis_start, aerosol_this_month(date), aerosol_fill(date))
        AOT = get_AOT(AOT_band, geom)
        # i.e. check reduce region worked (else force fill value)
        AOT = ee.Algorithms.If(AOT, AOT, get_AOT(aerosol_fill(date), geom))

        return AOT
class S2_L1C():
    """
    Batch function to correct all images inside a GEE collection.
    Reference idea from https://github.com/samsammurphy/gee-atmcorr-S2/issues/7

    conversion():
        - geom:   Area of interest to which the 6S correction is applied
        - imgCol: Sentinel 2 L1C image collection
        - gpath:  Asset folder and image prefix used to save images in GEE
                  (e.g. users/samsammurphy/shared/sentinel2/6S/ESRIN_).
                  The full asset id is completed with each image's dateString.
    """
    # NOTE(review): conversion() is written without `self` and communicates
    # with its nested helpers through module-level globals (toa, info,
    # scene_date, solar_z, s) — it is therefore not reentrant; only one
    # conversion should run per interpreter at a time.
    def conversion(geom, imgCol, gpath):
        region = geom.buffer(1000).bounds().getInfo()['coordinates']

        # Spectral Response functions
        def spectralResponseFunction(bandname):
            """
            Extract spectral response function for given band name
            """
            bandSelect = {
                'B1':PredefinedWavelengths.S2A_MSI_01,
                'B2':PredefinedWavelengths.S2A_MSI_02,
                'B3':PredefinedWavelengths.S2A_MSI_03,
                'B4':PredefinedWavelengths.S2A_MSI_04,
                'B5':PredefinedWavelengths.S2A_MSI_05,
                'B6':PredefinedWavelengths.S2A_MSI_06,
                'B7':PredefinedWavelengths.S2A_MSI_07,
                'B8':PredefinedWavelengths.S2A_MSI_08,
                'B8A':PredefinedWavelengths.S2A_MSI_8A,
                'B9':PredefinedWavelengths.S2A_MSI_09,
                'B10':PredefinedWavelengths.S2A_MSI_10,
                'B11':PredefinedWavelengths.S2A_MSI_11,
                'B12':PredefinedWavelengths.S2A_MSI_12,
                }
            return Wavelength(bandSelect[bandname])

        # TOA Reflectance to Radiance
        def toa_to_rad(bandname):
            """
            Converts top of atmosphere reflectance to at-sensor radiance
            """
            # solar exoatmospheric spectral irradiance
            ESUN = info['SOLAR_IRRADIANCE_'+bandname]
            solar_angle_correction = math.cos(math.radians(solar_z))

            # Earth-Sun distance (from day of year)
            doy = scene_date.timetuple().tm_yday
            d = 1 - 0.01672 * math.cos(0.9856 * (doy-4))
            # http://physics.stackexchange.com/
            # questions/177949/earth-sun-distance-on-a-given-day-of-the-year

            # conversion factor
            multiplier = ESUN*solar_angle_correction/(math.pi*d**2)

            # at-sensor radiance
            rad = toa.select(bandname).multiply(multiplier)
            return rad

        # Radiance to Surface Reflectance
        def surface_reflectance(bandname):
            """
            Calculate surface reflectance from at-sensor radiance given waveband name
            """
            # run 6S for this waveband
            s.wavelength = spectralResponseFunction(bandname)
            s.run()

            # extract 6S outputs
            Edir = s.outputs.direct_solar_irradiance              #direct solar irradiance
            Edif = s.outputs.diffuse_solar_irradiance             #diffuse solar irradiance
            Lp = s.outputs.atmospheric_intrinsic_radiance         #path radiance
            absorb = s.outputs.trans['global_gas'].upward         #absorption transmissivity
            scatter = s.outputs.trans['total_scattering'].upward  #scattering transmissivity
            tau2 = absorb*scatter                                 #total transmissivity

            # radiance to surface reflectance
            rad = toa_to_rad(bandname)
            ref = rad.subtract(Lp).multiply(math.pi).divide(tau2*(Edir+Edif))
            return ref

        # List with images to filter
        features = imgCol.getInfo()['features']

        for i in features:
            # Filter by ID in the list by image
            # (NOTE: `id` shadows the builtin inside this loop)
            id = i['id']
            print("Bands: ")
            # Selection of the image
            img = ee.Image(id)
            # write image date
            date = img.date()

            # Defining global variables:
            # (shared with the nested helper functions above)
            global toa
            global info
            global scene_date
            global solar_z
            global s

            # top of atmosphere reflectance
            toa = img.divide(10000)

            # METADATA
            info = img.getInfo()['properties']
            scene_date = datetime.datetime\
                .utcfromtimestamp(info['system:time_start']/1000)
            solar_z = info['MEAN_SOLAR_ZENITH_ANGLE']

            # ATMOSPHERIC CONSTITUENTS
            h2o = Atmospheric.water(geom,date).getInfo()
            o3 = Atmospheric.ozone(geom,date).getInfo()
            # Atmospheric Optical Thickness
            aot = Atmospheric.aerosol(geom,date).getInfo()

            # TARGET ALTITUDE (km)
            SRTM = ee.Image('CGIAR/SRTM90_V4')
            alt = SRTM.reduceRegion(reducer = ee.Reducer.mean(),
                                    geometry = geom.centroid()).get('elevation').getInfo()
            km = alt/1000 # i.e. Py6S uses units of kilometers

            # 6S OBJECT
            # Instantiate
            s = SixS()

            # Atmospheric constituents
            s.atmos_profile = AtmosProfile.UserWaterAndOzone(h2o,o3)
            s.aero_profile = AeroProfile.Continental
            s.aot550 = aot

            # Earth-Sun-satellite geometry
            s.geometry = Geometry.User()
            s.geometry.view_z = 0               # calculation assuming vision in NADIR
            s.geometry.solar_z = solar_z        # solar zenith angle
            s.geometry.month = scene_date.month # month used in distance Earth-Sun
            s.geometry.day = scene_date.day     # day used in the distance Earth-Sun
            s.altitudes\
                .set_sensor_satellite_level()   # Sensor altitude
            s.altitudes\
                .set_target_custom_altitude(km) # Altitude of the surface

            # ATMOSPHERIC CORRECTION (by waveband)
            output = img.select('QA60')
            for band in ['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','B12']:
                print(band, end=' ')
                output = output.addBands(surface_reflectance(band))

            # set some properties for export
            dateString = scene_date.strftime("%Y-%m-%d")
            ref = output.set({'satellite':'Sentinel 2',
                              'fileID':info['system:index'],
                              'date':dateString,
                              'aerosol_optical_thickness':aot,
                              'water_vapour':h2o,
                              'ozone':o3})

            # define YOUR assetID or folder
            assetID = gpath + dateString

            # export
            export = ee.batch.Export.image.toAsset(\
                image = ref,
                description = 'sentinel2_atmcorr_export',
                assetId = assetID,
                region = region,
                crs = 'EPSG:4326',
                scale = 10)

            export.start()
            # print a message for each exported image
            print("image "+ assetID +" exported")
            print('\n')
            time.sleep(1)

        # (print() returns None, so this function effectively returns None)
        return print("Conversion ready")
| StarcoderdataPython |
1777924 | from Symtab import ModuleScope
from PyrexTypes import *
from UtilityCode import CythonUtilityCode
from Errors import error
from Scanning import StringSourceDescriptor
class CythonScope(ModuleScope):
    """Pseudo module scope for the magic `cython` module and its builtins."""
    # Marks every entry declared in this scope as a Cython builtin.
    is_cython_builtin = 1

    def __init__(self):
        ModuleScope.__init__(self, u'cython', None, None)
        # Pretend a .pxd was already loaded; this scope is built in code below.
        self.pxd_file_loaded = True
        self.populate_cython_scope()

    def lookup_type(self, name):
        # This function should go away when types are all first-level objects.
        type = parse_basic_type(name)
        if type:
            return type

    def find_module(self, module_name, pos):
        # No real submodules can be imported from `cython` at compile time.
        error("cython.%s is not available" % module_name, pos)

    def find_submodule(self, module_name):
        entry = self.entries.get(module_name, None)
        if entry and entry.as_module:
            return entry.as_module
        else:
            # TODO: fix find_submodule control flow so that we're not
            # expected to create a submodule here (to protect CythonScope's
            # possible immutability). Hack ourselves out of the situation
            # for now.
            raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
                "cython.%s is not available" % module_name)

    def lookup_qualified_name(self, qname):
        # ExprNode.as_cython_attribute generates qnames and we untangle it here...
        name_path = qname.split(u'.')
        scope = self
        while len(name_path) > 1:
            # Descend one scope level per dotted component.
            scope = scope.lookup_here(name_path[0]).as_module
            del name_path[0]
            if scope is None:
                return None
        else:
            # NOTE: `else` on a while-loop runs whenever the loop finishes
            # without `break`, so this is simply the fall-through case.
            return scope.lookup_here(name_path[0])

    def populate_cython_scope(self):
        # These are used to optimize isinstance in FinalOptimizePhase
        type_object = self.declare_typedef(
            'PyTypeObject',
            base_type = c_void_type,
            pos = None,
            cname = 'PyTypeObject')
        type_object.is_void = True
        type_object_type = type_object.type

        self.declare_cfunction(
            'PyObject_TypeCheck',
            CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
                                    CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
            pos = None,
            defining = 1,
            cname = 'PyObject_TypeCheck')

#        self.test_cythonscope()

    def test_cythonscope(self):
        # A special function just to make it easy to test the scope and
        # utility code functionality in isolation. It is available to
        # "end-users" but nobody will know it is there anyway...
        cython_testscope_utility_code.declare_in_scope(self)
        cython_test_extclass_utility_code.declare_in_scope(self)

        #
        # The view sub-scope
        #
        self.viewscope = viewscope = ModuleScope(u'cython.view', self, None)
        self.declare_module('view', viewscope, None)
        viewscope.is_cython_builtin = True
        viewscope.pxd_file_loaded = True

        cythonview_testscope_utility_code.declare_in_scope(viewscope)
def create_cython_scope(context, create_testscope):
    """Build a fresh CythonScope, optionally populated with the test scope.

    Deliberately not a singleton: it is not yet clear whether any code
    mutates the scope, which would make reuse across contexts unsafe.
    """
    new_scope = CythonScope()
    if create_testscope:
        new_scope.test_cythonscope()
    return new_scope
cython_testscope_utility_code = CythonUtilityCode(u"""
@cname('__pyx_testscope')
cdef object _testscope(int value):
return "hello from cython scope, value=%d" % value
""")
undecorated_methods_protos = UtilityCode(proto=u"""
/* These methods are undecorated and have therefore no prototype */
static PyObject *__pyx_TestClass_cdef_method(
struct __pyx_TestClass *self, int value);
static PyObject *__pyx_TestClass_cpdef_method(
struct __pyx_TestClass *self, int value, int skip_dispatch);
static PyObject *__pyx_TestClass_def_method(
PyObject *self, PyObject *value);
""")
test_cython_utility_dep = CythonUtilityCode(u"""
@cname('__pyx_test_dep')
cdef test_dep(obj):
print 'test_dep', obj
""")
cython_test_extclass_utility_code = CythonUtilityCode(
name="TestClassUtilityCode",
prefix="__pyx_prefix_TestClass_",
requires=[undecorated_methods_protos, test_cython_utility_dep],
impl=u"""
cdef extern from *:
cdef object __pyx_test_dep(object)
@cname('__pyx_TestClass')
cdef class TestClass(object):
cdef public int value
def __init__(self, int value):
self.value = value
def __str__(self):
return 'TestClass(%d)' % self.value
cdef cdef_method(self, int value):
print 'Hello from cdef_method', value
cpdef cpdef_method(self, int value):
print 'Hello from cpdef_method', value
def def_method(self, int value):
print 'Hello from def_method', value
@cname('cdef_cname')
cdef cdef_cname_method(self, int value):
print "Hello from cdef_cname_method", value
@cname('cpdef_cname')
cpdef cpdef_cname_method(self, int value):
print "Hello from cpdef_cname_method", value
@cname('def_cname')
def def_cname_method(self, int value):
print "Hello from def_cname_method", value
@cname('__pyx_test_call_other_cy_util')
cdef test_call(obj):
print 'test_call'
__pyx_test_dep(obj)
@cname('__pyx_TestClass_New')
cdef _testclass_new(int value):
return TestClass(value)
""")
cythonview_testscope_utility_code = CythonUtilityCode(u"""
@cname('__pyx_view_testscope')
cdef object _testscope(int value):
return "hello from cython.view scope, value=%d" % value
""")
| StarcoderdataPython |
3243638 | <reponame>paulaksm/rrt-plan
class Domain(object):
    ''' STRIPS domain representation '''

    def __init__(self, name, requirements, types, predicates, operators):
        # Stash everything under private names; read access goes through the
        # properties below, which hand out defensive copies of the sequences.
        self._name = name
        self._requirements = requirements
        self._types = types
        self._predicates = predicates
        self._operators = operators

    # read-only accessors
    @property
    def name(self):
        '''The domain name.'''
        return self._name

    @property
    def requirements(self):
        '''Copy of the requirement flags.'''
        return self._requirements[:]

    @property
    def types(self):
        '''Copy of the declared types.'''
        return self._types[:]

    @property
    def predicates(self):
        '''Copy of the predicate declarations.'''
        return self._predicates[:]

    @property
    def operators(self):
        '''Copy of the operator definitions.'''
        return self._operators[:]

    def __str__(self):
        # Assemble the report line by line, then join once.
        parts = [
            '@ Domain: {0}\n'.format(self._name),
            '>> requirements: {0}\n'.format(', '.join(self._requirements)),
            '>> types: {0}\n'.format(', '.join(self._types)),
            '>> predicates: {0}\n'.format(', '.join(map(str, self._predicates))),
            '>> operators:\n {0}\n'.format(
                '\n '.join(str(op).replace('\n', '\n ') for op in self._operators)),
        ]
        return ''.join(parts)
| StarcoderdataPython |
3257296 | <filename>ldap2sql.py
#!/usr/bin/python
import inspect
import os
import sys
import urllib
import urllib2
import hashlib
import logging
from sqlalchemy import create_engine
# Python 2 hack: force the default string encoding to UTF-8 so implicit
# str<->unicode conversions in the DB/LDAP layers don't raise. reload() is
# needed because site.py removes sys.setdefaultencoding at interpreter start.
reload(sys)
sys.setdefaultencoding('UTF8')
# Make the bundled "contrib" directory (next to this file) importable
# regardless of the current working directory.
cmd_folder = os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "contrib"))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
from activedirectory import ActiveDirectory
jira_stats = """
select 'issues' as metric, count(*) as value from jiraissue
UNION
select 'projects', count(*) from project
UNION
select 'customfields', count(*) from customfield
UNION
select 'workflows', count(distinct name) from os_wfentry
UNION
select 'users', count(*) from cwd_user
UNION
SELECT 'users_active', count(*)
FROM cwd_user, cwd_user_attributes
WHERE cwd_user_attributes.user_id = cwd_user.id
AND cwd_user_attributes.attribute_name = 'login.previousLoginMillis'
UNION
select 'roles', count(*) as roles from projectrole
UNION
select 'dashboards', count(*) as dashboards from portalpage
UNION
select 'plugins', count(*) as plugins from pluginstate where pluginenabled = 'true'
UNION
select 'actions', count(*) as actions from jiraaction
UNION
select 'issuetypes', count(*) as issuetype from issuetype
UNION
select 'statuses', count(*) as issuestatus from issuestatus
UNION
select 'issuetypescreenschemes', count(*) from issuetypescreenscheme
UNION
select 'issuelinktypes', count(*) from issuelinktype
UNION
select 'fieldscreenschemes', count(*) from fieldscreenscheme
UNION
select 'fieldscreens', count(*) from fieldscreen
UNION
select 'fieldlayouts', count(*) from fieldlayout
UNION
select 'fieldlayoutschemes', count(*) from fieldlayoutscheme
UNION
select 'fieldconfigscheme', count(*) from fieldconfigscheme
UNION
select 'changegroup', count(*) from changegroup
UNION
select 'changeitem', count(*) from changeitem
UNION
select 'agileboards', count(*) from "AO_60DB71_RAPIDVIEW"
UNION
select 'attachments', count(*) as attachments from fileattachment
UNION
select 'attachments_gb', round(sum(filesize)/1024/1024/1024) as attachments_gb from fileattachment
order by metric
;
"""
class CustomUpdater(object):
"""The methods both update and insert elements in the table as folows:
UPDATE table SET some_column='something' WHERE another_column='something else';
INSER INTO table (some_column) 'something' WHERE NOT EXISTS (SELECT 1 FROM table WHERE another_column='something else')
"""
def __init__(self, stats_uri=None, activedirectory_uri=None):
    """Open the optional stats-DB and Active Directory connections.

    @stats_uri           - SQLAlchemy URI of the database holding custom.stats.
    @activedirectory_uri - URI of the Active Directory server.

    Either may be None; the corresponding attribute (engine / ad) is then
    simply never created, so only call the matching update_* method.
    """
    if stats_uri is not None:
        self.engine = create_engine(stats_uri, convert_unicode=True)
    if activedirectory_uri is not None:
        self.ad = ActiveDirectory(activedirectory_uri, paged_size=1000, size_limit=50000)
    # AD attributes requested per user; index-aligned with sql_names below.
    self.fields = ['mail', 'title', 'manager', 'distinguishedName', 'postalCode', 'telephoneNumber', 'givenName', 'name', 'facsimileTelephoneNumber',
                   'department', 'company', 'streetAddress', 'sAMAccountType', 'mobile', 'c', 'l', 'st', 'extensionAttribute14',
                   'extensionAttribute15', 'extensionAttribute3', 'sAMAccountName', 'userAccountControl']
    # Target SQL columns for the AD attributes above (same order).
    self.sql_names = ['mail', 'title', 'managerdn', 'distinguishedname', 'postalcode', 'phone', 'givenname', 'name', 'fax',
                      'department', 'company', 'streetaddress', 'samaccounttype', 'mobile', 'country', 'locale', 'state', 'vp',
                      'region', 'office', 'username', 'useraccountcontrol']
    # Timestamp SQL columns and their AD counterparts (same order).
    self.sql_times = ['created', 'changed']
    self.time_fields = ['whenCreated', 'whenChanged']
    self.exists = None
    # metric name -> value cache filled by update_stats().
    self.elem_dict = {}
    # AD search results filled by update_activedirectory().
    self.users = []
"""Updates all the fields in custom.stats"""
def update_stats(self):
try:
self.engine.execute('INSERT INTO custom.stats (date) (SELECT CURRENT_DATE);')
except Exception:
pass
for row in self.engine.execute(jira_stats):
self.elem_dict[str(row[0])] = row[1]
for key, value in self.elem_dict.iteritems():
update_query = 'UPDATE custom.stats SET %s=%s WHERE date=CURRENT_DATE;' % (key, value)
self.engine.execute(update_query)
"""Updates most of the fields in custom.activedirectory
The method gets all the attributes for each user whose account was modified since the day of the last update
and parses those attributes to meet the fields in the table"""
def update_activedirectory(self, full=False):
if full:
newf = None
else:
newf = "(whenChanged>=" + self.get_max_date_ad() + ")"
self.users = self.ad.get_users(new_filter=newf, attrlist=self.fields)
logging.info('Found %s users in AD using filter = %s' % (len(self.users), newf))
if not self.users:
raise NotImplemented("WTH")
for count, user in enumerate(self.users):
if count % 100 == 0:
logging.info("%s..." % count)
#print count, user
try:
atr = self.users[user]
except NotImplementedError as e:
logging.error("Skipped user %s because %s" % (user, e))
continue
update_query = 'UPDATE custom.activedirectory SET counter = counter+1 '
for i in range(len(self.fields)):
update_query = self.update_fields(update_query, atr, self.fields[i], self.sql_names[i])
update_query = self.update_times(update_query, atr)
if int(atr['userAccountControl']) & 0x02:
update_query += ', is_active=\'false\''
else:
update_query += ', is_active=\'true\''
update_query += ' WHERE username=\'' + user + '\';'
insert_query = 'INSERT INTO custom.activedirectory ('
first = True
for i in range(len(self.sql_names)):
try:
atr[self.fields[i]]
if not first:
insert_query += ','
insert_query += self.sql_names[i]
first = False
except (IndexError, KeyError):
pass
for i in range(len(self.sql_times)):
try:
atr[self.time_fields[i]]
insert_query += ', ' + self.sql_times[i]
except (IndexError, KeyError):
pass
# UPSERT implementation based on http://stackoverflow.com/a/6527838/99834
insert_query += ',is_active) SELECT '
insert_query = self.insert_fields(insert_query, atr)
insert_query = self.insert_times(insert_query, atr)
if int(atr['userAccountControl']) & 0x02:
insert_query += ',\'false\''
else:
insert_query += ',\'true\''
insert_query += ' WHERE NOT EXISTS (SELECT 1 FROM custom.activedirectory WHERE username= \''\
+ self.escape_quote(user) + '\');'
self.engine.execute(update_query)
self.engine.execute(insert_query)
# updating managers, LDAP returns DN instead of username for managers
# we look for all mana
"""Checks the deleted users from ldap by comparing the users from ldap with those from the database"""
def update_deleted(self):
sql_user = []
for row in self.engine.execute("SELECT samaccountname FROM custom.activedirectory WHERE is_deleted = 'false' ORDER BY samaccountname"):
if row[0]:
sql_user.append(row[0].encode('utf-8'))
self.users = self.ad.get_users()
for i in sql_user:
if not i in self.users:
logging.info("User %s was deleted from LDAP" % i)
self.engine.execute("UPDATE custom.activedirectory SET is_deleted = 'true' and deleted = now() where username = '%s'" % i)
"""Creates the url that should exist if the user has a gravatar picture conected with his email.
Then it checks if the url exists"""
def check_gravatar(self):
return # TODO: re-enable gravator check
self.users = self.ad.get_users()
for count, user in enumerate (self.users):
atr = self.ad.get_attributes(user = user)
try:
email = atr['mail']
default = 'http://www.gravatar.com/avatar/'
size = 40
gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(email.lower()).hexdigest() + "?"
gravatar_url += urllib.urlencode({'d':default, 's':str(size)})
try:
u = self.find_matches(gravatar_url)
if len(u) == 0:
has_avatar = 'true'
else:
has_avatar = 'false'
except (urllib2.HTTPError, urllib2.URLError):
has_avatar = 'false'
except (IndexError, KeyError, TypeError):
has_avatar = 'false'
self.engine.execute('UPDATE custom.activedirectory SET has_gravatar=\'%s\' WHERE username=\'%s\';' % (has_avatar, user))
def find_matches(self, newu):
urls = []
urls.append('http://www.gravatar.com/avatar/64908bc7260a8be06b142d34f83b9781?s=40&d=http%3A%2F%2Fwww.gravatar.com%2Favatar%2F')
urls.append(newu)
d = {}
url_contents = {}
matches = []
for url in urls:
c = urllib2.urlopen(url)
url_contents[url] = []
while 1:
r = c.read(4096)
if not r: break
md5 = hashlib.md5(r).hexdigest()
url_contents[url].append(md5)
if md5 in d:
url2 = d[md5]
matches.append((md5, url, url2))
else:
d[md5] = []
d[md5].append(url)
return matches
def update_all(self, full=False):
"""Updates all the fields in all the custom tables"""
logging.info("Updating changes from AD...")
self.update_activedirectory(full=full)
for row in self.engine.execute('SELECT CURRENT_DATE'):
current_date = str(row[0])
current_date = current_date[:10]
break
for row in self.engine.execute('SELECT MAX(gravatar_check_date) FROM custom.activedirectory;'):
check_date = str(row[0])
check_date = check_date[:10]
break
if check_date == current_date:
self.check_gravatar()
self.update_stats()
logging.info("Updating deleted accounts...")
self.update_deleted() # must be before managers!
logging.info("Updating managers...")
self.update_managers()
def update_managers(self):
"""
This will populate the manager field with the username of the manager, based on the managerdn (the field returned by ldap)
:return:
"""
for row in self.engine.execute("""select ad.username, ad.manager as oldmanager, ad2.username as newmanager
from custom.activedirectory ad
left join custom.activedirectory ad2 on ad.managerdn = ad2.distinguishedname and NOT ad2.is_deleted
where ad.managerdn is not NULL AND ad.manager != ad2.username
--and ad.manager != ad2.username
--limit 100;"""):
(username, oldmanager, newmanager) = row
self.engine.execute("UPDATE custom.activedirectory SET manager='%s' where username='%s'" % (newmanager, username))
def update_fields(self, update_query, atr, varname, sql_name):
"""Updates the update_query string with the fields that don't require special parsing"""
try:
atr[varname]
update_query += ', ' + sql_name + "='" + self.escape_quote(atr[varname]).encode('utf-8') + "'"
except (IndexError, KeyError):
pass
return update_query
def insert_fields(self, insert_query, atr):
"""Updates the insert_query string with the same fields as the ones above"""
first = True
for i in range(len(self.sql_names)):
try:
atr[self.fields[i]]
if not first:
insert_query += ','
insert_query += '\'' + self.escape_quote(atr[self.fields[i]]).encode('utf-8') + '\''
first = False
except (IndexError, KeyError):
pass
return insert_query
def update_times(self, update_query, atr):
"""Updates the update_query string with the fields that require special parsing (date variables)"""
for i in range(len(self.time_fields)):
try:
update_query += ', ' + self.sql_times[i] + '=\'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
except (IndexError, KeyError):
pass
return update_query
def insert_times(self, insert_query, atr):
"""Same as the above just for insert_query"""
for i in range(len(self.sql_times)):
try:
atr[self.time_fields[i]]
insert_query += ', \'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
except (IndexError, KeyError):
pass
return insert_query
def escape_quote(self, string):
"""Escapes the quotes in a string with double quote:
someone's string => someone''s string"""
new_str = string
count = 0
for i in range(len(string)):
if string[i] == '\'':
new_str = new_str[:count] + '\'' + string[i:]
count += 1
count += 1
return new_str
def get_max_date_ad(self):
"""Determines the last date at which the table was updated.
Finds the last date at which an account from the table was updated
and returns that date"""
for row in self.engine.execute("SELECT MAX(changed) FROM custom.activedirectory"):
date = row[0]
break
date = (str(date)).split('-')
if len(date) != 3 or len(date[0]) != 4 or len(date[1]) != 2 or len(date[2]) != 2:
logging.fatal("Couldn't get maximum date from custom.activedirectory")
sys.exit(1)
max_date = date[0] + date[1] + date[2] + "000000.0Z"
return max_date
def convert_date(self, string):
"""Converts date from the ldap timestamp to the sql timestamp
20010101121212.0Z => 2001-01-01 """
string = string[:8]
if len(string) != 8:
return None
try:
int(string)
res = string[:4] + '-' + string[4:6] + '-' + string[6:]
return res
except ValueError:
return None
def main():
    """Configure logging, read connection settings from the environment
    and run one full synchronisation pass."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime).19s %(levelname)8s %(message)s",
    )
    required = ('LDAP2DB_DB_URI', 'LDAP2DB_AD_URI')
    if any(key not in os.environ for key in required):
        logging.fatal("""You need to set configuration using environment variables.
LDAP2DB_DB_URI='postgresql+pg8000://dbuser:dbpass@db.example.com/dbname'
LDAP2DB_AD_URI='ldaps://pdc.example.com:3269/dc=example,dc=com'
""")
        sys.exit(1)
    updater = CustomUpdater(
        stats_uri=os.environ['LDAP2DB_DB_URI'],
        activedirectory_uri=os.environ['LDAP2DB_AD_URI'])
    updater.update_all(full=False)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1773893 | <reponame>UST-QuAntiL/Quokka<filename>api/controller/algorithms/algorithm_controller.py<gh_stars>0
from flask_smorest import Blueprint
from ...model.circuit_response import CircuitResponseSchema
from ...model.algorithm_request import (
HHLAlgorithmRequestSchema,
HHLAlgorithmRequest,
QAOAAlgorithmRequestSchema,
QAOAAlgorithmRequest,
)
# Flask-Smorest blueprint grouping the quantum-algorithm endpoints below
# under the /algorithms URL prefix.
blp = Blueprint(
    "algorithms",
    __name__,
    url_prefix="/algorithms",
    description="get quantum circuit algorithms",
)
@blp.route("/hhl", methods=["POST"])
@blp.arguments(
HHLAlgorithmRequestSchema,
example=dict(matrix=[[1.5, 0.5], [0.5, 1.5]], vector=[0, 1]),
)
@blp.response(200, CircuitResponseSchema)
def algorithm(json: HHLAlgorithmRequest):
if json:
return
@blp.route("/qaoa", methods=["POST"])
@blp.arguments(
QAOAAlgorithmRequestSchema,
example=dict(
pauli_op_string="0.5 * ((I^Z^Z) + (Z^I^Z) + (Z^Z^I))",
reps=2,
gammas=[1.0, 1.2],
betas=[0.4, 0.7],
),
)
@blp.response(200, CircuitResponseSchema)
def algorithm(json: QAOAAlgorithmRequest):
if json:
return None
| StarcoderdataPython |
4811021 | """
Author: <NAME>
Date: September 7th 2020
Class for the defining a molecule object, and
being able to grab molecule features.
Molecule objects contain:
- XYZ geometry
- Atom list
- Charge
- Multiplicity
"""
import sys
import os
import logging
import numpy as np
class Molecule():
    """
    Class contains properties of a molecule and
    functions capable of returning specific features
    of a molecule object.
    Molecule objects require
    1. structure
    2. charge
    3. multiplicity
    """
    def __init__(self, charge, mult, xyzStructure):
        """
        :param charge: total molecular charge
        :param mult: spin multiplicity
        :param xyzStructure: flat list [symbol, x, y, z, symbol, x, y, z, ...]
        """
        self.charge = charge
        self.mult = mult
        self.xyzStructure = xyzStructure

    def get_atoms(self):
        """
        Grabs the number of atoms in the molecule, and the element symbol
        for each atom in order.
        Ex: water input structure = ["O", 0.60161, 1.68925, -0.00684,
                                     "H", 1.56949, 1.64563, 0.00906,
                                     "H", 0.32276, 0.81732, 0.31087]
        Returns 3 (natoms), ["O", "H", "H"] (atoms)
        """
        self.natoms = len(self.xyzStructure) // 4
        # Every 4th entry of the flat list is an element symbol.
        self.atoms = self.xyzStructure[0:len(self.xyzStructure):4]
        return self.natoms, self.atoms

    def get_coords(self):
        """
        Grabs only the xyz coordinates from the molecule, omitting the
        element labels. Returns a (natoms, 3) float numpy array.
        """
        # BUG FIX: reshape_geom is a method; the previous code called it as a
        # free function -- reshape_geom(self, ...) -- which raised NameError.
        formattedXYZ = self.reshape_geom(self.xyzStructure)
        self.coords = formattedXYZ[:, 1:4].astype(float)
        return self.coords

    def get_charge(self):
        """Return the molecule's charge."""
        return self.charge

    def get_mult(self):
        """Return the molecule's multiplicity."""
        return self.mult

    def reshape_geom(self, structure):
        """
        Reformats geometry from a flat list of coordinates into a
        natoms x 4 array:
        column 1 = atom label (C, O, H, etc.),
        columns 2-4 = X, Y, Z coordinates for each atom.
        """
        natoms = len(structure) // 4
        # Mixed str/float input: numpy promotes every cell to str here.
        formattedStructure = np.reshape(structure, (natoms, 4))
        return formattedStructure
1703010 | <reponame>derpyninja/nlp4cciwr
# -*- coding: utf-8 -*-
import os
import logging
import textacy
import textacy.tm
from tqdm import tqdm
class TopicModelPermutation:
    """Grid-search over topic-model types and (n_topics, n_terms) settings,
    optionally persisting each fitted model and its termite plot."""

    def __init__(self, grp_term_matrix, vectorizer, version=None):
        # Group-term matrix and the vectorizer that produced it.
        self.grp_term_matrix = grp_term_matrix
        self.vectorizer = vectorizer
        self.version = version
        # sklearn.decomposition model types wrapped by textacy.
        self.model_types = ["nmf", "lsa"]
        # Number of topics per model (grid columns).
        self.n_topics_list = [2, 3, 4, 5, 6, 7, 8, 9]
        # Number of terms per plot (grid rows).
        self.n_terms_list = [10, 30, 50]

    def calc(self, model_dir=None, figure_dir=None, save=True, plot=True):
        """Fit a topic model for every combination in the grid; save each
        model to ``model_dir`` and each termite plot to ``figure_dir``."""
        logger = logging.getLogger(__name__)
        logger.info("Topic modelling permutation.")
        template = "BBC_2007_07_04_CORPUS_TEXTACY_{}_TM_{}_{}x{}.{}"
        for method in tqdm(self.model_types):
            for topic_count in self.n_topics_list:
                for term_count in self.n_terms_list:
                    # Initialise and fit the model for this grid cell.
                    topic_model = textacy.tm.TopicModel(
                        model=method, n_topics=topic_count, n_jobs=-1
                    )
                    topic_model.fit(self.grp_term_matrix)
                    # Transform group-term matrix to group-topic matrix.
                    topic_model.transform(self.grp_term_matrix)
                    if save:
                        pickle_name = template.format(
                            self.version, method.upper(),
                            topic_count, term_count, "pkl",
                        )
                        topic_model.save(os.path.join(model_dir, pickle_name))
                    if plot:
                        figure_name = template.format(
                            self.version, method.upper(),
                            topic_count, term_count, "png",
                        )
                        topic_model.termite_plot(
                            doc_term_matrix=self.grp_term_matrix,
                            id2term=self.vectorizer.id_to_term,
                            topics=-1,
                            n_terms=term_count,
                            sort_topics_by="index",
                            rank_terms_by="topic_weight",
                            sort_terms_by="seriation",
                            save=os.path.join(figure_dir, figure_name),
                            rc_params={"dpi": 300},
                        )
| StarcoderdataPython |
43342 | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metakernel import Magic
class SASsessionMagic(Magic):
    """Metakernel magic that starts a SAS session from key=value options."""

    def __init__(self, *args, **kwargs):
        super(SASsessionMagic, self).__init__(*args, **kwargs)

    def line_SASsession(self, *args):
        """
        SAS Kernel magic that allows a programmatic way to submit
        configuration details.
        This magic is only available within the SAS Kernel.
        """
        if len(args) > 1:
            args = ''.join(args)
        elif len(args) == 1:
            args = ''.join(args[0])
        # Strip whitespace and both quote styles before parsing the
        # comma-separated key=value pairs.
        for unwanted in (' ', '"', "'"):
            args = args.replace(unwanted, '')
        sess_params = dict(pair.split('=') for pair in args.split(','))
        self.kernel._allow_stdin = True
        self.kernel._start_sas(**sess_params)
def register_magics(kernel):
    # Hook called by metakernel: attach the SAS session magic to the kernel.
    kernel.register_magics(SASsessionMagic)
def register_ipython_magics():
    """Expose %SASsession as an IPython line magic backed by a metakernel
    IPythonKernel instance."""
    from metakernel import IPythonKernel
    from IPython.core.magic import register_line_magic
    ipy_kernel = IPythonKernel()
    sas_magic = SASsessionMagic(ipy_kernel)
    # Make the magic callable through the kernel's line-magic table.
    ipy_kernel.line_magics["SASsession"] = sas_magic

    @register_line_magic
    def SASsession(line):
        ipy_kernel.call_magic("%SASsession " + line)
| StarcoderdataPython |
1639175 | <gh_stars>1-10
def digitToword(num):
    """Return the English word for a single decimal digit, or None when
    ``num`` is not one of 0-9."""
    words = {
        0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
        5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine',
    }
    return words.get(num)
def _numberToword_digits(num):
    # Recursive helper: print the digits of a positive number, most
    # significant first, each as a word followed by a space.
    if num == 0:
        return
    _numberToword_digits(num // 10)
    print(digitToword(num % 10), end=" ")


def numberToword(num):
    """Print ``num``'s decimal digits as English words, e.g. 123 ->
    "one two three ".

    Fixes two defects of the previous version: input 0 printed nothing
    (the base case returned before printing), and negative input recursed
    forever (now rejected with ValueError).
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        print(digitToword(0), end=" ")
        return
    _numberToword_digits(num)
if __name__ == '__main__':
    # Guard the interactive entry point so importing this module does not
    # block waiting for stdin.
    n = int(input())
    numberToword(n)
| StarcoderdataPython |
190632 | #!/usr/bin/env python
from flask import Flask, jsonify
from alexa_agent import AlexaAgent
# WSGI application object exposing the Alexa trigger endpoints below.
app = Flask(__name__)
@app.route('/tell-me-a-joke', methods=['GET'])
def tell_me_a_joke():
    """Wake the Alexa agent, ask it for a joke and acknowledge the caller."""
    joke_agent = AlexaAgent()
    joke_agent.wakeup()
    joke_agent.ask('tell me a joke')
    payload = {'code': 200, 'message': 'Was that joke funny?'}
    return jsonify(payload)
@app.route('/morning-report', methods=['GET'])
def morning_report():
    """Wake the Alexa agent and run through the morning-report questions."""
    report_agent = AlexaAgent()
    report_agent.wakeup()
    questions = [
        "What is today's date",
        "What time is it",
        "How's the weather"
    ]
    report_agent.ask(questions)
    return jsonify({'code': 200, 'message': 'Morning report delivered!'})
if __name__ == '__main__':
    # Development server only (debug=True); not for production use.
    app.run(port=8888, debug=True)
| StarcoderdataPython |
from setuptools import setup

# Package metadata for the `mi` command-line tool; the `mi` console script
# dispatches to mi.main:entry_point.
# (Removed the "3202664 | " dataset residue that was fused onto the import
# line and made the file a SyntaxError.)
setup(
    name='mi',
    packages=['mi'],
    install_requires=[
        'docopt', 'cached-property', 'sqlalchemy',
    ],
    entry_points="""
    [console_scripts]
    mi=mi.main:entry_point
    """,
)
| StarcoderdataPython |
3308575 | """
Entradas
Lectura de la factura-->float-->L
Costo kilovatio-->float-->CK
Salidas
Monto total de la factura-->float-->MT
"""
L=float(input("Ingrese la lectura de su factura: "))
CK=float(input("ingrese el costo del kilovatio: "))
MT=(L*CK)
print("Monto total de su factura: "+str(MT)) | StarcoderdataPython |
from interpolator import interpolate, interpolation_printer
def f(x, y, z):
    """Trivariate polynomial fixture for the interpolator (z is unused)."""
    x_sq = x * x
    return x_sq * y * 3 + x_sq * 2 + x * y * 6 - x * 13 - y * 235 - 3351
# Sample grids for each of f's three variables.
xs = [2,3,5]
ys = [0,1]
zs = [3]
ps = (xs, ys, zs)
# Interpolate f on the grid and print the recovered terms, one axis
# letter per variable.
res = interpolate(f, ps)
interpolation_printer(res, tuple(map(len, ps)), 'xyz')
def g(x):
    """Return 0 + 1 + ... + x (equals x*(x+1)/2 for non-negative x)."""
    total = 0
    for value in range(x + 1):
        total += value
    return total
# Univariate case: interpolate g on the points 0..9.
# (Removed the "| StarcoderdataPython |" residue that was fused onto the
# last line and made it a SyntaxError.)
res = interpolate(g, (range(10),))
interpolation_printer(res, (10,))
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import pyspark.sql.functions as F
from pyspark import SparkContext
# from operator import add
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from pyspark.sql.types import StringType, IntegerType
from awsglue.dynamicframe import DynamicFrame
# NOTE(review): SparkContext is imported twice (pyspark.context and
# pyspark); the second import shadows the first -- confirm which is
# intended. (Removed the "1727595 | " dataset residue fused onto the
# first line, which made the file a SyntaxError.)
## @params: [JOB_NAME]add
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
# spark context
sc = SparkContext()
# glue context
glueContext = GlueContext(sc)
# spark session
spark = glueContext.spark_session
# glue job bound to this context
job = Job(glueContext)
# initialize job
job.init(args['JOB_NAME'], args)
# read the "read" table of the google_scholar catalog database
read_gs = glueContext.create_dynamic_frame.from_catalog(
    database="google_scholar",
    table_name="read")
print("Count: ", read_gs.count())
# print schema
read_gs.printSchema()
# convert to a Spark DataFrame
df_gs = read_gs.toDF()
# columns (by position): "author_name", "email", "affiliation",
# "coauthors_names", "research_interest"
df_gs.show(10)
# research_interest (col4) can't be None
df_gs_clean = df_gs.filter("col4 != 'None'")
# keep only the research_interest column
rdd_ri = df_gs_clean.rdd.map(lambda x: (x["col4"]))
print("\nSample RDD rows:")
print(rdd_ri.take(5))
print("\nSample RDD rows after frequenc count for each words:")
# word count: research interests are '##'-separated; lower-case each term,
# emit (term, 1) pairs and sum per term
rdd_ri_freq = rdd_ri.flatMap(lambda x: [(w.lower(), 1) for w in x.split('##')]).reduceByKey(lambda a, b: a + b)
# rdd print with take() function
print(rdd_ri_freq.take(5))
# schema for the (research_interest, frequency) rows
schema = StructType([StructField("ri", StringType(), False),
                     StructField("frequency", IntegerType(), False)
                     ])
# convert rdd to df with schema
df = spark.createDataFrame(rdd_ri_freq, schema)
print("\nProposed Schema of DF:")
# print schema (to verify)
df.printSchema()
print("\nRDD converted to DF with schema:")
# sort by descending frequency
df_sort = df.sort(F.col("frequency").desc())
df_sort.show(10, truncate=False)
# single partition => just one csv file on S3
df_sort = df_sort.repartition(1)
# create a dynamic frame (equivalent to df) in glue context
dynamic_frame_write = DynamicFrame.fromDF(df_sort, glueContext, "dynamic_frame_write")
# path for output file
path_s3_write= "s3://google-scholar-csv/write/"
# write to s3 as '|'-separated csv without quoting
glueContext.write_dynamic_frame.from_options(
    frame = dynamic_frame_write,
    connection_type = "s3",
    connection_options = {
        "path": path_s3_write
    },
    format = "csv",
    format_options={
        "quoteChar": -1,
        "separator": "|"
    }
)
job.commit()
| StarcoderdataPython |
3357917 | """
Some key layers used for constructing a Capsule Network. These layers can used to construct CapsNet on other dataset,
not just on MNIST.
Author: <NAME>, E-mail: `<EMAIL>`, Github: `https://github.com/XifengGuo/CapsNet-Pytorch`
"""
import torch.nn.functional as F
import torch
from torch import nn
from torch.autograd import Variable
from . import TorchModel, NUM_GESTURES
import numpy as np
def squash(inputs, axis=-1):
    """
    The non-linear activation used in Capsule. It drives the length of a
    large vector towards 1 and of a small vector towards 0.
    :param inputs: vectors to be squashed
    :param axis: the axis to squash
    :return: a Tensor with the same size as inputs
    """
    magnitude = torch.norm(inputs, p=2, dim=axis, keepdim=True)
    # ||v||^2 / (1 + ||v||^2) rescales the length; the extra 1e-8 guards
    # against division by zero for all-zero vectors.
    scale = magnitude ** 2 / (1 + magnitude ** 2) / (magnitude + 1e-8)
    return scale * inputs
class DenseCapsule(nn.Module):
    """
    The dense capsule layer. It is similar to a Dense (FC) layer: a Dense
    layer has `in_num` scalar inputs and `out_num` scalar outputs;
    DenseCapsule expands each scalar to a vector, so its input size is
    [None, in_num_caps, in_dim_caps] and its output size is
    [None, out_num_caps, out_dim_caps]. For a Dense layer,
    in_dim_caps = out_dim_caps = 1.

    :param in_num_caps: number of capsules inputted to this layer
    :param in_dim_caps: dimension of input capsules
    :param out_num_caps: number of capsules outputted from this layer
    :param out_dim_caps: dimension of output capsules
    :param routings: number of iterations for the routing algorithm
    """
    def __init__(self, in_num_caps, in_dim_caps, out_num_caps, out_dim_caps, routings=3):
        super(DenseCapsule, self).__init__()
        self.in_num_caps = in_num_caps
        self.in_dim_caps = in_dim_caps
        self.out_num_caps = out_num_caps
        self.out_dim_caps = out_dim_caps
        self.routings = routings
        # One [out_dim_caps x in_dim_caps] transform per
        # (output capsule, input capsule) pair.
        self.weight = nn.Parameter(0.01 * torch.randn(out_num_caps, in_num_caps, out_dim_caps, in_dim_caps))

    def forward(self, x):
        # x.size=[batch, in_num_caps, in_dim_caps]
        # expanded to [batch, 1, in_num_caps, in_dim_caps, 1]
        # weight.size=[out_num_caps, in_num_caps, out_dim_caps, in_dim_caps]
        # torch.matmul: [out_dim_caps, in_dim_caps] x [in_dim_caps, 1] -> [out_dim_caps, 1]
        # => x_hat.size=[batch, out_num_caps, in_num_caps, out_dim_caps]
        x_hat = torch.squeeze(torch.matmul(self.weight, x[:, None, :, :, None]), dim=-1)
        # In the forward pass `x_hat_detached` == `x_hat`; in backward no
        # gradient can flow from `x_hat_detached` back to `x_hat`.
        x_hat_detached = x_hat.detach()
        # Coupling-coefficient logits, initialized as zeros on the SAME
        # device as the input. FIX: previously this was
        # Variable(torch.zeros(...)).cuda(), which both used the deprecated
        # Variable wrapper and crashed on CPU-only machines.
        # b.size = [batch, out_num_caps, in_num_caps]
        b = torch.zeros(x.size(0), self.out_num_caps, self.in_num_caps, device=x.device)
        assert self.routings > 0, 'The \'routings\' should be > 0.'
        for i in range(self.routings):
            # c.size = [batch, out_num_caps, in_num_caps]
            c = F.softmax(b, dim=1)
            if i == self.routings - 1:
                # Last iteration: use `x_hat` so gradients backpropagate.
                # outputs.size = [batch, out_num_caps, 1, out_dim_caps]
                outputs = squash(torch.sum(c[:, :, :, None] * x_hat, dim=-2, keepdim=True))
            else:
                # Otherwise use `x_hat_detached` to update `b`; no gradients
                # flow on this path.
                outputs = squash(torch.sum(c[:, :, :, None] * x_hat_detached, dim=-2, keepdim=True))
                # => b.size = [batch, out_num_caps, in_num_caps]
                b = b + torch.sum(outputs * x_hat_detached, dim=-1)
        return torch.squeeze(outputs, dim=-2)
class PrimaryCapsule(nn.Module):
    """
    Apply a Conv1d with `out_channels`, then reshape the activations into
    capsules of dimension `dim_caps` and squash them.
    :param in_channels: input channels
    :param out_channels: output channels
    :param dim_caps: dimension of capsule
    :param kernel_size: kernel size
    :return: output tensor, size=[batch, num_caps, dim_caps]
    """
    def __init__(self, in_channels, out_channels, dim_caps, kernel_size, stride=1, padding=0):
        super(PrimaryCapsule, self).__init__()
        self.dim_caps = dim_caps
        self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)

    def forward(self, x):
        features = self.conv1d(x)
        # Flatten (channels, length) into capsules of size dim_caps.
        capsules = features.view(x.size(0), -1, self.dim_caps)
        return squash(capsules)
class Structure(nn.Module):
    """
    A Capsule Network (conv -> primary capsules -> digit capsules, plus a
    fully-connected reconstruction decoder).
    :param input_size: data size = [channels, width]
    :param classes: number of classes
    :param routings: number of routing iterations
    Shape:
        - Input: (batch, channels, width), optional (batch, classes).
        - Output: ((batch, classes), (batch, channels, width))
    """
    def __init__(self, input_size, classes, routings, conv_filters = 256, conv_kernel_size= 9, conv_stride= 1, conv_padding= 0, primcaps_filters= 256, primcaps_kernel_size= 5, primcaps_stride= 2, primcaps_padding= 0, primcaps_num= 8, digitcaps_dim= 16):
        super(Structure, self).__init__()
        self.input_size = input_size
        self.classes = classes
        self.routings = routings
        # Layer 0: batch norm over the flattened input
        self.batch_norm1 = torch.nn.BatchNorm1d(np.product(input_size))
        # Layer 1: a conventional Conv1d layer
        self.conv = nn.Conv1d(input_size[0], conv_filters, kernel_size=conv_kernel_size,
                              stride=conv_stride, padding=conv_padding)
        # Conv output: [filters, floor((L_in - k + 2p) / stride) + 1]
        self.conv_shape = [conv_filters, int(np.floor(
            (input_size[1] - conv_kernel_size + 2 * conv_padding) / conv_stride) + 1)]
        # Layer 1.0: batch norm over the conv activations
        self.batch_norm2 = torch.nn.BatchNorm1d(np.product(self.conv_shape))
        # Layer 2: Conv1d with `squash`, reshaped to [None, num_caps, dim_caps]
        self.primarycaps = PrimaryCapsule(self.conv_shape[0], primcaps_filters,
                                          dim_caps=primcaps_num,
                                          kernel_size=primcaps_kernel_size,
                                          stride=primcaps_stride, padding=primcaps_padding)
        self.primcaps_shape = [int(np.product([primcaps_filters, int(np.floor(
            (self.conv_shape[1] - primcaps_kernel_size + 2 * primcaps_padding) /
            primcaps_stride) + 1)]) / primcaps_num), primcaps_num]
        # Layer 2.0: batch norm over the primary capsules
        self.batch_norm3 = torch.nn.BatchNorm1d(np.product(self.primcaps_shape))
        # Layer 3: capsule layer; the routing algorithm works here.
        self.digitcaps = DenseCapsule(in_num_caps=self.primcaps_shape[0], in_dim_caps=self.primcaps_shape[1],
                                      out_num_caps=classes, out_dim_caps=digitcaps_dim, routings=routings)
        # Decoder network: reconstructs the input from the masked capsules.
        self.decoder = nn.Sequential(
            nn.Linear(digitcaps_dim * classes, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, np.product(input_size)),
            nn.Sigmoid()
        )
        self.relu = nn.ReLU()

    def forward(self, x, y=None):
        # FIX: removed the per-call `import matplotlib.pyplot` and the dead
        # commented-out plotting code it existed for.
        # BatchNorm1d needs batch > 1 to compute batch statistics.
        if x.shape[0] > 1:
            x = self.batch_norm1(x.flatten(1)).view(*x.shape)
        x = self.relu(self.conv(x))
        if x.shape[0] > 1:
            x = self.batch_norm2(x.flatten(1)).view(*x.shape)
        x = self.primarycaps(x)
        if x.shape[0] > 1:
            x = self.batch_norm3(x.flatten(1)).view(*x.shape)
        x = self.digitcaps(x)
        # Class score = length of each digit capsule.
        length = x.norm(dim=-1)
        index = length.max(dim=1)[1]
        if y is None:  # during testing, no label given: one-hot from `length`
            # FIX: build the one-hot on the same device as the activations;
            # previously this was Variable(...).cuda(), which broke CPU
            # inference and used the deprecated Variable wrapper.
            y = torch.zeros_like(length).scatter_(1, index.view(-1, 1), 1.)
        # Mask the capsules with the (predicted or given) class and decode.
        reconstruction = self.decoder((x * y[:, :, None]).view(x.size(0), -1))
        return length, reconstruction.view(-1, *self.input_size)

    def loss(self, y_true, y_pred, x, x_recon, lam_recon):
        """
        Capsule loss = Margin loss + lam_recon * reconstruction loss.
        :param y_true: true labels, one-hot coding, size=[batch, classes]
        :param y_pred: predicted labels by CapsNet, size=[batch, classes]
        :param x: input data, size=[batch, channels, width]
        :param x_recon: reconstructed data, size is same as `x`
        :param lam_recon: coefficient for reconstruction loss
        :return: a scalar loss tensor.
        """
        L = y_true * torch.clamp(0.9 - y_pred, min=0.) ** 2 + \
            0.5 * (1 - y_true) * torch.clamp(y_pred - 0.1, min=0.) ** 2
        L_margin = L.sum(dim=1).mean()
        L_recon = nn.MSELoss()(x_recon, x)
        return L_margin + lam_recon * L_recon
#
# Yet another variation of FullyConnectedNNV2, leveraging the CustomNet module
#
class CapsuleNet(TorchModel):
    """Capsule-network training wrapper; the capsnet_* hyper-parameters are
    expected to be supplied through the TorchModel base class."""

    def define_model(self, dims):
        """Build the underlying Structure network for inputs of shape ``dims``."""
        model = Structure(dims, NUM_GESTURES, self.capsnet_routings, self.capsnet_conv_filters, self.capsnet_conv_kernel_size, \
                          self.capsnet_conv_stride, self.capsnet_conv_padding, self.capsnet_primcaps_filters,\
                          self.capsnet_primcaps_kernel_size, self.capsnet_primcaps_stride, self.capsnet_primcaps_padding,\
                          self.capsnet_primcaps_num, self.capsnet_digitcaps_dim)
        return model

    def forward_pass(self, sample):
        """Run one forward pass on (inputs, targets); returns
        (loss, [predictions, reconstruction])."""
        targets = torch.LongTensor(sample[1].type(torch.LongTensor)).to(self.device)
        if len(targets.shape) == 1:
            # Class indices -> one-hot coding, as required by Structure.loss.
            targets = torch.zeros(targets.size(0), NUM_GESTURES).to(self.device).scatter_(1, targets.view(-1, 1), 1.)
        input = sample[0].to(self.device).float()
        predictions, reconstruction = self.model(input)
        return self.model.loss(targets, predictions, input, reconstruction, self.capsnet_lam_recon), [predictions, reconstruction]

    def on_start(self, state):
        # FIX: removed a large dead triple-quoted block of commented-out
        # dataloader code that previously sat in this method.
        super(CapsuleNet, self).on_start(state)
        # Exponential learning-rate decay, stepped once per epoch.
        self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=self.capsnet_lr_decay)

    def on_end_epoch(self, state):
        # FIX: removed the "| StarcoderdataPython |" residue that was fused
        # onto the last line and made the file a SyntaxError.
        self.lr_scheduler.step()
        super(CapsuleNet, self).on_end_epoch(state)
66646 | import itertools
from abc import abstractmethod, ABC
from typing import (
Sequence,
Any,
Iterable,
Dict,
)
from .helper import Offset2ID
from .... import Document
class BaseGetSetDelMixin(ABC):
"""Provide abstract methods and derived methods for ``__getitem__``, ``__setitem__`` and ``__delitem__``
.. note::
The following methods must be implemented:
- :meth:`._get_doc_by_id`
- :meth:`._set_doc_by_id`
- :meth:`._del_doc_by_id`
Keep in mind that these methods above ** must not ** handle offset2id of the DocumentArray.
These methods are actually wrapped by the following methods which handle the offset2id:
- :meth:`._set_doc`
- :meth:`._del_doc`
- :meth:`._del_all_docs`
Therefore, you should make sure to use the wrapper methods in case you expect offset2id to be updated, and use
the inner methods in case you don't want to handle offset2id (for example, if you want to handle it in a
later step)
Other methods implemented a generic-but-slow version that leverage the methods above.
Please override those methods in the subclass whenever a more efficient implementation is available.
Mainly, if the backend storage supports operations in batches, you can implement the following methods:
- :meth:`._get_docs_by_ids`
- :meth:`._set_docs_by_ids`
- :meth:`._del_docs_by_ids`
- :meth:`._clear_storage`
Likewise, the methods above do not handle offset2id. They are wrapped by the following methods that update the
offset2id in a single step:
- :meth:`._set_docs`
- :meth:`._del_docs`
- :meth:`._del_all_docs`
"""
# Getitem APIs
    def _get_doc_by_offset(self, offset: int) -> 'Document':
        """Resolve ``offset`` to an id via offset2id, then fetch that doc."""
        return self._get_doc_by_id(self._offset2ids.get_id(offset))
    @abstractmethod
    def _get_doc_by_id(self, _id: str) -> 'Document':
        """Return the Document stored under ``_id`` (storage-specific);
        must NOT touch offset2id (see the class docstring)."""
        ...
def _get_docs_by_slice(self, _slice: slice) -> Iterable['Document']:
"""This function is derived from :meth:`_get_doc_by_offset`
Override this function if there is a more efficient logic
:param _slice: the slice used for indexing
:return: an iterable of document
"""
return self._get_docs_by_ids(self._offset2ids.get_id(_slice))
def _get_docs_by_offsets(self, offsets: Sequence[int]) -> Iterable['Document']:
"""This function is derived from :meth:`_get_doc_by_offset`
Override this function if there is a more efficient logic
:param offsets: the offsets used for indexing
:return: an iterable of document
"""
return (self._get_doc_by_offset(o) for o in offsets)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""This function is derived from :meth:`_get_doc_by_id`
Override this function if there is a more efficient logic
:param ids: the ids used for indexing
:return: an iterable of document
"""
return (self._get_doc_by_id(_id) for _id in ids)
# Delitem APIs
def _del_doc_by_offset(self, offset: int):
self._del_doc_by_id(self._offset2ids.get_id(offset))
self._offset2ids.delete_by_offset(offset)
def _del_doc(self, _id: str):
self._offset2ids.delete_by_id(_id)
self._del_doc_by_id(_id)
@abstractmethod
def _del_doc_by_id(self, _id: str):
...
def _del_docs_by_slice(self, _slice: slice):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param _slice: the slice used for indexing
"""
ids = self._offset2ids.get_id(_slice)
self._del_docs(ids)
def _del_docs_by_mask(self, mask: Sequence[bool]):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param mask: the boolean mask used for indexing
"""
ids = list(itertools.compress(self._offset2ids, (_i for _i in mask)))
self._del_docs(ids)
def _del_all_docs(self):
self._clear_storage()
self._offset2ids = Offset2ID()
def _del_docs_by_ids(self, ids):
"""This function is derived from :meth:`_del_doc_by_id`
Override this function if there is a more efficient logic
:param ids: the ids used for indexing
"""
for _id in ids:
self._del_doc_by_id(_id)
def _del_docs(self, ids):
self._del_docs_by_ids(ids)
self._offset2ids.delete_by_ids(ids)
def _clear_storage(self):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic.
If you override this method, you should only take care of clearing the storage backend."""
for doc in self:
self._del_doc_by_id(doc.id)
# Setitem API
def _set_doc_by_offset(self, offset: int, value: 'Document'):
self._set_doc(self._offset2ids.get_id(offset), value)
def _set_doc(self, _id: str, value: 'Document'):
if _id != value.id:
self._offset2ids.update(self._offset2ids.index(_id), value.id)
self._set_doc_by_id(_id, value)
@abstractmethod
def _set_doc_by_id(self, _id: str, value: 'Document'):
...
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""This function is derived from :meth:`_set_doc_by_id`
Override this function if there is a more efficient logic
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
def _set_docs(self, ids, docs: Iterable['Document']):
docs = list(docs)
mismatch_ids = {_id: doc.id for _id, doc in zip(ids, docs) if _id != doc.id}
self._set_docs_by_ids(ids, docs, mismatch_ids)
self._offset2ids.update_ids(mismatch_ids)
def _set_docs_by_slice(self, _slice: slice, value: Sequence['Document']):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param _slice: the slice used for indexing
:param value: the value docs will be updated to
:raises TypeError: error raised when right-hand assignment is not an iterable
"""
if not isinstance(value, Iterable):
raise TypeError(
f'You right-hand assignment must be an iterable, receiving {type(value)}'
)
ids = self._offset2ids.get_id(_slice)
self._set_docs(ids, value)
def _set_doc_value_pairs(
self, docs: Iterable['Document'], values: Sequence['Document']
):
docs = list(docs)
if len(docs) != len(values):
raise ValueError(
f'length of docs to set({len(docs)}) does not match '
f'length of values({len(values)})'
)
for _d, _v in zip(docs, values):
self._set_doc(_d.id, _v)
def _set_doc_value_pairs_nested(
self, docs: Iterable['Document'], values: Sequence['Document']
):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param docs: the docs to update
:param values: the value docs will be updated to
"""
docs = list(docs)
if len(docs) != len(values):
raise ValueError(
f'length of docs to set({len(docs)}) does not match '
f'length of values({len(values)})'
)
for _d, _v in zip(docs, values):
if _d.id != _v.id:
raise ValueError(
'Setting Documents by traversal paths with different IDs is not supported'
)
_d._data = _v._data
if _d not in self:
root_d = self._find_root_doc_and_modify(_d)
else:
# _d is already on the root-level
root_d = _d
if root_d:
self._set_doc(root_d.id, root_d)
def _set_doc_attr_by_offset(self, offset: int, attr: str, value: Any):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param offset: the offset used for indexing
:param attr: the attribute of document to update
:param value: the value doc's attr will be updated to
"""
_id = self._offset2ids.get_id(offset)
d = self._get_doc_by_id(_id)
if hasattr(d, attr):
setattr(d, attr, value)
self._set_doc(_id, d)
def _set_doc_attr_by_id(self, _id: str, attr: str, value: Any):
"""This function is derived and may not have the most efficient implementation.
Override this function if there is a more efficient logic
:param _id: the id used for indexing
:param attr: the attribute of document to update
:param value: the value doc's attr will be updated to
"""
if attr == 'id' and value is None:
raise ValueError(
'setting the ID of a Document stored in a DocumentArray to None is not allowed'
)
d = self._get_doc_by_id(_id)
if hasattr(d, attr):
setattr(d, attr, value)
self._set_doc(_id, d)
def _find_root_doc_and_modify(self, d: Document) -> 'Document':
"""Find `d`'s root Document in an exhaustive manner
:param: d: the input document
:return: the root of the input document
"""
from docarray import DocumentArray
for _d in self:
da = DocumentArray(_d)[...]
_all_ids = set(da[:, 'id'])
if d.id in _all_ids:
da[d.id].copy_from(d)
return _d
@abstractmethod
def _load_offset2ids(self):
...
@abstractmethod
def _save_offset2ids(self):
...
def __del__(self):
if hasattr(self, '_offset2ids'):
self._save_offset2ids()
| StarcoderdataPython |
3346434 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
All necessary for feature
"""
import tensorflow as tf
# model preprocess
# Maps a model-family name to the tf.keras preprocessing function that input
# images must be passed through before being fed to that family's network.
# NOTE(review): "ResNet" maps to the *v2* preprocessing — confirm the models
# used elsewhere are ResNetV2 variants.
model_preprocess = {
    "DenseNet": tf.keras.applications.densenet.preprocess_input,
    "EfficientNet": tf.keras.applications.efficientnet.preprocess_input,
    "NasNet": tf.keras.applications.nasnet.preprocess_input,
    "ResNet": tf.keras.applications.resnet_v2.preprocess_input,
}
| StarcoderdataPython |
1792794 | <reponame>gsi-upm/senpy<filename>senpy/blueprints.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Blueprints for Senpy
"""
from flask import (Blueprint, request, current_app, render_template, url_for,
jsonify, redirect)
from .models import Error, Response, Help, Plugins, read_schema, dump_schema, Datasets
from . import api
from .version import __version__
from functools import wraps
from .gsitk_compat import GSITK_AVAILABLE, datasets
import logging
import json
import base64
logger = logging.getLogger(__name__)
# Three Flask blueprints: the REST API itself, the demo web UI, and the
# JSON-LD namespace/context endpoints.
api_blueprint = Blueprint("api", __name__)
demo_blueprint = Blueprint("demo", __name__, template_folder='templates')
ns_blueprint = Blueprint("ns", __name__)
# Reverse map: output format name -> list of MIME types it serves.
_mimetypes_r = {'json-ld': ['application/ld+json'],
                'turtle': ['text/turtle'],
                'ntriples': ['application/n-triples'],
                'text': ['text/plain']}

# Forward map (MIME type -> format name), built by inverting _mimetypes_r.
# A MIME type may belong to only one format; duplicates are a config error.
MIMETYPES = {}
for k, vs in _mimetypes_r.items():
    for v in vs:
        if v in MIMETYPES:
            # Bug fix: the original message formatted the MIME type twice
            # instead of naming the newly-conflicting format (k).
            raise Exception('MIMETYPE {} specified for two formats: {} and {}'.format(v,
                                                                                     k,
                                                                                     MIMETYPES[v]))
        MIMETYPES[v] = k

DEFAULT_MIMETYPE = 'application/ld+json'
DEFAULT_FORMAT = 'json-ld'
def get_params(req):
    """Return the request parameters as a flat dict.

    GET requests are read from the query string, POST requests from the
    form body; any other method is rejected.

    :param req: the incoming Flask request object
    :raises Error: when the request method is neither GET nor POST
    """
    if req.method == 'GET':
        return req.args.to_dict(flat=True)
    if req.method == 'POST':
        return req.form.to_dict(flat=True)
    raise Error(message="Invalid data")
def encode_url(url=None):
    """Return *url* encoded as URL-safe base64 text.

    When *url* is omitted or empty, it is derived from the current request:
    the 'prefix' parameter, falling back to the request path plus '#'.
    """
    if not url:
        url = request.parameters.get('prefix', request.full_path[1:] + '#')
    return base64.urlsafe_b64encode(url.encode()).decode()
def url_for_code(code, base=None):
    """Return the base URI advertised for an encoded prefix.

    Earlier revisions built a unique URI from *code* (and optionally *base*),
    e.g. via ``url_for('api.decode', ...)``, but those URIs were very long and
    hurt visualization, so a fixed invalid-host URI is returned instead.
    """
    return 'http://senpy.invalid/'
def decoded_url(code, base=None):
    """Decode a URL-safe base64 *code* back into a URL.

    Absolute results (starting with 'http') are returned unchanged; relative
    paths are prefixed with *base*, or with the current request root when no
    base is given.
    """
    path = base64.urlsafe_b64decode(code.encode()).decode()
    if path.startswith('http'):
        return path
    return (base or request.url_root) + path
@demo_blueprint.route('/')
def index():
    """Render the demo landing page.

    The evaluation tab is shown only when gsitk could be imported; earlier
    revisions also honoured an 'evaluation' request parameter.
    """
    return render_template("index.html",
                           evaluation=GSITK_AVAILABLE,
                           version=__version__)
@api_blueprint.route('/contexts/<code>')
def context(code=''):
    """Serve the JSON-LD context for a given encoded prefix.

    :param code: base64-encoded prefix used to rebuild the '@base' URI
    :return: a JSON response holding the '@context' object
    """
    # Bug fix: work on a copy. The original mutated Response._context in
    # place, leaking the per-request '@base'/'endpoint' entries into the
    # shared class attribute (the ns blueprint's index() already copies
    # for the same reason).
    context = Response._context.copy()
    context['@base'] = url_for('api.decode', code=code, _external=True)
    context['endpoint'] = url_for('api.api_root', _external=True)
    return jsonify({"@context": context})
@api_blueprint.route('/d/<code>')
def decode(code):
    """Redirect to the URL encoded in *code*, or serve an error on bad input."""
    try:
        return redirect(decoded_url(code))
    except Exception:
        # Any decoding failure (bad base64, bad encoding) maps to one generic error.
        return Error('invalid URL').flask()
@ns_blueprint.route('/') # noqa: F811
def index():
    """Serve the bare JSON-LD context at the namespace root."""
    # Copy so the per-request 'endpoint' entry does not leak into the shared context.
    context = Response._context.copy()
    context['endpoint'] = url_for('api.api_root', _external=True)
    return jsonify({"@context": context})
@api_blueprint.route('/schemas/<schema>')
def schema(schema="definitions"):
    """Serve the named JSON schema, or a 404 error when it cannot be read."""
    try:
        return dump_schema(read_schema(schema))
    except Exception as ex:  # Should be FileNotFoundError, but it's missing from py2
        return Error(message="Schema not found: {}".format(ex), status=404).flask()
def basic_api(f):
    """Decorator for senpy API view functions.

    Parses and validates request parameters (storing them on
    ``request.parameters``), negotiates the output format from the Accept
    header, serializes the wrapped view's response, and converts any
    exception into a serialized Error response (HTTP 500 unless the
    exception is already an :class:`Error`).
    """
    default_params = {
        'in-headers': False,
        'expanded-jsonld': False,
        'outformat': None,
        'with-parameters': True,
    }
    @wraps(f)
    def decorated_function(*args, **kwargs):
        raw_params = get_params(request)
        # logger.info('Getting request: {}'.format(raw_params))
        logger.debug('Getting request. Params: {}'.format(raw_params))
        # Echo the caller's original parameters back in a response header.
        headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
        # Seed params with defaults so the except-branch can read them even
        # when parse_params itself raised.
        params = default_params
        # Content negotiation: best Accept MIME type -> output format name.
        mime = request.accept_mimetypes\
                      .best_match(MIMETYPES.keys(),
                                  DEFAULT_MIMETYPE)
        mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
        outformat = mimeformat
        try:
            params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
            # An explicit 'outformat' parameter overrides content negotiation.
            outformat = params.get('outformat', mimeformat)
            if hasattr(request, 'parameters'):
                request.parameters.update(params)
            else:
                request.parameters = params
            response = f(*args, **kwargs)
            if 'parameters' in response and not params['with-parameters']:
                del response.parameters
            logger.debug('Response: {}'.format(response))
            prefix = params.get('prefix')
            code = encode_url(prefix)
            return response.flask(
                in_headers=params['in-headers'],
                headers=headers,
                prefix=prefix or url_for_code(code),
                base=prefix,
                context_uri=url_for('api.context',
                                    code=code,
                                    _external=True),
                outformat=outformat,
                expanded=params['expanded-jsonld'],
                template=params.get('template'),
                verbose=params['verbose'],
                aliases=params['aliases'],
                fields=params.get('fields'))
        except (Exception) as ex:
            # In debug/testing, let exceptions propagate so they are visible.
            if current_app.debug or current_app.config['TESTING']:
                raise
            if not isinstance(ex, Error):
                msg = "{}".format(ex)
                ex = Error(message=msg, status=500)
            response = ex
            response.parameters = raw_params
            logger.exception(ex)
            return response.flask(
                outformat=outformat,
                expanded=params['expanded-jsonld'],
                verbose=params.get('verbose', True),
            )
    return decorated_function
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'])
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'])
@basic_api
def api_root(plugins):
    """Main analysis endpoint.

    :param plugins: optional plugin chain taken from the URL path
        ('+', ',' or '/'-separated); mutually exclusive with the
        'algorithm' request parameter.
    :raises Error: when both the URL path and the 'algorithm' parameter
        select an algorithm.
    :return: a Help response when help was requested, otherwise the
        analysis results.
    """
    if plugins:
        if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
            raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
                        ' Please, remove one of them')
        plugins = plugins.replace('+', ',').replace('/', ',')
        plugins = api.processors['string_to_tuple'](plugins)
    else:
        plugins = request.parameters['algorithm']
    # Bug fix: a stray debug print() leaked the plugin list to stdout on
    # every request; log at debug level instead.
    logger.debug('Plugins in request: %s', plugins)
    sp = current_app.senpy
    plugins = sp.get_plugins(plugins)
    if request.parameters['help']:
        apis = [api.WEB_PARAMS, api.API_PARAMS, api.NIF_PARAMS]
        # Verbose is set to False as default, but we want it to default to
        # True for help. This checks the original value, to make sure it wasn't
        # set by default.
        if not request.parameters['verbose'] and get_params(request).get('verbose'):
            apis = []
        if request.parameters['algorithm'] == ['default', ]:
            plugins = []
        allparameters = api.get_all_params(plugins, *apis)
        return Help(valid_parameters=allparameters)
    req = api.parse_call(request.parameters)
    analyses = api.parse_analyses(req.parameters, plugins)
    return current_app.senpy.analyse(req, analyses)
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
@basic_api
def evaluate():
    """Evaluate plugins against datasets, or describe the accepted parameters
    when help is requested."""
    if request.parameters['help']:
        return Help(parameters=dict(api.EVAL_PARAMS))
    params = api.parse_params(request.parameters, api.EVAL_PARAMS)
    return current_app.senpy.evaluate(params)
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
@basic_api
def plugins():
    """List the loaded analysis plugins, optionally filtered by 'plugin-type'."""
    senpy_app = current_app.senpy
    params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
    found = list(senpy_app.analysis_plugins(plugin_type=params.get('plugin-type')))
    return Plugins(plugins=found)
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
@basic_api
def plugin(plugin):
    """Describe a single plugin, looked up by name."""
    return current_app.senpy.get_plugin(plugin)
@api_blueprint.route('/datasets/', methods=['POST', 'GET'])
@basic_api
def get_datasets():
    """List the evaluation datasets made available through gsitk."""
    return Datasets(datasets=list(datasets.values()))
| StarcoderdataPython |
1719050 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Help determine actual units of downloaded Spitzer images.
#
# <NAME>
# Created: 2019-09-11
# Last modified: 2019-09-11
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
import os, sys, time
# Reference photometry for two stars: WISE W2 flux, Spitzer exposure time,
# and measured peak / total counts from the Spitzer frames.
star1 = {
    'ra': 63.8901815,
    'dec': -9.4344435,
    'w2flx': 775532.496,
    'expt': 26.8,
    'ipeak': 170.0,
    'itotal': 393.0671,
}
star2 = {
    'ra': 63.8090503,
    'dec': -9.4921137,
    'w2flx': 94188959.652,
    'expt': 10.4,
    'ipeak': 6700.0,
    'itotal': 40509.34,  # SPITZER_I2_49755136_0004_0000_3_cbcd.fits.cat
}

def rcalc(datum):
    """Return the star2/star1 ratio of the named quantity."""
    return star2[datum] / star1[datum]

## Expected flux ratio for matching exposures:
w2ratio = rcalc('w2flx')
sys.stderr.write(f"W2 flux ratio: {w2ratio:10.3f}\n")

# Observed count ratios from the Spitzer images:
pkratio = rcalc('ipeak')
imratio = rcalc('itotal')
sys.stderr.write(f"Star peak counts ratio: {pkratio:10.3f}\n")
sys.stderr.write(f"Star total count ratio: {imratio:10.3f}\n")
| StarcoderdataPython |
1648316 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
import numpy as np
import nnabla as nn
import nnabla.functions as F
def classification_loss_with_orthogonal_loss(
    pred_logit: nn.Variable, label: nn.Variable, transformation_mat: nn.Variable, reg_weight=0.001
) -> Tuple[nn.Variable, Dict[str, nn.Variable]]:
    """classification loss with orthogonal loss

    Combines softmax cross-entropy with a regularizer that pushes the
    predicted transformation towards an orthogonal matrix, penalizing the
    Frobenius norm of T @ T^T - I.

    Args:
        pred_logit (nn.Variable): pred logit, shape(batch, num_classes)
        label (nn.Variable): label, shape(batch, 1)
        transformation_mat (nn.Variable): predicted transformation matrix, shape(batch, K, K)
        reg_weight (float): weight of the orthogonality penalty (default 0.001)

    Returns:
        Tuple[nn.Variable, Dict[str, nn.Variable]]: total loss and internal
        variables ('classify_loss', 'mat_loss', 'mat_diff')
    """
    cross_entropy_loss = F.softmax_cross_entropy(pred_logit, label)
    classify_loss = F.mean(cross_entropy_loss)
    # Enforce the transformation as orthogonal matrix
    mat_squared = F.batch_matmul(
        transformation_mat, F.transpose(transformation_mat, (0, 2, 1)))
    batch_size, k, _ = transformation_mat.shape
    # Target is the KxK identity, tiled per batch element.
    target_array = np.tile(np.eye(k, dtype=np.float32), (batch_size, 1, 1))
    target = nn.Variable.from_numpy_array(target_array)
    mat_diff = mat_squared - target
    # Frobenius norm
    mat_diff = F.reshape(mat_diff, (batch_size, -1))
    mat_loss = F.mean(F.norm(mat_diff, axis=1))
    return classify_loss + mat_loss * reg_weight, {
        "classify_loss": classify_loss,
        "mat_loss": mat_loss,
        "mat_diff": mat_diff,
    }
| StarcoderdataPython |
11129 | #! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
    """
    Plane Wave Problem

    Couples a plane-wave calculus with a multilayer description and
    assembles the global linear system linking the plane-wave amplitudes
    of every layer through the interface conditions.
    """
    def __init__(self, **kwargs):
        """Set up the multilayer plane-wave problem.

        Reads ``termination`` ('rigid' by default), ``shift_pw`` and
        ``plot_results`` from kwargs; remaining kwargs are forwarded to
        :class:`PwCalculus` and :class:`MultiLayer`.
        """
        PwCalculus.__init__(self, **kwargs)
        # NOTE(review): `termination` is assigned but never used below —
        # MultiLayer may read it from kwargs directly; confirm.
        termination = kwargs.get("termination","rigid")
        # NOTE(review): this reads the "termination" key a second time; it
        # looks like a copy-paste and was probably meant to be
        # kwargs.get("method", "global") — confirm against callers before
        # changing.
        self.method = kwargs.get("termination","global")
        MultiLayer.__init__(self, **kwargs)
        # Wavenumbers are populated later by update_frequency().
        self.kx, self.ky, self.k = None, None, None
        self.shift_plot = kwargs.get("shift_pw", 0.)
        self.plot = kwargs.get("plot_results", [False]*6)
        self.result = {}
        self.outfiles_directory = False
        if self.method == "global":
            # Prepend a thin air layer as the incidence medium.
            self.layers.insert(0,FluidLayer(Air,1.e-2))
            if self.layers[1].medium.MEDIUM_TYPE == "fluid":
                self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
            # Assign plane-wave dof indices per layer:
            # 2 per fluid layer, 6 per pem layer, 4 per elastic layer.
            self.nb_PW = 0
            for _layer in self.layers:
                if _layer.medium.MODEL == "fluid":
                    _layer.dofs = self.nb_PW+np.arange(2)
                    self.nb_PW += 2
                elif _layer.medium.MODEL == "pem":
                    _layer.dofs = self.nb_PW+np.arange(6)
                    self.nb_PW += 6
                elif _layer.medium.MODEL == "elastic":
                    _layer.dofs = self.nb_PW+np.arange(4)
                    self.nb_PW += 4
    def update_frequency(self, f):
        """Propagate the new frequency *f* to the calculus and to every layer."""
        PwCalculus.update_frequency(self, f)
        MultiLayer.update_frequency(self, f, self.k, self.kx)
    def create_linear_system(self, f):
        """Assemble and solve the global interface system at frequency *f*.

        NOTE(review): the solution vector ``X`` is computed but neither
        stored nor returned — this method looks unfinished; the intended
        outputs (reflection/transmission coefficients) appear in the
        commented-out code below. Confirm before relying on it.
        """
        self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
        i_eq = 0
        # Loop on the interfaces
        for _int in self.interfaces:
            if self.method == "global":
                i_eq = _int.update_M_global(self.A, i_eq)
        # for i_inter, _inter in enumerate(self.interfaces):
        #     if _inter[0] == "fluid":
        #         if _inter[1] == "fluid":
        #             i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "pem":
        #             i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "elastic":
        #             i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
        #     elif _inter[0] == "pem":
        #         if _inter[1] == "fluid":
        #             i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "pem":
        #             i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "elastic":
        #             i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
        #     elif _inter[0] == "elastic":
        #         if _inter[1] == "fluid":
        #             i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "pem":
        #             i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
        #         if _inter[1] == "elastic":
        #             i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
        # if self.backing == backing.rigid:
        #     if Layers[-1].medium.MODEL == "fluid":
        #         i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
        #     elif Layers[-1].medium.MODEL == "pem":
        #         i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
        #     elif Layers[-1].medium.MODEL == "elastic":
        #         i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
        # elif self.backing == "transmission":
        #     i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
        self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d) # - is for transposition, exponential term is for the phase shift
        self.A = np.delete(self.A, 0, axis=1)
        # print(self.A)
        X = LA.solve(self.A, self.F)
        # print(X)
        # R_pyPLANES_PW = X[0]
        # if self.backing == "transmission":
        #     T_pyPLANES_PW = X[-2]
        # else:
        #     T_pyPLANES_PW = 0.
        # X = np.delete(X, 0)
        # del(dofs[0])
        # for i, _ld in enumerate(dofs):
        #     dofs[i] -= 2
        # if self.plot:
        #     self.plot_sol_PW(X, dofs)
        # out["R"] = R_pyPLANES_PW
        # out["T"] = T_pyPLANES_PW
        # return out
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[3, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[3, 3]
# M[ieq, d[iinter+1][0]] = SV_2[5, 0]
# M[ieq, d[iinter+1][1]] = SV_2[5, 1]
# M[ieq, d[iinter+1][2]] = SV_2[5, 2]
# M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[1, 3]
# M[ieq, d[iinter+0][4]] = SV_1[1, 4]
# M[ieq, d[iinter+0][5]] = SV_1[1, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3])
# M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4])
# M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5])
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[5, 3]
# M[ieq, d[iinter+0][4]] = SV_1[5, 4]
# M[ieq, d[iinter+0][5]] = SV_1[5, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[3, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# for _i in range(4):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[4, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[4, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[4, 5]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[3, 3]
# M[ieq, d[iinter+0][4]] = SV_1[3, 4]
# M[ieq, d[iinter+0][5]] = SV_1[3, 5]
# ieq += 1
# return ieq
# def interface_elastic_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# ieq += 1
# return ieq
# def interface_elastic_rigid(self, M, ieq, L, d):
# SV, k_y = elastic_SV(L.medium,self.kx, self.omega)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]
# M[ieq, d[3]] = SV[1, 3]
# ieq += 1
# M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[3, 2]
# M[ieq, d[3]] = SV[3, 3]
# ieq += 1
# return ieq
# def interface_pem_rigid(self, M, ieq, L, d):
# SV, k_y = PEM_SV(L.medium, self.kx)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[1, 3]
# M[ieq, d[4]] = SV[1, 4]
# M[ieq, d[5]] = SV[1, 5]
# ieq += 1
# M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[2, 3]
# M[ieq, d[4]] = SV[2, 4]
# M[ieq, d[5]] = SV[2, 5]
# ieq += 1
# M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[5, 3]
# M[ieq, d[4]] = SV[5, 4]
# M[ieq, d[5]] = SV[5, 5]
# ieq += 1
# return ieq
# def plot_sol_PW(self, X, dofs):
# x_start = self.shift_plot
# for _l, _layer in enumerate(self.layers):
# x_f = np.linspace(0, _layer.thickness,200)
# x_b = x_f-_layer.thickness
# if _layer.medium.MODEL == "fluid":
# SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K)
# pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# # plt.figure(5)
# # plt.plot(x_start+x_f,np.abs(ut),'b')
# # plt.plot(x_start+x_f,np.imag(ut),'k')
# if _layer.medium.MODEL == "pem":
# SV, k_y = PEM_SV(_layer.medium, self.kx)
# ux, uy, pr, ut = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(3):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# uy += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# if _layer.medium.MODEL == "elastic":
# SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega)
# ux, uy, pr, sig = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(2):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# uy += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# # if self.plot[2]:
# # plt.figure(2)
# # plt.plot(x_start+x_f, np.abs(pr), 'r')
# # plt.plot(x_start+x_f, np.imag(pr), 'm')
# # plt.title("Sigma_yy")
# # if self.plot[2]:
# # plt.figure(3)
# # plt.plot(x_start+x_f, np.abs(sig), 'r')
# # plt.plot(x_start+x_f, np.imag(sig), 'm')
# # plt.title("Sigma_xy")
# x_start += _layer.thickness
# def PEM_SV(mat,ky):
# ''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}'''
# kx_1 = np.sqrt(mat.delta_1**2-ky**2)
# kx_2 = np.sqrt(mat.delta_2**2-ky**2)
# kx_3 = np.sqrt(mat.delta_3**2-ky**2)
# kx = np.array([kx_1, kx_2, kx_3])
# delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2
# alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2
# alpha_3 = -2*1j*mat.N*kx[2]*ky
# SV = np.zeros((6,6), dtype=complex)
# SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 1] = np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]])
# SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]])
# return SV, kx
# def elastic_SV(mat,ky, omega):
# ''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}'''
# P_mat = mat.lambda_ + 2.*mat.mu
# delta_p = omega*np.sqrt(mat.rho/P_mat)
# delta_s = omega*np.sqrt(mat.rho/mat.mu)
# kx_p = np.sqrt(delta_p**2-ky**2)
# kx_s = np.sqrt(delta_s**2-ky**2)
# kx = np.array([kx_p, kx_s])
# alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2
# alpha_s = 2j*mat.mu*kx[1]*ky
# SV = np.zeros((4, 4), dtype=np.complex)
# SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky])
# SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky])
# SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]])
# SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]])
# return SV, kx
# def fluid_SV(kx, k, K):
# ''' S={0:u_y , 1:p}'''
# ky = np.sqrt(k**2-kx**2)
# SV = np.zeros((2, 2), dtype=complex)
# SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)])
# SV[1, 0:2] = np.array([1, 1])
# return SV, ky
# def resolution_PW_imposed_displacement(S, p):
# # print("k={}".format(p.k))
# Layers = S.layers.copy()
# n, interfaces, dofs = initialise_PW_solver(Layers, S.backing)
# M = np.zeros((n, n), dtype=complex)
# i_eq = 0
# # Loop on the layers
# for i_inter, _inter in enumerate(interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p)
# if S.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# if Layers[0].medium.MODEL == "fluid":
# F = np.zeros(n, dtype=complex)
# SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K)
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness)
# F[i_eq] = 1.
# elif Layers[0].medium.MODEL == "pem":
# SV, k_y = PEM_SV(Layers[0].medium, p.kx)
# M[i_eq, dofs[0][0]] = SV[2, 0]
# M[i_eq, dofs[0][1]] = SV[2, 1]
# M[i_eq, dofs[0][2]] = SV[2, 2]
# M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# F = np.zeros(n, dtype=complex)
# F[i_eq] = 1.
# i_eq +=1
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]
# M[i_eq, dofs[0][2]] = SV[0, 2]
# M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# i_eq += 1
# M[i_eq, dofs[0][0]] = SV[3, 0]
# M[i_eq, dofs[0][1]] = SV[3, 1]
# M[i_eq, dofs[0][2]] = SV[3, 2]
# M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# X = LA.solve(M, F)
# # print("|R pyPLANES_PW| = {}".format(np.abs(X[0])))
# print("R pyPLANES_PW = {}".format(X[0]))
# plot_sol_PW(S, X, dofs, p)
| StarcoderdataPython |
1740480 | <filename>django_town/rest/resources/mongo_resource.py<gh_stars>0
from django_town.utils import json
import datetime
from django.utils.functional import cached_property
from mongoengine import ListField, SortedListField, EmbeddedDocumentField, PointField, EmbeddedDocument, \
NotUniqueError, ValidationError, DateTimeField
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from bson import ObjectId
from bson.errors import InvalidId
from django_town.mongoengine_extension.fields import DynamicResourceField, ResourceReferenceField
from django_town.rest.serializers import default_mongo_serializer
from django_town.rest.exceptions import RestNotFound, RestBadRequest, RestFormInvalid, RestDuplicate
from django_town.rest.resources.base import DataBasedResource, ResourceInstance
from django_town.utils.rand import generate_random_from_vschar_set
class MongoResourceInstance(ResourceInstance):
    """
    Resource instance backed by a mongoengine document.

    The underlying document is resolved lazily: accessing ``_instance``
    queries the collection by primary key the first time, and the result is
    memoized by ``cached_property`` (or pre-seeded via ``instance``).
    """

    def __init__(self, pk, manager, instance=None):
        self.document = manager._meta.document
        if instance:
            # Pre-populate the cached_property slot so no query is issued.
            self.__dict__['_instance'] = instance
        super(MongoResourceInstance, self).__init__(pk, manager)

    @cached_property
    def _instance(self):
        """Lazily fetch the mongoengine document for ``self._pk``."""
        try:
            return self.document.objects.get(pk=self._pk)
        except self.document.DoesNotExist:
            raise RestNotFound(self._manager)

    def update(self, data=None, files=None, acceptable_fields=None, required_fields=None):
        """Apply ``data``/``files`` to the document after field validation."""
        fields = {}
        for source in (data, files):
            if source:
                fields.update(source)
        provided = set(fields)
        allowed = acceptable_fields or self._manager._meta.create_acceptable_fields
        mandatory = required_fields or self._manager._meta.create_required_fields
        if mandatory and not set(mandatory).issubset(provided):
            raise RestBadRequest()
        if allowed and not set(allowed).issuperset(provided):
            raise RestBadRequest()
        try:
            document = self._instance
            document.update(**fields)
            document.save()
        except ValueError:
            raise RestBadRequest()
        self._manager.invalidate_cache(pk=self._pk)
        return self

    def delete(self):
        """Delete the backing document and drop the memoized instance."""
        self._instance.delete()
        # Remove the cached_property value so any later access re-queries.
        del self.__dict__["_instance"]
        self._manager.invalidate_cache(pk=self._pk)
        return
class MongoResource(DataBasedResource):
    """
    Mongoengine based resource.

    Default resource instance is "MongoResourceInstance" and the default
    serializer is ``default_mongo_serializer``.  Primary keys are exposed as
    url-safe base64 encodings of the document's ObjectId (see
    ``pk_to_object_id`` / ``object_id_to_pk``).
    You can install Mongoengine from "http://mongoengine.org/".
    """

    class Meta:
        resource_instance_cls = MongoResourceInstance
        document = None
        # Matches the url-safe base64 alphabet produced by object_id_to_pk.
        pk_regex = r"[a-zA-Z0-9\-_]+"
        serializer = default_mongo_serializer

    @staticmethod
    def pk_to_object_id(pk):
        """Decode a url-safe base64 pk into a bson ObjectId.

        Raises RestNotFound when the pk cannot be base64-decoded.
        """
        try:
            return ObjectId(urlsafe_base64_decode(pk))
        except TypeError:
            raise RestNotFound()

    @staticmethod
    def object_id_to_pk(obj_id):
        """Encode a bson ObjectId as a url-safe base64 pk string."""
        return urlsafe_base64_encode(obj_id.binary)

    def __init__(self, document=None, name=None, **kwargs):
        """Register resource-typed and datetime fields of the document.

        document: mongoengine Document class; defaults to ``Meta.document``.
        name: resource name; defaults to the lowercased document class name.
        """
        from django_town.mongoengine_extension import ResourceField, DynamicResourceField, ResourceIntField
        if not document:
            document = self._meta.document
        _db_field_map = document._db_field_map
        kwargs['_resource_fields'] = set()
        if not self._meta.date_format_fields:
            self._meta.date_format_fields = []
        for field_in_python, current_field in document._fields.items():
            field_name = _db_field_map.get(field_in_python)
            if isinstance(current_field, (ResourceField, DynamicResourceField, ResourceIntField, ResourceReferenceField)):
                kwargs['_resource_fields'].add((field_name, field_in_python))
            # Fixed: original tested ``isinstance(each_field, DateTimeField)``
            # against the (name, field) tuple, so the branch never fired and
            # datetime fields were never registered for date formatting.
            if isinstance(current_field, DateTimeField):
                self._meta.date_format_fields.append(field_in_python)
        if not name:
            name = document.__name__.lower()
        super(MongoResource, self).__init__(name=name, document=document, **kwargs)

    def create_from_db(self, data=None, files=None, or_get=False):
        """Create (or get-or-create) a document from request data/files.

        Values are coerced per field type: list fields via ``getlist``,
        embedded documents from JSON, points from "lat,lng" strings,
        datetimes from unix timestamps.  Raises RestBadRequest,
        RestDuplicate or RestFormInvalid on failure.
        """
        kwargs = {}
        _db_field_map = self._meta.document._db_field_map
        for each_field in self._meta.document._fields.items():
            field_name = _db_field_map.get(each_field[0])
            if not field_name:
                continue
            data_source = None
            if data and field_name in data:
                data_source = data
            elif files and field_name in files:
                data_source = files
            if data_source:
                current_field = each_field[1]
                if isinstance(current_field, (SortedListField, ListField)) and hasattr(data_source, 'getlist'):
                    kwargs[field_name] = data_source.getlist(field_name)
                elif isinstance(current_field, EmbeddedDocumentField) and \
                        not isinstance(data_source.get(field_name), EmbeddedDocument):
                    kwargs[field_name] = json.loads(data_source.get(field_name))
                elif isinstance(current_field, PointField):
                    # Input arrives as "lat,lng"; PointField stores [lng, lat].
                    latlng = data_source.get(field_name)
                    if latlng:
                        latlng = data_source.get(field_name).split(',')
                        kwargs[field_name] = [float(latlng[1]), float(latlng[0])]
                elif isinstance(current_field, DynamicResourceField):
                    kwargs[field_name] = data_source.get(field_name)
                    kwargs[field_name + "_type"] = kwargs[field_name]._manager._meta.name
                elif isinstance(current_field, DateTimeField):
                    kwargs[field_name] = datetime.datetime.fromtimestamp(float(data_source.get(field_name)))
                else:
                    kwargs[field_name] = data_source.get(field_name)
        try:
            if or_get:
                _instance, created = self._meta.document.objects.get_or_create(**kwargs)
            else:
                created = False
                _instance = self._meta.document(**kwargs).save()
        except ValueError:
            import traceback
            traceback.print_exc()
            raise RestBadRequest()
        except NotUniqueError:
            raise RestDuplicate()
        except ValidationError as e:
            import traceback
            traceback.print_exc()
            # Fixed: ``e.errors.keys()[0]`` raises TypeError on Python 3
            # (dict views are not subscriptable); report the first bad field.
            raise RestFormInvalid(next(iter(e.errors)))
        return _instance, created

    def __call__(self, pk, instance=None):
        """Build a resource instance from a pk (ObjectId or base64 string)."""
        if isinstance(pk, ObjectId):
            return self._meta.resource_instance_cls(pk, self, instance=instance)
        try:
            return self._meta.resource_instance_cls(self.pk_to_object_id(pk), self, instance=instance)
        except InvalidId:
            raise RestBadRequest()

    def serialize(self, resource_instance, options=None, request=None):
        """Serialize, expanding resource-typed fields into nested dicts."""
        ret = super(MongoResource, self).serialize(resource_instance, options=options, request=request)
        for each_resource_fields, field_in_python in self._meta._resource_fields:
            if each_resource_fields in ret:
                value = getattr(self._meta.document, field_in_python).to_python(ret[each_resource_fields])
                if isinstance(value, ResourceInstance):
                    ret[each_resource_fields] = value.to_dict()
        return ret

    def pk_collection(self, **kwargs):
        """Return all primary keys of the collection, ordered by pk."""
        return self._meta.document.objects.order_by('pk').values_list('pk')

    def count(self, **kwargs):
        """Count documents matching the given query kwargs."""
        # Fixed: ``self._meta.document(**kwargs).count()`` instantiated a
        # document and called ``.count()`` on the instance (AttributeError);
        # the count must go through the queryset manager.
        return self._meta.document.objects(**kwargs).count()
3217944 | <filename>python3/easy/temperatures.py
import sys
import math
# CodinGame "Temperatures": print the reading closest to zero,
# preferring the positive value on ties; prints 0 when input is empty.
n = int(input())  # declared count (parsing is driven by the split below)
temps = [int(token) for token in (input().split() or [0])]
closest_pos = min((t for t in temps if t >= 0), default=math.inf)
closest_neg = max((t for t in temps if t < 0), default=-math.inf)
print(closest_pos if closest_pos <= abs(closest_neg) else closest_neg)
| StarcoderdataPython |
192220 | import sys
from typing import Dict
from ttt import *
from ttt.helper_util import PositionOccupiedException, InvalidCellPosition, \
BgColors
from utils.contracts import require, ensure
# Registry of the two participants, keyed by player number (1 or 2);
# populated by the __main__ block before a game starts.
players: Dict[int, Player] = {}

# Marker assigned to each player number: player 1 plays "X", player 2 "O".
symbols = {
    1: "X",
    2: "O"
}
@require("Player to be an Instance of Player",
         lambda args: isinstance(args.current_player, Player))
@ensure("Color Can be one of the Defined Types",
        lambda args, result: result in [BgColors.PURPLE, BgColors.ORANGE])
def get_color(current_player: Player):
    """Return the console colour associated with the player's marker."""
    marker = current_player.marker.lower()
    if marker == 'x':
        return BgColors.PURPLE
    if marker == 'o':
        return BgColors.ORANGE
@require("Player Number to be an integer",
         lambda args: isinstance(args.count, int))
@ensure("Requires a valid Player to be returned",
        lambda args, result: isinstance(result, Player))
def get_player_info(count: int) -> Player:
    """Prompt until a non-empty name is entered, then build that Player."""
    prompt = "\nEnter name for Player {}: \n>> ".format(count)
    while True:
        name = str(input(prompt))
        if name:
            return Player(name=name, marker="{}".format(symbols[count]))
@require("Current Player to be not None and valid",
         lambda args: args.current_player in players.values())
@ensure("Requires a valid return move",
        lambda args, result: isinstance(result, Move))
def get_move(current_player: Player) -> Move:
    """Keep prompting until the player enters an integer cell position."""
    prompt = (
        get_color(current_player=current_player) +
        "{}, choose a box to place an '{}' into: \n >> ".format(
            current_player.name,
            current_player.marker) + BgColors.RESET)
    while True:
        position = input(prompt)
        if not position:
            continue
        try:
            return Move(position=int(position))
        except ValueError:
            # Non-numeric input: ask again.
            pass
def start_game(current_game: GameBoard):
    """
    Start the game in an infinite loop so that the players can opt to continue
    the game if they choose to.

    Each outer-loop iteration plays one full game on ``current_game``; after
    the game ends the players may reset the board or exit the program.
    """
    while True:
        turn = 0
        done = False
        # One game: alternate players until a win, a tie, or max_moves.
        while turn <= current_game.max_moves and not done:
            # (turn + 1) % 2 + 1 alternates 2, 1, 2, 1, ... as turn rises from
            # 0, so player 2 moves first — NOTE(review): confirm that is
            # intended, since player 1 holds the 'X' marker.
            whose_turn = (turn + 1) % 2 + 1
            # Re-prompt the same player until a legal move is applied.
            while True:
                print("\n")
                print(current_game)
                move = get_move(players[whose_turn])
                try:
                    winner = move_and_check_if_over(
                        game=current_game, current_player=players[whose_turn],
                        move=move)
                    if winner is not None:
                        # The current player completed a winning line.
                        print("\n")
                        print(current_game)
                        print("Congratulations {} You have won.\n\n".format(
                            players[whose_turn].name))
                        done = True
                        break
                    else:
                        # Legal move, no winner yet: hand play to the opponent.
                        break
                except AllMovesExhaustedWithNoWinner:
                    # Board filled with no winning line.
                    print("The Game has ended in a Tie. !!!")
                    done = True
                    break
                except PositionOccupiedException:
                    print(
                        "Position has already been taken. Please pick another.")
                except InvalidCellPosition:
                    print("You've picked an Invalid Cell. Please pick another.")
            turn += 1
            if turn == current_game.max_moves:
                # Ran out of moves without a declared winner.
                print("The Game has ended in a Tie. !!!")
                break
        # Anything other than a single 'y'/'Y' character exits the program.
        c = input(
            "You've successfully completed a game of Tic-Tac-Toe. "
            "Do you want to continue? [y/N]: ")
        if len(c) < 1:
            sys.exit(0)
        elif len(c) > 1:
            sys.exit(0)
        elif c.upper() == 'Y':
            current_game.reset()
        else:
            sys.exit(0)
if __name__ == "__main__":
    # Interactive entry point: gather board settings and players, then play.
    while True:
        print("Welcome to a Game of Tic-Tac-Toe.\n\n")
        default_size = 3
        size = input(
            "Please Enter the Size of Board you want to Play with "
            "(NxN)[{}]: ".format(default_size))
        if len(size) < 1:
            # Empty input: fall back to the default board size.
            size = 3
        else:
            try:
                size = int(size)
                if size < 0:
                    # Negative sizes are treated like any other invalid value.
                    raise ValueError()
            except ValueError:
                print("You entered an Invalid Value. Defaulting to {}".format(
                    default_size))
                size = default_size
        # Winning streak length defaults to the full board size.
        default_win_count = size
        win_count = input(
            "Please Enter the Number of Marker in a Row that declares the "
            "player a winner [{}]: ".format(default_win_count))
        if len(win_count) < 1:
            # NOTE(review): empty input defaults to 3 rather than
            # default_win_count — confirm this mismatch with the prompt.
            win_count = 3
        else:
            try:
                win_count = int(win_count)
                if win_count > size or win_count < 0:
                    # A streak longer than the board can never be achieved.
                    raise ValueError()
            except ValueError:
                print("You entered an Invalid Value. Defaulting to {}".format(
                    default_win_count))
                win_count = default_win_count
        player1 = get_player_info(1)
        player2 = get_player_info(2)
        players[1] = player1
        players[2] = player2
        game = GameBoard(n=size, win_count=win_count, player1=player1,
                         player2=player2)
        # start_game loops internally; it only returns the process via sys.exit.
        start_game(current_game=game)
| StarcoderdataPython |
3331552 | """
Sequence preprocessing functionality. Extends sklearn
transformers to sequences.
"""
import numpy as np
from sklearn.base import ClassifierMixin, BaseEstimator, TransformerMixin, clone
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC
# A set of procedures for preprocessing of sequences
def make_subsequences(x, y, step=1, max_len=2 ** 31):
    """
    Creates views to all subsequences of the sequence x. For example if
    x = [1,2,3,4]
    y = [1,1,0,0]
    step = 1
    the result is a tuple a, b, where:
    a = [[1],
         [1,2],
         [1,2,3],
         [1,2,3,4]
        ]
    b = [1,1,0,0]
    Note that only a view into x is created, but not a copy of elements of x.

    Parameters
    ----------
    X : array [seq_length, n_features]
    y : numpy array of shape [n_samples]
        Target values. Can be string, float, int etc.
    step : int
        Step with which to subsample the sequence.
    max_len : int, default 2 ** 31
        Maximum number of trailing elements kept in every subsequence.

    Returns
    -------
    a, b : a is all subsequences of x taken with some step, and b is labels
        assigned to these sequences.
    """
    X = []
    Y = []
    for i in range(step - 1, len(x), step):
        # Fixed off-by-one: ``max(0, i - max_len)`` kept max_len + 1 elements.
        # A window that ends at index i and holds at most max_len elements
        # starts at i + 1 - max_len.
        start = max(0, i + 1 - max_len)
        X.append(x[start:i + 1])
        Y.append(y[i])
    return X, Y
class PadSubsequence(BaseEstimator, TransformerMixin):
    """
    Extracts a fixed-length tail from every sequence in the input.

    Sequences shorter than ``length`` are left-padded with zeros before the
    subsampling step is applied.

    Parameters
    ----------
    length : int, number of trailing elements to keep
    step : int, subsampling step applied inside the extracted window
    """
    def __init__(self, length=10, step=1):
        self.length = length
        self.step = step

    def _check_input(self, X):
        if len(X.shape) < 2:
            raise ValueError("The input should be a sequence, found shape %s" % X.shape)

    def fit(self, X, y=None):
        # Remember the feature dimensionality of the training sequences.
        self.n_features = X[0].shape[-1]
        return self

    def transform(self, X, y=None):
        # Instances unpickled from older versions may lack ``step``.
        if not hasattr(self, 'step'):
            self.step = 1
        # X might be a list of variable-length sequences.
        padded = []
        for seq in X:
            if len(seq) >= self.length:
                window = seq[-self.length::self.step]
            else:
                padding = np.zeros((self.length - len(seq), seq.shape[-1]))
                window = np.row_stack((padding, seq))[::self.step]
            padded.append(window)
        return np.array(padded)
class CalculateSpectrum(BaseEstimator, TransformerMixin):
    """Computes the amplitude spectrum of every sequence.

    NOTE(review): ``copy`` is accepted but never stored, and the stored
    ``with_mean`` / ``with_std`` flags are unused by ``transform`` — kept
    as-is for interface compatibility.
    """
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        """Apply an FFT along the time axis and keep the magnitudes.

        Parameters
        ----------
        X : array-like, shape [n_samples, seq_len, n_features]
            Sequences to transform; the FFT runs over ``seq_len`` (axis 1).
        """
        from scipy import fftpack
        return abs(fftpack.fft(X, axis=1))
class FlattenShape(BaseEstimator, TransformerMixin):
    """
    Flattens every sample into a single 1d vector, which lets "classic"
    estimators such as SVMs consume sequence data.  The shape of one sample
    is remembered at fit time so the transformation can be inverted.
    """
    def fit(self, X, y=None):
        # Store the per-sample shape for inverse_transform.
        self.shape = X[0].shape
        return self

    def transform(self, X, y=None):
        return np.array([np.ravel(sample) for sample in X])

    def inverse_transform(self, X, y=None):
        return np.array([np.reshape(sample, self.shape) for sample in X])
# Wrapper for the standard classes of sklearn to work with sequence labeling
class SequenceTransformer(BaseEstimator, TransformerMixin):
    """Applies a (cloned) base transformer to every sequence in a dataset."""

    def __init__(self, transformer, mode='stack'):
        """
        Applies transformer to every element in input sequence.

        transformer: TransformerMixin, base transformer that is cloned and
            fitted in ``fit``.
        mode: How to preprocess sequences for transformer fitting.
            default ('stack'): stack all sequences into one huge sequence
            so that then it looks like a normal 2d training set.
        """
        self.transformer = transformer
        self.mode = mode
        self.transformer_ = None

    def fit(self, X, y=None):
        """
        Fit base transformer to the set of sequences.

        X: iterable of shape [n_samples, ...]
        y: iterable of shape [n_samples, ...]
        """
        self.transformer_ = clone(self.transformer)
        if self.mode == 'stack':
            # Stack all elements into one big 2d dataset; each sequence's
            # label is repeated once per element of that sequence.
            X_conc = np.row_stack([x for x in X])
            if y is not None:
                y_conc = np.concatenate([[v] * len(x) for x, v in zip(X, y)])
        else:
            X_conc = X
            y_conc = y
        if y is None:
            self.transformer_.fit(X_conc)
        else:
            self.transformer_.fit(X_conc, y_conc)
        return self

    def transform(self, X, y=None):
        """Apply the fitted transformer to every sequence and stack results."""
        if y is None:
            result = [self.transformer_.transform(xx) for xx in X]
        else:
            result = [self.transformer_.transform(xx, [yy] * len(xx)) for xx, yy in zip(X, y)]
        return np.array(result)

    def set_params(self, **params):
        # Fixed: previously forwarded to the nonexistent attribute
        # ``self.base_transformer``, raising AttributeError on every call.
        self.transformer.set_params(**params)
        return self
class Seq1Dto2D(BaseEstimator, TransformerMixin):
    """Converts 1-d sequences into sequences of length-1 feature vectors.

    A text sequence is just a list of characters, so
    "abc" -> [['a'], ['b'], ['c']].

    Useful for applications where you do not want to convert text to
    features explicitly.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        return [np.array(list(sequence))[:, np.newaxis] for sequence in X]
class Subsequensor(BaseEstimator, TransformerMixin):
    """
    Creates views into all subsequences of a numpy sequence.

    Parameters
    ----------
    step: int, step with which the subsequences are taken.
    max_subsequence: int or None, maximum subsequence size that is used
        in order to predict a certain output value.
    """

    def __init__(self, step, max_subsequence=None):
        self.step = step
        self.max_subsequence = max_subsequence

    def fit(self, X, Y):
        """No-op fit, kept for scikit-learn API compatibility.

        Parameters
        ----------
        X : list of numpy arrays
            List of sequences; every sequence is a 2d numpy array whose
            first dimension is time and last dimension is features.
        Y : list of object
            Labels assigned to the corresponding sequences in X.

        Returns
        -------
        self : object
        """
        return self

    def transform(self, X, Y=None):
        """Return subsequence views (and matching label slices when Y given).

        Parameters
        ----------
        X : list of numpy arrays
            List of sequences as described in ``fit``.
        Y : list of object or None
            Labels assigned to the corresponding sequences in X; when None
            (test time) only the views are returned.

        Returns
        -------
        Either the list of views, or a (views, labels) pair.
        """
        test_time = Y is None
        if test_time:
            # Fabricate placeholder labels so make_subsequences can be reused.
            Y = [[None] * len(x) for x in X]
        if self.max_subsequence is None:
            extra = (self.step,)
        else:
            extra = (self.step, self.max_subsequence)
        pairs = [make_subsequences(x, y, *extra) for x, y in zip(X, Y)]
        views = [pair[0] for pair in pairs]
        if test_time:
            return views
        return views, [pair[1] for pair in pairs]
class SequenceEstimator(BaseEstimator):
    """
    Generic estimator that labels every element in a sequence using an
    underlying subsequence estimator, e.g. labeling which parts of sensory
    data correspond to which user activity.

    Example:
        X = [[1,2,3]]
        y = [[0,0,1]]
    fit() trains the estimator to classify:
        X = [[1], [1,2], [1,2,3]]
        y = [[0, 0, 1]]
    predict() on X returns labels for every element in a sequence.

    Parameters
    ----------
    estimator: BaseEstimator, model used to do estimations on subsequences.
    step: int, step with which subsequences are taken when training the
        internal estimator.
    """

    def __init__(self, estimator, step=1, max_subsequence=None):
        self.estimator = estimator
        self.step = step
        self.max_subsequence = max_subsequence
        # Reserved: instance responsible for producing views into sequences.
        self.subsequencer = None

    def set_params(self, **params):
        # Intercept our own "<classname>__step" parameter; everything else is
        # forwarded to the wrapped estimator.
        step_key = self.__class__.__name__.lower() + "__step"
        if step_key in params:
            self.step = params[step_key]
            params = params.copy()
            del params[step_key]
        self.estimator.set_params(**params)
        return self

    def fit(self, X, y):
        views, labels = Subsequensor(step=self.step, max_subsequence=self.max_subsequence).transform(X, y)
        # Concatenate all per-sequence data into flat training sets.
        flat_X, flat_y = sum(views, []), sum(labels, [])
        self.estimator.fit(flat_X, flat_y)
        return self

    def predict(self, X):
        views = Subsequensor(step=1).transform(X)
        return [self.estimator.predict(view) for view in views]

    def score(self, X, y):
        views, labels = Subsequensor(step=self.step, max_subsequence=self.max_subsequence).transform(X, y)
        return self.estimator.score(sum(views, []), sum(labels, []))
# Classes that work with sequences directly
# Readers
def read_wav(filename, mono=False):
    """Read a wav file into a 2-d array of per-sample intensities.

    Each row is one audio frame; rows have length 1 in mono mode, otherwise
    one entry per channel.  Mono files are always read as mono.

    Parameters
    ----------
    filename : string, file to read
    mono: bool, average the channels into a single column when True.

    Returns
    -------
    numpy array containing the sequence of audio intensities.
    """
    import scipy.io.wavfile as scw
    _framerate, samples = scw.read(filename)
    if len(samples.shape) < 2:
        # Mono files come back 1-d; promote to a column vector.
        samples = samples[:, np.newaxis]
    if mono:
        samples = np.mean(samples, axis=1)[:, np.newaxis]
    return samples
# Example pipelines
def rnn_pipe():
    """Example pipeline: padded subsequences fed into an RNN classifier.

    Returns a (pipeline, parameter_grid) pair suitable for grid search.
    """
    pipeline = make_pipeline(
        PadSubsequence(),
        RNNClassifier(),
    )
    param_grid = [{
        "paddedsubsequence__length": [2, 4],
        "rnnclassifier__n_neurons": [32],
    }]
    return pipeline, param_grid
def svm_pipe():
    """Example pipeline: padded subsequences flattened and fed to a LinearSVC.

    Returns a (pipeline, parameter_grid) pair suitable for grid search.
    """
    pipeline = make_pipeline(
        PadSubsequence(),
        FlattenShape(),
        StandardScaler(),
        LinearSVC(),
    )
    param_grid = [{
        "paddedsubsequence__length": [1, 2, 4, 8, 16],
        "linearsvc__C": 10 ** np.linspace(-10, 10, 51),
    }]
    return pipeline, param_grid
if __name__ == "__main__":
    # No CLI behavior is defined; the module is intended to be imported.
    pass
| StarcoderdataPython |
160701 | <reponame>Mopolino8/pylbm
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Example of a two velocities scheme for the shallow water system
d_t(h) + d_x(q) = 0, t > 0, 0 < x < 1,
d_t(q) + d_x(q^2/h+gh^2/2) = 0, t > 0, 0 < x < 1,
"""
import sympy as sp
import pylbm

# pylint: disable=invalid-name

# Conserved moments (h: water height, q: discharge) and the space symbol.
H, Q, X = sp.symbols('h, q, X')
# Scheme constants: lambda is the lattice velocity, g the gravity constant.
LA, G = sp.symbols('lambda, g', constants=True)
# Relaxation parameters of the two schemes.
SIGMA_H, SIGMA_Q = sp.symbols('sigma_1, sigma_2', constants=True)

# Two coupled two-velocity schemes in 1D: the first transports h, the
# second transports q with the shallow-water momentum-flux equilibrium.
scheme_cfg = {
    'dim': 1,
    'scheme_velocity': LA,
    'schemes': [
        {
            'velocities': [1, 2],
            'conserved_moments': H,
            'polynomials': [1, X],
            'relaxation_parameters': [0, 1/(.5+SIGMA_H)],
            'equilibrium': [H, Q],
        },
        {
            'velocities': [1, 2],
            'conserved_moments': Q,
            'polynomials': [1, X],
            'relaxation_parameters': [0, 1/(.5+SIGMA_Q)],
            'equilibrium': [Q, Q**2/H+.5*G*H**2],
        },
    ],
    # Numeric values substituted for the symbolic constants above.
    'parameters': {
        LA: 1.,
        G: 9.81,
        SIGMA_H: 1/1.8-.5,
        SIGMA_Q: 1/1.2-.5,
    },
}

scheme = pylbm.Scheme(scheme_cfg)
# Derive and print the equivalent PDE of the discrete scheme.
eq_pde = pylbm.EquivalentEquation(scheme)
print(eq_pde)
| StarcoderdataPython |
17528 | #
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a seperate Command Line Window for each car
# and call this script with the approriate car mac address.
# This script attempts to save lap times into local mysql db running on the pi
# Author: jstucken
# Created: 23-2-2021
#
SCRIPT_TITLE="Lap timer saving to Mysql"

# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
from php_communicator import PhpCommunicator
from network import Network

# Setup our car
car = Overdrive(12) # init overdrive object
car.enableLocationData()

# get car mac address from our class object
car_mac = car.getMacAddress()
car_id = car.getCarId()
username = car.getUsername()
student_id = car.getStudentId()

# count number of laps completed
lap_count = 0

# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(400, 800)

last_lap_time = 0
last_lap_count = -1

# race 3 laps and time each one
# NOTE(review): if getLapCount() can ever skip the value 3, this loop never
# terminates -- consider `while lap_count < 3`; confirm against the Overdrive API.
while lap_count !=3:
    time.sleep(0.1)
    # lap count is incremented when cars pass over the finish line
    lap_count = car.getLapCount()
    # count laps done
    if last_lap_count != lap_count:
        last_lap_count = lap_count
        print()
        print("lap_count: "+str(lap_count))
    # get lap time
    prev_lap_time = car.getLapTime()
    if last_lap_time != prev_lap_time:
        print()
        print("prev_lap_time: "+str(prev_lap_time))
        # if car has completed at least 1 lap
        if lap_count > 0:
            # Save last_lap_time time to database now
            # get cars current location and speed
            # NOTE(review): location is fetched but never sent to the API -- confirm.
            location = car.getLocation()
            speed = car.getSpeed()
            # data to be sent to API
            data = {
                'student_id':student_id,
                'car_id':car_id,
                'lap_time':prev_lap_time,
                'lap_count':lap_count,
                'speed':speed
            }
            # get the local IP address of the server machine
            local_ip_address = Network.getLocalIPAddress()
            # build our PHP script URL where data will be sent to be saved
            # eg "http://192.168.0.10/lap_times_save.php"
            url = "http://"+local_ip_address+"/python_communicator/lap_times_save.php"
            # Send data to PHP to save to database
            php = PhpCommunicator()
            return_text = php.getResponse(url, data) # get the response from PHP
            # extracting response text
            print("Response from PHP script: %s"%return_text)
        # end if
        print()
        print("*****")
        last_lap_time = prev_lap_time

# stop the car
car.stopCarFast()
print("Stopping as car has done the required number of laps")
car.disconnect()
quit()
# Read a name, normalise it, and report whether it contains "SILVA".
full_name = str(input('Digite um nome: ').strip().upper())
contains_silva = 'SILVA' in full_name
print('Have "Silva" in this name:')
print(contains_silva)
| StarcoderdataPython |
# Scan a saved "show version" capture for the Cisco vendor string.
file_name = "show_ver.out"
with open(file_name, "r") as f:
    output = f.read()

if 'Cisco' in output:
    # BUGFIX: the original used the Python 2 print statement, which is a
    # syntax error on Python 3; print() works on both interpreters.
    print("Found Cisco string")
1768753 | <reponame>Anancha/Programming-Techniques-using-Python
# Collect a user-specified number of entries and echo them back as a list.
entries = []
total = int(input("Enter the number: "))
for _ in range(total):
    entries.append(input("Enter the data: "))
print(entries)
128394 | <gh_stars>1-10
import datetime
import itertools
import logging
from numbers import Real
from typing import List, Tuple, Union, Optional, Any
import dateutil.parser
# Module-level logger.  Note that forcing INFO here overrides whatever level
# the application's logging configuration would otherwise apply to this module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class HeaderParameter:
    """Data descriptor exposing selected entries of ``instance._data``.

    Each descriptor holds one or more key paths into the owner's nested
    ``_data`` dict.  Reading returns a tuple with one entry per path; a
    ``None`` path always yields ``None``.  When ``default`` is given, a
    missing key yields the corresponding default instead of raising
    ``KeyError``.  Writing walks each path, creating intermediate dicts
    as needed.
    """
    paths: List[Tuple[str, ...]]
    defaultvalue: Optional[Tuple[Any, ...]] = None

    def __init__(self, *paths: Optional[Tuple[str, ...]], default: Optional[Tuple[Any, ...]] = None):
        self.paths = list(paths)
        self.defaultvalue = default

    def __get__(self, instance, objtype):
        if self.defaultvalue is None:
            fallbacks = itertools.repeat(None)
        else:
            fallbacks = self.defaultvalue
        values = []
        for path, fallback in zip(self.paths, fallbacks):
            if path is None:
                values.append(None)
                continue
            node = instance._data
            assert isinstance(node, dict)
            try:
                for key in path:
                    node = node[key]
            except KeyError:
                # Without defaults a missing key is an error.
                if self.defaultvalue is None:
                    raise
                values.append(fallback)
            else:
                values.append(node)
        return tuple(values)

    def __set__(self, instance, value):
        for path, item in zip(self.paths, value):
            if path is None:
                continue
            node = instance._data
            assert isinstance(node, dict)
            # Walk to the parent of the leaf, creating dicts along the way.
            for key in path[:-1]:
                node = node.setdefault(key, {})
            node[path[-1]] = item

    def __delete__(self, instance):
        # Deletion is accepted but intentionally does nothing.
        pass
class StringHeaderParameter(HeaderParameter):
    """Single-path descriptor that coerces the stored value to ``str``."""

    def __init__(self, path: Tuple[str, ...], default: Optional[Any] = None):
        super().__init__(path, default=(default,))

    def __get__(self, instance, owner) -> str:
        (raw,) = super().__get__(instance, owner)
        return str(raw)

    def __set__(self, instance, value: str):
        assert isinstance(value, str)
        super().__set__(instance, [value])
class ValueAndUncertaintyHeaderParameter(HeaderParameter):
    """Descriptor pairing a value path with an optional uncertainty path."""

    def __init__(self, pathvalue: Optional[Tuple[str, ...]], pathuncertainty: Optional[Tuple[str, ...]],
                 default: Optional[Tuple[Any, Any]] = None):
        super().__init__(pathvalue, pathuncertainty, default=default)

    def __get__(self, instance, owner) -> Tuple[float, Optional[float]]:
        # BUGFIX: preserve None entries (a path may be None, and the
        # uncertainty is declared Optional); float(None) raised TypeError.
        return tuple(float(x) if x is not None else None
                     for x in super().__get__(instance, owner))

    def __set__(self, instance, value: Tuple[float, Optional[float]]):
        if isinstance(value, Real):
            # BUGFIX: a bare number means "value without uncertainty".  It
            # must be widened to a 2-tuple because the base __set__ zips the
            # value against the two paths; a scalar float is not iterable
            # and previously raised TypeError there.
            value = (float(value), None)
        elif (isinstance(value, tuple) and isinstance(value[0], Real) and isinstance(value[1], Real) and
                len(value) == 2):
            value = (float(value[0]), float(value[1]))
        else:
            raise TypeError(value)
        super().__set__(instance, value)
class IntHeaderParameter(HeaderParameter):
    """Single-path descriptor that coerces the stored value to ``int``."""

    def __init__(self, path: Tuple[str, ...], default: Optional[Any] = None):
        super().__init__(path, default=(default,))

    def __get__(self, instance, owner) -> int:
        (raw,) = super().__get__(instance, owner)
        return int(raw)

    def __set__(self, instance, value: int):
        super().__set__(instance, [int(value)])
class FloatHeaderParameter(HeaderParameter):
    """Single-path descriptor that coerces the stored value to ``float``."""

    def __init__(self, path: Tuple[str, ...], default: Optional[Any] = None):
        super().__init__(path, default=(default,))

    def __get__(self, instance, owner) -> float:
        (raw,) = super().__get__(instance, owner)
        return float(raw)

    def __set__(self, instance, value: float):
        super().__set__(instance, [float(value)])
class DateTimeHeaderParameter(HeaderParameter):
    """Single-path descriptor holding a ``datetime``; ISO-ish strings are
    parsed on access/assignment via ``dateutil``."""

    def __init__(self, path: Tuple[str, ...], default: Optional[Any] = None):
        super().__init__(path, default=(default,))

    def __get__(self, instance, owner) -> datetime.datetime:
        (raw,) = super().__get__(instance, owner)
        if isinstance(raw, datetime.datetime):
            return raw
        if isinstance(raw, str):
            return dateutil.parser.parse(raw)
        raise TypeError(raw)

    def __set__(self, instance, value: Union[str, datetime.datetime]):
        if isinstance(value, datetime.datetime):
            super().__set__(instance, [value])
        elif isinstance(value, str):
            super().__set__(instance, [dateutil.parser.parse(value)])
        else:
            raise TypeError(value)
3283456 | """
Decorator is a structural design pattern wich allows add
new behaviors in objects by put them inside of a "wrapper"
(decorator) of objects
Decorators provide an flexible alternative by the use of
subclasses for the functionality extension
Decorator (design pattern) != Python decorator
Python decorator -> A decorator is a callable wich accepts
other function as argument (a decorated function). The
decorator can realize some processing within the decorated
function and wrapper it or change by another function or
invocable object (RAMALHO, 2015, p. 223)
"""
from __future__ import annotations
from abc import abstractmethod, ABC
from dataclasses import dataclass
from typing import List
from copy import deepcopy
# Dataclasses don't work well with inheritance
# We will use prototype pattern within decorators
# INGREDIENTS
# Each concrete ingredient fixes a default unit price; Hotdog.price sums them.
@dataclass
class Ingredient:
    """Base ingredient: anything with a price."""
    price: float


@dataclass
class Bread(Ingredient):
    """Ingredient Specialization"""
    price: float = 1.50


@dataclass
class Sausage(Ingredient):
    """Ingredient Specialization"""
    price: float = 4.99


@dataclass
class Bacon(Ingredient):
    """Ingredient Specialization"""
    price: float = 7.99


@dataclass
class Egg(Ingredient):
    """Ingredient Specialization"""
    price: float = 1.50


@dataclass
class Cheese(Ingredient):
    """Ingredient Specialization"""
    price: float = 6.35


@dataclass
class MashedPotatoes(Ingredient):
    """Ingredient Specialization"""
    price: float = 2.25


@dataclass
class PotatoSticks(Ingredient):
    """Ingredient Specialization"""
    price: float = 0.99
# HOT-DOGS
class Hotdog:
    """Base hotdog: a named product whose price is the sum of its
    ingredients' prices, rounded to cents.

    Serves both as a concrete base for the preset hotdogs and as the
    common interface wrapped by HotdogDecorator.
    """
    _name: str
    _ingredients: List[Ingredient]

    @property
    def price(self) -> float:
        """Total price of all ingredients, rounded to 2 decimal places."""
        total = sum(item.price for item in self._ingredients)
        return round(total, 2)

    @property
    def name(self) -> str:
        """Product name.

        :return: str"""
        return self._name

    @property
    def ingredients(self) -> List[Ingredient]:
        """Ingredient instances composing this hotdog.

        :return: List[Ingredient]"""
        return self._ingredients

    def __repr__(self) -> str:
        """Representation of the object.

        :return: string"""
        return f'{self.name}({self.price}) -> {self.ingredients}'
class SpecialHotdog(Hotdog):
    """Preset: the fully loaded hotdog."""

    def __init__(self) -> None:
        self._name: str = "SpecialHotdog"
        self._ingredients: List[Ingredient] = [
            Bread(),
            Cheese(),
            Egg(),
            MashedPotatoes(),
            PotatoSticks(),
            Sausage(),
        ]
class SimpleHotdog(Hotdog):
    """Preset: the minimal bread-sausage-sticks hotdog."""

    def __init__(self) -> None:
        self._name: str = "SimpleHotDog"
        self._ingredients: List[Ingredient] = [
            Bread(),
            PotatoSticks(),
            Sausage(),
        ]
# And about custom hotdogs?
# Use ONE decorator to do that
class HotdogDecorator(Hotdog):
    """Wraps a hotdog and adds one extra ingredient to it."""

    def __init__(self, hotdog: Hotdog, ingredient: Ingredient) -> None:
        self.hotdog = hotdog
        self._ingredient = ingredient
        # Deep-copy so the wrapped hotdog's own ingredient list stays intact.
        self._ingredients = deepcopy(self.hotdog.ingredients)
        self._ingredients.append(self._ingredient)

    @property
    def name(self) -> str:
        # Decorated name: wrapped name plus the added ingredient's class name.
        return f'{self.hotdog.name} + {self._ingredient.__class__.__name__}'
if __name__ == "__main__":
special_hotdog = SpecialHotdog()
simple_hotdog = SimpleHotdog()
bacon_simple_hotdog = HotdogDecorator(simple_hotdog, Bacon())
egg_bacon_simple_hotdog = HotdogDecorator(bacon_simple_hotdog, Egg())
mashed_potatoes_eggs_bacon_simple_hotdog = HotdogDecorator(
egg_bacon_simple_hotdog, MashedPotatoes())
print(special_hotdog)
print(simple_hotdog)
print(bacon_simple_hotdog)
print(egg_bacon_simple_hotdog)
print(mashed_potatoes_eggs_bacon_simple_hotdog)
| StarcoderdataPython |
1741317 | import configparser
import datetime
def parse_dates_from_config(dates_str):
    """Parse a comma-separated ``YYYY.MM.DD`` list into ``datetime.date``s."""
    parsed = []
    for token in dates_str.split(','):
        parsed.append(datetime.datetime.strptime(token, "%Y.%m.%d").date())
    return parsed
class Config(object):
    """In-memory representation of the time-tracking configuration file."""

    def __init__(self, api_key=None, work_hours_per_day=8.4, public_holidays=None,
                 vacation_days=None, working_days=None, user_id=1, workspace=1,
                 vacation_days_per_year=25, started_on=None):
        self.api_key = api_key
        self.work_hours_per_day = work_hours_per_day
        # Mutable defaults are created per instance, never shared.
        self.vacation_days = vacation_days or []
        self.public_holidays = public_holidays or []
        self.working_days = working_days or []
        self.user_id = user_id
        self.workspace = workspace
        self.vacation_days_per_year = vacation_days_per_year
        self.started_on = started_on

    def write_to_file(self, path):
        """Persist the configuration; currently only the API key is written."""
        cfg = configparser.ConfigParser()
        cfg['Authentication'] = {'API_KEY': self.api_key}
        with open(path, 'w') as configfile:
            cfg.write(configfile)

    @classmethod
    def read_from_file(cls, path):
        """Build a Config from an INI file with Authentication,
        'Work Hours' and 'User Info' sections."""
        cfg = configparser.ConfigParser()
        cfg.read(path)
        auth = cfg['Authentication']
        work = cfg['Work Hours']
        user = cfg['User Info']
        try:
            started_on = parse_dates_from_config(user['started_on'])[0]
        except Exception:
            # Deliberate best-effort fallback: any lookup/parse problem
            # defaults the start date to "today".
            started_on = datetime.datetime.today()
        return cls(
            api_key=auth['API_KEY'],
            work_hours_per_day=float(work['hours_per_day']),
            public_holidays=parse_dates_from_config(work['public_holidays']),
            vacation_days=parse_dates_from_config(work['vacation_days']),
            working_days=[int(day) for day in work['working_days'].split(',')],
            user_id=user['id'],
            workspace=user['workspace'],
            vacation_days_per_year=work['vacation_days_per_year'],
            started_on=started_on,
        )
| StarcoderdataPython |
3381164 | <reponame>bzg/acceslibre
import dj_database_url
import os
from django.contrib.messages import constants as message_constants
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name, required=True, type=str):
    """Read an environment variable and cast it.

    Args:
        var_name: name of the environment variable.
        required: when True a missing variable raises ImproperlyConfigured.
        type: callable used to cast the raw string value.  (The parameter
            name shadows the ``type`` builtin but is kept because callers
            in this module pass it by keyword.)

    Returns:
        The cast value, or None when ``required`` is False and the
        variable is unset.

    Raises:
        ImproperlyConfigured: when the variable is missing (required) or
        the value cannot be cast.
    """
    if required:
        try:
            return type(os.environ[var_name])
        except (TypeError, ValueError):
            # BUGFIX: int("abc") raises ValueError, not TypeError; catch both
            # so a malformed value surfaces as a configuration error.
            raise ImproperlyConfigured(f"Unable to cast '{var_name}' to {type}.")
        except KeyError:
            raise ImproperlyConfigured(
                f"The '{var_name}' environment variable must be set."
            )
    # BUGFIX: optional values are now cast too, so e.g. type=int behaves
    # consistently for required and optional variables (str cast is identity).
    value = os.environ.get(var_name)
    return type(value) if value is not None else None
# Deployment flags / site identity (this module configures a non-staging,
# DEBUG-off instance).
STAGING = False
SITE_NAME = "acceslibre"
SITE_HOST = "acceslibre.beta.gouv.fr"
SITE_ROOT_URL = f"https://{SITE_HOST}"

# Secrets and third-party credentials come from the environment.
SECRET_KEY = get_env_variable("SECRET_KEY")
DATAGOUV_API_KEY = get_env_variable("DATAGOUV_API_KEY", required=False)
DATAGOUV_DOMAIN = "https://demo.data.gouv.fr"
DATAGOUV_DATASET_ID = "93ae96a7-1db7-4cb4-a9f1-6d778370b640"

# Security
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True

# Maps
MAP_SEARCH_RADIUS_KM = 10

# Mapbox
# Note: this is NOT a sensitive information, as this token is exposed on the frontend anyway
MAPBOX_TOKEN = "<KEY>"

# Notifications
# number of days to send a ping notification after an erp is created but not published
UNPUBLISHED_ERP_NOTIF_DAYS = 7

# Mattermost hook
MATTERMOST_HOOK = get_env_variable("MATTERMOST_HOOK", required=False)

# Sentry integration
SENTRY_DSN = get_env_variable("SENTRY_DSN", required=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

DEBUG = False

# Static files
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, "staticfiles"))
STATIC_URL = "/static/"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)

# Messages: map Django message levels to Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    message_constants.DEBUG: "debug",
    message_constants.INFO: "info",
    message_constants.SUCCESS: "success",
    message_constants.WARNING: "warning",
    message_constants.ERROR: "danger",
}

# Application definition
INSTALLED_APPS = [
    "admin_auto_filters",
    "django_extensions",
    "nested_admin",
    "import_export",
    "reset_migrations",
    "django_admin_listfilter_dropdown",
    # Project apps.
    "compte.apps.CompteConfig",
    "erp.apps.ErpConfig",
    "subscription.apps.SubscriptionConfig",
    "contact.apps.ContactConfig",
    # Django contrib apps.
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.gis",
    "django.contrib.postgres",
    "django.contrib.sitemaps",
    "django.contrib.sites",
    "corsheaders",
    "logentry_admin",
    "django_better_admin_arrayfield.apps.DjangoBetterAdminArrayfieldConfig",
    "rest_framework",
    "rest_framework_gis",
    "crispy_forms",
    "reversion",
]
def floc_middleware(get_response):
    """Middleware factory stamping every response with a Referrer-Policy header.

    NOTE(review): despite the "floc" name this sets ``Referrer-Policy``, not
    the FLoC opt-out ``Permissions-Policy`` header -- confirm intent.
    """
    def middleware(request):
        processed = get_response(request)
        processed["Referrer-Policy"] = "same-origin"
        return processed

    return middleware
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    # Adds Referrer-Policy: same-origin to every response.
    "core.settings.floc_middleware",
]

SITE_ID = 1

# NOTE(review): this disables CORS origin checks for every endpoint -- confirm.
CORS_ORIGIN_ALLOW_ALL = True

REST_FRAMEWORK = {
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
    "PAGE_SIZE": 50,
}

ROOT_URLCONF = "core.urls"
def expose_site_context(request):
    """Expose generic site related static values to all templates.

    Note: we load these values from django.conf.settings so we can retrieve
    those defined/overriden in env-specific settings module (eg. dev/prod).
    """
    from django.conf import settings

    exposed_keys = (
        "MAP_SEARCH_RADIUS_KM",
        "MAPBOX_TOKEN",
        "SENTRY_DSN",
        "SITE_NAME",
        "SITE_HOST",
        "SITE_ROOT_URL",
        "STAGING",
    )
    return {key: getattr(settings, key) for key in exposed_keys}
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "debug": False,
            "context_processors": [
                # Custom processor (defined above) injecting site constants.
                "core.settings.expose_site_context",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "core.wsgi.application"

# Database connection
# see https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# see https://doc.scalingo.com/languages/python/django/start#configure-the-database-access
# see https://pypi.org/project/dj-database-url/ for options management
# NOTE(review): database_url is computed but never passed to
# dj_database_url.config(default=...), so the fallback below is unused
# (config() reads DATABASE_URL from the environment directly) -- confirm.
database_url = os.environ.get(
    "DATABASE_URL", "postgres://access4all:access4all@localhost/access4all"
)
DATABASES = {"default": dj_database_url.config()}
# Force the GeoDjango PostGIS backend regardless of the URL scheme.
DATABASES["default"]["ENGINE"] = "django.contrib.gis.db.backends.postgis"

# Default field to use for implicit model primary keys
# see https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# Cookie security: HTTPS-only cookies.
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "fr"
TIME_ZONE = "Europe/Paris"
DATETIME_FORMAT = "Y-m-d, H:i:s"
USE_I18N = True
USE_L10N = False
USE_TZ = True

# Crispy forms
CRISPY_TEMPLATE_PACK = "bootstrap4"

# Email configuration (production uses Mailjet - see README)
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
EMAIL_HOST = get_env_variable("EMAIL_HOST")
EMAIL_PORT = get_env_variable("EMAIL_PORT", type=int)
EMAIL_HOST_USER = get_env_variable("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = get_env_variable("EMAIL_HOST_PASSWORD")
DEFAULT_EMAIL = "<EMAIL>"
DEFAULT_FROM_EMAIL = f"L'équipe {SITE_NAME} <{DEFAULT_EMAIL}>"
MANAGERS = [("Acceslibre", DEFAULT_EMAIL)]
EMAIL_FILE_PATH = "/tmp/django_emails"
EMAIL_SUBJECT_PREFIX = f"[{SITE_NAME}]"
EMAIL_USE_LOCALTIME = True

# Authentication / registration flow URLs and options.
LOGIN_URL = "/compte/login/"
LOGOUT_REDIRECT_URL = "/"
LOGIN_REDIRECT_URL = "/"
ACCOUNT_ACTIVATION_DAYS = 7
EMAIL_ACTIVATION_DAYS = 1
REGISTRATION_OPEN = True
REGISTRATION_SALT = "a4a-registration"
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    # Allows logging in with either email or username.
    "compte.auth.EmailOrUsernameModelBackend",
)

# graphviz
GRAPH_MODELS = {
    "all_applications": True,
    "group_models": True,
}

# Usernames blacklist
# NOTE(review): "mail" and "security" appear twice in the English section.
USERNAME_BLACKLIST = [
    # English/generic
    "anon",
    "anonym",
    "anonymous",
    "abuse",
    "admin",
    "administrator",
    "contact",
    "deleted",
    "error",
    "ftp",
    "hostmaster",
    "info",
    "is",
    "it",
    "list",
    "list-request",
    "mail",
    "majordomo",
    "marketing",
    "mis",
    "press",
    "mail",
    "media",
    "moderator",
    "news",
    "noc",
    "postmaster",
    "reporting",
    "root",
    "sales",
    "security",
    "ssl-admin",
    "ssladmin",
    "ssladministrator",
    "sslwebmaster",
    "security",
    "support",
    "sysadmin",
    "trouble",
    "usenet",
    "uucp",
    "webmaster",
    "www",
    # French
    "abus",
    "aide",
    "administrateur",
    "anonyme",
    "commercial",
    "courriel",
    "email",
    "erreur",
    "information",
    "moderateur",
    "presse",
    "rapport",
    "securite",
    "sécurité",
    "service",
    "signalement",
    "television",
    "tv",
    "vente",
    "webmestre",
]
| StarcoderdataPython |
2698 | from django.conf.urls import include, url
from django.views.generic.base import TemplateView
from . import views as core_views
from .category.urls import urlpatterns as category_urls
from .collection.urls import urlpatterns as collection_urls
from .customer.urls import urlpatterns as customer_urls
from .discount.urls import urlpatterns as discount_urls
from .menu.urls import urlpatterns as menu_urls
from .order.urls import urlpatterns as order_urls
from .page.urls import urlpatterns as page_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .shipping.urls import urlpatterns as shipping_urls
from .sites.urls import urlpatterns as site_urls
from .staff.urls import urlpatterns as staff_urls
from .taxes.urls import urlpatterns as taxes_urls
# BEGIN :: SoftButterfly Extensions --------------------------------------------
from .brand.urls import urlpatterns as brand_urls
from .widget.slider.urls import urlpatterns as slider_urls
from .widget.banner.urls import urlpatterns as banner_urls
from .widget.scene.urls import urlpatterns as scene_urls
from .widget.benefit.urls import urlpatterns as benefit_urls
from .store.physical_store.urls import urlpatterns as store_urls
from .store.social_network.urls import urlpatterns as social_network_urls
from .store.special_page.urls import urlpatterns as special_page_urls
from .store.bank_account.urls import urlpatterns as bank_account_urls
from .store.footer_item.urls import urlpatterns as footer_item_urls
# END :: SoftButterfly Extensions ----------------------------------------------
# Dashboard URL registry: each feature area mounts its own urlpatterns list.
urlpatterns = [
    url(r'^$', core_views.index, name='index'),
    url(r'^categories/', include(category_urls)),
    url(r'^collections/', include(collection_urls)),
    url(r'^orders/', include(order_urls)),
    url(r'^page/', include(page_urls)),
    url(r'^products/', include(product_urls)),
    url(r'^customers/', include(customer_urls)),
    url(r'^staff/', include(staff_urls)),
    url(r'^discounts/', include(discount_urls)),
    # The settings section aggregates several url lists under one mount point.
    url(r'^settings/', include(
        site_urls + social_network_urls
        + special_page_urls + bank_account_urls + footer_item_urls)),  # Extensions
    url(r'^menu/', include(menu_urls)),
    url(r'^shipping/', include(shipping_urls)),
    url(r'^style-guide/', core_views.styleguide, name='styleguide'),
    url(r'^search/', include(search_urls)),
    url(r'^taxes/', include(taxes_urls)),
    url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),
    # BEGIN :: SoftButterfly Extensions ----------------------------------------
    url(r'^brand/', include(brand_urls)),
    url(r'^slider/', include(slider_urls)),
    url(r'^banner/', include(banner_urls)),
    url(r'^scene/', include(scene_urls)),
    url(r'^store/', include(store_urls)),
    url(r'^benefit/', include(benefit_urls)),
    # END :: SoftButterfly Extensions ------------------------------------------
]
| StarcoderdataPython |
from setuptools import setup

# Packaging metadata for the lipnet library.
# NOTE(review): every entry in install_requires is commented out, so
# installing this package pulls in no dependencies -- confirm intentional.
setup(name='lipnet',
      version='0.1.6',
      description='End-to-end sentence-level lipreading',
      url='http://github.com/rizkiarm/LipNet',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['lipnet'],
      zip_safe=False,
      install_requires=[
          # 'Keras==2.0.2',
          # 'editdistance==0.3.1',
          # 'h5py==2.6.0',
          # # 'matplotlib==2.0.0',
          # 'numpy==1.12.1',
          # 'python-dateutil==2.6.0',
          # 'scipy==0.19.0',
          # 'Pillow==4.1.0',
          # # 'tensorflow==1.15.2',
          # 'Theano==0.9.0',
          # 'nltk==3.2.2',
          # 'sk-video==1.1.7',
          # 'dlib==19.4.0'
      ])
| StarcoderdataPython |
3235005 | from typing import Optional
import requests
import asyncio
from fastapi import FastAPI
from pydantic import BaseModel
import urllib
import urllib.parse
import json
import sys
import os
import platform
class Item(BaseModel):
    """Generic item payload (appears unused by the endpoint in this file)."""
    name: str
    description: Optional[str] = None
    price: float
    tax: Optional[float] = None
url="http://127.0.0.1:5700/send_group_msg"
app = FastAPI()
class GroupItem(BaseModel):
    """Payload for sending a message to a group."""
    message: str
    group_id: int


class FriendMessage(BaseModel):
    """Payload for sending a direct message to a friend."""
    message: str
    friend_id: int
@app.post("/")
async def create_item(item: dict):
    """Handle an incoming go-cqhttp group-message event.

    Supported commands (matched against the raw message text):
      * "来一份涩图 <keyword>" / "老鸭粉丝汤 <keyword>": fetch a random
        illustration from the Lolicon API and post it back to the group.
      * "ver": reply with bot / runtime version information.
      * "目力": reply with a voice clip.

    Returns an empty dict so go-cqhttp treats the event as handled.
    """
    msg1 = item.get("message")
    group = item.get("group_id")
    if msg1 and (msg1.startswith("来一份涩图") or msg1.startswith("老鸭粉丝汤")):
        print("收到了请求。")
        # Both trigger words are 5 characters long; the remainder is the keyword.
        raw_keyword = msg1[5:].strip()
        # partition() drops everything after '&' so users cannot smuggle
        # extra query parameters into the API request.
        keyword, _, _ = raw_keyword.partition("&")
        word = urllib.parse.quote(keyword)
        # BUG FIX: the original called urllib.request.urlopen(), but only
        # urllib / urllib.parse were imported, so urllib.request raised
        # AttributeError at runtime.  Use the already-imported requests
        # library instead (it also handles decoding and JSON parsing).
        ree = requests.get(
            "https://api.lolicon.app/setu/v1/?size1200=true&keyword=" + word)
        data = ree.json()
        code = int(data["code"])
        msg = str(data["msg"])
        if code == 0:
            dlurl = data["data"][0]["url"]
            pid = str(data["data"][0]["pid"])
            author = str(data["data"][0]["author"])
            title = str(data["data"][0]["title"])
            tags = str(data["data"][0]["tags"])
            print("PID:"+pid+" URL:"+dlurl)
            requests.post(url,json={"group_id":group,"message":"PID:"+pid+" 作者:"+author+" 标题:"+title+"\n标签:"+tags+"\nURL:"+dlurl})
            requests.post(url,json={"group_id":group,"message":"[CQ:image,file="+dlurl+"]"})
            print("完成了请求。")
        else:
            requests.post(url,json={"group_id":group,"message":"代码:"+str(code)+"\n错误信息:"+msg})
    if msg1=="ver":
        requests.post(url,json={"group_id":group,"message":"setu_qqbot(https://github.com/Asankilp/setu-request)\n本机器人基于uvicorn。涩图API为Lolicon API v1(api.lolicon.app)。\n运行环境:\nPython "+sys.version+"\n操作系统:\n"+platform.platform()+" "+platform.version()})
    if msg1=="目力":
        requests.post(url,json={"group_id":group,"message":"[CQ:record,file=https://asankilp.github.io/muli.mp3]"})
    return {}
| StarcoderdataPython |
1789775 | __author__ = 'mikeconlon'
| StarcoderdataPython |
4818878 | <gh_stars>1-10
#!/usr/bin/env python
# Copyright 2015-2017, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to parse user generated data
"""
import os
import sys
import pandas as pd
from isb_cgc_user_data.bigquery_etl.extract.gcloud_wrapper import GcsConnector
from isb_cgc_user_data.bigquery_etl.extract.utils import convert_file_to_dataframe
from isb_cgc_user_data.bigquery_etl.load import load_data_from_file
from isb_cgc_user_data.bigquery_etl.transform.tools import cleanup_dataframe
from isb_cgc_user_data.utils.check_dataframe_dups import reject_row_duplicate_or_blank, find_key_column, reject_dup_col_pre_dataframe
from metadata_updates import update_metadata_data_list, insert_metadata_samples, insert_feature_defs_list
def process_user_gen_files(project_id, user_project_id, study_id, bucket_name, bq_dataset, cloudsql_tables, files, config, logger=None):
    """Ingest user-generated data files into CloudSQL metadata tables and BigQuery.

    For each descriptor in *files*: download the blob from *bucket_name*,
    convert it to a DataFrame, reject duplicate/blank barcodes, rename
    columns per the user-supplied mapping, record per-file metadata rows,
    and outer-merge everything on 'sample_barcode'.  The merged frame is
    written to the METADATA_SAMPLES CloudSQL table, loaded into a BigQuery
    table named cgc_user_<user_project_id>_<study_id>, and feature
    definitions are registered.

    Parameters
    ----------
    project_id, user_project_id, study_id : identifiers for the hosting GCP
        project, the user's project, and the study being loaded.
    bucket_name : GCS bucket holding the uploaded files.
    bq_dataset : target BigQuery dataset name.
    cloudsql_tables : dict with keys 'METADATA_DATA', 'METADATA_SAMPLES' and
        'FEATURE_DEFS' naming the CloudSQL tables to update.
    files : list of dicts describing each uploaded file (FILENAME, COLUMNS,
        DATATYPE, optional SAMPLEBARCODE / CASEBARCODE / PLATFORM / PIPELINE).
    config : configuration mapping; must contain 'tmp_bucket'.
    logger : optional Stackdriver-style logger exposing log_text().
    """
    if logger:
        logger.log_text('uduprocessor: Begin processing user_gen files.', severity='INFO')
    # connect to the cloud bucket
    gcs = GcsConnector(project_id, bucket_name, config, logger=logger)
    data_df = pd.DataFrame()
    # Collect all columns that get passed in for generating BQ schema later
    all_columns = []
    # For each file, download, convert to df
    for idx, file in enumerate(files):
        blob_name = file['FILENAME'].split('/')[1:]
        all_columns += file['COLUMNS']
        metadata = {
            'sample_barcode': file.get('SAMPLEBARCODE', ''),
            'case_barcode': file.get('CASEBARCODE', ''),
            'project_id': study_id,
            'platform': file.get('PLATFORM', ''),
            'pipeline': file.get('PIPELINE', ''),
            'file_path': file['FILENAME'],
            'file_name': file['FILENAME'].split('/')[-1],
            'data_type': file['DATATYPE']
        }
        # download, convert to df
        filebuffer = gcs.download_blob_to_file(blob_name)
        # Get column mapping
        column_mapping = get_column_mapping(file['COLUMNS'])
        if idx == 0:
            # Reject duplicate and blank features and barcodes. Do before cleanup, because blanks
            # will be converted to NANs:
            reject_dup_col_pre_dataframe(filebuffer, logger, 'barcode')
            data_df = convert_file_to_dataframe(filebuffer, skiprows=0, header=0)
            # Reject duplicate and blank features. Do before cleanup, because blanks
            # will be converted to NANs:
            id_col = find_key_column(data_df, column_mapping, logger, 'sample_barcode')
            reject_row_duplicate_or_blank(data_df, logger, 'barcode', id_col)
            data_df = cleanup_dataframe(data_df, logger=logger)
            data_df.rename(columns=column_mapping, inplace=True)
            if metadata['case_barcode'] == '':
                # Duplicate samplebarcode with prepended 'cgc_'
                data_df['case_barcode'] = 'cgc_' + data_df['sample_barcode']
            else:
                # Make sure to fill in empty case barcodes
                # NOTE(review): chained indexing below may act on a copy
                # (pandas SettingWithCopy); confirm the fill actually lands.
                data_df[metadata['case_barcode']][data_df['case_barcode']==None] = 'cgc_' + data_df['sample_barcode'][data_df['case_barcode']==None]
            # Generate Metadata for this file
            insert_metadata(data_df, metadata, cloudsql_tables['METADATA_DATA'], config)
        else:
            # convert blob into dataframe
            new_df = convert_file_to_dataframe(filebuffer, skiprows=0, header=0)
            new_df = cleanup_dataframe(new_df, logger=logger)
            new_df.rename(columns=column_mapping, inplace=True)
            # Generate Metadata for this file
            insert_metadata(new_df, metadata, cloudsql_tables['METADATA_DATA'], config)
            # TODO: Write function to check for case barcodes, for now, we assume each file contains SampleBarcode Mapping
            data_df = pd.merge(data_df, new_df, on='sample_barcode', how='outer')
    # For complete dataframe, create metadata_samples rows
    if logger:
        # BUG FIX: severity= was previously passed to str.format() (where it
        # was silently ignored) rather than to log_text().
        logger.log_text('uduprocessor: Inserting into data into {0}.'.format(cloudsql_tables['METADATA_SAMPLES']), severity='INFO')
    data_df = cleanup_dataframe(data_df, logger=logger)
    data_df['has_mrna'] = 0
    data_df['has_mirna'] = 0
    data_df['has_protein'] = 0
    data_df['has_meth'] = 0
    insert_metadata_samples(config, data_df, cloudsql_tables['METADATA_SAMPLES'])
    # Update and create bq table file
    temp_outfile = cloudsql_tables['METADATA_SAMPLES'] + '.out'
    tmp_bucket = config['tmp_bucket']
    gcs.convert_df_to_njson_and_upload(data_df, temp_outfile, tmp_bucket=tmp_bucket)
    # Using temporary file location (in case we don't have write permissions on user's bucket?
    source_path = 'gs://' + tmp_bucket + '/' + temp_outfile
    schema = generate_bq_schema(all_columns)
    table_name = 'cgc_user_{0}_{1}'.format(user_project_id, study_id)
    load_data_from_file.run(
        config,
        project_id,
        bq_dataset,
        table_name,
        schema,
        source_path,
        source_format='NEWLINE_DELIMITED_JSON',
        write_disposition='WRITE_APPEND',
        is_schema_file=False,
        logger=logger)
    # Generate feature_defs
    feature_defs = generate_feature_defs(study_id, project_id, bq_dataset, table_name, schema)
    # Update feature_defs table
    insert_feature_defs_list(config, cloudsql_tables['FEATURE_DEFS'], feature_defs)
    # Delete temporary files
    if logger:
        logger.log_text('uduprocessor: Deleting temporary file {0}'.format(temp_outfile), severity='INFO')
    gcs = GcsConnector(project_id, tmp_bucket, config, logger=logger)
    gcs.delete_blob(temp_outfile)
def get_column_mapping(columns):
    """Build a rename map from user column headers to canonical names.

    Parameters
    ----------
    columns : list of dict
        Column descriptors; a descriptor with a 'MAP_TO' key asks for its
        column to be renamed to that canonical name.

    Returns
    -------
    dict
        {pandas-safe original name: canonical name}.  Spaces in 'NAME' are
        replaced with underscores because pandas applies the same
        substitution when the file is parsed.
    """
    return {
        column['NAME'].replace(' ', '_'): column['MAP_TO']
        for column in columns
        if 'MAP_TO' in column  # idiomatic membership test (was column.keys())
    }
def insert_metadata(data_df, metadata, table, config):
    """Insert one metadata row per distinct sample barcode into CloudSQL.

    Each row is a copy of *metadata* with 'sample_barcode' set to one of the
    distinct values found in data_df['sample_barcode'].
    """
    # Series.iteritems() was removed in pandas 2.0; unique() is equivalent to
    # the old list(set(...)) but deterministic (preserves first-seen order).
    sample_barcodes = data_df['sample_barcode'].unique()
    sample_metadata_list = []
    for barcode in sample_barcodes:
        new_metadata = metadata.copy()
        new_metadata['sample_barcode'] = barcode
        sample_metadata_list.append(new_metadata)
    update_metadata_data_list(config, table, sample_metadata_list)
def generate_bq_schema(columns):
    """Translate user column descriptors into a BigQuery-style schema.

    Parameters
    ----------
    columns : list of dict
        Descriptors with 'NAME', 'TYPE', 'SHARED_ID' and optionally
        'MAP_TO'.  SQL 'VARCHAR*' maps to STRING, 'FLOAT' to FLOAT and
        everything else to INTEGER.

    Returns
    -------
    list of dict
        One {'name', 'type', 'shared_id'} entry per distinct effective
        column name (first occurrence wins).
    """
    schema = []
    seen = set()
    for column in columns:
        # A mapped column is emitted under its canonical (MAP_TO) name.
        # Read it without mutating the caller's descriptor dicts (the
        # original assigned back into column['NAME']).
        name = column.get('MAP_TO', column['NAME'])
        if name in seen:
            continue
        seen.add(name)
        sql_type = column['TYPE'].lower()
        if sql_type.startswith('varchar'):
            bq_type = 'STRING'  # avoid shadowing the builtin `type`
        elif sql_type == 'float':
            bq_type = 'FLOAT'
        else:
            bq_type = 'INTEGER'
        schema.append({'name': name, 'type': bq_type, 'shared_id': column['SHARED_ID']})
    return schema
def generate_feature_defs(study_id, bq_project, bq_dataset, bq_table, schema):
    """Generate a feature definition for every schema column except sample_barcode.

    FeatureName is the column name from metadata_samples; BqMapId is
    "bq_project:bq_dataset:bq_table:column_name".  (This docstring replaces
    a loose module-level comment string that preceded the function.)

    Returns
    -------
    list of tuple
        (study_id, feature_name, bq_map, shared_id, datatype) where
        datatype is 0 for STRING columns and 1 for numeric ones.
    """
    feature_defs = []
    for column in schema:
        if column['name'] == 'sample_barcode':
            continue  # the barcode is the join key, not a feature
        bq_map = ':'.join([bq_project, bq_dataset, bq_table, column['name']])
        datatype = 0 if column['type'] == 'STRING' else 1
        feature_defs.append((study_id, column['name'], bq_map, column['shared_id'], datatype))
    return feature_defs
if __name__ == '__main__':
    # Ad-hoc CLI entry point.  NOTE(review): none of the parsed arguments
    # nor the metadata dict below are used anywhere -- this looks like
    # leftover scaffolding from manual testing; confirm before relying on it.
    project_id = sys.argv[1]
    bucket_name = sys.argv[2]
    filename = sys.argv[3]
    outfilename = sys.argv[4]
    metadata = {
        'AliquotBarcode':'AliquotBarcode',
        'SampleBarcode':'SampleBarcode',
        'CaseBarcode':'CaseBarcode',
        'Study':'Study',
        'SampleTypeLetterCode':'SampleTypeLetterCode',
        'Platform':'Platform'
    }
| StarcoderdataPython |
1745958 | <gh_stars>1-10
__version__ = "0.3.3"
from mongogrant.client import Client | StarcoderdataPython |
52936 | #coding=utf-8
#抓取精品课网站中的课程,把有优惠券的课程筛选出来
#第一步:访问ke.youdao.com 获取精品课网页的所有的标签内容,例如:四六级,考研,实用英语...:
#第二步:访问标签页,获取课程详情页的url
#第三步:获取课程详情页需要的信息
#第四步:保存到Excel表中
import requests
import urllib3
import re
import sys
from bs4 import BeautifulSoup
from openpyxl import Workbook
from openpyxl import load_workbook
# Scrape the tag pages from "http://ke.youdao.com".
def get_labels(url,label_file):
    """Fetch the site front page and write every tag-page URL to label_file.

    Python 2 code (print statements).  TLS verification is disabled, hence
    the urllib3 warning suppression.
    """
    urllib3.disable_warnings()
    resq = requests.get(url,verify=False).text
    labels = re.findall(r"href=\"(/tag/\d+)",resq)
    valid_labellink=[]
    for label in labels:
        valid_labellink.append(url+label)
    with open(label_file,"w") as fp:
        for i in set(valid_labellink):
            fp.writelines(i+"\n")
    # NOTE(review): "labes" is a typo in the runtime string; kept as-is.
    print "labes url get done"
# From each tag page, collect the course detail-page URLs.
def get_kelink(labellink_file,kelink_file):
    """Read tag-page URLs from labellink_file, scrape each for course detail
    links, and write the de-duplicated links to kelink_file (one per line)."""
    valid_kelink=[]
    with open(labellink_file) as fp:
        for line in fp:
            requests.packages.urllib3.disable_warnings()
            resq1=requests.get(line.strip(),verify=False).text
            ke_urls = re.findall(r"href=\"(https://ke\.youdao\.com/course/detail/\d+)",resq1)
            for kelink in ke_urls:
                valid_kelink.append(kelink.strip())
    with open(kelink_file,"w")as fp1:
        for kelink in set(valid_kelink):
            fp1.writelines(kelink+"\n")
    print "ke url get done"
# Scrape course name, lecturer and schedule from each course detail page.
def get_courseinfo(kelink_file):
    """Return a list of dicts (name / teacher / schedule / url) for every
    course URL listed in kelink_file; pages missing elements are skipped."""
    result=[]
    with open(kelink_file) as fp:
        for keurl in fp:
            urllib3.disable_warnings()
            resq2 = requests.get(keurl.strip(),verify=False).text
            soup = BeautifulSoup(resq2,'html.parser')
            try:
                #names = soup.select("div.info.info-without-video > h1")
                names = soup.select("div.g-w.body > div > h1")
                teachernames = soup.select("div.g-w.body > div > p")
                coursetimes = soup.select("div.g-w.body > div > p")
                if names!=None and teachernames!=None and coursetimes!=None:
                    # NOTE(review): the 'teachername:' key carries a stray
                    # colon; write_excel iterates dict items so it still
                    # works, but confirm the intent.
                    data = {
                        'name': str(names[0]).strip().strip('<h1>').strip('</h1>'),
                        'teachername:': str(teachernames[0]).strip().strip("<p>").strip("</p>"),
                        'coursetimes': str(coursetimes[1]).strip().strip("<p>").strip("</p>"),
                        'url': keurl.strip()
                    }
                    result.append(data)
                    #print data
                else:
                    print u"有属性为空了,skip skip"
            except Exception, e:
                print e
    return result
# Save the scraped content into an Excel workbook.
def write_excel(filename,result):
    """Write the scraped course dicts in *result* into an existing workbook.

    *filename* must point to an existing .xlsx file (load_workbook requires
    it).  Each dict becomes one row; dict iteration order decides columns.
    """
    wb = load_workbook(filename)
    wb.guess_types = True
    ws=wb.active
    # Excel rows and columns are 1-indexed.
    for i in range(1,len(result)+1):
        # Each entry of the result list becomes one worksheet row.
        result_item = result[i-1]
        # Start writing at the first column.
        column_num=1
        # Walk the dict; each attribute adds one column.
        for key,value1 in result_item.items():
            # NOTE(review): str.strip() never returns None, so this test is
            # always true -- probably meant to skip empty strings.
            if value1.strip() != None:
                ws.cell(row=i,column=column_num,value=value1)
                column_num+=1
    wb.save(filename)
def main():
    """Driver: scrape course info for previously collected links and write
    a few rows to an Excel file.

    The triple-quoted block below (kept verbatim) regenerates d:\\label.txt
    and d:\\kelink.txt from the live site.
    """
    '''
    #抓去ke.youdao.com上面的标签
    url="https://ke.youdao.com"
    label_filename="d:\\label.txt"
    get_labels(url,label_filename)
    #抓取课程详情页的url
    kelink_file="d:\\kelink.txt"
    get_kelink(label_filename,kelink_file)
    '''
    kelink_file = "d:\\kelink.txt"
    # Scrape the detailed course information.
    # NOTE(review): get_courseinfo is called twice and the first result is
    # discarded, doubling the scraping work; confirm whether intentional.
    get_courseinfo(kelink_file)
    result = get_courseinfo(kelink_file)[1:10]
    write_excel("d:\\test.xlsx",result)
if __name__=="__main__":
    # Python 2 idiom: force the process-wide default encoding to UTF-8 so
    # the Chinese strings above survive implicit conversions.
    reload(sys)
    sys.setdefaultencoding("utf-8")
    main()
| StarcoderdataPython |
3211736 | <gh_stars>1-10
# Python 2 script: fit a linear model of sleep quality against room
# conditions and print crude "increase/decrease" suggestions per feature.
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import pandas as pd
import numpy as np
import sklearn.linear_model as skl
import matplotlib.pyplot as plt
reg = skl.LinearRegression()
data = pd.read_csv('sleep_quality_data.csv', index_col=0)
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; switch to
# .to_numpy() if this script is ever run on a modern stack.
x_train = data.as_matrix(['temperature', 'humidity', 'brightness'])[:13]
y_train = data.as_matrix(['sleep quality'])[:13]
reg.fit (x_train, y_train)
# if there is a higher correlation coefficient
# then you want to maximise that variable, and vice versa
fields = ["Temperature", "Humidity", "Room brightness"]
index = 0
for cof in reg.coef_[0]:
    suggestion = ""
    if cof > 0.5:
        suggestion += "increase " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof > 0:
        suggestion += "slightly increase " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof < -0.5:
        suggestion += "decrease " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof < 0:
        suggestion += "slightly decrease " + fields[index] + ", "
        print suggestion
        index+=1
    else:
        suggestion += "it's fine " + ", "
        print suggestion
        index+=1
#print suggestion
x_test = data.as_matrix(['temperature', 'humidity', 'brightness'])[-1:]
#print x_test
predicted_value = reg.predict(x_test)
print predicted_value
# (commented-out duplicate of the suggestion loop, gated on the prediction)
# if predicted_value < 3:
#     for cof in reg.coef_[0]:
#         suggestion = ""
#         if cof > 0.5:
#             suggestion += "increase " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof > 0:
#             suggestion += "slightly increase " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof < -0.5:
#             suggestion += "decrease " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof < 0:
#             suggestion += "slightly decrease " + fields[index] + ", "
#             print suggestion
#             index+=1
#         else:
#             suggestion += "it's fine " + ", "
#             print suggestion
#             index+=1
# plot data
data.plot(kind='scatter', x='temperature', y='sleep quality')
# plot the least squares line
plt.plot(x_test, predicted_value, c='red', linewidth=2)
#plt.show()
1788704 | <gh_stars>1-10
import pandas as pd
from pyfibre.core.base_multi_image_analyser import BaseMultiImageAnalyser
from .multi_images import ProbeMultiImage
class ProbeAnalyser(BaseMultiImageAnalyser):
    """Minimal stub analyser: operates on a ProbeMultiImage and implements
    every analysis hook as a no-op.  Useful as a test double."""
    # Database name(s) this analyser reports; drives load_databases below.
    database_names = ['probe']
    def __init__(self, *args, **kwargs):
        # Always analyse a fresh ProbeMultiImage; any caller-supplied
        # multi_image keyword is deliberately overwritten before delegating.
        kwargs['multi_image'] = ProbeMultiImage()
        super().__init__(*args, **kwargs)
    def create_figures(self, *args, **kwargs):
        """No-op: the probe analyser produces no figures."""
        pass
    def create_metrics(self, *args, **kwargs):
        """No-op: the probe analyser produces no metrics."""
        pass
    def image_analysis(self, *args, **kwargs):
        """No-op: no per-image analysis is performed."""
        pass
    def save_databases(self, databases):
        """No-op: nothing is persisted."""
        pass
    def load_databases(self):
        """Return one empty DataFrame per entry in database_names."""
        return [pd.DataFrame() for _ in self.database_names]
| StarcoderdataPython |
65618 | <gh_stars>0
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements feature selection algorithms."""
__author__ = ["aiwalter"]
__all__ = ["FeatureSelection"]
import math
import pandas as pd
from sktime.transformations.base import BaseTransformer
from sktime.utils.validation.forecasting import check_regressor
class FeatureSelection(BaseTransformer):
    """Select exogenous features.
    Transformer to enable tuneable feature selection of exogenous data. The
    FeatureSelection implements multiple methods to select features (columns).
    In case X is a pd.Series, then it is just passed through, unless method="none",
    then None is returned in transform().
    Parameters
    ----------
    method : str, required
        The method of how to select the features. Implemented methods are:
        * "feature-importances": Use feature_importances_ of the regressor (meta-model)
          to select n_columns with highest importance values.
          Requires parameter n_columns.
        * "random": Randomly select n_columns features. Requires parameter n_columns.
        * "columns": Select features by given names.
        * "none": Remove all columns by setting Z to None.
        * "all": Select all given features.
    regressor : sklearn-like regressor, optional, default=None.
        Used as meta-model for the method "feature-importances". The given
        regressor must have an attribute "feature_importances_". If None,
        then a GradientBoostingRegressor(max_depth=5) is used.
    n_columns : int, optional
        Number of features (columns) to select. n_columns must be <=
        number of X columns. Some methods require n_columns to be given.
    random_state : int, RandomState instance or None, default=None
        Used to set random_state of the default regressor and to
        set random.seed() if method="random".
    columns : list of str
        A list of columns to select. If columns is given.
    Attributes
    ----------
    columns_ : list of str
        List of columns that have been selected as features.
    regressor_ : sklearn-like regressor
        Fitted regressor (meta-model).
    n_columns_: int
        Derived from number of features if n_columns is None, then
        n_columns_ is calculated as int(math.ceil(Z.shape[1] / 2)). So taking
        half of given features only as default.
    feature_importances_ : dict or None
        A dictionary with column name as key and feature importance value as value.
        The dict is sorted descending on value. This attribute is a dict if
        method="feature-importances", else None.
    Examples
    --------
    >>> from sktime.transformations.series.feature_selection import FeatureSelection
    >>> from sktime.datasets import load_longley
    >>> y, X = load_longley()
    >>> transformer = FeatureSelection(method="feature-importances", n_columns=3)
    >>> Xt = transformer.fit_transform(X, y)
    """
    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True, # is this an instance-wise transform?
        "X_inner_mtype": ["pd.DataFrame", "pd.Series"],
        # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.DataFrame", # which mtypes do _fit/_predict support for y?
        "fit_is_empty": False,
        "transform-returns-same-time-index": True,
        "skip-inverse-transform": True,
        "univariate-only": False,
    }
    def __init__(
        self,
        method="feature-importances",
        n_columns=None,
        regressor=None,
        random_state=None,
        columns=None,
    ):
        # sklearn/sktime convention: __init__ only stores the hyper-parameters
        # unchanged; all validation/derivation happens in _fit.
        self.n_columns = n_columns
        self.method = method
        self.regressor = regressor
        self.random_state = random_state
        self.columns = columns
        super(FeatureSelection, self).__init__()
    def _fit(self, X, y=None):
        """Fit transformer to X and y.
        private _fit containing the core logic, called from fit
        Parameters
        ----------
        X : pd.Series or pd.DataFrame
            Data to fit transform to
        y : pd.DataFrame, default=None
            Additional data, e.g., labels for transformation
        Returns
        -------
        self: a fitted instance of the estimator
        """
        self.n_columns_ = self.n_columns
        self.feature_importances_ = None
        # multivariate X
        if not isinstance(X, pd.Series):
            if self.method == "feature-importances":
                self.regressor_ = check_regressor(
                    regressor=self.regressor, random_state=self.random_state
                )
                self._check_n_columns(X)
                # fit regressor with X as exog data and y as endog data (target)
                self.regressor_.fit(X=X, y=y)
                if not hasattr(self.regressor_, "feature_importances_"):
                    raise ValueError(
                        """The given regressor must have an
                        attribute feature_importances_ after fitting."""
                    )
                # create dict with columns name (key) and feauter importance (value)
                d = dict(zip(X.columns, self.regressor_.feature_importances_))
                # sort d descending
                d = {k: d[k] for k in sorted(d, key=d.get, reverse=True)}
                self.feature_importances_ = d
                # keep the n_columns_ most important features
                self.columns_ = list(d.keys())[: self.n_columns_]
            elif self.method == "random":
                self._check_n_columns(X)
                # sample columns (axis=1) reproducibly via random_state
                self.columns_ = list(
                    X.sample(
                        n=self.n_columns_, random_state=self.random_state, axis=1
                    ).columns
                )
            elif self.method == "columns":
                if self.columns is None:
                    raise AttributeError("Parameter columns must be given.")
                self.columns_ = self.columns
            elif self.method == "none":
                self.columns_ = None
            elif self.method == "all":
                self.columns_ = list(X.columns)
            else:
                raise ValueError("Incorrect method given. Try another method.")
        return self
    def _transform(self, X, y=None):
        """Transform X and return a transformed version.
        private _transform containing the core logic, called from transform
        Parameters
        ----------
        X : pd.Series or pd.DataFrame
            Data to be transformed
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation
        Returns
        -------
        Xt : pd.Series or pd.DataFrame, same type as X
            transformed version of X
        """
        # multivariate case
        if not isinstance(X, pd.Series):
            if self.method == "none":
                Xt = None
            else:
                Xt = X[self.columns_]
        # univariate case: a single series is passed through untouched
        else:
            if self.method == "none":
                Xt = None
            else:
                Xt = X
        return Xt
    def _check_n_columns(self, Z):
        """Default n_columns_ to half (rounded up) of Z's columns if unset."""
        if not isinstance(self.n_columns_, int):
            self.n_columns_ = int(math.ceil(Z.shape[1] / 2))
    @classmethod
    def get_test_params(cls):
        """Return testing parameter settings for the estimator.
        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        return {"method": "all"}
| StarcoderdataPython |
3284448 | <gh_stars>0
"""
Script for running simulations to compare the payments of strategic and truthful agents in the continuous effort, biased agents setting.
@author: <NAME> <<EMAIL>>
"""
from numpy import ones
from statistics import mean, median, variance
import json
from setup import initialize_strategic_student_list, shuffle_students, initialize_submission_list
from grading import assign_grades, assign_graders, get_grading_dict
from grading_dmi import assign_graders_dmi_clusters
from mechanisms.baselines import mean_squared_error
from mechanisms.dmi import dmi_mechanism
from mechanisms.phi_divergence_pairing import phi_divergence_pairing_mechanism, parametric_phi_divergence_pairing_mechanism
from mechanisms.output_agreement import oa_mechanism
from mechanisms.parametric_mse import mse_p_mechanism
from mechanisms.peer_truth_serum import pts_mechanism
from evaluation import roc_auc_strategic
from graphing import plot_auc_strategic
import warnings
def run_simulation(num_iterations, num_assignments, strategy_map, mechanism, mechanism_param):
    """
    Iteratively simulates semesters, scoring students according to a single mechanism, and recording the values of the relevant evaluation metrics.
    Parameters
    ----------
    num_iterations : int.
        The number of semesters to simulate.
    num_assignments : int.
        The number of assignments to include in each simulated semester.
    strategy_map: dict.
        Maps the name of a strategy to a number of students who should adopt that strategy in each simulated semester.
    mechanism : str.
        The name of the mechanism to be used to score the students performance in the grading task.
        One of the following:
            - "BASELINE"
            - "DMI"
            - "OA"
            - "Phi-DIV"
            - "PTS"
            - "MSE_P"
            - "Phi-DIV_P"
    mechanism_param : str.
        Denotes different versions of the same mechanism, e.g. the choice phi divergence used in the phi divergence pairing mechanism.
        "0" for mechanisms that do not require such a parameter.
    Returns
    -------
    score_dict : dict.
        score_dict maps the names of evaluation metrics to scores for those metrics.
        {
         "ROC-AUC Scores": [ score (float)],
         "Mean ROC-AUC": mean_auc (float),
         "Median ROC-AUC": median_auc (float),
         "Variance ROC-AUC": variance_auc (float)
        }
    """
    score_dict = {}
    auc_scores = []
    print("  ", mechanism, mechanism_param)
    for i in range(num_iterations):
        # --- Simulating a "semester" ---
        # (Section markers were bare triple-quoted strings; converted to real
        # comments so the if/elif chain below cannot be broken by an
        # indentation slip.)
        students = initialize_strategic_student_list(strategy_map)
        shuffle_students(students)
        # necessary for PTS
        H = ones(11)
        for assignment in range(num_assignments):
            # --- Simulating a single assignment ---
            submissions = initialize_submission_list(students, assignment)
            if mechanism == "DMI":
                cluster_size = int(mechanism_param)
                grader_dict = assign_graders_dmi_clusters(students, submissions, cluster_size)
            else:
                grader_dict = assign_graders(students, submissions, 4)
            grading_dict = get_grading_dict(grader_dict)
            # Here is where you can change the number of draws an active grader gets
            assign_grades(grading_dict, 3, assignment, True, True)
            # --- Non-Parametric Mechanisms ---
            if mechanism == "BASELINE":
                num_students = len(students)
                mean_squared_error(grader_dict, num_students)
            elif mechanism == "DMI":
                cluster_size = int(mechanism_param)
                dmi_mechanism(grader_dict, assignment, cluster_size)
            elif mechanism == "OA":
                oa_mechanism(grader_dict)
            elif mechanism == "Phi-DIV":
                phi_divergence_pairing_mechanism(grader_dict, mechanism_param)
            elif mechanism == "PTS":
                H = pts_mechanism(grader_dict, H)
            # --- Parametric Mechanisms ---
            elif mechanism == "MSE_P":
                mu = 7
                gamma = 1/2.1
                mse_p_mechanism(grader_dict, students, assignment, mu, gamma, True)
            elif mechanism == "Phi-DIV_P":
                mu = 7
                gamma = 1/2.1
                parametric_phi_divergence_pairing_mechanism(grader_dict, students, assignment, mu, gamma, True, mechanism_param)
            else:
                print("Error: The given mechanism name does not match any of the options.")
        auc_score = roc_auc_strategic(students)
        auc_scores.append(auc_score)
    score_dict["ROC-AUC Scores"] = auc_scores
    mean_auc = mean(auc_scores)
    score_dict["Mean ROC-AUC"] = mean_auc
    median_auc = median(auc_scores)
    score_dict["Median ROC-AUC"] = median_auc
    variance_auc = variance(auc_scores, mean_auc)
    score_dict["Variance ROC-AUC"] = variance_auc
    return score_dict
def compare_mechanisms(num_iterations, num_assignments, strategy_map, mechanisms):
    """Score every mechanism in *mechanisms* via run_simulation.

    Parameters
    ----------
    num_iterations : int
        Number of semesters to simulate per mechanism.
    num_assignments : int
        Number of assignments per simulated semester.
    strategy_map : dict
        Maps a strategy name to the number of students adopting it.
    mechanisms : list of (str, str)
        (mechanism_name, mechanism_param) pairs; see run_simulation for the
        recognised names.

    Returns
    -------
    dict
        Maps "mechanism_name: mechanism_param" to the score dict produced
        by run_simulation.
    """
    return {
        name + ": " + variant: run_simulation(
            num_iterations, num_assignments, strategy_map, name, variant
        )
        for name, variant in mechanisms
    }
def simulate(strategies, mechanisms, filename):
    """
    Calls compare_mechanisms iteravely for each strategy, varying the number of strategic graders.
    Saves a file containing the results of the experiment and generates and saves a plot of those results.
    Results are saved as filename.json in the ./results directory.
    Plots are saved as filename-*MECHANISM*.pdf in the ./figures directory.
    Parameters
    ----------
    strategies : list of strings.
        Describes the strategies ot be included.
        The list of all relevant strategies can be found below in the code for running simulations.
    mechanisms : list of 2-tuples of strings.
        Describes the mechanisms to be included in the form ("mechanism_name", "mechanism_param").
        The complete list of possible mechanisms and associated params can be found below in the code for running simulations.
    filename : str.
        The filename used to save the .json file and .pdf plot associated with the experiment.
    Returns
    -------
    None.
    """
    results = {}
    for strategy in strategies:
        result = {}
        print("Working on simulations for the following strategy:", strategy)
        # Sweep the share of strategic students from 10% to 90% of the class
        # of 100; the remainder always report truthfully.
        for strat in [10, 20, 30, 40, 50, 60, 70, 80, 90]:
            strategy_map = {}
            print("    Working on simulations for", strat, "strategic students.")
            strategy_map[strategy] = strat
            strategy_map["TRUTH"] = 100 - strat
            evals = compare_mechanisms(100, 10, strategy_map, mechanisms)
            result[strat] = evals
        results[strategy] = result
    """
    Export JSON file of simulation data to results directory
    """
    json_file = "results/" + filename + ".json"
    with open(json_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
    """
    Graphing the results in the figures directory
    """
    plot_auc_strategic(results, filename)
if __name__ == "__main__":
    """
    Simulations are controlled and run from here.
    """
    # Supress Warnings in console
    warnings.filterwarnings("ignore")
    """
    Uncomment the mechanisms to be included in an experiment.
    """
    # NOTE: at least one mechanism AND one strategy must be uncommented,
    # otherwise simulate() iterates over empty lists and produces no output.
    mechanisms = [
        #NON-PARAMETRIC MECHANISMS
        #("BASELINE", "MSE"),
        #("DMI", "4"),
        #("OA", "0"),
        #("Phi-DIV", "CHI_SQUARED"),
        #("Phi-DIV", "KL"),
        #("Phi-DIV", "SQUARED_HELLINGER"),
        #("Phi-DIV", "TVD"),
        #("PTS", "0"),
        #PARAMETRIC MECHANISMS
        #("MSE_P", "0"),
        #("Phi-DIV_P", "CHI_SQUARED"),
        #("Phi-DIV_P", "KL"),
        #("Phi-DIV_P", "SQUARED_HELLINGER"),
        #("Phi-DIV_P", "TVD"),
        ]
    """
    Uncomment the strategies to be included in an experiment.
    Note that uninformative strategies are not considered in this experiment.
    """
    strategies = [
        #"NOISE",
        #"FIX-BIAS",
        #"MERGE",
        #"PRIOR",
        #"ALL10",
        #"HEDGE"
        ]
    """
    Change the filename before running a simulation to prevent overwriting previous results.
    """
    filename = "truthful_vs_strategic_payments-ce-bias-filename"
    """
    The function below runs the experiment.
    """
    simulate(strategies, mechanisms, filename)
| StarcoderdataPython |
3387580 | <gh_stars>0
from django.shortcuts import render
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, JsonResponse
from django.utils import timezone
from .forms import PhotoForm, DeleteForm
from .models import PhotoModel
import os
import shutil
from .effects import double, nigeria, france, usa, kenya, russia
from django.contrib import messages
# Mixins
class LoginRequiredMixin(object):
    """CBV mixin: wraps the view callable produced by ``as_view`` in
    Django's ``login_required`` decorator, so anonymous users are redirected
    to the login page before the view runs."""
    @classmethod
    def as_view(cls, **initkwargs):
        view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
        return login_required(view)
# helper functions
def get_photos(user):
    """Return every photo owned by ``user`` as a queryset."""
    return PhotoModel.objects.filter(owner=user)
def create_duplicate_file(image_file_path, image_file_url):
    """Copy the image to a sibling '<name>_temp<ext>' file for editing.

    Returns a (temporary filesystem path, temporary URL) pair so effects can
    be applied non-destructively to the copy.
    """
    def _with_temp_suffix(path):
        # Insert '_temp' between the stem and the extension.
        stem, extension = os.path.splitext(path)
        return stem + '_temp' + extension

    temp_file_path = _with_temp_suffix(image_file_path)
    temp_file_url = _with_temp_suffix(image_file_url)
    # copy2 preserves file metadata alongside the contents.
    shutil.copy2(image_file_path, temp_file_path)
    return temp_file_path, temp_file_url
def photo_effect(effect, temp_file_path, temp_file_url):
    """Apply the named effect to the temporary image file in place.

    Returns *temp_file_url* on success, or None when *effect* does not name
    a known effect (matching the original if/elif chain's fall-through).
    """
    # Dispatch table replaces the repetitive if/elif chain; each effect
    # function mutates the file at temp_file_path in place.
    effects = {
        'double': double,
        'france': france,
        'kenya': kenya,
        'nigeria': nigeria,
        'russia': russia,
        'usa': usa,
    }
    apply_effect = effects.get(effect)
    if apply_effect is None:
        return None
    apply_effect(temp_file_path)
    return temp_file_url
# Views
class HomePageView(View):
    '''Render the public landing page, or redirect authenticated users to
    their dashboard.'''
    template_name = 'photo/index.html'
    def get(self, request):
        # redirect to user dashboard if user is logged in
        if request.user and not request.user.is_anonymous:
            return HttpResponseRedirect(reverse('photo:user_home'))
        else:
            return render(request, self.template_name)
class DashboardView(LoginRequiredMixin, View):
    '''Render the logged-in user's dashboard.

    GET parameters drive AJAX photo editing:
      * ``photo``  -- id of a photo staged for editing; its original image
        is shown.
      * ``effect`` -- name of an effect to preview; a temporary copy of the
        image is created, the effect applied, and the temp URL returned as
        JSON (suffixed with a timestamp to defeat browser caching).
    '''
    template_name = 'photo/dashboard.html'
    def get(self, request):
        # get the user's social details
        social_user = request.user.social_auth.filter(provider='facebook').first()
        message = {}
        # check if any photo is to be editted
        staged_photo = None
        if 'photo' in request.GET:
            # get object of the photo to be editted
            staged_photo = PhotoModel.objects.get(id=request.GET['photo'])
            # set original image as image to be displayed
            staged_photo.display_image = staged_photo.photo.url
            if 'effect' in request.GET:
                # create a temporary image to use for editting
                staged_photo.temp_file_path, staged_photo.temp_file_url = create_duplicate_file(staged_photo.photo.path, staged_photo.photo.url)
                staged_photo.temp_file_url = photo_effect(request.GET['effect'], staged_photo.temp_file_path, staged_photo.temp_file_url)
                # set temporary image as image to be displayed
                staged_photo.display_image = staged_photo.temp_file_url
                # The '? <timestamp>' query string forces the browser to
                # re-fetch the edited image instead of using its cache.
                message = {'staged_photo': staged_photo.display_image + '? %s' % (timezone.now())}
                return JsonResponse(message)
        photos = get_photos(request.user)
        context = {'social_user': social_user,
                   'photos': photos,
                   'staged_photo': staged_photo}
        return render(request, self.template_name, context)
class ImageUploadView(LoginRequiredMixin, View):
    """Accept a dashboard photo upload and attach it to the requesting user."""

    def post(self, request):
        """Validate the upload form, save the photo, and return to the dashboard."""
        form = PhotoForm(request.POST, request.FILES)
        if not form.is_valid():
            # flash a warning and bounce back to the dashboard
            messages.add_message(request, messages.WARNING,
                                 'Error! Both Photo Caption and Select Photo are required')
            return HttpResponseRedirect(reverse('photo:user_home'))
        # set the current user as the owner of the photo before saving it
        photo = form.save(commit=False)
        photo.owner = request.user
        photo.save()
        return HttpResponseRedirect(reverse('photo:user_home'))
class DeleteImageView(LoginRequiredMixin, View):
    ''' handle deletion of images'''

    def post(self, request):
        """Delete a photo row plus its on-disk files; respond with JSON status.

        Expects POST fields: photo_id, orig (original file path) and
        temp (temporary file path, or the literal string 'None').
        """
        form = DeleteForm(request.POST)
        if form.is_valid():
            photo_id = request.POST['photo_id']
            # NOTE(review): SECURITY -- these paths come straight from the
            # client and are handed to os.remove() below, letting an
            # authenticated user delete arbitrary files the process can
            # write. Consider deriving the paths from the PhotoModel row
            # server-side instead.
            original_photo_path = request.POST['orig']
            temporary_photo_path = request.POST['temp']
            # delete image from database
            photo = PhotoModel.objects.get(pk=photo_id)
            photo.delete()
            # delete original and temporary images
            os.remove(original_photo_path)
            # first check if a temporary image exists
            if temporary_photo_path != 'None':
                os.remove(temporary_photo_path)
            message = {'content': 'Photo was deleted successfully', 'status': True}
            return JsonResponse(message)
        else:
            message = {'content': 'Error deleting file', 'status': False}
            return JsonResponse(message)
| StarcoderdataPython |
3362559 | class IActionChooser:
    def action(self, state, considerExploring):
        """Return the chosen action for *state*; interface stub (returns None).

        considerExploring presumably toggles exploration vs. exploitation --
        confirm in concrete implementations.
        """
        pass
    def get_brain(self):
        """Return the underlying decision model; interface stub (returns None)."""
        pass
| StarcoderdataPython |
161422 | <reponame>RobertTownley/stonehenge<filename>stonehenge/components/ui/base.py
from stonehenge.components.component import Component
class UIComponent(Component):
    """Marker base class for UI components; adds no behaviour of its own."""
    pass
| StarcoderdataPython |
1714879 | <reponame>SalahAdDin/django-oscar-support
from django.db import models
from django.utils.timezone import now as utc_now
from django.utils.translation import ugettext_lazy as _
class ModificationTrackingMixin(models.Model):
    """Abstract Django model mixin that stamps creation and modification times."""
    date_created = models.DateTimeField(_("Created"), blank=True)
    date_updated = models.DateTimeField(_("Last modified"), blank=True)

    def save(self, *args, **kwargs):
        # Stamp the creation time only once; refresh the update time on
        # every save.
        if not self.date_created:
            self.date_created = utc_now()
        self.date_updated = utc_now()
        super(ModificationTrackingMixin, self).save(*args, **kwargs)

    class Meta:
        abstract = True
| StarcoderdataPython |
152574 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class GroupPolicyAttachment(pulumi.CustomResource):
    """
    Attaches a Managed IAM Policy to an IAM group
    """
    # NOTE(review): this generated code targets Python 2 -- `basestring`
    # does not exist on Python 3.
    def __init__(__self__, __name__, __opts__=None, group=None, policy_arn=None):
        """Create a GroupPolicyAttachment resource with the given unique name, props, and options."""
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        # `group` is required and must be a string.
        if not group:
            raise TypeError('Missing required property group')
        elif not isinstance(group, basestring):
            raise TypeError('Expected property group to be a basestring')
        __self__.group = group
        """
        The group the policy should be applied to
        """
        __props__['group'] = group

        # `policy_arn` is required and must be a string.
        if not policy_arn:
            raise TypeError('Missing required property policy_arn')
        elif not isinstance(policy_arn, basestring):
            raise TypeError('Expected property policy_arn to be a basestring')
        __self__.policy_arn = policy_arn
        """
        The ARN of the policy you want to apply
        """
        __props__['policyArn'] = policy_arn

        super(GroupPolicyAttachment, __self__).__init__(
            'aws:iam/groupPolicyAttachment:GroupPolicyAttachment',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        # Copy resolved output properties back onto the resource instance.
        if 'group' in outs:
            self.group = outs['group']
        if 'policyArn' in outs:
            self.policy_arn = outs['policyArn']
| StarcoderdataPython |
1794398 | <filename>niimpy/preprocessing/test_sampledata.py
"""Test that sample data can be opened.
"""
import pandas as pd
import pytest
import niimpy
from niimpy.reading import read
from niimpy.config import config
from niimpy.preprocessing import sampledata
TZ = 'Europe/Helsinki'
@pytest.mark.parametrize("datafile",
                         ['MULTIUSER_AWARE_BATTERY_PATH',
                          'MULTIUSER_AWARE_SCREEN_PATH',
                          'GPS_PATH',
                          'SURVEY_PATH'
                          ])
def test_sampledata_csv(datafile):
    """Each configured CSV sample file can be read into a DataFrame."""
    path = getattr(config, datafile)
    frame = niimpy.read_csv(path, tz=TZ)
    assert isinstance(frame, pd.DataFrame)
@pytest.mark.parametrize("datafile",
                         ['MULTIUSER_AWARE_BATTERY_PATH',
                          'MULTIUSER_AWARE_SCREEN_PATH',
                          'GPS_PATH'
                          ])
def test_datetime_index_csv(datafile):
    """CSV sample data (surveys excluded) is indexed by timestamps."""
    path = getattr(config, datafile)
    frame = niimpy.read_csv(path, tz=TZ)
    # The index should be set to the times
    assert isinstance(frame.index, pd.DatetimeIndex)
@pytest.mark.parametrize("datafile",
                         ['SQLITE_SINGLEUSER_PATH',
                          'SQLITE_MULTIUSER_PATH',
                          ])
def test_sampledata_sqlite(datafile):
    """Each sqlite sample database opens successfully (contents not read)."""
    path = getattr(config, datafile)
    handle = niimpy.open(path)
    assert isinstance(handle, niimpy.Data1)
| StarcoderdataPython |
51881 | <reponame>gtnx/chrony<filename>chrony/core.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
def compute_category_index(categories):
    """Map each distinct category to its 1-based rank in sorted order."""
    ranked = enumerate(sorted(set(categories)), start=1)
    return {category: rank for rank, category in ranked}
def weighted_interpolate(serie, weights):
    """Fill interior NaNs of *serie* by interpolating in cumulative-weight space.

    Known (non-NaN) values are kept exactly.  Each NaN lying between two
    known values is filled by linear interpolation between them, with the
    position determined by the cumulative sum of *weights* rather than by
    index distance.  Leading/trailing NaNs (no known neighbour on one
    side) remain NaN.

    Parameters
    ----------
    serie : pd.Series of floats, possibly containing NaNs.
    weights : pd.Series of weights aligned with *serie*.

    Returns
    -------
    pd.Series with interior NaNs interpolated.
    """
    known = serie.notna()
    # Nearest known value on each side of every position.
    before = serie.ffill()
    after = serie.bfill()
    # Cumulative weight, and its value at the surrounding known points.
    # A float NaN series is used instead of pd.Series(None, ...), which
    # produces an object-dtype series on modern pandas.
    cum_w = weights.cumsum()
    anchors = pd.Series(np.nan, index=serie.index, dtype=float)
    anchors[known] = cum_w[known]
    w_before = anchors.ffill()
    w_after = anchors.bfill()
    # Fractional position between the two known neighbours.  At known
    # points and at the edges this is NaN (0/0 or NaN bounds), which is
    # harmless: known points are restored by update() below.
    frac = (cum_w - w_before) / (w_after - w_before)
    result = before + frac * (after - before)
    result.update(serie)
    return result
| StarcoderdataPython |
1706259 | <filename>network_aware_heat/network_aware_resources.py<gh_stars>1-10
from heat.engine.properties import Properties
from heat.engine.resources.openstack.nova.server import Server as NovaServer
from oslo_log import log as logging
from heat.engine import properties
LOG = logging.getLogger("heat.engine.resource")
def merge_props(a, b):
    """Return a new dict with *b*'s entries layered over *a*'s.

    Neither input is mutated; on key collisions *b* wins.
    """
    merged = dict(a)
    merged.update(b)
    return merged
class NetworkAwareServer(NovaServer):
    """Nova server resource extended with bandwidth/latency scheduler hints."""
    # Nova "extended server attributes" key holding the hypervisor host name.
    OS_EXT_HOST_KEY = 'OS-EXT-SRV-ATTR:host'
    # Extra template property names added on top of OS::Nova::Server.
    EXPECTED_BANDWIDTH = 'expected_bandwidth'
    MAXIMUM_LATENCY = 'maximum_latency'

    properties_schema = merge_props(NovaServer.properties_schema, {
        EXPECTED_BANDWIDTH: properties.Schema(
            properties.Schema.LIST,
            "Node - Bandwidth (kbps) map",
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    'to_host': properties.Schema(
                        properties.Schema.STRING,
                        'Target host'
                    ),
                    'bandwidth': properties.Schema(
                        properties.Schema.NUMBER,
                        'Expected bandwidth'
                    ),
                }
            )
        ),
        MAXIMUM_LATENCY: properties.Schema(
            properties.Schema.LIST,
            'Node - Latency (ms) map',
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    'to_host': properties.Schema(
                        properties.Schema.STRING,
                        'Target host'
                    ),
                    # NOTE(review): description says 'Expected bandwidth' but
                    # this field is the latency bound -- looks like a
                    # copy-paste slip in the (user-visible) schema text.
                    'latency': properties.Schema(
                        properties.Schema.NUMBER,
                        'Expected bandwidth'
                    ),
                }
            )
        )
    })

    def handle_create(self):
        """Inject bandwidth/latency scheduler hints, then create the server."""
        assert isinstance(self.properties, Properties)
        if self.properties[self.SCHEDULER_HINTS] is None:
            self.properties.data[self.SCHEDULER_HINTS] = {}
        if self.properties[self.EXPECTED_BANDWIDTH]:
            bandwidth_expectations = self.properties[self.EXPECTED_BANDWIDTH]
            LOG.info("GLLS Got bandwidth expectations" + str(bandwidth_expectations))
            self.properties.data[self.SCHEDULER_HINTS]['bandwidth_to'] = \
                self.create_hints_from_bandwidth_properties(bandwidth_expectations)
        if self.properties[self.MAXIMUM_LATENCY]:
            maximum_latencies = self.properties[self.MAXIMUM_LATENCY]
            LOG.info("GLLS Got maximum latency" + str(maximum_latencies))
            self.properties.data[self.SCHEDULER_HINTS]['latency_to'] = \
                self.create_hints_from_latency_properties(maximum_latencies)
        LOG.info("GLLS " + str(self.properties[self.SCHEDULER_HINTS]))
        return super(NetworkAwareServer, self).handle_create()

    @classmethod
    def create_hints_from_bandwidth_properties(cls, bandwidth_expectations):
        # Encode each expectation as a "<bandwidth>,<host>" string hint.
        return [str(be['bandwidth']) + ',' + be['to_host'] for be in bandwidth_expectations]

    @classmethod
    def create_hints_from_latency_properties(cls, maximum_latencies):
        # Encode each bound as a "<latency>,<host>" string hint.
        return [str(be['latency']) + ',' + be['to_host'] for be in maximum_latencies]

    def get_attribute(self, key, *path):
        """Resolve the extra 'host' attribute from live Nova data; defer otherwise."""
        if key == "host":
            server, data = self.get_live_resource_data()
            return data.get(self.OS_EXT_HOST_KEY)
        return super(NetworkAwareServer, self).get_attribute(key, *path)
def resource_mapping():
    """Expose the Heat resource-type -> implementation mapping for this plugin."""
    return {
        'OS::NetworkAware::Server': NetworkAwareServer,
    }
| StarcoderdataPython |
27200 | from cheater import *
from main import *
# new Chain instance with
# mining difficulty = 4
c = Chain(4)
c.createGenesis()
# simulate transactions
for payment in ["3$ to Arthur", "5$ to Bob", "12$ to Jean", "7$ to Jake",
                "2$ to Camille", "13$ to Marth", "9$ to Felix"]:
    c.addBlock(Block(payment))
# chech chain validity
c.isChainValid()
# fake transaction
cheat(c, 1, "6 to jean")
# check chain validity
c.isChainValid()
# print all blocks
c.printChain()
print("len", len(c.blocks[0].hash) + 15)
| StarcoderdataPython |
3323834 | <filename>src/app/clients.py<gh_stars>0
from datetime import timedelta
from .constants import KIND_MAP
from .db import session
from .models import Message, User
def check_if_message_exists(data):
    """Return messages that look like duplicates of *data*.

    A duplicate shares text, kind and user, and is scheduled strictly
    within +/- 30 minutes of data["scheduled"] (both bounds exclusive).
    Returns a possibly empty list of Message rows.
    """
    gap_30_minutes_up = data["scheduled"] + timedelta(minutes=30)
    gap_30_minutes_down = data["scheduled"] - timedelta(minutes=30)
    return (
        session.query(Message)
        .filter_by(text=data["text"], kind=data["kind"], user_id=data["user_id"],)
        .filter(
            Message.scheduled < gap_30_minutes_up,
            Message.scheduled > gap_30_minutes_down,
        )
        .all()
    )
def get_user_by_id(user_id):
    """Fetch a User by primary key (SQLAlchemy ``Query.get`` returns None when absent)."""
    return session.query(User).get(user_id)
def is_user_ok_to_recieve_this_kind(user_db, data):
    """True when the user has the contact field required by the message kind."""
    kind = data["kind"]
    if kind in (KIND_MAP["email"], KIND_MAP["push"]):
        # email-backed channels need an email address on file
        return bool(user_db.email)
    if kind in (KIND_MAP["sms"], KIND_MAP["whatsapp"]):
        # phone-backed channels need a phone number on file
        return bool(user_db.phone)
    return True
| StarcoderdataPython |
1635891 | <gh_stars>0
import os
from dotenv import load_dotenv
# Load environment variables from a sibling .env file, if one exists.
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
    load_dotenv(dotenv_path)
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from beedare import create_app, create_admin
from beedare import db
# Application factory: the configuration name comes from FLASK_CONFIG,
# falling back to 'default'.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
admin = create_admin(app, db)
migrate = Migrate(app, db)  # Migrate instance used for migrating the database
manager = Manager(app)  # Manager instance
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Manager command: populate the database with sample data."""
    from beedare.fill_database import addDataToDB
    addDataToDB()
@manager.command
def test_neo4j():
    """Manager command: exercise the Neo4j connection helpers."""
    from beedare import neoconn
    # conn.create_user(username='jelmer')
    # conn.create_dare(code='test')
    neoconn.completed_dare(username='jelmer', dare='test')
    for dare in neoconn.get_completed_dares('jelmer'):
        print(dare)
if __name__ == "__main__":
    # Entry point: dispatch Flask-Script manager commands from the CLI.
    manager.run()
| StarcoderdataPython |
120904 | <gh_stars>1-10
def B():
    """Read n and 2n values from stdin; print the minimal total pairing cost."""
    n = int(input())
    tokens = [int(tok) for tok in input().split()]
    # positions[v] collects the indices where value v occurs; value 0 is
    # pinned to the two virtual start positions.
    positions = {v: [] for v in range(1, n + 1)}
    positions[0] = [0, 0]
    for idx in range(2 * n):
        positions[tokens[idx]].append(idx)
    total = 0
    for v in range(n):
        p, q = positions[v], positions[v + 1]
        total += min(abs(q[0] - p[0]) + abs(q[1] - p[1]),
                     abs(q[0] - p[1]) + abs(q[1] - p[0]))
    print(total)
| StarcoderdataPython |
3386836 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.provision._ssh._monkeypatch``.
"""
from twisted.trial.unittest import SynchronousTestCase as TestCase
from .._ssh._monkeypatch import _patch_7672_needed, patch_7672_applied
class Twisted7672Tests(TestCase):
    """"
    Tests for ``flocker.provision._ssh._monkeypatch``.
    """
    def test_needsPatch(self):
        """
        Check to see if patch is still required.
        """
        # Fails only when the patch is still needed but was not applied;
        # the message fires to remind maintainers once it becomes obsolete.
        self.assertTrue((not _patch_7672_needed()) or patch_7672_applied,
                        "Monkeypatch for twisted bug #7672 can be removed.")
| StarcoderdataPython |
3324873 | <filename>tests/pint_units.py<gh_stars>1-10
import importlib.resources as pkg_resources
import random
_RAW_UNIT_DATA = None
def get_units():
    """Return a dict of Project Haystack unit names, loaded lazily.

    Parses 'project_haystack_units.txt' bundled with this package on the
    first call and caches the result in _RAW_UNIT_DATA; later calls
    return a copy of the cached dict.
    """
    global _RAW_UNIT_DATA  # pylint: disable=global-statement
    if _RAW_UNIT_DATA is None:
        unit_data = {}
        for raw_row in pkg_resources.open_text(__package__,
                'project_haystack_units.txt', encoding='UTF-8'):
            row = raw_row.split(',')
            # NOTE(review): str.split(',') never yields an empty list, so
            # this guard can never trigger -- blank lines fall through below.
            if not bool(row):
                continue
            # Lines starting with '--' are comments in the units file.
            if row[0].startswith('--'):
                continue
            if len(row) == 1:
                # Single-column row: the name maps to itself.  NOTE(review):
                # rows are not stripped, so these values keep a trailing
                # newline -- presumably harmless downstream; confirm.
                unit_data[row[0]] = row[0]
            else:
                unit_data[row[0]] = row[1]
        _RAW_UNIT_DATA = unit_data
    return _RAW_UNIT_DATA.copy()
def get_random_unit():
    """Return one unit value chosen uniformly at random from the loaded table."""
    return random.choice(list(get_units().values()))
| StarcoderdataPython |
1713406 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 13:13:09 2020
@author: praveshj
"""
#Calculating the NMI Score for all four clusterings
import math
import pandas as pd
import numpy as np
#X is a list of lists, where each list correponds to a class
#So to calculate entropy we just take the number in curr list and divide it by total number of elements,
#after apply the formula on this it gives us the final entropy for list
def entropy_list(X):
    """Shannon entropy (in bits) of the partition *X*.

    X is a list of lists, one per class; only the sizes matter.  With
    n_i = len(X[i]) and N = sum n_i, returns -sum (n_i/N) * log2(n_i/N).

    Robustness over the original: empty sublists contribute 0 (the
    0*log(0) convention) instead of raising a math domain error, and an
    entirely empty partition yields 0.0 instead of dividing by zero.
    """
    sizes = [len(part) for part in X]
    total = sum(sizes)
    if total == 0:
        return 0.0
    entropy = 0.0
    for size in sizes:
        if size > 0:  # skip empty classes: lim p->0 of p*log2(p) is 0
            p = size / total
            entropy -= p * math.log2(p)
    return entropy
#To obtain the list of lists, in the format that we need to calculate entropy for each clustering
def get_list_from_file(file):
    """Read eight comma-separated integer rows (one per cluster) from *file*."""
    return [[int(tok) for tok in file.readline().split(',')]
            for _ in range(8)]
#Conditional Entropy depends on the orginal class of the vector, and the class assigned to it
#So in order to get the conditional entropy, we take true label and assigned label as input
#what we did here is, according to the formula mentrion in the assignment.
def get_conditional_entropy(label, cluster_list):
    """Conditional entropy H(class | cluster) in bits.

    *label* maps element index -> true class (0..7); *cluster_list* holds
    one list of element indices per cluster.  For each cluster the class
    distribution entropy is computed and weighted by the cluster's share
    of all elements.
    """
    total = sum(len(cluster) for cluster in cluster_list)
    entropy = 0
    for cluster in cluster_list:
        size = len(cluster)
        cluster_entropy = 0
        for cls in range(8):
            if size == 0:
                print("Error Encountered")
                continue
            matches = sum(1 for idx in cluster if label[idx] == cls)
            if matches > 0:
                share = matches / size
                cluster_entropy -= share * math.log2(share)
        entropy += (size / total) * cluster_entropy
    return entropy
#The final formula for nmi is applied on three values obtained from above formula
def get_nmi(entropy_class, entropy_clust, conditional_entropy):
    """Normalized mutual information: 2*I / (H(class) + H(cluster))."""
    mutual_information = entropy_class - conditional_entropy
    return 2 * mutual_information / (entropy_class + entropy_clust)
#The Final NMI are printed for a file with this function
def print_nmi(file, label, entropy_class):
    """Read a clustering from *file*, compute its NMI against *label*, and print it."""
    clusters = get_list_from_file(file)
    cluster_entropy = entropy_list(clusters)
    cond_entropy = get_conditional_entropy(label, clusters)
    print(entropy_class, cond_entropy, cluster_entropy)
    score = get_nmi(entropy_class, cluster_entropy, cond_entropy)
    print("NMI for the Given Clustering: ", score)
# Load the labelled document-term matrix and drop row 13.  NOTE(review):
# the duplicated index in drop([13,13]) is redundant -- drop(13) would do;
# confirm no second row was actually intended.
datasetP = pd.read_csv('../data/AllBooks_baseline_DTM_Labelled.csv')
datasetP = datasetP.drop([13,13])
label = datasetP.iloc[:, 0]
#print(label)
#Label preprocessing from the initial dataset. Buddishm_CH12-> Buddish
label = np.asarray(label)
for i in range (0, len(label)):
    listName = label[i].split('_')
    label[i] = listName[0];
# print(label)
#Mapping the chapter to their numerical class
label[label == 'Buddhism'] = 0
label[label == 'TaoTeChing'] = 1
label[label == 'Upanishad'] = 2
label[label == 'YogaSutra'] = 3
label[label == 'BookOfProverb'] = 4
label[label == 'BookOfEcclesiastes'] = 5
label[label == 'BookOfEccleasiasticus'] = 6
label[label == 'BookOfWisdom'] = 7
#print(label)
# Group the (now numeric) labels by class; only the group sizes are used
# by entropy_list below.
class_list =[]
for i in range(0,8):
    temp = label[label == i];
    #print(temp)
    class_list.append(temp)
#print(class_list)
#Calculating the entropy for the given data, it's fixed and not dependent on any clustering
entropy_class = entropy_list(class_list)
# print(entropy_class)
#Final Calculation of NMI Scores for the files saved in earlier tasks
print('NMI SCORES:')
print('For Agglomerative Clustering')
file = open('../../clusters/agglomerative.txt','r');
print_nmi(file, label, entropy_class)
print('For KMeans Clustering')
file = open('../../clusters/kmeans.txt', 'r');
print_nmi(file, label, entropy_class)
print('For Agglomerative Clustering after PCA')
file = open('../../clusters/agglomerative_reduced.txt', 'r');
print_nmi(file, label, entropy_class)
print('For KMeans After PCA')
file = open('../../clusters/kmeans_reduced.txt', 'r');
print_nmi(file, label, entropy_class)
| StarcoderdataPython |
3314937 | '''
Visualize sequences prepared by tools.prepare
Run after running main.py
'''
from tools import dataset
from tools.dataset import Dataset
from tools import prepare
import random
import os
import argparse
from glob import glob
import numpy as np
import cv2
import imageio
from tools import augmentation as augment
from tools.flow import farneback
FPS = 10
def visualize_temperature(visualize_dir, temperature_dir, files_list, clean, augment_fct = None, suffix = ""):
    """Render cached temperature sequences (.npy) as JET-heatmap GIFs.

    Samples files per action label (or reuses *files_list*), optionally
    applies *augment_fct* to each sequence, and writes one GIF per
    sequence into visualize_dir/<basename of temperature_dir>.
    """
    output_dir = os.path.join(visualize_dir, os.path.split(temperature_dir)[1])
    if clean:
        prepare.remove_dir_tree(output_dir)
    prepare.ensure_dir_exists(output_dir)
    files = None
    if not os.path.exists(files_list):
        # No saved selection: pick 3 random samples per action and persist
        # the choice.  NOTE(review): random.sample raises ValueError if an
        # action has fewer than 3 matching files -- confirm the cache always
        # holds enough samples.
        files = []
        for action in dataset.ACTION_LABELS:
            generator = glob(os.path.join(temperature_dir, "*", action+"*.npy"))
            for sample in random.sample(generator, 3):
                _, fn = os.path.split(sample)
                files.append(fn)
        with open(files_list, 'w+') as handler:
            for fn in files:
                handler.write(fn+"\n")
    else:
        with open(files_list, 'r') as handler:
            files = handler.read().split("\n")
        # NOTE(review): removing items while iterating the same list can
        # skip adjacent empty entries; in practice only the final empty
        # string produced by split("\n") is expected here.
        for name in files:
            if len(name) == 0:
                files.remove(name)
    temperatures = []
    for name in files:
        fn = glob(os.path.join(temperature_dir,"**",name))[0]
        temperature = np.load(fn)
        if augment_fct:
            temperature = augment_fct(temperature)
        temperatures.append(temperature)
    grays = []
    for temperature in temperatures:
        # assumes temperature values are normalized to [0, 1] -- TODO confirm
        gray = (255 * temperature).astype(np.uint8)
        grays.append(gray)
    def heatmap(sequence, cv_colormap: int = cv2.COLORMAP_JET):
        # Map a uint8 sequence to a colormapped array of the same shape + (3,).
        heatmap_flat = cv2.applyColorMap(
            sequence.flatten(), cv_colormap)
        return heatmap_flat.reshape(sequence.shape + (3,))
    bgrs = []
    for gray in grays:
        bgr = heatmap(gray)
        bgrs.append(bgr)
    for idx, bgr in enumerate(bgrs):
        fn = files[idx]
        gif_fn = fn.split(".")[0] + suffix + ".gif"
        with imageio.get_writer(os.path.join(output_dir, gif_fn), mode='I', duration=1/FPS) as writer:
            for frame in bgr:
                # reverse channel order: OpenCV is BGR, imageio expects RGB
                writer.append_data(frame[:, :, ::-1])
def visualize_flow(visualize_dir, flow_dir, temperature_dir, files_list, clean, augment_fct = None, suffix = ""):
    """Render Farneback optical flow of cached temperature sequences as GIFs.

    Flow direction is encoded as hue and magnitude as value in HSV, then
    converted to BGR; one GIF per sequence is written into
    visualize_dir/<basename of flow_dir>.
    """
    output_dir = os.path.join(visualize_dir, os.path.split(flow_dir)[1])
    if clean:
        prepare.remove_dir_tree(output_dir)
    prepare.ensure_dir_exists(output_dir)
    files = None
    if not os.path.exists(files_list):
        # No saved selection: pick 3 random samples per action and persist it.
        files = []
        for action in dataset.ACTION_LABELS:
            generator = glob(os.path.join(temperature_dir, "*", action+"*.npy"))
            for sample in random.sample(generator, 3):
                _, fn = os.path.split(sample)
                files.append(fn)
        with open(files_list, 'w+') as handler:
            for fn in files:
                handler.write(fn+"\n")
    else:
        with open(files_list, 'r') as handler:
            files = handler.read().split("\n")
        # NOTE(review): removing while iterating can skip adjacent empty
        # entries; only the trailing empty string from split is expected.
        for name in files:
            if len(name) == 0:
                files.remove(name)
    temperatures = []
    for name in files:
        fn = glob(os.path.join(temperature_dir,"**",name))[0]
        temperature = np.load(fn)
        if augment_fct:
            temperature = augment_fct(temperature)
        temperatures.append(temperature)
    def flow2bgr(flow_frame):
        # Encode a 2-channel flow frame as HSV (hue=direction, value=magnitude).
        hsv = np.zeros(flow_frame.shape[:-1] + (3,))
        mag, ang = cv2.cartToPolar(flow_frame[...,0], flow_frame[...,1])
        hsv[...,0] = ang*180/np.pi/2
        hsv[...,1] = 255
        hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
        hsv = hsv.astype(np.uint8)
        bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
        return bgr
    bgrs = []
    for temperature in temperatures:
        flow = farneback(np.squeeze(temperature))
        bgr = np.zeros(flow.shape[:-1] + (3,), dtype=np.uint8)
        for idx, frame in enumerate(flow):
            bgr[idx] = flow2bgr(frame)
        bgrs.append(bgr)
    for idx, bgr in enumerate(bgrs):
        fn = files[idx]
        gif_fn = fn.split(".")[0] + suffix + ".gif"
        with imageio.get_writer(os.path.join(output_dir, gif_fn), mode='I', duration=1/FPS) as writer:
            for frame in bgr:
                # reverse channel order: OpenCV is BGR, imageio expects RGB
                writer.append_data(frame[:, :, ::-1])
            #write extra one frame to make up for the difference between optical flow and temperature sequences lengths
            writer.append_data(np.zeros_like(frame))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--visualize_dir',
type=str,
default="/"+os.path.join("tmps", "visualize"),
help='Where to save visualized sequences.'
)
parser.add_argument(
'--temperature_dir',
type=str,
default="/"+os.path.join("tmps", "cache", "temperature"),
help='Where cached sequences are saved (temperature).'
)
parser.add_argument(
'--flow_dir',
type=str,
default="/"+os.path.join("tmps", "cache", "optical_flow"),
help='Where cached sequences are saved (optical flow).'
)
parser.add_argument(
'--files_list',
type=str,
default="/"+os.path.join("tmps", "filestovisualize.txt"),
help='List of files to visualize saved in, e.g., *.txt; if the list does not exist it will be generated (random 2 samples for each class).'
)
parser.add_argument(
"--clean", action="store_true",
help='Clean the visualize_dir (remove all exisiting files in the directory).'
)
parser.add_argument(
"--augmentation", action="store_true",
help='Visualize augmentation.'
)
FLAGS, unparsed = parser.parse_known_args()
visualize_temperature(FLAGS.visualize_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean)
visualize_flow(FLAGS.visualize_dir, FLAGS.flow_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean)
if FLAGS.augmentation:
visualize_temperature(FLAGS.visualize_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean, augment_fct=augment.random_rotation, suffix="rot")
visualize_flow(FLAGS.visualize_dir, FLAGS.flow_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean, augment_fct=augment.random_rotation, suffix="rot")
visualize_temperature(FLAGS.visualize_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean, augment_fct=augment.random_flip, suffix="flip")
visualize_flow(FLAGS.visualize_dir, FLAGS.flow_dir, FLAGS.temperature_dir, FLAGS.files_list, FLAGS.clean, augment_fct=augment.random_flip, suffix="flip") | StarcoderdataPython |
133679 | from verifiers import *
from cipher import *
from rwaFiles import *
from colors import *
import getpass
passCounter = 0
idCounter = 0
userID = ''
############################################ Start of Register User ############################################
# Register User Function - userid_pswd.csv
def registerUser():
    """Recursive console menu for user administration (userid_pswd.csv).

    Each completed action re-enters registerUser() instead of looping, so
    NOTE(review): very long admin sessions grow the call stack.
    """
    print("\n\ta) Register User")
    print("\tb) Edit User")
    print("\tc) Delete User")
    print("\td) Retrieve User List")
    print("\te) back to previous")
    adminInput = input("\n\tSelect option: ")
    if adminInput == "a": # a) Register User
        tOrF = True
        while tOrF == True:
            userID = input("\nPlease enter the new user's ID: ") # ask for new user's userID
            verifyUserID = checkComma(userID)
            if verifyUserID == True:
                print("Please do not enter a comma in your ID.")
                tOrF = True
            else:
                # NOTE(review): this passes the boolean comma-check result to
                # regUsrIDone rather than the entered ID -- presumably
                # regUsrIDone(userID) was intended; confirm against verifiers.
                tOrF = regUsrIDone(verifyUserID)
        tOrF = True
        while tOrF == True:
            userPswd = input("\nPlease enter the new user's Password: ") # ask for new user's passwd
            tOrF = passcheck(userPswd) # check if the passwd is secure
        encryptedPass = encrypt(userPswd) # encrypt passwd with Caesar Cipher
        tOrF = True
        while tOrF == True:
            aOrU = input("Enter (a) for admin account and (u) for user account: ") # ask if new account is admin or user account
            tOrF = admOrUsr(aOrU)
        if aOrU == 'a': # set total Attempts
            attempts = 999
        else:
            attempts = qzSettings(3)
        tOrF = True
        while tOrF == True:
            scrtQn = input("\nPlease enter a secret recovery question.\n>>> ") # ask for secret question
            verifyScrtQn = checkComma(scrtQn)
            if verifyScrtQn == True:
                print("Please do not enter a comma in your question.")
                tOrF = True
            else:
                tOrF = False
        tOrF = True
        while tOrF == True:
            scrtAns = input("\nPlease enter your secret question's answer.\n>>> ") # ask for secret question's answer
            verifyScrtAns = checkComma(scrtAns)
            if verifyScrtAns == True:
                print("Please do not enter a comma in your answer.")
                tOrF = True
            else:
                tOrF = False
        # concatenate userID + userPswd + aOrU + Attempts + secret question + secret question's answer'
        writeIntoFile = (str(userID)+','+str(encryptedPass)+','+str(aOrU)+','+str(attempts) +',' + str(scrtQn)+','+str(scrtAns))
        newUsr(writeIntoFile) # insert userID and passwd into userid_pswd.csv
        print("\nRegistration Successful!\n") # print successful
        rmvSpaceID()
        registerUser() # back to menu
    elif adminInput == "b": # b) Edit User Passwd
        tOrF = True
        passchecker = True
        idCounter = 0
        while tOrF == True:
            userID = input("\nPlease enter the existing user's ID: ") # ask for existing user's userID
            # both role lookups must report "not found" (True) to keep looping
            tOrF = regUsrIDtwo(userID,'a') and regUsrIDtwo(userID,'u')
            if tOrF == False:
                print("User exists.")
            idCounter += 1
            if idCounter == 3 and tOrF == True:
                print("You have tried too many times, returning you to previous menu.\n")
                startAdmin()
        tOrF = True
        passCounter = 0
        while tOrF == True:
            userPswd = input("\nPlease enter the user's Password: ") # ask for the user's passwd
            encryptedPass = encrypt(userPswd)
            tOrF = usrPswd(userID,encryptedPass) # check if the passwd is correct
            passCounter += 1
            if passCounter == 3 and tOrF == True: # limit password tries
                print("You have tried too many times.")
                tOrF = False
                if tOrF == False:
                    registerUser() # back to menu
        while passchecker == True :
            newUsrPswd = input("\nPlease enter the new password: ") # ask for user's new passwd
            passchecker = passcheck(newUsrPswd) # check if the passwd is secure
        encryptedPass = encrypt(newUsrPswd)
        editPswd(userID,encryptedPass) # insert passwd into correct line in userid_pswd.csv
        rmvSpaceID()
        print("Password edited successfully.") # print successful
        registerUser() # back to menu
    elif adminInput == "c": # c) Delete User
        tOrF = True
        idCounter = 0
        while tOrF == True:
            userID = input("\nPlease enter the existing user's ID: ") # ask for user's userID to delete
            tOrF = regUsrIDtwo(userID,'a') and regUsrIDtwo(userID,'u')
            idCounter += 1
            if idCounter == 3 and tOrF == True:
                print("You have tried too many times, returning you to previous menu.\n")
                startAdmin()
        # NOTE(review): on decline this recurses into the menu but has no
        # `return`, so once that call unwinds removeUsr() below still runs.
        if dblConfirm() == True: # ask for user's input to double confirm deletion, if not, back to menu
            pass
        else:
            print("You entered (n), returning you to the menu.")
            registerUser()
        removeUsr(userID) # delete respective line in userid_pswd.csv
        rmvSpaceID()
        print("User deleted successfully.") # print successful
        registerUser() # back to menu
    elif adminInput == "d": # d) Retrieve User List
        usrList() # print list of users from userid_pswd.csv
        registerUser() # back to menu
    elif adminInput == "e": # e) back to previous
        startAdmin()
    else:
        print("You have not entered a valid option") # invalid input
        registerUser()
############################################ Start of Quiz Settings ############################################
# Setup Quiz Function & Modules - quiz_settings.csv & quiz_topics.csv
def setupQuiz(): #set quiz timer,
    """Recursive console menu for quiz settings and topics
    (quiz_settings.csv / quiz_topics.csv).

    NOTE(review): like registerUser, every action re-enters setupQuiz()
    recursively rather than looping.
    """
    print("\n\ta) Set Quiz Timer")
    print("\tb) Set Number of Questions Tested")
    print("\tc) Set Total Attempts for Quiz")
    print("\td) Create New Quiz Topic")
    print("\te) Delete Existing Quiz Topic")
    print("\tf) Edit Name of a Topic")
    print("\tg) back to previous")
    adminInput = input("\n\tSelect option: ")
    if adminInput == "a": # a) Set Quiz Timer
        tOrF = True
        selection = 'Quiz Timer'
        while tOrF == True:
            qzTimer = input("Please enter the new quiz timer: ") # ask for user's input for quiz timer
            if qzTimer.isnumeric():
                tOrF = intCheck(float(qzTimer))
            else:
                print("Please enter a valid integer.") # invalid input
                tOrF = True
        quizTimer(qzTimer,selection) # update quiz_settings.csv on quiz timer
        print("Quiz timer updated successfully.") # print successful
        setupQuiz() # back to menu
    elif adminInput == "b": # b) Set Number of Questions Tested
        tOrF = True
        selection = 'No. of Questions'
        while tOrF == True:
            qzNum = input("Please enter the number of questions to be tested: ") # ask for user's input for number of questions to be tested
            if qzNum.isnumeric():
                tOrF = intCheck(float(qzNum))
            else:
                print("Please enter a valid integer.") # invalid input
                tOrF = True
        quizTimer(qzNum,selection) # update quiz_settings.csv on number of questions to be tested
        print("Number of questions updated successfully.") # print successful
        setupQuiz() # back to menu
    elif adminInput == "c": # c) Set Total Marks for Quiz
        tOrF = True
        selection = 'Attempts'
        while tOrF == True:
            qzAtmpt = input("Please enter the total attempts to be set: ") # ask for user's input for total attempts set for the quiz
            if qzAtmpt.isnumeric():
                tOrF = intCheck(float(qzAtmpt))
            else:
                print("Please enter a valid integer.") # invalid input
                tOrF = True
        quizTimer(qzAtmpt,selection) # update quiz_settings.csv on total marks of entire quiz
        print("Number of attempts updated successfully.") # print successful
        setupQuiz() # back to menu
    elif adminInput == "d": # add new module
        tOrF = True
        while tOrF == True:
            newTopic = input("Enter the new topic name: ") # ask for existing module name
            checkModule = checkTopicOne(newTopic) # check if module actually existing
            if checkModule == False: # if module don't exist, check for comma
                verifyNewTopic = checkComma(newTopic)
                if verifyNewTopic == True or len(newTopic) == 0: # if got comma, print error message
                    print("Please do not enter (, or | or &) in your topic name.") # invalid input
                    tOrF = True
                else: # if don't have comma, continue on to
                    tOrF = False
                    addNewTopic(newTopic) # add new module in
                    print("\n[SUCCESS] Topic successfully created. Sending you back to the menu.")
                    setupQuiz() #back to this menu
            else: # elif module already exist, print error msg
                print("\n[ERROR] Topic already exists, sending you back to the menu.")
                setupQuiz() # back to this menu
    elif adminInput == "e": # delete existing module
        tOrF = False
        while tOrF == False:
            # ask for user's input on question to delete
            moduleToDel = input("\nPlease enter the topic to delete: ")
            tOrF = checkTopicOne(moduleToDel) # check if module exists, if not, print does not exist and ask again
        # NOTE(review): on decline this recurses into the menu without a
        # `return`, so once that call unwinds delTopic() below still runs.
        if dblConfirm() == True: # ask for user's input to double confirm deletion, if not, back to menu
            pass
        else:
            print("You entered (n), returning you to the menu.")
            setupQuiz()
        delTopic(moduleToDel) # delete selected question in question_pool.csv
        rmvSpaceTopic()
        print("Topic deleted successfully.") # print successful
        setupQuiz()
    elif adminInput == "f": # edit name of existing module
        tOrF = False
        while tOrF == False:
            # ask for user's input on question to delete
            moduleToEdit = input("\nPlease enter the topic to edit: ")
            tOrF = checkTopicOne(moduleToEdit) # check if module exists, if not, print does not exist and ask again
        while tOrF:
            nameToChg = input("What do you want to change the name to? ") # ask user for change
            verifyNameToChg = checkComma(nameToChg)
            if verifyNameToChg == True or len(nameToChg) == 0:
                print("Please do not enter (, or | or &) in your topic name.")
                tOrF = True
            else:
                tOrF = False
        if dblConfirm() == True: # ask for user's input to double confirm deletion, if not, back to menu
            pass
        else:
            print("You entered (n), returning you to the menu.")
            setupQuiz()
        # if yes, write new name into csv file
        editTopicName(moduleToEdit,nameToChg)
        editQuesPoolTopics(moduleToEdit,nameToChg)
        editQuizzesTopics(moduleToEdit,nameToChg)
        rmvSpaceQn()
        rmvSpaceTopic()
        print("Module updated successfully.")
        setupQuiz()
    elif adminInput == "g": # d) back to previous
        startAdmin()
    else:
        print("You have not entered a valid option") # invalid input
        setupQuiz()
############################################ Start of Define Options ############################################
# Define Options Functions - question_pool.csv
def defineOptions():
    """Admin menu for the question pool (question_pool.csv) and quiz list.

    Presents a one-letter menu and dispatches: add/edit/delete questions,
    add/delete quizzes, or list questions.  Every branch finishes by calling
    this menu (or startAdmin) again, so the function recurses instead of
    looping and never returns normally in practice.
    NOTE(review): the recursive menu style grows the call stack over a long
    admin session -- confirm this is acceptable.
    """
    print("\n\ta) Add Question")
    print("\tb) Edit Question")
    print("\tc) Delete Question")
    print("\td) Add Quiz")
    print("\te) Delete Quiz")
    print("\tf) Retrieve Question List")
    print("\tg) back to previous")
    adminInput = input("\n\tSelect option: ")
    if adminInput == "a": # a) Add Question
        tOrF = True
        while tOrF == True:
            newQn = input("Enter the new question: ") # ask for user's input on new question
            checkQn = regQnone(newQn) # check if question already exist, if exist, print already exist
            if checkQn == False: # if question doesn't exist, check for comma
                verifyNewQn = checkComma(newQn)
                if verifyNewQn == True: # if got comma, print error message
                    print("Please do not enter a comma in your question.")
                    tOrF = True
                else: # if don't have comma, continue on to
                    tOrF = False
            else:
                tOrF = True
        # ask for question options (comma is the CSV field separator, so it is banned)
        tOrF = True
        while tOrF:
            newQnA = input("Enter the first option a) ") # ask for question's first option
            verifyNewQnA = checkComma(newQnA)
            if verifyNewQnA == True: # if got comma, print error message
                print("Please do not enter a comma in your question.")
                tOrF = True
            else: # if don't have comma, continue on to
                tOrF = False
        tOrF = True
        while tOrF:
            newQnB = input("Enter the second option b) ") # ask for question's second option
            verifyNewQnB = checkComma(newQnB)
            if verifyNewQnB == True: # if got comma, print error message
                print("Please do not enter a comma in your question.")
                tOrF = True
            else: # if don't have comma, continue on to
                tOrF = False
        tOrF = True
        while tOrF:
            newQnC = input("Enter the third option c) ") # ask for question's third option
            verifyNewQnC = checkComma(newQnC)
            if verifyNewQnC == True: # if got comma, print error message
                print("Please do not enter a comma in your question.")
                tOrF = True
            else: # if don't have comma, continue on to
                tOrF = False
        tOrF = True
        while tOrF:
            newQnD = input("Enter the fourth option d) ") # ask for question's fourth option
            verifyNewQnD = checkComma(newQnD)
            if verifyNewQnD == True: # if got comma, print error message
                print("Please do not enter a comma in your question.")
                tOrF = True
            else: # if don't have comma, continue on to
                tOrF = False
        newQnAns = qnAnsOption() # ask for question answer
        # what topic do you want to add this question to?
        questionFormat = (f'{newQn},{newQnA},{newQnB},{newQnC},{newQnD},{newQnAns},{chooseTopic()}')# concatenate question answers and options together
        addQuestion(questionFormat) # write question,a,b,c,d,ans into question_pool.csv
        print("Question added successfully.") # print successful
        defineOptions() # back to menu
    elif adminInput == "b": # b) Edit Question
        tOrF = True
        options = True
        while tOrF == True:
            qnToEdit = input("\nPlease enter the question to edit: ") # ask for user's input on question to edit
            tOrF = regQntwo(qnToEdit) # check if question exists, if not, print does not exist
        while options == True:
            qnOpToEdit = input("What do you want to edit (qn|a|b|c|d|ans)? ") # ask for user's input on whether to change question,a,b,c,d, or ans
            # each branch below: validate the replacement text, then write it
            # into column 0-5 of the question's row via editQn
            if qnOpToEdit == 'qn':
                tOrF = True
                while tOrF:
                    qnOpChg = input("What do you want to change it to? ") # ask user for change
                    verifyQnOpChg = checkComma(qnOpChg)
                    if verifyQnOpChg == True:
                        print("Please do not enter a comma in your question.")
                        tOrF = True
                    else:
                        tOrF = False
                editQn(qnToEdit,qnOpChg,0) # write changes into question_pool.csv
                options = False
            elif qnOpToEdit == 'a':
                tOrF = True
                while tOrF:
                    qnOpChg = input("What do you want to change it to? ") # ask user for change
                    verifyQnOpChg = checkComma(qnOpChg)
                    if verifyQnOpChg == True:
                        print("Please do not enter a comma in your option.") # if input has comma, prompt for change again
                        tOrF = True
                    else:
                        tOrF = False
                editQn(qnToEdit,qnOpChg,1) # write changes into question_pool.csv
                options = False
            elif qnOpToEdit == 'b':
                tOrF = True
                while tOrF:
                    qnOpChg = input("What do you want to change it to? ") # ask user for change
                    verifyQnOpChg = checkComma(qnOpChg)
                    if verifyQnOpChg == True:
                        print("Please do not enter a comma in your option.") # if input has comma, prompt for change again
                        tOrF = True
                    else:
                        tOrF = False
                editQn(qnToEdit,qnOpChg,2) # write changes into question_pool.csv
                options = False
            elif qnOpToEdit == 'c':
                tOrF = True
                while tOrF:
                    qnOpChg = input("What do you want to change it to? ") # ask user for change
                    verifyQnOpChg = checkComma(qnOpChg)
                    if verifyQnOpChg == True:
                        print("Please do not enter a comma in your option.") # if input has comma, prompt for change again
                        tOrF = True
                    else:
                        tOrF = False
                editQn(qnToEdit,qnOpChg,3) # write changes into question_pool.csv
                options = False
            elif qnOpToEdit == 'd':
                tOrF = True
                while tOrF:
                    qnOpChg = input("What do you want to change it to? ") # ask user for change
                    verifyQnOpChg = checkComma(qnOpChg)
                    if verifyQnOpChg == True:
                        print("Please do not enter a comma in your option.") # if input has comma, prompt for change again
                        tOrF = True
                    else:
                        tOrF = False
                editQn(qnToEdit,qnOpChg,4) # write changes into question_pool.csv
                options = False
            elif qnOpToEdit == 'ans': # ask user for change
                qnOpChg = abcdOption() # verify the input is (a) to (d) only
                editQn(qnToEdit,qnOpChg,5) # write changes into question_pool.csv
                options = False
            else:
                print("Please input a valid options.") # invalid input
                options = True
        print("Successful!") # print successful
        rmvSpaceQn()
        defineOptions() # back to menu
    elif adminInput == "c": # c) Delete Question
        tOrF = True
        while tOrF == True:
            # ask for user's input on question to delete
            qnToDel = input("\nPlease enter the question to delete: ")
            tOrF = regQntwo(qnToDel) # check if question exists, if not, print does not exist and ask again
        if dblConfirm() == True: # ask for user's input to double confirm deletion, if not, back to menu
            pass
        else:
            print("You entered (n), returning you to the menu.")
            # NOTE(review): if this recursive menu call ever returns, execution
            # falls through below and deletes the question anyway -- confirm intended.
            defineOptions()
        removeQn(qnToDel) # delete selected question in question_pool.csv
        rmvSpaceQn()
        print("Question deleted successfully.") # print successful
        defineOptions() # back to menu
    elif adminInput == 'd': # add quiz
        # fixed course/module tag embedded in every quiz record
        school = {'course':'DISM','module':'Beginner Math'}
        tOrF = True
        while tOrF == True:
            newQn = input("\nEnter the new quiz name: ") # ask for user's input on new quiz name
            if len(newQn) == 0 :
                print("Please enter a proper quiz name")
                tOrF = True
            else:
                checkQn = checkQuizOne(newQn) # check if quiz already exist, if exist, print already exist
                if checkQn == False: # if quiz doesn't exist, check for comma
                    verifyNewQn = checkComma(newQn)
                    if verifyNewQn == True: # if got comma, print error message
                        print("Please do not enter (, or | or &) in your quiz name.")
                        tOrF = True
                    else: # if don't have comma, continue on to
                        tOrF = False
                else:
                    print("Quiz name already exists. Try again.")
                    tOrF = True
        def addNewTopics():
            # Build the "&&topic,count&&topic,count" suffix for the quiz record,
            # merging duplicate topic entries by summing their question counts.
            topics = 0
            vessel = []
            tOrF = True
            print(styleStr('\nInclude minimally 2 different topics in the quiz.',rgb=(255,255,0)))
            while tOrF:
                # ask for 1st topic to include inside
                print("\nWhich topic do you want to include in the quiz?")
                firstTopic = chooseTopic()
                # ask for how many questions to add, MUST VERIFY IT IS A VALID INPUT
                # NOTE(review): float(noOfQuestions) raises ValueError on
                # non-numeric input before intCheck can reject it -- confirm.
                integer = True
                while integer:
                    noOfQuestions = input("How many questions do you want to add (0-60)? ")
                    integer = intCheck(float(noOfQuestions))
                vessel.append(f"{firstTopic},{noOfQuestions}") # queue this topic,count pair for merging below
                topics += 1
                if topics > 1:
                    # ask to stop or add more topics
                    newLine = ''
                    # NOTE(review): vessel is mutated while it is iterated; this
                    # works for the expected 2-3 entries but is fragile.
                    for a in vessel:
                        aSplit = a.split(sep=',')
                        vessel.remove(a)
                        for b in vessel:
                            bSplit = b.split(sep=',')
                            if bSplit[0] == aSplit[0]:
                                vessel.remove(b)
                                newCount = str(int(bSplit[1]) + int(aSplit[1]))
                                vessel.insert(0,f"{aSplit[0]},{newCount}")
                                break
                        else:
                            # for-else: no duplicate found, keep the entry as-is
                            vessel.insert(0,a)
                    for i in vessel:
                        iSplit = i.split(sep=',')
                        newLine += f"&&{iSplit[0]},{iSplit[1]}"
                    if len(vessel) == 2:
                        tester = True
                        while tester:
                            moreTopics = input("\nDo you want to add more topics (y|n): ")
                            # if stop:
                            if moreTopics == "n":
                                # could strip and add the list items into a string before returning
                                return newLine
                            # elif add more topics
                            elif moreTopics == "y":
                                tester = False
                                tOrF = True
                            else:
                                print("\nInvalid input, try again.")
                                tester = True
        newTopics = addNewTopics()
        newQuizString = f"{school['course']}|{school['module']}|{newQn}|{newTopics}"
        regNewQuiz(newQuizString)
        print("Registration successful!")
        rmSpaceQuiz()
        defineOptions()
    elif adminInput == "e": # delete quiz
        tOrF = True
        while tOrF == True:
            newQn = input("Enter the quiz name to delete (case-sensitive): ") # ask for user's input on quiz name
            checkQn = checkQuizOne(newQn) # check if quiz already exist, if exist, print already exist
            if checkQn == True:
                if dblConfirm() == True: # ask for user's input to double confirm deletion, if not, back to menu
                    deleteQuiz(newQn)
                    defineOptions()
                else:
                    print("You entered (n), returning you to the menu.")
                    defineOptions()
                    # back to menu
            else:
                print("Quiz doesn't exist, sending you back to the menu.")
                tOrF = True
                # send back to menu
                defineOptions()
    elif adminInput == "f": # f) Retrieve Question List
        qnList() # print list of questions only from question_pool.csv
        defineOptions() # back to menu
    elif adminInput == "g": # g) back to previous
        startAdmin()
    else: # invalid input
        print("You have not entered a valid option")
        defineOptions()
############################################ Plus Minus Attempts ############################################
def plusMinusA():
    """Admin menu for adjusting every user's remaining quiz attempts.

    Each action delegates to a module-level helper, prints a confirmation,
    then re-enters this menu; option "e" returns to the main admin menu.
    """
    global userID
    print("\n\ta) Reset All Attempts")
    print("\tb) Plus 1 Attempt to all users")
    print("\tc) Minus 1 Attempt to all users")
    print("\td) Set to unlimited attempts")
    print("\te) back to previous")
    choice = input("\n\tSelect option: ")
    # map each menu letter to (action, confirmation message)
    actions = {
        "a": (resetAttempt, "Successfully reset all attempts to users."),
        "b": (plusAttempt, "Successfully add 1 attempt to users."),
        "c": (minusAttempt, "Successfully minus 1 attempt to users."),
        "d": (unlimitedAttempt, "Successfully set attempts to unlimited."),
    }
    if choice == "e":
        startAdmin()
    elif choice in actions:
        action, message = actions[choice]
        action()
        print(message)
        plusMinusA()
    else:
        print("You have not entered a valid option.")
        plusMinusA()
############################################ Start of Generating Report ############################################
# Generate Report Function - quiz_results.csv
def genReport():
    """Admin reporting menu: print the results for one quiz or for all quizzes.

    Reads a menu choice from stdin; every branch re-enters this menu (or
    startAdmin) recursively, matching the other admin menus in this file.
    """
    print("\n\ta) Retrieve Report of Specific Quiz")
    print("\tb) Retrieve Report of All")
    print('\tc) Back to previous menu')
    userInput = input("\n\tSelect Option: ")
    if userInput == "a":
        selectedQuiz = chooseQuiz()
        if selectedQuiz == False:
            print("\nNo existing quiz currently.")
            genReport()
            # BUG FIX: previously execution fell through here once the
            # recursive menu call returned, querying quizResults(False).
            return
        resultsList = quizResults(selectedQuiz)  # result rows for the chosen quiz
        for r, row in enumerate(resultsList, start=1):
            print(str(r) + '. ' + row)
        genReport()
    elif userInput == 'b':
        resultList()  # get quiz results from quiz_results.csv
        genReport()  # return back to menu
    elif userInput == 'c':
        startAdmin()
    else:  # invalid input
        print("\n\tInvalid input.")
        genReport()  # return back to menu
############################################ Start of Admin Program ##########################################
def startAdmin():
    """Top-level admin menu: dispatch to the requested admin sub-menu."""
    print("\na) User Functions")
    print("b) Define various options")
    print("c) Setup the pool of quiz questions")
    print("d) Plus Minus Attempts")
    print("e) Retrieve Results")
    print("f) Exit")
    adminInput = input("\nSelect option: ")
    # one handler per menu letter; "f" terminates the program
    menu = {
        "a": registerUser,
        "b": setupQuiz,
        "c": defineOptions,
        "d": plusMinusA,
        "e": genReport,
        "f": exit,
    }
    handler = menu.get(adminInput)
    if handler is None:
        print("You have not entered a valid option.")
        startAdmin()
    else:
        handler()
############################################ Start of Admin Login ##########################################
def adminLogin():
    """Prompt for an admin ID and password, then enter the admin menu.

    Allows at most 3 user-ID attempts and 3 password attempts, tracked in
    the module-level idCounter/passCounter globals; exceeding either limit
    terminates the program via exit().
    """
    global userID
    global idCounter
    global passCounter
    idCount = True
    tOrF = True
    print("<<<Welcome to Admin Login Page>>> ") # print welcome message
    while tOrF == True and idCount == True:
        userID = input("\nPlease enter the admin user's ID: ") # check if userID exists, if not, ask again
        tOrF = adminORuser(userID)
        if tOrF == True:
            print("Invalid admin user")
            idCounter += 1
        if idCounter == 3 and tOrF == True: # limit id tries
            print("You have tried too many times.")
            exit()
    tOrF = True
    passCount = True
    while tOrF == True and passCount == True:
        userPswd = getpass.getpass("\nPlease enter the admin user's Password: ") # ask for password
        # encrypt with Caesar cipher and check it against userid_pswd.csv, if it's not a match, ask to re-enter
        encryptedPass = encrypt(userPswd)
        tOrF = usrPswd(userID,encryptedPass)
        passCounter += 1  # counts every attempt, including the successful one
        if passCounter == 3 and tOrF == True: # limit password tries
            print("You have tried too many times.")
            passCount = False  # NOTE(review): dead assignment -- exit() below ends the program first
            exit()
    if tOrF == False:
        startAdmin() # if admin login successful, move onto options
adminLogin()
| StarcoderdataPython |
1783795 | #!/usr/bin/python3
import subprocess, os, sys, time, atexit, signal, psutil, datetime
# Watch files/directories for modification and (re)run a shell command.
if len(sys.argv) < 3:
    print("Usage: watcher.py <directories and/or files to watch, comma separated> <command to terminate and repeat> <optional \"forever\">")
    sys.exit(1)
# "forever" mode restarts the command every time it exits on its own
forever = False
if len(sys.argv) > 3 and sys.argv[3] == "forever":
    forever = True
watch = sys.argv[1].split(",")  # comma-separated watch targets
print("COMMAND: " + sys.argv[2])
# shell=True so compound commands work; children are cleaned up in stop_process
process = subprocess.Popen(sys.argv[2], shell=True, stdout=sys.stdout, stderr=sys.stderr)
done = False  # True once the command has finished and we are only watching
# "&&" within the command spawns children. Must vanquish all.
def stop_process():
    """Kill the watched command together with every child process it spawned.

    No-op once the command has already finished on its own (``done`` is True).
    """
    if done:
        return
    root = psutil.Process(process.pid)
    for child in root.children(recursive=True):
        child.kill()
    root.kill()


atexit.register(stop_process)  # make sure nothing is left running on exit
filetime = {}  # path -> last observed st_mtime


def file_changed(file):
    """Record *file*'s current mtime and report whether it changed.

    The first sighting of a path only records it and reports False; later
    calls report True (and log the change) whenever the mtime differs from
    the previously recorded value.
    """
    mtime = os.stat(file).st_mtime
    previous = filetime.get(file)
    filetime[file] = mtime
    if previous is None or previous == mtime:
        return False
    print("CHANGE" + " (" + str(datetime.datetime.now()) + "): " + file)
    return True
def dir_changed(dir):
    """Recursively check whether any file under *dir* changed.

    Skips editor swap files (``*.swp``) and directories named ``"index"``.
    Returns True as soon as one changed file is found.

    BUG FIX: the original built child paths with ``dir + file`` (no path
    separator), which broke for directories not given with a trailing slash
    and for every recursive level; paths are now joined with os.path.join.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isfile(path):
            if not entry.endswith(".swp") and file_changed(path):
                return True
        elif os.path.isdir(path) and entry != "index":
            if dir_changed(path):
                return True
    return False
def any_changed():
    """Return True if any watched file or directory reports a change.

    Watch targets that are neither files nor directories get a zero
    timestamp recorded so a later appearance counts as a change.
    """
    for target in watch:
        if os.path.isfile(target):
            changed = file_changed(target)
        elif os.path.isdir(target):
            changed = dir_changed(target)
        else:
            filetime[target] = 0
            changed = False
        if changed:
            return True
    return False
print(f"WATCHING FOR CHANGES ({datetime.datetime.now()}): {sys.argv[1]}")


def restart():
    """Stop the currently running command (if any) and launch it afresh."""
    global process
    if process:
        stop_process()
    print(f"COMMAND: {sys.argv[2]}")
    process = subprocess.Popen(sys.argv[2], shell=True, stdout=sys.stdout, stderr=sys.stderr)
# Main watch loop: poll once per second, restart the command on any change,
# and (in "forever" mode) respawn it whenever it exits on its own.
while True:
    changed = False
    # NOTE(review): 'changed' is never set True, so this inner loop runs
    # forever and the outer loop never advances -- confirm intended.
    while not changed:
        # reap the command if it exited on its own
        if process and process.poll() is not None and not done:
            if forever:
                process = None  # already dead; restart() must not try to kill it
                restart()
            else:
                done = True
                print("DONE" + " (" + str(datetime.datetime.now()) + "), WATCHING FOR CHANGES: " + sys.argv[1])
        time.sleep(1)  # filesystem poll interval
        if any_changed():
            restart()
            done = False
| StarcoderdataPython |
136686 | <reponame>Robin5605/site<filename>pydis_site/apps/content/tests/test_utils.py<gh_stars>100-1000
from pathlib import Path
from django.http import Http404
from pydis_site.apps.content import utils
from pydis_site.apps.content.tests.helpers import (
BASE_PATH, MockPagesTestCase, PARSED_CATEGORY_INFO, PARSED_HTML, PARSED_METADATA
)
class GetCategoryTests(MockPagesTestCase):
    """Tests for the get_category function."""

    def test_get_valid_category(self):
        # A directory containing an _info.yml is a valid category
        result = utils.get_category(Path(BASE_PATH, "category"))

        self.assertEqual(result, {"title": "Category Name", "description": "Description"})

    def test_get_nonexistent_category(self):
        # Missing directories surface as HTTP 404 to the view layer
        with self.assertRaises(Http404):
            utils.get_category(Path(BASE_PATH, "invalid"))

    def test_get_category_with_path_to_file(self):
        # Valid categories are directories, not files
        with self.assertRaises(Http404):
            utils.get_category(Path(BASE_PATH, "root.md"))

    def test_get_category_without_info_yml(self):
        # Categories should provide an _info.yml file
        with self.assertRaises(FileNotFoundError):
            utils.get_category(Path(BASE_PATH, "tmp/category/subcategory_without_info"))
class GetCategoriesTests(MockPagesTestCase):
    """Tests for the get_categories function."""

    def test_get_root_categories(self):
        # All three mock root-level directories share the same parsed info
        result = utils.get_categories(BASE_PATH)

        info = PARSED_CATEGORY_INFO
        categories = {
            "category": info,
            "tmp": info,
            "not_a_page.md": info,
        }
        self.assertEqual(result, categories)

    def test_get_categories_with_subcategories(self):
        # Nested category directories are discovered one level deep
        result = utils.get_categories(Path(BASE_PATH, "category"))

        self.assertEqual(result, {"subcategory": PARSED_CATEGORY_INFO})

    def test_get_categories_without_subcategories(self):
        # A leaf category yields an empty mapping
        result = utils.get_categories(Path(BASE_PATH, "category/subcategory"))

        self.assertEqual(result, {})
class GetCategoryPagesTests(MockPagesTestCase):
    """Tests for the get_category_pages function."""

    def test_get_pages_in_root_category_successfully(self):
        """The method should successfully retrieve page metadata."""
        root_category_pages = utils.get_category_pages(BASE_PATH)

        # Pages without front matter fall back to empty metadata
        self.assertEqual(
            root_category_pages, {"root": PARSED_METADATA, "root_without_metadata": {}}
        )

    def test_get_pages_in_subcategories_successfully(self):
        """The method should successfully retrieve page metadata."""
        category_pages = utils.get_category_pages(Path(BASE_PATH, "category"))

        # Page metadata is properly retrieved
        self.assertEqual(category_pages, {"with_metadata": PARSED_METADATA})
class GetPageTests(MockPagesTestCase):
    """Tests for the get_page function."""

    def test_get_page(self):
        # TOC is a special case because the markdown converter outputs the TOC as HTML
        updated_metadata = {**PARSED_METADATA, "toc": '<div class="toc">\n<ul></ul>\n</div>\n'}
        # (description, page path, expected HTML, expected metadata)
        cases = [
            ("Root page with metadata", "root.md", PARSED_HTML, updated_metadata),
            ("Root page without metadata", "root_without_metadata.md", PARSED_HTML, {}),
            ("Page with metadata", "category/with_metadata.md", PARSED_HTML, updated_metadata),
            ("Page without metadata", "category/subcategory/without_metadata.md", PARSED_HTML, {}),
        ]
        for msg, page_path, expected_html, expected_metadata in cases:
            with self.subTest(msg=msg):
                html, metadata = utils.get_page(Path(BASE_PATH, page_path))
                self.assertEqual(html, expected_html)
                self.assertEqual(metadata, expected_metadata)

    def test_get_nonexistent_page_returns_404(self):
        with self.assertRaises(Http404):
            utils.get_page(Path(BASE_PATH, "invalid"))
| StarcoderdataPython |
57677 | <reponame>iamgroot42/opacus
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import hypothesis.strategies as st
import torch
import torch.nn as nn
from hypothesis import given, settings
from .common import GradSampleHooks_test
class GroupNorm_test(GradSampleHooks_test):
    """
    We only test the case with ``affine=True`` here, because it is the only case that will actually
    compute a gradient. There is no grad_sample from this module otherwise.
    """

    @given(
        N=st.integers(1, 4),
        C=st.integers(1, 8),
        H=st.integers(5, 10),
        W=st.integers(4, 8),
        num_groups=st.sampled_from([1, 4, "C"]),
    )
    @settings(deadline=10000)
    def test_3d_input_groups(
        self,
        N: int,
        C: int,
        H: int,
        W: int,
        num_groups: Union[int, str],
    ):
        # "C" is a sentinel meaning one group per channel
        if num_groups == "C":
            num_groups = C

        # GroupNorm requires num_channels to be divisible by num_groups;
        # silently skip hypothesis-generated combinations that are not
        if C % num_groups != 0:
            return

        x = torch.randn([N, C, H, W])
        norm = nn.GroupNorm(num_groups=num_groups, num_channels=C, affine=True)
        self.run_test(x, norm, batch_first=True)
| StarcoderdataPython |
1639861 | #!/usr/bin/env python3.6
from user import User
from credential import Credential
def create_user(fname, lname, email, Password):
    '''
    Build and return a new User from the given account details.
    '''
    return User(fname, lname, email, Password)
def save_users(user):
    '''
    Persist *user* via the User model's own save_user() method.
    '''
    user.save_user()


def del_user(user):
    '''
    Remove *user* from the saved users via the User model.
    '''
    user.delete_user()
def find_user(lname):
    '''
    Look up a saved user by last name.

    BUG FIX: the original called User.find_by_last_name() without passing
    *lname*, so the argument was silently ignored (the sibling lookup
    helpers in this module all forward their name argument).
    '''
    return User.find_by_last_name(lname)
def check_existing_user(lname):
    '''
    Return True if a user with last name *lname* has been saved.
    '''
    return User.user_exists(lname)


def display_user():
    '''
    Return the collection of saved users.
    '''
    return User.display_user()


def login_auth(last_name, password):
    '''
    Return True if *last_name* and *password* match a saved user.
    '''
    return User.check_user(last_name, password)
# ________________________________CREDENTIALS_______________________________________
def create_credential(site_name, userName, password, emailUsed):
    '''
    Build and return a new Credential for the given site details.
    '''
    return Credential(site_name, userName, password, emailUsed)
def save_credential(credential):
    '''
    Persist *credential* via the Credential model.
    '''
    credential.save_credential()


def del_credential(credential):
    '''
    Remove *credential* from the saved credentials.
    '''
    credential.delete_credential()


def find_credential(name):
    '''
    Return the saved credential whose site name matches *name*.
    '''
    return Credential.find_by_name(name)


def check_credential_exists(name):
    '''
    Return True if a credential exists for site *name*.
    '''
    return Credential.credential_exist(name)


def displaytarget_credential(name):
    # NOTE(review): duplicate of find_credential -- kept because main() calls it
    return Credential.find_by_name(name)


def display_credential():
    '''
    Return all saved credentials.
    '''
    return Credential.display_credential()
def main():
    """Top-level Passlock console loop: sign-up, account display and login.

    BUG FIXES versus the original:
    - the credential listing f-string contained the redaction artifact
      ``{<PASSWORD>}``, which is a SyntaxError; it now prints
      ``credential.password``.
    - ``print('/n')`` (literal "/n") corrected to ``print('\\n')``.
    """
    print("Hello Welcome to Passlock.What is your name?")
    user_name = input()
    print(f"Hello {user_name}.to sign up to Passlock create an account.")
    print('\n')
    while True:
        print("Use these short codes to navigate the app :\n cu -> Sign up.\n da -> Display your account.\n ln -> "
              "Login.\n ex ->exit Passlock.")
        short_code = input().lower()
        if short_code == 'cu':
            # account creation
            print("Create a new Passlock Account")
            print("-" * 100)
            print("First name ....")
            f_name = input()
            print("Last name....")
            l_name = input()
            print("Email address ...")
            e_address = input()
            print("Your Password .....")
            print("*" * 40)
            pwd = input()
            save_users(create_user(f_name, l_name, e_address, pwd))
            print('\n')
            print(f" A New account for {f_name}{l_name} created")
            print(f"You can now login to your account {l_name} using your Password.")
            print('\n')
        elif short_code == 'da':
            # show the saved account, if any
            if display_user():
                print("Here is your account Details ")
                print('\n')
                for user in display_user():
                    print(f"Full name:{user.first_name} {user.last_name} user name:{user.last_name} Email {user.email}")
                print('\n')
            else:
                print('\n')
                print("You currently dont have an account yet. Sign up to create user account")
                print('\n')
        elif short_code == "ln":
            # login, then enter the credentials sub-menu
            print("please enter your last name")
            lname = input()
            print("\n Enter your Password")
            pwd = input()
            if check_existing_user(lname):
                if login_auth(lname, pwd):
                    while True:
                        print('_' * 60)
                        print(f"login successful .{lname} you are logged in")
                        print('_' * 60)
                        print('''
                        USE THESE SHORT CODES TO NAVIGATE THE APP:
                        cc -> Create Credential.
                        dc -> Display your credentials.
                        dltc -> Delete credential.
                        lut->Log out of your account.''')
                        short_code = input().lower()
                        if short_code == "cc":
                            print("Create new Credential")
                            print('-' * 20)
                            print("Site name:...")
                            site_name = input()
                            print(f"{site_name} userName:....")
                            usr_name = input()
                            print(f"{site_name} password:..")
                            print('*' * 20)
                            password = input()
                            print(f"{site_name} Email used:")
                            email = input()
                            save_credential(create_credential(site_name, usr_name, password, email))
                            print('\n')
                            print(f"A new {site_name} credential has been created.")
                            print('\n')
                        elif short_code == "dc":
                            if display_credential():
                                print("Here are credentials")
                                print('\n')
                                for credential in display_credential():
                                    # FIXED: was the invalid placeholder {<PASSWORD>}
                                    print(
                                        f"Site: {credential.site_name} username:{credential.userName} Password:{credential.password} ")
                                print('\n')
                            else:
                                print('\n')
                                print("You currently don't have any credentials to display")
                                print('\n')
                        elif short_code == "dltc":
                            print("Enter the site name for the credential you want to delete!")
                            site = input()
                            sitefound = displaytarget_credential(site)
                            if check_credential_exists(site):
                                del_credential(sitefound)
                                print("Credentials for this site have been deleted ")
                            else:
                                print("Site not found")
                        elif short_code == "lut":
                            print('\n')
                            print(f"you have logged out of your {lname} account")
                            print('\n')
                            break
                else:
                    print("Wrong Password or user name Please try again!")
            else:
                print("THIs user is currently none existent Please up first")
        elif short_code == "ex":
            print(f"Thanks {user_name} and feel free to recomend our services to your friends ")
            break
        else:
            print("I really didnt get that.Please use the short codes")


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3395077 | import shutil
from os import path
from pathlib import Path
from embers.rf_tools.align_data import (plot_savgol_interp, save_aligned,
savgol_interp)
# Save the path to this directory
dirpath = path.dirname(__file__)

# Obtain path to directory with test_data
test_data = path.abspath(path.join(dirpath, "../data"))

# Shared module-level fixture: align one reference/tile observation pair once
# and reuse the arrays in the shape/time tests below.
(ref_ali, tile_ali, time_array, _, _, _, _) = savgol_interp(
    f"{test_data}/rf_tools/rf_data/rf0XX/2019-10-01/rf0XX_2019-10-01-14:30.txt",
    f"{test_data}/rf_tools/rf_data/S06XX/2019-10-01/S06XX_2019-10-01-14:30.txt",
    savgol_window_1=11,
    savgol_window_2=15,
    polyorder=2,
    interp_type="cubic",
    interp_freq=1,
)

# Writes the aligned .npz file under {test_data}/rf_tools and returns a status string
out_str = save_aligned(
    ("rf0XX", "S06XX"),
    "2019-10-01-14:30",
    11,
    15,
    2,
    "cubic",
    1,
    f"{test_data}/rf_tools/rf_data",
    f"{test_data}/rf_tools",
)
def test_savgol_interp_ref_tile_shape():
    # Reference and tile data must be interpolated onto identical grids
    assert ref_ali.shape == tile_ali.shape


def test_savgol_interp_ref_time_shape():
    # Known sample count for the bundled 2019-10-01-14:30 observation
    assert time_array.shape[0] == 1779


def test_savgol_interp_ref_time_arrow():
    # Timestamps must not run backwards
    assert time_array[0] <= time_array[-1]
def test_plot_savgol_interp():
    # Renders the diagnostic plot into the current directory and verifies
    # the expected PNG appears; the file is removed afterwards.
    plot_savgol_interp(
        ref=f"{test_data}/rf_tools/rf_data/rf0XX/2019-10-01/rf0XX_2019-10-01-14:30.txt",
        tile=f"{test_data}/rf_tools/rf_data/S06XX/2019-10-01/S06XX_2019-10-01-14:30.txt",
        savgol_window_1=11,
        savgol_window_2=15,
        polyorder=2,
        interp_type="cubic",
        interp_freq=1,
        channel=14,
        out_dir=".",
    )
    png = Path("savgol_interp_sample.png")
    assert png.is_file() is True
    # clean up the artefact so repeated runs start fresh
    if png.is_file() is True:
        png.unlink()
def test_save_aligned_str():
    # The module-level save_aligned call reports where it wrote the file
    assert (
        out_str
        == f"Saved aligned file to {test_data}/rf_tools/2019-10-01/2019-10-01-14:30/rf0XX_S06XX_2019-10-01-14:30_aligned.npz"
    )


def test_save_aligned_file():
    # The reported .npz must actually exist; remove the tree afterwards
    ali_file = Path(
        f"{test_data}/rf_tools/2019-10-01/2019-10-01-14:30/rf0XX_S06XX_2019-10-01-14:30_aligned.npz"
    )
    assert ali_file.is_file() is True
    if ali_file.is_file() is True:
        shutil.rmtree(f"{test_data}/rf_tools/2019-10-01")
def test_save_aligned_err():
    # A timestamp with no matching raw data files should surface as a
    # returned FileNotFoundError rather than a crash
    out_str = save_aligned(
        ("rf0XX", "S06XX"),
        "2019-10-01-14:00",
        11,
        15,
        2,
        "cubic",
        1,
        f"{test_data}/rf_tools/rf_data",
        f"{test_data}/rf_tools",
    )
    assert type(out_str).__name__ == "FileNotFoundError"
| StarcoderdataPython |
4842233 | <reponame>Philipuss1/cloob<gh_stars>1-10
import argparse
def get_default_params(model_name):
    """Return the default Adam hyper-parameters for *model_name*.

    Values follow the CLIP paper (https://arxiv.org/pdf/2103.00020.pdf);
    unknown model names yield an empty dict so callers fall back to their
    own defaults.
    """
    resnet_defaults = {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
    vit_defaults = {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
    if model_name in ("RN50", "RN101", "RN50x4"):
        return resnet_defaults
    if model_name == "ViT-B/32":
        return vit_defaults
    return {}
def parse_args():
    """Build and parse the command-line arguments for CLOOB/CLIP training.

    Returns:
        argparse.Namespace: parsed arguments, with two post-processing
        steps applied:
          * ``args.aggregate`` is set to the negation of ``--skip-aggregate``;
          * any of ``lr``/``beta1``/``beta2``/``eps`` left at ``None`` is
            filled in from ``get_default_params(args.model)`` (paper values).
    """
    parser = argparse.ArgumentParser()

    # --- data sources ---
    parser.add_argument(
        "--train-data",
        type=str,
        default=None,
        # fixed typo: "filewith" -> "file with"
        help="Path to csv file with training data",
    )
    parser.add_argument(
        "--val-data",
        type=str,
        default=None,
        help="Path to csv file with validation data",
    )
    parser.add_argument(
        "--path-data",
        type=str,
        default=None,
        help="Path to the datafiles",
    )
    parser.add_argument(
        "--dataset-type",
        choices=["webdataset", "csv", "auto", "synthetic"],
        default="auto",
        help="Which type of dataset to process."
    )
    parser.add_argument(
        "--csv-separator",
        type=str,
        default="\t",
        help="For csv-like datasets, which separator to use."
    )
    parser.add_argument(
        "--csv-img-key",
        type=str,
        default="filepath",
        help="For csv-like datasets, the name of the key for the image paths."
    )
    parser.add_argument(
        "--csv-caption-key",
        type=str,
        default="title",
        help="For csv-like datasets, the name of the key for the captions."
    )
    parser.add_argument(
        "--imagenet-val",
        type=str,
        default=None,
        help="Path to imagenet val set for conducting zero shot evaluation.",
    )
    parser.add_argument(
        "--imagenet-v2",
        type=str,
        default=None,
        help="Path to imagenet v2 for conducting zero shot evaluation.",
    )

    # --- logging / experiment naming ---
    parser.add_argument(
        "--logs",
        type=str,
        default="./logs/",
        help="Where to store tensorboard logs. Use None to avoid storing logs.",
    )
    parser.add_argument(
        "--name",
        type=str,
        default=None,
        help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
    )

    # --- training loop ---
    parser.add_argument(
        "--workers", type=int, default=1, help="Number of workers per GPU."
    )
    parser.add_argument(
        "--batch-size", type=int, default=64, help="Batch size per GPU."
    )
    parser.add_argument(
        "--batch-size-eval", type=int, default=256, help="Batch size during evaluation (on one GPU)."
    )
    parser.add_argument(
        "--epochs", type=int, default=32, help="Number of epochs to train for."
    )

    # --- optimizer; None means "use get_default_params(model)" ---
    parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
    parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
    parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
    parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
    parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
    parser.add_argument(
        "--warmup", type=int, default=10000, help="Number of steps to warmup for."
    )
    parser.add_argument("--lr-scheduler", choices=["cosine", "cosine-restarts"], default="cosine", help="LR scheduler")
    parser.add_argument("--restart-cycles", type=int, default=1,
                        help="Number of restarts when using LR scheduler with restarts")
    parser.add_argument("--use-bn-sync",
                        default=False,
                        action="store_true",
                        help="Whether to use batch norm sync.")
    parser.add_argument(
        "--gpu",
        type=int,
        default=None,
        help="Specify a single GPU to run the code on for debugging."
        "Leave at None to use all available GPUs.",
    )
    parser.add_argument(
        "--skip-scheduler",
        action="store_true",
        default=False,
        help="Use this flag to skip the learning rate decay.",
    )

    # --- evaluation / checkpointing frequency ---
    parser.add_argument(
        "--save-frequency", type=int, default=1, help="How often to save checkpoints."
    )
    parser.add_argument(
        "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
    )
    parser.add_argument(
        # fixed copy-pasted help text (said "zero shot")
        "--regression-frequency", type=int, default=2, help="How often to run regression."
    )
    parser.add_argument(
        "--resume",
        default=None,
        type=str,
        help="path to latest checkpoint (default: none)",
    )
    parser.add_argument(
        "--precision",
        choices=["amp", "fp16", "fp32"],
        default="amp",
        help="Floating point precision."
    )

    # --- model / method selection ---
    parser.add_argument(
        "--model",
        choices=["RN50", "RN101", "RN50x4", "ViT-B/32"],
        default="RN50",
        help="Name of the vision backbone to use.",
    )
    parser.add_argument(
        "--method",
        choices=["cloob", "clip"],
        default="cloob",
        help="Choice of method (default: cloob)"
    )
    parser.add_argument("--init-inv-tau", type=float, default=14.3, help="Initial inverse tau.")
    parser.add_argument("--init-scale-hopfield", type=float, default=14.3, help="Initial scale for Hopfield retrieval.")
    parser.add_argument(
        "--learnable-inv-tau",
        default=False,
        action='store_true',
        help='Use a trainable logit scale for the nce loss.'
    )
    parser.add_argument(
        "--learnable-scale-hopfield",
        default=False,
        action='store_true',
        help='Use a trainable logit scale for the Hopfield retrieval.'
    )
    parser.add_argument(
        "--openai-pretrained",
        default=False,
        action='store_true',
        help="Use the openai pretrained models.",
    )

    # arguments for distributed training
    parser.add_argument(
        "--dist-url",
        default="tcp://127.0.0.1:6100",
        type=str,
        help="url used to set up distributed training",
    )
    parser.add_argument(
        "--dist-backend", default="nccl", type=str, help="distributed backend"
    )
    parser.add_argument(
        "--skip-aggregate",
        default=False,
        action="store_true",
        help="whether to aggregate features across gpus before computing the loss"
    )

    # --- reporting ---
    parser.add_argument(
        "--report-to",
        default='',
        type=str,
        help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']"
    )
    parser.add_argument(
        "--wandb-notes",
        default='',
        type=str,
        help="Notes if logging with wandb"
    )
    parser.add_argument(
        "--C", type=float, default=3.16, help="inverse regularizer for logistic reg."
    )

    # --- debugging / execution mode ---
    parser.add_argument(
        "--debug",
        default=False,
        action="store_true",
        help="If true, more information is logged."
    )
    parser.add_argument(
        "--debug-run",
        default=False,
        action="store_true",
        help="If true, only subset of data is used."
    )
    parser.add_argument(
        "--copy-codebase",
        default=False,
        action="store_true",
        # fixed typo: "diretory" -> "directory"
        help="If true, we copy the entire base on the log directory, and execute from there."
    )
    parser.add_argument(
        "--dp",
        default=False,
        action="store_true",
        help="Use DP instead of DDP."
    )
    parser.add_argument(
        "--multigpu",
        default=None,
        # parse a comma-separated GPU list, e.g. "0,1,2" -> [0, 1, 2]
        type=lambda x: [int(a) for a in x.split(",")],
        help="In DP, which GPUs to use for multigpu training",
    )
    parser.add_argument("--seed", default=1234, type=int, help="Seed for reproducibility")

    args = parser.parse_args()
    args.aggregate = not args.skip_aggregate

    # If some params are not passed, we use the default values based on model name.
    default_params = get_default_params(args.model)
    for name, val in default_params.items():
        if getattr(args, name) is None:
            setattr(args, name, val)

    return args
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.