id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3321608 | <reponame>juanluisrosaramos/keras_to_tf_to_tflite
import tensorflow as tf
import numpy as np
from pathlib import Path
from absl import flags
from absl import app
from absl import logging
# Command-line flags (absl): model paths, tensor names and input shape
# used by the Keras/TF -> TFLite conversion below.
FLAGS = flags.FLAGS
flags.DEFINE_string('input_model', None, 'Path to the input model.')
flags.DEFINE_string('input_arrays', None, 'Name of the input layer')
flags.DEFINE_string('output_arrays', None, 'Name of the output layer')
flags.DEFINE_string('output_model', None, 'Path where the converted model will '
                                          'be stored.')
flags.DEFINE_string('input_shape', None, 'Shape of the inputs')
# Only the two paths are mandatory; the remaining flags default to None.
flags.mark_flag_as_required('input_model')
flags.mark_flag_as_required('output_model')
def convert(input_model, input_arrays, output_arrays, input_shape, output_model):
    """Convert a frozen TensorFlow GraphDef to a TFLite flatbuffer file.

    Args:
        input_model: path to the frozen ``.pb`` graph file.
        input_arrays: name of the input tensor (a single string).
        output_arrays: name of the output tensor (a single string).
        input_shape: shape of the input tensor, e.g. ``[1, 224, 224, 3]``.
        output_model: path where the ``.tflite`` file is written.
    """
    # Converting a GraphDef from file. The TF1.x converter expects lists of
    # tensor names, so wrap the single names.
    input_arrays = [input_arrays]
    output_arrays = [output_arrays]
    # NOTE(review): the shape override is keyed on the literal name "input"
    # rather than input_arrays[0] -- confirm the graph's input tensor really
    # is named "input", otherwise the shape is silently ignored.
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        input_model, input_arrays, output_arrays, input_shapes={"input": input_shape})
    tflite_model = converter.convert()
    # Use a context manager so the file handle is always closed (the original
    # open(...).write(...) leaked the handle).
    with open(output_model, "wb") as model_file:
        model_file.write(tflite_model)
def test(output_model):
    """Smoke-test a converted TFLite model by running one random inference."""
    # Load TFLite model and allocate tensors.
    tflite_interpreter = tf.lite.Interpreter(model_path=str(output_model))
    tflite_interpreter.allocate_tensors()
    # Inspect the model's input/output signature.
    in_details = tflite_interpreter.get_input_details()
    out_details = tflite_interpreter.get_output_details()
    print(in_details)
    print('---------------------------------------------')
    print(out_details)
    # Feed a random tensor matching the expected input shape.
    shape = in_details[0]['shape']
    random_input = np.array(np.random.random_sample(shape), dtype=np.float32)
    tflite_interpreter.set_tensor(in_details[0]['index'], random_input)
    tflite_interpreter.invoke()
    # `get_tensor()` returns a copy of the tensor data; use `tensor()` for a
    # pointer instead. The result is only fetched, not printed.
    _ = tflite_interpreter.get_tensor(out_details[0]['index'])
def main(args):
    """Entry point: resolve output paths, convert the model, then smoke-test it."""
    # If output_model path is relative and in cwd, make it absolute from root.
    output_model = FLAGS.output_model
    if str(Path(output_model).parent) == '.':
        output_model = str(Path.cwd() / output_model)
    output_fld = Path(output_model).parent
    output_model_name = Path(output_model).name
    output_model_pbtxt_name = output_model_name + '.tflite'
    output_model_name = Path(output_fld, output_model_pbtxt_name)
    print(output_model_name)
    # Create the output directory itself if it does not exist.
    # BUGFIX: the original called Path(output_fld).parent.mkdir(...), which
    # created the *grandparent* of the output file instead of its directory.
    Path(output_fld).mkdir(parents=True, exist_ok=True)
    # Parse the comma-separated shape string into integers.
    # BUGFIX: the original passed a list of strings to the converter; the
    # TFLite converter requires numeric dimensions.
    input_shape = [int(dim) for dim in FLAGS.input_shape.split(",")]
    convert(FLAGS.input_model, FLAGS.input_arrays, FLAGS.output_arrays,
            input_shape, output_model_name)
    logging.info('Saved the tflite export at %s',
                 str(output_model_name))
    test(output_model_name)
if __name__ == "__main__":
app.run(main) | StarcoderdataPython |
3463603 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0913-Cat-and-Mouse.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-04
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0913 - (Hard) - Cat and Mouse
https://leetcode.com/problems/cat-and-mouse/
Description:
A game on an undirected graph is played by two players, Mouse and Cat, who alternate turns.
The graph is given as follows: graph[a] is a list of all nodes b such that a-b is an edge of the graph.
The mouse starts at node 1 and goes first, the cat starts at node 2 and goes second, and there is a hole at node 0.
During each player's turn, they must travel along one edge of the graph that meets where they are.
For example, if the Mouse is at node 1, it must travel to any node in graph[1].
Additionally, it is not allowed for the Cat to travel to the Hole (node 0.)
Then, the game can end in three ways:
If ever the Cat occupies the same node as the Mouse, the Cat wins.
If ever the Mouse reaches the Hole, the Mouse wins.
If ever a position is repeated (i.e., the players are in the same position as a previous turn,
and it is the same player's turn to move), the game is a draw.
Requirement:
Given a graph, and assuming both players play optimally, return
1 if the mouse wins the game,
2 if the cat wins the game, or
0 if the game is a draw.
Example 1:
Input: graph = [[2,5],[3],[0,4,5],[1,4,5],[2,3],[0,2,3]]
Output: 0
Example 2:
Input: graph = [[1,3],[0],[3],[0,2]]
Output: 1
Constraints:
3 <= graph.length <= 50
1 <= graph[i].length < graph.length
0 <= graph[i][j] < graph.length
graph[i][j] != i
graph[i] is unique.
The mouse and the cat can always move.
"""
class Solution:
    """LeetCode 0913 "Cat and Mouse" solver using game theory + memoized DFS/DP."""

    def __init__(self):
        # Outcome codes, matching the values the problem statement requires.
        self.GAME_DRAW = 0
        self.MOUSE_WIN = 1
        self.CAT_WIN = 2

    def catMouseGame(self, graph: List[List[int]]) -> int:
        # exception case
        if not isinstance(graph, list) or len(graph) <= 0:
            return 0
        # main method: (Game Theory & Dynamic Programming O(n^4))
        return self._catMouseGame(graph)

    def _catMouseGame(self, graph: List[List[int]]) -> int:
        """
        - According to Game Theory [Zermelo's theorem](https://abel.math.harvard.edu/~elkies/FS23j.03/zermelo.pdf)
          In game theory, Zermelo's theorem is a theorem about finite two-person games of perfect information
          in which the players move alternately and in which chance does not affect the decision-making process.
          It says that if the game cannot end in a draw, then one of the two players must have a winning strategy
          (i.e. force a win). An alternate statement is that for a game meeting all of these conditions
          except the condition that a draw is not possible, then either the first-player can force a win,
          or the second-player can force a win, or both players can force a draw.
          The theorem is named after <NAME>, a German mathematician and logician.
          Zermelo's theorem can be proofed by mathematical induction.
        - In this problem, either the mouse wins or the cat wins or draw.
        - Dynamic Programming state: dp[mouse_position][cat_position][turn_count] = -1; 0; 1; 2
        - If mouse_position == 0, then return self.MOUSE_WIN: dp[0][cat_position][turn_count] = self.MOUSE_WIN;
        - If mouse_position == cat_position = x != 0, then return self.CAT_WIN: dp[x][x][turn_count] = self.CAT_WIN;
        - If turn_count >= 2n-1, then return self.GAME_DRAW, because:
          - The turn_count has increased from 0 to 2n-1, which means players have moved 2n-1 turns (mouse first).
          - The mouse have traveled n nodes but didn't reach the hole, so it must have repeated at least one node;
          - And the cat have traveled n-1 nodes but didn't catch the mouse, so it must have repeated too.
          - (The cat can't reach to hole, so there are only n-1 nodes for it to move, including its initial node.)
        """
        n = len(graph)
        max_turn = (n << 1) - 1
        # max_turn = n << 1
        # dp[mouse_position][cat_position][turn_count] = -1: can't tell who will definitely win at the moment
        # dp[mouse_position][cat_position][turn_count] = 0: the game will definitely be a draw
        # dp[mouse_position][cat_position][turn_count] = 1: the mouse will definitely win
        # dp[mouse_position][cat_position][turn_count] = 2: the cat will definitely win
        dp = [[[-1 for _ in range(n << 1)] for _ in range(n)] for _ in range(n)]  # n * n * 2n

        def __get_result_from_dp_state(mouse_position: int, cat_position: int, turn_count: int) -> int:
            """
            - Judge the game result from the current dp state.
            - -1: not sure; 0: must be a draw; 1: the mouse must win; 2: the cat must win.
            - If 0/1/2 cannot be determined from the current state, then call `__get_next_dp_state` to continue game.
            """
            if turn_count == max_turn:  # both the cat and mouse can't win anymore, it must be a draw
                return self.GAME_DRAW
            res = dp[mouse_position][cat_position][turn_count]  # get current state value
            if res != -1:  # if res != -1: the game must be either a draw or mouse's win or cat's win
                return res
            # now, res == -1: can't tell who will definitely win at the moment
            # according to current position state, decide either mouse or cat or no one will definitely win
            if mouse_position == 0:  # the mouse reach the hole, so the mouse will definitely win
                res = self.MOUSE_WIN
            elif cat_position == mouse_position:  # the cat catch the mouse, so the cat will definitely win
                res = self.CAT_WIN
            else:  # can't tell who will definitely win now, so take the next turn and keep moving
                res = __get_result_from_dp_state and __get_next_dp_state(mouse_position, cat_position, turn_count)  # DFS till determine a must-winner
            # now, the dp value has been calculated, the game must be either a draw or the mouse's win or the cat's win
            dp[mouse_position][cat_position][turn_count] = res  # update the dp state tensor
            return res  # return res recursively (note that res won't be -1 now)

        def __get_next_dp_state(mouse_position: int, cat_position: int, turn_count: int) -> int:
            """
            - Entering this function means that the former turn/state cannot determine a winner or the game is a draw,
            - So the game will continue and search every possible position to move
            - After each movement, call `__get_result_from_dp_state` to see if the game result can be determined
            """
            cur_move = cat_position if (turn_count & 0x01) else mouse_position  # odd: cat move; even: mouse move
            # set an impossible res (If this is not the mouse's turn, it cannot win, so does the cat) as default
            default_res = self.MOUSE_WIN if cur_move != mouse_position else self.CAT_WIN
            res = default_res
            # cur_move player move to all possible next_move_position
            for next_move_position in graph[cur_move]:
                if cur_move == cat_position and next_move_position == 0:
                    continue  # cat can't move to the hole (node 0)
                # next_mouse_position is the mouse_position in next dp state;
                # if currently mouse move, then next_mouse_position = next_move_position, else mouse will stay still
                next_mouse_position = next_move_position if cur_move == mouse_position else mouse_position
                next_cat_position = next_move_position if cur_move == cat_position else cat_position
                # do move, and check the next dp state, see if the winner or draw can be determined
                next_res = __get_result_from_dp_state(next_mouse_position, next_cat_position, turn_count + 1)
                # trim DFS tree (note that the return value of __get_result_from_dp_state won't be -1)
                if next_res != default_res:  # this means next_res is not impossible (= 0 or (1 xor 2))
                    res = next_res  # update res, which will be returned to the upper recursion
                    if res != self.GAME_DRAW:  # this means next_res has determined a winner (1 xor 2)
                        break  # game over, stop DFS. (when res is 0, there may be a must-winner later, so keep moving)
            return res

        # initial state: mouse_position = 1, cat_position = 2, turn_count = 0
        return __get_result_from_dp_state(1, 2, 0)
def main():
    """Run the Example-1 instance through Solution and report answer + timing."""
    # Example 1 (expected output: 0)
    adjacency = [[2, 5], [3], [0, 4, 5], [1, 4, 5], [2, 3], [0, 2, 3]]
    # Example 2 (expected output: 1)
    # adjacency = [[1, 3], [0], [3], [0, 2]]
    solver = Solution()
    # Time only the solver call itself (CPU time, not wall time).
    t_begin = time.process_time()
    answer = solver.catMouseGame(adjacency)
    t_end = time.process_time()
    print('\nAnswer:')
    print(answer)
    elapsed_ms = (t_end - t_begin) * 1000
    print('Running Time: %.5f ms' % elapsed_ms)
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
1915153 | <filename>FunHouse_Fume_Extractor/code.py
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
import simpleio
import adafruit_sgp30
import displayio
import adafruit_imageload
from adafruit_emc2101 import EMC2101
from adafruit_funhouse import FunHouse
i2c = board.I2C()
# setup for SGP30 sensor
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
# setup for fan controller
emc = EMC2101(i2c)
print("SGP30 serial #", [hex(i) for i in sgp30.serial])
#SGP30 start-up
sgp30.iaq_init()
sgp30.set_iaq_baseline(0x8973, 0x8AAE)
# FunHouse setup
funhouse = FunHouse(default_bg=0x0F0F00)
# start-up bitmap
bitmap, palette = adafruit_imageload.load("/scene1_fume.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
tile_grid = displayio.TileGrid(bitmap, pixel_shader=palette)
# connecting bitmap
bitmap2, palette2 = adafruit_imageload.load("/scene2_fume.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
grid2 = displayio.TileGrid(bitmap2, pixel_shader=palette2)
# default background
bitmap3, palette3 = adafruit_imageload.load("/scene3_fume.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
grid3 = displayio.TileGrid(bitmap3, pixel_shader=palette3)
# internet connection icon
bitmap4, palette4 = adafruit_imageload.load("/connect_icon.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
icon1 = displayio.TileGrid(bitmap4, pixel_shader=palette4, x = 2, y = 2)
# red x icon
bitmap5, palette5 = adafruit_imageload.load("/x_icon.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
icon2 = displayio.TileGrid(bitmap5, pixel_shader=palette5, x = 2, y = 2)
# display group
group = displayio.Group()
# adding start-up bitmap to group
group.append(tile_grid)
funhouse.splash.append(group)
# text for fume data
fume_text = funhouse.add_text(
text=" ",
text_position=(110, 90),
text_anchor_point=(0.5, 0.5),
text_color=0xf57f20,
text_font="fonts/Arial-Bold-24.pcf",
)
# text for fan RPM data
fan_text = funhouse.add_text(
text=" ",
text_position=(110, 165),
text_anchor_point=(0.5, 0.5),
text_color=0x7fffff,
text_font="fonts/Arial-Bold-24.pcf",
)
# showing graphics
funhouse.display.show(funhouse.splash)
# state machines
run = False # state if main code is running
connected = False # checks if connected to wifi
start_up = False # state for start-up
clock = 0 # time.monotonic() device
# function for sending fume data to adafruit.io
def send_fume_data(solder_fumes):
    """Push the latest fume reading to the Adafruit IO "fumes" feed."""
    funhouse.network.push_to_io("fumes", solder_fumes)
# function for sending fan rpm to adafruit.io
def send_fan_data(fan_rpm):
    """Push the current fan speed value to the Adafruit IO "fan-speed" feed."""
    funhouse.network.push_to_io("fan-speed", fan_rpm)
# Main loop: start-up button handling, then continuously map SGP30 fume
# readings to fan speed and (optionally) report both to Adafruit IO.
while True:
    # if main program has not started
    if not run:
        # if you press the down button
        if funhouse.peripherals.button_down:
            print("run")
            # remove start-up bitmap
            group.remove(tile_grid)
            # add main bitmap
            group.append(grid3)
            # add red x icon to show not connected to internet
            group.append(icon2)
            # change state for main program
            run = True
        # if you press the middle button
        if funhouse.peripherals.button_sel:
            # remove start-up bitmap
            group.remove(tile_grid)
            # add connecting... bitmap
            group.append(grid2)
            # connect to the network
            funhouse.network.connect()
            print("connecting")
            # change state for network
            connected = True
            # start main program
            start_up = True
            # start time.monotonic()
            clock = time.monotonic()
    # after connecting to the internet
    if start_up:
        # remove connecting bitmap
        group.remove(grid2)
        # add main bitmap
        group.append(grid3)
        # add internet icon
        group.append(icon1)
        # start main program
        run = True
        # reset start-up state
        start_up = False
    # run state for main program after selecting whether or not to connect to wifi
    if run:
        # print eCO2 and TVOC data to REPL
        print("eCO2 = %d ppm \t TVOC = %d ppb" % (sgp30.eCO2, sgp30.TVOC))
        # 2 second delay
        time.sleep(2)
        # fumes variable for reading from SGP30
        # comment out either TVOC or eCO2 depending on data preference
        fumes = sgp30.TVOC
        # fumes = sgp30.eCO2
        # mapping fumes data to fan RPM
        # value for TVOC
        mapped_val = simpleio.map_range(fumes, 10, 1000, 10, 100)
        # value for eCO2
        # mapped_val = simpleio.map_range(fumes, 400, 2500, 10, 100)
        # adding fume text
        # PPB is for TVOC, PPM is for eCO2
        funhouse.set_text("%d PPB" % fumes, fume_text)
        # funhouse.set_text("%d PPM" % fumes, fume_text)
        # adding fan's RPM text
        funhouse.set_text("%d%s" % (mapped_val, "%"), fan_text)
        # printing fan's data to the REPL
        print("fan = ", mapped_val)
        # setting fan's RPM
        emc.manual_fan_speed = int(mapped_val)
        # if you're connected to wifi and 15 seconds has passed
        if connected and ((clock + 15) < time.monotonic()):
            # send fume data to adafruit.io
            send_fume_data(fumes)
            # send fan RPM to adafruit.io
            send_fan_data(mapped_val)
            # REPL printout
            print("data sent")
            # reset clock
            clock = time.monotonic()
        # if you're connected to wifi and you press the up button
        if connected and funhouse.peripherals.button_up:
            # the internet icon is removed
            group.remove(icon1)
            # the red x icon is added
            group.append(icon2)
            # reset connected state - no longer sending data to adafruit.io
            connected = False
            # REPL printout
            print("disconnected")
            # 1 second delay
            time.sleep(1)
        # if you're NOT connected to wifi and you press the up button
        if not connected and funhouse.peripherals.button_up:
            # the red x icon is removed
            group.remove(icon2)
            # the internet icon is added
            group.append(icon1)
            # the connection state is true - start sending data to adafruit.io
            connected = True
            # REPL printout
            print("connected")
            # 1 second delay
            time.sleep(1)
| StarcoderdataPython |
from sys import argv

# Mapping from exported blendShape hash names to the Japanese morph names
# expected by the target application. Replaces the original 26-branch
# if/elif chain that duplicated the same write statement in every branch.
EXPRESSION_NAMES = {
    "blendShape.1147791917": 'あ',
    "blendShape.1253204511": 'い',
    "blendShape.1588805200": 'う',
    "blendShape.1124347444": 'え',
    "blendShape.2748434218": 'お',
    "blendShape.1598983058": 'あ2',
    "blendShape.2543255962": 'い2',
    "blendShape.1910529223": 'う2',
    "blendShape.992104086": 'え2',
    "blendShape.3251480092": 'お2',
    "blendShape.2286178617": '笑顔',
    "blendShape.631788340": 'まばたき',
    "blendShape.226821586": '泣き',
    "blendShape.146247590": 'への字',
    "blendShape.456140828": '上ぎ見る',
    "blendShape.2813365169": '下を見て',
    "blendShape.3240688866": '左を見て',
    "blendShape.3105640497": '右を見て',
    "blendShape.2585484937": 'きみ',
    "blendShape.3464135367": '笑い',
    "blendShape.1400781561": 'ウィンク',
    "blendShape.2842757018": 'ウィンク右',
    "blendShape.1948989295": 'にこり',
    "blendShape.890025586": '困る',
    "blendShape.463008637": '困る2',
    "blendShape.3360470953": 'にこり2',
}


def translate_line(line):
    """Translate one CSV line of the form ``name,frame,value``.

    Returns the line with the blendShape hash name replaced by its Japanese
    morph name (unknown names pass through unchanged), or ``None`` if the
    line does not have exactly three comma-separated fields (such lines were
    silently dropped by the original script too).
    """
    data = line.split(',')
    if len(data) != 3:  # which it always is (per the original author)
        return None
    name = EXPRESSION_NAMES.get(data[0], data[0])
    return f'{name},{data[1]},{data[2]}'


def main():
    """Read argv[1] (UTF-8 CSV), translate names, write argv[2] as Shift-JIS."""
    # Note: the source file shadowed the builtins `input`/`open` target names;
    # use descriptive handles instead.
    with open(argv[1], 'r', encoding='UTF-8') as src:
        with open(argv[2], 'w', encoding='Shift-JIS') as dst:
            for line in src:
                translated = translate_line(line)
                if translated is not None:
                    dst.write(translated)


if __name__ == "__main__":
    main()
1627334 | #!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
.. py:module:: examples.TSClient
It works by implementing `both` a wall clock client and a CSS-TS client. A
:class:`~dvbcss.protocol.client.ts.TSClientClockController` object is instantiated
and provided with a :class:`~dvbcss.clock.CorrelatedClock` object to represent
the synchronisation timeline. The controller adjusts the clock object to match
the timeline information coming from the server.
At the command line you must specify:
* the WebSocket URL of the CSS-TS server, in the form `ws://<host>:<port>/<path>`
* a `udp://<host>:<port>` format URL for the Wall Clock server
* The content ID stem and timeline selector to be used when requesting the timeline
* The tick rate of the timeline.
Default options can be overridden for the IP address and port that the Wall Clock client
binds to and to reduce the amount of logging output.
Use the ``--help`` command line option for usage information.
"""
# NOTE: this example targets Python 2 (print statements below).
if __name__ == "__main__":
    import _useDvbCssUninstalled  # Enable to run when dvbcss not yet installed ... @UnusedImport

    from dvbcss.clock import SysClock
    from dvbcss.clock import CorrelatedClock
    from dvbcss.clock import Correlation
    from dvbcss.protocol.client.wc import WallClockClient
    from dvbcss.protocol.client.wc.algorithm import LowestDispersionCandidate
    from dvbcss.protocol.client.ts import TSClientClockController
    import dvbcss.monotonic_time as time
    import logging
    import argparse
    import dvbcss.util
    import sys

    # Default bind address/port for the wall clock client UDP socket.
    DEFAULT_WC_BIND = ("0.0.0.0", "random")

    # Command-line arguments: CSS-TS and CSS-WC endpoints, timeline request
    # parameters, and optional wall-clock client bind address/port.
    parser = argparse.ArgumentParser(
        description="Run a DVB TM-CSS Wall Clock Client (WC-Client) and connection to CSS-TS to obtain a timeline.")
    parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", default=False, help="Suppress extraneous output during runtime. Overrides loglevel option")
    parser.add_argument("--loglevel", dest="loglevel", action="store", type=dvbcss.util.parse_logLevel, nargs=1, help="Set logging level to one of: critical, error, warning, info, debug. Default=info", default=[logging.INFO])
    parser.add_argument("--wcloglevel", dest="wcloglevel", action="store", type=dvbcss.util.parse_logLevel, nargs=1, help="Set logging level for the wall clock client to one of: critical, error, warning, info, debug. Default=warn", default=[logging.WARN])
    parser.add_argument("tsUrl", action="store", type=dvbcss.util.wsUrl_str, nargs=1, help="ws:// URL of CSS-TS end point")
    parser.add_argument("wcUrl", action="store", type=dvbcss.util.udpUrl_str, nargs=1, help="udp://<host>:<port> URL of CSS-WC end point")
    parser.add_argument("contentIdStem", action="store", type=str, nargs=1, help="contentIdStem")
    parser.add_argument("timelineSelector", action="store", type=str, nargs=1, help="Timeline selector")
    parser.add_argument("timelineFreq", action="store", type=int, nargs=1, help="Ticks per second of the media timeline")
    parser.add_argument("wc_bindaddr", action="store", type=dvbcss.util.iphost_str, nargs="?", help="IP address or host name to bind WC client to (default="+str(DEFAULT_WC_BIND[0])+")", default=DEFAULT_WC_BIND[0])
    parser.add_argument("wc_bindport", action="store", type=dvbcss.util.port_int_or_random, nargs="?", help="Port number to bind WC client to (default="+str(DEFAULT_WC_BIND[1])+")", default=DEFAULT_WC_BIND[1])
    args = parser.parse_args()

    tsUrl = args.tsUrl[0]
    wc_dest = args.wcUrl[0]
    wc_bind = (args.wc_bindaddr, args.wc_bindport)
    contentIdStem = args.contentIdStem[0]
    timelineSelector = args.timelineSelector[0]
    timelineFreq = args.timelineFreq[0]

    if args.quiet:
        logging.disable(logging.CRITICAL)
    else:
        logging.basicConfig(level=args.loglevel[0])
        logging.getLogger("dvbcss.protocol.client.wc").setLevel(args.wcloglevel[0])

    # Wall clock: a correlated clock in nanoseconds, slaved to the CSS-WC
    # server using the lowest-dispersion candidate algorithm.
    sysclock = SysClock()
    wallClock = CorrelatedClock(sysclock, tickRate=1000000000)  # nanos
    algorithm = LowestDispersionCandidate(wallClock, repeatSecs=1, timeoutSecs=0.5)
    wc_client = WallClockClient(wc_bind, wc_dest, wallClock, algorithm)
    wc_client.start()

    # Synchronisation timeline clock; starts unavailable until CSS-TS
    # supplies timeline control timestamps.
    timelineClock = CorrelatedClock(wallClock, timelineFreq)
    timelineClock.setAvailability(False)

    print "Connecting, requesting timeline for:"
    print " Any contentId beginning with:", contentIdStem
    print " and using timeline selector: ", timelineSelector
    print

    ts = TSClientClockController(tsUrl, contentIdStem, timelineSelector, timelineClock, correlationChangeThresholdSecs=0.001)
    exiting = False
    tsClientLogger = logging.getLogger("TSClient")

    def reportCallback(msg, exit=False):
        # Build a controller callback that logs `msg`; when exit=True the
        # callback also stops the wall clock client and terminates the process.
        def callback(*a, **k):
            global exiting
            tsClientLogger.info(msg+"\n")
            if exit:
                exiting = True
                wc_client.stop()
                sys.exit(0)
        return callback

    ts.onConnected = reportCallback("connected")
    ts.onDisconnected = reportCallback("disconnected", exit=True)
    ts.onTimelineAvailable = reportCallback("timeline became available")
    ts.onTimelineUnavailable = reportCallback("timeline became un-available")
    ts.onTimingChange = reportCallback("change in timing and/or play speed")
    ts.connect()

    # Poll and report status until the CSS-TS connection drops.
    while not exiting:
        time.sleep(0.4)
        print ts.getStatusSummary(),
        print " Uncertainty (dispersion) = +/- %0.3f milliseconds" % (algorithm.getCurrentDispersion()/1000000.0)
| StarcoderdataPython |
8055918 | # Copyright (c) 2020-2021 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import nnabla as nn
import _init_paths
from models.model import create_model, load_model
from opts import opts
from utils import debugger
########
# Main
########
if __name__ == '__main__':
    # Define-by-run mode: forward passes execute eagerly as the graph is built.
    nn.set_auto_forward(True)
    opt = opts().init()
    # Build the inference-mode network from the parsed options.
    model = create_model(opt.arch, opt.heads, opt.head_conv,
                         opt.num_layers, training=False, channel_last=opt.channel_last)
    if opt.checkpoint != '':
        extension = os.path.splitext(opt.checkpoint)[1]
        assert (extension == '.h5' or
                extension == ".protobuf"), "incorrect file extension, should be .h5 or .protobuf"
        load_model(model, opt.checkpoint, clear=True)
    # Export the loaded model as an NNP bundle.
    debugger.save_nnp(opt, model)
| StarcoderdataPython |
3458386 | <reponame>techdragon/django-auth0-auth<gh_stars>1-10
from django_auth0_user.models import AbstractAuth0User
class Auth0User(AbstractAuth0User):
    """
    A user model designed for easy use with Auth0.

    Concrete (non-abstract) user model; all fields and behaviour are
    inherited unchanged from AbstractAuth0User.
    """
    pass
| StarcoderdataPython |
11251956 | <gh_stars>10-100
"""Representation of a text block within the HTML canvas."""
from html import unescape
from inscriptis.html_properties import WhiteSpace
class Block:
    """The current block of text.

    A block usually refers to one line of output text.

    .. note::
        If pre-formatted content is merged with a block, it may also contain
        multiple lines.

    Args:
        idx: the current block's start index.
        prefix: prefix used within the current block.
    """

    # idx: running character index; prefix: per-line prefix provider;
    # _content: accumulated text; collapsable_whitespace: True while a
    # trailing run of whitespace may still be collapsed.
    __slots__ = ('idx', 'prefix', '_content', 'collapsable_whitespace')

    def __init__(self, idx: int, prefix: str):
        self.idx = idx
        self.prefix = prefix
        self._content = ''
        self.collapsable_whitespace = True

    def merge(self, text: str, whitespace: WhiteSpace) -> None:
        """Merge the given text with the current block.

        Args:
            text: the text to merge.
            whitespace: whitespace handling.
        """
        if whitespace == WhiteSpace.pre:
            self.merge_pre_text(text)
        else:
            self.merge_normal_text(text)

    def merge_normal_text(self, text: str) -> None:
        """Merge the given text with the current block.

        Args:
            text: the text to merge
        """
        normalized_text = []
        # Collapse runs of whitespace to a single space, tracking state
        # across calls via collapsable_whitespace.
        for ch in text:
            if not ch.isspace():
                normalized_text.append(ch)
                self.collapsable_whitespace = False
            elif not self.collapsable_whitespace:
                normalized_text.append(' ')
                self.collapsable_whitespace = True
        if normalized_text:
            # Apply the first-line prefix only when the block is still empty.
            text = ''.join((self.prefix.first, *normalized_text)) if not \
                self._content else ''.join(normalized_text)
            text = unescape(text)
            self._content += text
            self.idx += len(text)

    def merge_pre_text(self, text: str) -> None:
        """Merge the given pre-formatted text with the current block.

        Args:
            text: the text to merge
        """
        # Keep newlines; continuation lines get the "rest" prefix.
        text = ''.join((self.prefix.first,
                        text.replace('\n', '\n' + self.prefix.rest)))
        text = unescape(text)
        self._content += text
        self.idx += len(text)
        self.collapsable_whitespace = False

    def is_empty(self) -> bool:
        return len(self.content) == 0

    @property
    def content(self) -> str:
        # Strip a single trailing collapsable space (and adjust idx) before
        # exposing the accumulated content.
        if not self.collapsable_whitespace:
            return self._content
        if self._content.endswith(' '):
            self._content = self._content[:-1]
            self.idx -= 1
        return self._content

    def new_block(self) -> 'Block':
        """Return a new Block based on the current one."""
        self.prefix.consumed = False
        return Block(idx=self.idx + 1, prefix=self.prefix)
| StarcoderdataPython |
3390311 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 10:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the required ``private_model_message``
    # text field to ``impactmodelspage``. Do not edit after deployment.

    dependencies = [
        ('pages', '0013_formpage_button_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='impactmodelspage',
            name='private_model_message',
            field=models.TextField(default='Your Model is currently not publicly visible. Please review your model details and contact <EMAIL> to make your model public.'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
5040896 | <filename>test/hlt/pytest/python/com/huawei/iotplatform/client/dto/DeviceEventHeader.py
class DeviceEventHeader(object):
    """Header fields of a device event message.

    Every attribute is initialised to a placeholder string equal to its own
    name; callers are expected to overwrite them with real values.
    """

    def __init__(self):
        self.requestId = "requestId"
        # Trailing underscore avoids clashing with the reserved word "from".
        self.from_ = "from_"
        self.to = "to"
        self.deviceId = "deviceId"
        self.serviceType = "serviceType"
        self.method = "method"
#1 | StarcoderdataPython |
8108870 | # this file was created by <NAME> it contains large portions of code from
# https://github.com/lanl-ansi/bqpsolvers/ repository
import warnings
import dwavebinarycsp
from gurobipy import Model, GRB
import dimod
from utils.jobshop_helpers import ones_from_sample
from utils.jobshopproblem import JobShopProblem, pruned_big_number
class GurobiSampler(dimod.Sampler):
    """A dimod sampler that solves a BQM exactly with Gurobi (MIP or MIQP)."""

    properties = None
    parameters = None
    runnable = None

    def __init__(self):
        # Advertised dimod sampler metadata.
        self.parameters = {
            'method': []
        }
        self.properties = {}

    def sample(self, bqm: dimod.BinaryQuadraticModel, method="miqp", num_reads=1, gurobi_params_kw=None):
        """Minimise ``bqm`` with Gurobi and return up to ``num_reads`` pool solutions.

        Args:
            bqm: the binary quadratic model to minimise.
            method: "mip" (products linearised via auxiliary binaries) or
                "miqp" (native quadratic objective).
            num_reads: size of the Gurobi solution pool to collect.
            gurobi_params_kw: optional overrides for the Gurobi parameters.

        Returns:
            A :class:`dimod.SampleSet` in the vartype of the input ``bqm``.
        """
        assert (method in ["mip", "miqp"])
        bqm_bin = bqm.change_vartype(vartype=dimod.BINARY, inplace=False)
        variable_ids = frozenset(bqm_bin.variables)
        variable_product_ids = frozenset(bqm_bin.quadratic)
        m = Model()
        gurobi_params = {
            "OutputFlag": 0,
            "TimeLimit": 60,
            "Threads": 12,
            "Cuts": 1,
            "MIPFocus": 2,
            "PoolSearchMode": 2,
            "PoolSolutions": num_reads
        }
        if gurobi_params_kw is None:
            gurobi_params_kw = {}
        gurobi_params.update(gurobi_params_kw)
        for param, value in gurobi_params.items():
            m.setParam(param, value)
        variable_lookup = {}
        for vid in variable_ids:
            variable_lookup[vid] = m.addVar(lb=0, ub=1, vtype=GRB.BINARY, name="var_{}".format(vid))
        if method == "mip":
            # One auxiliary binary per quadratic term, representing x_i * x_j.
            for pair in variable_product_ids:
                variable_lookup[pair] = m.addVar(lb=0, ub=1, vtype=GRB.BINARY,
                                                 name="link_{}_{}".format(str(pair[0]), str(pair[1])))
        m.update()
        if method == "mip":
            # Standard McCormick linearisation of the binary product.
            for i, j in variable_product_ids:
                m.addConstr(variable_lookup[(i, j)] >= variable_lookup[i] + variable_lookup[j] - 1)
                m.addConstr(variable_lookup[(i, j)] <= variable_lookup[i])
                m.addConstr(variable_lookup[(i, j)] <= variable_lookup[j])
        bqm_ising = bqm.change_vartype(vartype=dimod.SPIN, inplace=False)
        if len(bqm_ising.linear) <= 0 or all(bqm_ising.linear[lt] == 0.0 for lt in bqm_ising.linear):
            warnings.warn('detected spin symmetry, adding symmetry breaking constraint')
            # BUGFIX: variable_ids is a frozenset and is not subscriptable;
            # pick an arbitrary element instead of indexing with [0].
            v1 = next(iter(variable_ids))
            m.addConstr(variable_lookup[v1] == 0)
        # Assemble the objective from the binary-form coefficients.
        obj = 0.0
        for lt in bqm_bin.linear:
            obj += bqm_bin.linear[lt] * variable_lookup[lt]
        if method == "mip":
            for qt in bqm_bin.quadratic:
                obj += bqm_bin.quadratic[qt] * variable_lookup[qt]
        elif method == "miqp":
            for qt in bqm_bin.quadratic:
                i = qt[0]
                j = qt[1]
                obj += bqm_bin.quadratic[qt] * variable_lookup[i] * variable_lookup[j]
        m.setObjective(obj, GRB.MINIMIZE)
        m.update()
        m.optimize()
        energies = []
        samples = []
        for i in range(m.SolCount):
            m.Params.SolutionNumber = i  # select the i-th pool solution
            # BUGFIX: after selecting a pool solution, its objective is
            # PoolObjVal and its variable values are read via .Xn; ObjVal/.X
            # always refer to the incumbent, which made every returned
            # sample identical.
            energy = m.PoolObjVal + bqm_bin.offset
            sample = {k: int(round(variable_lookup[k].Xn)) for k in variable_ids}
            energies.append(energy)
            samples.append(sample)
        ss = dimod.SampleSet.from_samples(samples, vartype=bqm.BINARY, energy=energies, aggregate_samples=True)
        return ss.change_vartype(bqm.vartype)
def create_jsp():
    """Build QUBO coefficient dicts for a small job-shop scheduling problem.

    Returns:
        (linear, quadratic): ``linear[i]`` is the diagonal QUBO term for
        qubit ``i``; ``quadratic[(i, j)]`` holds the nonzero off-diagonal
        terms with ``i < j``.
    """
    # Two jobs ([2,1] and [2,1,2] machine sequences), 2 machines, deadline 7.
    job_shop_problem = JobShopProblem.from_data([[2, 1], [2, 1, 2]], 2, 7)
    # Penalty weights for the constraint terms below.
    alfa = 0.95
    beta = 1.
    ni = 0.7
    job_shop_problem.init_coefficients(beta, alfa, ni)
    job_shop_problem.add_starts_once_constraint()
    job_shop_problem.add_one_job_one_machine_constraint()
    job_shop_problem.add_operations_order_constraint()
    job_shop_problem.add_late_penalty()
    linear = {}
    quadratic = {}
    # NOTE(review): `pruned_big_number` is not defined anywhere in this file;
    # presumably a module-level constant defined elsewhere — verify before use.
    qubits_number = pruned_big_number
    for i in range(qubits_number):
        # linear['x{}'.format(i), 'x{}'.format(i)] = int(job_shop_problem.qubo_pruned_big[i, i])
        linear[i] = int(job_shop_problem.qubo_pruned_big[i, i])
    # Only keep nonzero upper-triangle couplings.
    for i in range(qubits_number):
        for j in range(i + 1, qubits_number):
            val = job_shop_problem.qubo_pruned_big[i, j]
            if (val != 0):
                quadratic[(i, j)] = int(val)
            # quadratic['x{}'.format(i), 'x{}'.format(j)] = int(val)
    return linear, quadratic
if __name__ == "__main__":
    import itertools

    # Smoke test: build a small binary BQM (descending linear biases plus
    # uniform pairwise couplings) and solve it with the Gurobi-backed sampler.
    # Bug fix: `bqm2` was consumed below but its construction was commented
    # out, so running this block raised NameError.
    num_vars = 10
    num = 5
    linear3 = {i: -1.0 + 0.1 * (num - i) for i in range(num)}
    quadratic3 = {(a, b): 2 for a, b in itertools.product(range(num), range(num)) if a < b}
    bqm2 = dimod.BinaryQuadraticModel(linear3,
                                      quadratic3,
                                      offset=0.0,
                                      vartype=dimod.BINARY)

    sampler = GurobiSampler()
    sampling_result = sampler.sample(bqm2, method="mip", num_reads=2000, gurobi_params_kw={"TimeLimit": 30})
    for s in list(sampling_result.data()):
        print(s.sample, "Energy: ", s.energy, "Occurrences: ", s.num_occurrences)
    if num_vars <= 10:
        # Small enough to brute-force for comparison.
        sampler = dimod.ExactSolver()
        # print(sampler.sample(bqm))
| StarcoderdataPython |
3599060 | import requests
from flask import Blueprint, current_app
from api.utils import (
get_jwt,
jsonify_data,
url_for,
get_response_data,
catch_ssl_errors
)
health_api = Blueprint('health', __name__)
@catch_ssl_errors
def check_spycloud_health():
    """Ping the SpyCloud watchlist endpoint to confirm the API is reachable."""
    endpoint = url_for('watchlist/example.org')
    request_headers = dict(current_app.config['SPYCLOUD_BASE_HEADERS'])
    request_headers['X-API-Key'] = get_jwt()
    response = requests.get(endpoint, headers=request_headers)
    return get_response_data(response)
@health_api.route('/health', methods=['POST'])
def health():
    """Health endpoint: probes the SpyCloud API, then reports OK.

    check_spycloud_health raises (via get_response_data / catch_ssl_errors)
    on failure, so reaching the return means the upstream API responded.
    """
    check_spycloud_health()
    return jsonify_data({'status': 'ok'})
| StarcoderdataPython |
#!/usr/bin/env python3
"""List the contents of a remote folder over SSH and dump them to JSON.

Usage: script.py user@server:folder_path
"""
from paramiko import SSHClient
import json
import os
import sys

# Should be in the form "user@server:folder_path"
folder_path = sys.argv[1]
user = folder_path.split("@")[0]
remote = folder_path.split("@")[1]
server = remote.split(":")[0]
folders = remote.split(":")[1]
folder_name = folders.split("/")[-1]

client = SSHClient()
# Bug fix: use the invoking user's known_hosts instead of a hard-coded
# home directory, which broke for everyone except one user.
client.load_host_keys(os.path.expanduser("~/.ssh/known_hosts"))
client.connect(server, username=user)

stdin, stdout, stderr = client.exec_command(f"ls {folders}")
# Drop the trailing empty entry produced by the final newline.
entries = stdout.read().decode("utf8").split("\n")[:-1]

with open(f"folders_{server}_{folder_name}.json", "w") as f:
    json.dump(entries, f)

stdin.close()
stdout.close()
stderr.close()
client.close()
| StarcoderdataPython |
5146604 | from ProjectEulerCommons.Base import *
from ProjectEulerCommons.PrimeNumbers import generate_prime
# Project Euler problem 7: find the 10001st prime.
# nth() is 0-indexed, hence the `10001 - 1`.
Answer(
    nth(generate_prime(), 10001 - 1)
)
"""
------------------------------------------------
ProjectEuler.Problem.007.py
The Answer is: 104743
Time Elasped: 0.3361032009124756sec
------------------------------------------------
"""
| StarcoderdataPython |
9780392 | '''
Write a code to receive an integer (n). Then, the code outputs the three lines as follows:
- 1st line shows n of *.
- 2nd line shows (n-2) of *. (If n-2 is less than 1, no need to output any asterisks.)
- 3rd line shows (n-4) of *. (If n-4 is less than 1, no need to output any asterisks.)
Input
An integer (n).
Output
1st line shows n of *.
2nd line shows (n-2) of *. (If n-2 is less than 1, no need to output any asterisks.)
3rd line shows (n-4) of *. (If n-4 is less than 1, no need to output any asterisks.)
'''
# Print up to three rows of asterisks, each two narrower than the last,
# stopping early once the width would drop to zero or below.
n = int(input())
for _ in range(3):
    if n <= 0:
        break
    print('*' * n)
    n -= 2
| StarcoderdataPython |
11320513 | # coding: utf-8
from pathlib import Path
from collections import Counter
from itertools import chain
import os, fire, re, csv, pickle
import pandas as pd
from typing import Callable, List, Collection
from concurrent.futures.process import ProcessPoolExecutor
from sacremoses import MosesTokenizer
# Numeric token-attribute id (not referenced elsewhere in this file).
ORTH = 65
def partition(a: Collection, sz: int) -> List[Collection]:
    "Split iterables `a` in equal parts of size `sz`"
    starts = range(0, len(a), sz)
    return [a[start:start + sz] for start in starts]
def partition_by_cores(a: Collection, n_cpus: int) -> List[Collection]:
    "Split data in `a` equally among `n_cpus` cores"
    chunk_size = len(a) // n_cpus + 1
    return partition(a, chunk_size)
def num_cpus() -> int:
    "Get number of cpus"
    affinity = getattr(os, 'sched_getaffinity', None)
    if affinity is None:
        # Platforms without sched_getaffinity (e.g. macOS, Windows).
        return os.cpu_count()
    return len(affinity(0))
class BaseTokenizer():
    "Basic class for a tokenizer function."

    def __init__(self, lang: str):
        self.lang = lang

    def tokenizer(self, t: str) -> List[str]:
        # Naive whitespace split; subclasses override with real tokenizers.
        return t.split(' ')
class SpacyTokenizer(BaseTokenizer):
    """Wrapper around a Moses tokenizer to make it a `BaseTokenizer`.

    NOTE(review): despite the class name this wraps sacremoses'
    MosesTokenizer, not spaCy.
    """
    def __init__(self, lang: str):
        # Bug fix: honour the requested language (previously hard-coded to
        # 'pt', silently ignoring `lang`) and record it via the base class.
        super().__init__(lang)
        self.tok = MosesTokenizer(lang)

    def tokenizer(self, t: str) -> List[str]:
        """Tokenize `t` with the underlying Moses tokenizer."""
        return list(self.tok.tokenize(t))
class VocabularyTokenizer():
    "Put together rules, a tokenizer function and a language to tokenize text with multiprocessing."
    def __init__(self, tok_func: Callable = SpacyTokenizer, lang: str = 'pt', n_cpus: int = None):
        self.tok_func, self.lang = tok_func, lang
        # Default to half the available cores.
        self.n_cpus = n_cpus or num_cpus() // 2
    def process_text(self, t: str, tok: BaseTokenizer) -> List[str]:
        "Process one text `t` with tokenizer `tok`."
        return tok.tokenizer(t)
    def _process_all_1(self, texts: Collection[str]) -> List[List[str]]:
        "Process a list of `texts` in one process."
        # The tokenizer is built per worker process (it may not be picklable).
        tok = self.tok_func(self.lang)
        return [self.process_text(t, tok) for t in texts]
    def process_all(self, texts: Collection[str]) -> List[List[str]]:
        "Process a list of `texts`, fanning out across processes when possible."
        if self.n_cpus <= 1: return self._process_all_1(texts)
        with ProcessPoolExecutor(self.n_cpus) as e:
            # chain.from_iterable flattens in O(n); the previous
            # sum(lists, []) was quadratic in the number of texts.
            return list(chain.from_iterable(
                e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus))))
def save_texts(paths, filename, lang):
    """Concatenate every file under each directory in `paths` into one CSV.

    Each line of every file becomes a (text, class_index) row in
    '<filename>_<lang>.csv'. Only the 'unsup' pseudo-class exists, so the
    index is always 0.
    """
    classes = ['unsup']
    file_count = 0
    filename = filename + '_' + lang + '.csv'
    # Start from a clean file so repeated runs do not append duplicates.
    if os.path.isfile(filename):
        os.remove(filename)
    with open(filename, 'a') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE, escapechar='\\')
        for idx, label in enumerate(classes):
            for path in paths:
                for fname in (path).glob('*'):
                    file_count += 1
                    print('writing from %s' % fname)
                    # Plain loop instead of a list comprehension used only for
                    # side effects; read_text also closes the file handle.
                    for line in fname.read_text(encoding='utf-8').split('\n'):
                        writer.writerow([line, idx])
    print('%d texts saved to %s' % (file_count, filename))
def get_tokens(filename):
    """Yield tokenized texts from the CSV at `filename`, in 500k-row chunks.

    The first CSV column is the text; each yielded item is the list of
    token lists for one chunk.
    """
    data = pd.read_csv(filename, header=None, escapechar='\\', chunksize=500000)
    for idx, df in enumerate(data):
        # Progress indicator: chunk number.
        print(idx)
        yield VocabularyTokenizer().process_all(df[0].astype(str))
def filter_currency(array: list) -> list:
    """Drop entries that look like currency amounts (e.g. 1.234,56 or 1,234.56)."""
    currency_re = re.compile(
        r"(?<![.,])(?:- *)?\b[0-9]{1,3}(?:\.?[0-9]{3})*\,?[0-9]{2}(?![.,-])\b|(?<![.,])(?:- *)?\b[0-9]{1,3}(?:,?[0-9]{3})*\.[0-9]{2}(?![.,-])\b")
    filtered = [item for item in array if not currency_re.match(item)]
    print('Size before filtering currency: %d, Size after filtering currency: %d' % (len(array), len(filtered)))
    return filtered
def filter_doc_id(array: list) -> list:
    """Drop hex-looking document ids: 6-7 hex chars containing at least one digit."""
    hex_re = re.compile(r"\b[0-9a-f]{6,7}\b")
    letters_only_re = re.compile(r"\b[a-f]{6,7}\b")
    filtered = []
    for doc_id in array:
        if hex_re.match(doc_id) and not letters_only_re.match(doc_id):
            continue
        filtered.append(doc_id)
    print('Size before filtering doc ids: %d, Size after filtering doc ids: %d' % (len(array), len(filtered)))
    return filtered
def filter_processes(array: list) -> list:
    """Drop entries that look like Brazilian court process numbers
    (NNNNNNN-NN.NNNN.N.NN.NNNN)."""
    # Bug fix: the dots in the pattern were unescaped, so they matched any
    # character; the pattern is now a raw string with literal dots.
    filtered = [item for item in array if
                not re.match(pattern=r'\d{7}-\d{2}\.\d{4}\.\d{1}\.\d{2}\.\d{4}', string=item)]
    print('Size before filtering processes: %d, Size after filtering processes: %d' % (len(array), len(filtered)))
    return filtered
def write_list(array: list, path_out: Path):
    """Write `array` to `path_out`, one token per line, after prepending the
    special tokens </S>, <S> and <UNK> (in that order).

    Note: the caller's list is mutated in place (special tokens are
    inserted at the front), matching the original behaviour.
    """
    array.insert(0, '<UNK>')
    array.insert(0, '<S>')
    array.insert(0, '</S>')
    # The `with` block closes the file; the previous explicit close() inside
    # the block was redundant.
    with path_out.open(mode='w', encoding='utf8') as file:
        file.writelines(item + '\n' for item in array)
def get_freq_filename(corpus_prefix: str, lang: str = 'pt'):
    """Build the pickle filename used to cache token frequencies."""
    return 'freq_{}_{}.pickle'.format(corpus_prefix, lang)
def generate_vocabulary(corpus_prefix: str, path_in: str, lang: str = 'pt', min_count: int = 3,
                        discard_currency: bool = False, discard_processes: bool = False,
                        discard_doc_ids: bool = False) -> None:
    """
    Build and write two vocabulary files (full and frequency-filtered)
    from the training + heldout corpus folders under `path_in`.

    :param corpus_prefix: Prefix identifying the corpus for training
    :param path_in: Example:
    Path('/media/discoD/repositorios/1-billion-word-language-modeling-benchmark/')
    :param path_out: Example:
    Path('/media/discoD/repositorios/1-billion-word-language-modeling-benchmark/vocabulary.txt')
    :param lang: Language of the model for training
    :param min_count: Minimum count of occurrences for the words to be added to the vocabulary
    :param discard_currency: Determine if currency values should be filtered out
    :param discard_processes: Determine if process numbers should be filtered out
    :param discard_doc_ids: Determine if document ids should be filtered out
    :return:
    """
    print('Reading data from %s' % path_in)
    path_in = Path(path_in)
    file_out = 'vocabulary_' + corpus_prefix + '_' + lang + '.txt'
    file_original_out = 'vocabulary_original_' + corpus_prefix + '_' + lang + '.txt'
    path_out = Path(path_in) / file_out
    path_original_out = Path(path_in) / file_original_out
    freq_filename = get_freq_filename(corpus_prefix, lang)
    freq_file = path_in / freq_filename
    # Token frequencies are cached in a pickle so reruns skip re-tokenizing.
    if not freq_file.exists():
        if not path_out.exists():
            # Dump train/heldout (and their union) to CSVs first.
            training_folder = 'training-' + corpus_prefix
            heldout_folder = 'heldout-' + corpus_prefix
            save_texts([path_in / training_folder], 'train_' + corpus_prefix, lang)
            save_texts([path_in / heldout_folder], 'test_' + corpus_prefix, lang)
            save_texts([path_in / training_folder, path_in / heldout_folder], 'full_' + corpus_prefix, lang)
        full_file = 'full_' + corpus_prefix + '_' + lang + '.csv'
        # Count every token across the whole (train + heldout) corpus.
        freq_full = Counter(p for o in chain.from_iterable(get_tokens(full_file)) for p in o)
        with freq_file.open(mode='wb') as f:
            pickle.dump(freq_full, f)
            f.close()
    else:
        with freq_file.open(mode='rb') as f:
            freq_full = pickle.load(f)
            f.close()
    total_number_of_tokens = sum(freq_full.values())
    print('Total number of tokens: %d' % total_number_of_tokens)
    # Unfiltered vocabulary, written for reference.
    original_vocabulary = sorted([palavra for palavra, contagem in freq_full.most_common()])
    original_vocabulary_length = len(original_vocabulary)
    print('Original vocabulary length: %d' % original_vocabulary_length)
    write_list(original_vocabulary, path_original_out)
    # Keep only words that occur at least `min_count` times.
    filtered = [palavra for palavra, contagem in freq_full.most_common() if contagem >= min_count]
    print('Total of words that occurred more than or equal %d times: %d' % (min_count, len(filtered)))
    if discard_currency:
        filtered = filter_currency(filtered)
    if discard_processes:
        filtered = filter_processes(filtered)
    if discard_doc_ids:
        filtered = filter_doc_id(filtered)
    write_list(sorted(filtered), path_out)
    print('Final length of the vocabulary: %d' % len(filtered))
    print('Number of training tokens: %d' % total_number_of_tokens)
# CLI entry point: expose generate_vocabulary as a command-line tool via fire.
if __name__ == '__main__': fire.Fire(generate_vocabulary)
| StarcoderdataPython |
6665190 | <gh_stars>0
import midi
from tb3step import TB3Step
from tb3pattern import TB3Pattern
class MIDIParser:
    """Parse a monophonic MIDI track into a Roland TB-3 step pattern.

    Note: this module is Python 2 (it uses the print statement).
    """
    def __init__(self,midi_path):
        # Path to the source MIDI file.
        self.midi_path = midi_path
    def parse(self,track_index=0):
        """Convert track `track_index` of the MIDI file into a TB3Pattern."""
        pattern = midi.read_midifile(self.midi_path)
        track = pattern[track_index]
        #Pulses per quarter
        ppq = pattern.resolution
        #4 TB-3 notes in a quarter
        tb3_step_distance = ppq / 4
        #Count how many notes are current playing, used to calculate sliding
        noteon = 0
        #Flags
        slide = False
        accent = False
        #MIDI Tick time during which the last note event happened
        last_note_on = 0
        #Midi event tick counter
        current_time = 0
        #Build a list of TB3Step objects
        steps = []
        step_count = 0
        for event in track:
            # event.tick is the delta time since the previous event.
            current_time += event.tick
            #We are only interested in NoteOn / NoteOff events
            if(isinstance(event,midi.NoteOnEvent)):
                #Check for skipped areas with no notes
                # Pad silent gaps with empty (rest) steps, one per 16th note.
                while(current_time >= last_note_on + tb3_step_distance*1.5):
                    steps.append(TB3Step())
                    step_count += 1
                    last_note_on += tb3_step_distance
                # A note starting while another is held becomes a slide.
                if(noteon > 0):
                    slide = True
                else:
                    slide = False
                #Based on MIDI recorded from the TB-3 accented notes should work with 127 velocity and unaccented should work with 64
                if(event.get_velocity() <= 64):
                    accent = False
                elif(event.get_velocity() <= 127):
                    accent = True
                step = {}
                step[TB3Step.KEY_NOTE] = event.get_pitch()
                step[TB3Step.KEY_ACCENT] = accent
                step[TB3Step.KEY_CLEAR] = False
                step[TB3Step.KEY_SLIDE] = slide
                steps.append(TB3Step(step))
                step_count += 1
                last_note_on = current_time
                noteon += 1
                if(noteon > 2):
                    print "Warning: more than two noteon event active! Expect wierd behavior"
            elif(isinstance(event,midi.NoteOffEvent)):
                noteon -= 1
                pass
        # Pattern-level parameters for the TB-3.
        params = {}
        params[TB3Pattern.KEY_PARAM_TRIPLET] = 0
        params[TB3Pattern.KEY_PARAM_LAST_STEP] = step_count-1
        params[TB3Pattern.KEY_PARAM_GATE_WIDTH] = 67
        params[TB3Pattern.KEY_PARAM_BANK] = 0
        # -1: presumably "keep current patch" — TODO confirm against TB3Pattern.
        params[TB3Pattern.KEY_PARAM_PATCH] = -1
        return TB3Pattern(steps,params)
| StarcoderdataPython |
6517540 | import numpy as np
import cv2
import scipy
import scipy.signal
def add_salt_and_pepper(image):
    """Return a copy of `image` with salt-and-pepper noise applied.

    Roughly `amount` of the pixels are corrupted, split `s_vs_p` between
    salt (set to 1) and pepper (set to 0). The input array is not modified.
    """
    s_vs_p = 0.5
    amount = 0.004
    out = np.copy(image)
    # Salt mode
    num_salt = np.ceil(amount * image.size * s_vs_p)
    # Bug fix: index with a *tuple* of coordinate arrays; indexing with a
    # list of arrays was deprecated and is rejected by modern NumPy.
    coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                   for i in image.shape)
    out[coords] = 1
    # Pepper mode
    num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
    coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                   for i in image.shape)
    out[coords] = 0
    return out
def do():
    """Load the dog image, corrupt it with salt & pepper noise, then median-filter it."""
    source = cv2.imread('raw_images/dog.png')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    noisy = add_salt_and_pepper(grayscale)
    cv2.imwrite('processed_images/dog_salt_pepper.png', noisy)
    denoised = scipy.signal.medfilt(noisy, kernel_size=5)
    cv2.imwrite('processed_images/median_dog.png', denoised)
# Script entry point.
if __name__ == '__main__':
    do()
3310508 | <filename>paw2018/userpicks/admin.py
from django.contrib import admin
from .models import UserPick
# Register your models here.
class UserPickAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for UserPick rows.
    list_display = ('team', 'game', 'pick')
# Make UserPick editable in the Django admin with the config above.
admin.site.register(UserPick, UserPickAdmin)
| StarcoderdataPython |
3594859 | import torch
from torch import nn
import math
from torch.nn import functional as F
from utils import cat_boxlist, Assigner
from utils import GIoULoss,SigmoidFocalLoss,concat_box_prediction_layers,get_num_gpus,reduce_sum
class ATSSHead(nn.Module):
    """ATSS detection head with rotated-box support.

    Builds parallel classification and box-regression towers of `n_conv`
    conv+BN+ReLU layers, plus per-level output convs for class logits,
    box regression, a 1-channel quality score and a 90-way angle
    classification (one class per integer degree in (-90, 0]).
    """
    def __init__(self, in_channels, n_class, n_conv, prior, regression_type):
        super(ATSSHead, self).__init__()
        # Background is excluded from the classification outputs.
        num_classes = n_class - 1
        num_anchors = 1
        self.regression_type = regression_type
        cls_tower = []
        bbox_tower = []
        for i in range(n_conv):
            # if self.cfg.MODEL.ATSS.USE_DCN_IN_TOWER and i == n_conv - 1:
            #     conv_func = DFConv2d
            # else:
            conv_func = nn.Conv2d
            cls_tower.append(
                conv_func(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=True
                )
            )
            cls_tower.append(nn.BatchNorm2d(num_features=in_channels,momentum=0.01, eps=1e-3))
            cls_tower.append(nn.ReLU())
            bbox_tower.append(
                conv_func(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=True
                )
            )
            bbox_tower.append(nn.BatchNorm2d(num_features=in_channels, momentum=0.01, eps=1e-3))
            # bbox_tower.append(nn.GroupNorm(32, in_channels))
            bbox_tower.append(nn.ReLU())
        self.add_module('cls_tower', nn.Sequential(*cls_tower))
        self.add_module('bbox_tower', nn.Sequential(*bbox_tower))
        self.cls_logits = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
            padding=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=3, stride=1,
            padding=1
        )
        self.quality = nn.Conv2d(
            in_channels, num_anchors * 1, kernel_size=3, stride=1,
            padding=1
        )
        # -90 < angle <= 0, channel = 90
        self.angle = nn.Conv2d(
            in_channels, num_anchors * 90, kernel_size=3, stride=1,
            padding=1
        )
        # initialization
        for modules in [self.cls_tower, self.bbox_tower,
                        self.cls_logits, self.bbox_pred,
                        self.quality]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # initialize the bias for focal loss
        prior_prob = prior
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)
        if regression_type == 'POINT':
            assert num_anchors == 1, "regressing from a point only support num_anchors == 1"
            torch.nn.init.constant_(self.bbox_pred.bias, 4)
        # One learnable scale per FPN level (5 levels assumed).
        self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
    def forward(self, x):
        """Run the head on a list of 5 feature maps.

        Returns four lists (one entry per level): class logits, box
        regressions, quality scores and angle logits.
        """
        logits = []
        bbox_reg = []
        quality = []
        angle = []
        for l, feature in enumerate(x):
            cls_tower = self.cls_tower(feature)
            box_tower = self.bbox_tower(feature)
            logits.append(self.cls_logits(cls_tower))
            bbox_pred = self.scales[l](self.bbox_pred(box_tower))
            if self.regression_type == 'POINT':
                # Point-based regression predicts distances, which must be >= 0.
                bbox_pred = F.relu(bbox_pred)
            bbox_reg.append(bbox_pred)
            angle.append(self.angle(box_tower))
            quality.append(self.quality(box_tower))
        return logits, bbox_reg, quality, angle
class Scale(nn.Module):
    """Learnable scalar multiplier applied element-wise to its input."""

    def __init__(self, init_value=1.0):
        super().__init__()
        initial = torch.tensor([init_value], dtype=torch.float32)
        self.scale = nn.Parameter(initial)

    def forward(self, input):
        return self.scale * input
class ATSSLoss(object):
    """Training loss for the ATSS head: focal classification loss,
    centerness-weighted SmoothL1 box regression, BCE centerness loss and
    cross-entropy angle loss, with per-GPU normalization."""
    def __init__(self, gamma, alpha, fg_iou_threshold, bg_iou_threshold, positive_type,
                 reg_loss_weight, angle_loss_weight, cls_loss_weight,
                 top_k, box_coder):
        self.cls_loss_func = SigmoidFocalLoss(gamma, alpha)
        self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
        self.angle_loss_func = nn.CrossEntropyLoss(reduction='sum')
        #self.reg_loss_func = GIoULoss(box_coder)
        self.reg_loss_func = nn.SmoothL1Loss(reduction='none')
        self.reg_loss_weight = reg_loss_weight
        self.angle_loss_weight = angle_loss_weight
        self.cls_loss_weight = cls_loss_weight
        self.box_coder = box_coder
        self.assigner = Assigner(positive_type,box_coder,fg_iou_threshold,bg_iou_threshold,top_k)
    def compute_centerness_targets(self, reg_targets, anchors):
        """Centerness in [0, 1]: how close each anchor center is to the
        center of its matched (axis-aligned, angle-zeroed) ground truth."""
        # Append a zero angle column so the coder can decode 5-dof targets.
        reg_targets_with_angel = torch.cat([reg_targets, reg_targets.new_zeros(reg_targets.shape[0],1)],dim=-1)
        gts = self.box_coder.decode(reg_targets_with_angel, anchors)
        anchors_cx = anchors[:, 0]
        anchors_cy = anchors[:, 1]
        # Ground-truth box edges from (cx, cy, w, h).
        gts_left_x = gts[:,0] - gts[:,2]/2
        gts_right_x = gts[:,0] + gts[:,2]/2
        gts_upper_y = gts[:,1] - gts[:,3]/2
        gts_bottom_y = gts[:,1] + gts[:,3]/2
        # Distances from the anchor center to each box edge.
        l = anchors_cx - gts_left_x
        t = anchors_cy - gts_upper_y
        r = gts_right_x - anchors_cx
        b = gts_bottom_y - anchors_cy
        left_right = torch.stack([l, r], dim=1)
        top_bottom = torch.stack([t, b], dim=1)
        centerness = torch.sqrt(torch.abs((left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0])) * \
                                torch.abs((top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])))
        assert not torch.isnan(centerness).any()
        return centerness
    def __call__(self, box_cls, box_regression, centerness, angle, targets, anchors):
        '''
        box_cls: list(tensor) tensor shape (N,class_num,H,W) classification branch output for every feature level ,
                 N is the batchsize,
        box_regression : list(tensor) tensor shape (N,4,H,W) localization branch output for every feature level
        centerness: list(tensor) tensor shape (N,1.H,W) centerness branch output for every feature level
        angle: list(tensor) tensor shape (N,90,H,W) angle branch output for every feature level
        taregts: list(boxlist) , boxlist object, ground_truth object for every image,
        anchos: list(list) [image_1_anchors,...,image_N_anchors],
                image_i_anchors : [leverl_1_anchor,...,leverl_n_anchor]
                level_i_anchor:boxlist
        Returns the weighted (cls, reg, centerness, angle) loss tensors.
        '''
        # Match anchors to ground truth and build per-anchor targets.
        labels, reg_targets, weights_label = self.assigner(targets, anchors)
        # prepare prediction
        N = len(labels)
        box_cls_flatten, box_regression_flatten = concat_box_prediction_layers(box_cls, box_regression)
        centerness_flatten = [ct.permute(0, 2, 3, 1).reshape(N, -1, 1) for ct in centerness]
        centerness_flatten = torch.cat(centerness_flatten, dim=1).reshape(-1)
        angle_flatten = [an.permute(0, 2, 3, 1).reshape(N, -1, 90) for an in angle]
        angle_flatten = torch.cat(angle_flatten, dim=1).reshape(-1, 90)
        # prepare ground truth
        labels_flatten = torch.cat(labels, dim=0)
        reg_targets_flatten = torch.cat([reg_target[:, :4] for reg_target in reg_targets], dim=0)
        angel_targets_flatten = torch.cat([reg_target[:, 4] for reg_target in reg_targets], dim=0)
        weights_label_flatten = torch.cat(weights_label, dim=0)
        # prepare anchors
        anchors_flatten = torch.cat([cat_boxlist(anchors_per_image).bbox for anchors_per_image in anchors], dim=0)
        pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1)
        # Average the positive count across GPUs so loss scales match.
        num_gpus = get_num_gpus()
        total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)
        cls_loss = self.cls_loss_func(box_cls_flatten, labels_flatten.int(),weights_label_flatten) / num_pos_avg_per_gpu
        if pos_inds.numel() > 0:
            # Restrict regression/centerness/angle losses to positive anchors.
            anchors_flatten = anchors_flatten[pos_inds]
            # prepare positive sample matched gt
            reg_targets_flatten = reg_targets_flatten[pos_inds]
            angel_targets_flatten = angel_targets_flatten[pos_inds]
            centerness_targets = self.compute_centerness_targets(reg_targets_flatten, anchors_flatten)
            weights_label_flatten = weights_label_flatten[pos_inds]
            # prepare positive sample prediction
            box_regression_flatten = box_regression_flatten[pos_inds]
            centerness_flatten = centerness_flatten[pos_inds]
            angle_flatten = angle_flatten[pos_inds]
            sum_centerness_targets_avg_per_gpu = reduce_sum(centerness_targets.sum()).item() / float(num_gpus)
            # attention here
            # Regression loss is weighted by centerness and assigner weights.
            reg_loss = self.reg_loss_func(box_regression_flatten, reg_targets_flatten)
            reg_loss = reg_loss.sum(dim=-1) * centerness_targets*weights_label_flatten
            reg_loss = reg_loss.sum() / sum_centerness_targets_avg_per_gpu
            # reg_loss = self.reg_loss_func(box_regression_flatten, reg_targets_flatten, anchors_flatten,
            #                               weight=centerness_targets*weights_label_flatten) \
            #            /sum_centerness_targets_avg_per_gpu
            centerness_loss = self.centerness_loss_func(centerness_flatten, centerness_targets) / num_pos_avg_per_gpu
            angle_loss = self.angle_loss_func(angle_flatten, angel_targets_flatten.to(torch.long)) / num_pos_avg_per_gpu
        else:
            # No positive anchors in the batch: zero out the auxiliary losses.
            reg_loss = torch.tensor([0]).to(torch.float32)
            centerness_loss = reg_loss * 0
            angle_loss = reg_loss * 0
        return cls_loss * self.cls_loss_weight, reg_loss * self.reg_loss_weight, centerness_loss, angle_loss * self.angle_loss_weight
def test():
from utils import BoxCoder
coder = BoxCoder(regression_type='bbox',anchor_sizes=[64, 128, 256, 512, 1024],
anchor_strides=[8, 16, 32, 64, 128])
loss_obj = ATSSLoss(gamma=2.0, alpha=0.25, fg_iou_threshold=0.5,bg_iou_threshold=0.4,positive_type='ATSS',
reg_loss_weight=2.0, top_k=9, box_coder=coder)
batchsize = 1
H = 12
W = 12
class_num = 2 | StarcoderdataPython |
6531525 | from env import *
from scipy import integrate
from scipy.stats import entropy
from copy import deepcopy
# normalization trix from tim viera blog
def exp_normalize(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = np.exp(x - x.max())
    return shifted / shifted.sum()
# Discretization granularity of the [0, 1] theta axis.
K_DIS = 5
# this is just the hypothesis space, which is same for every casino
# Every (low, high) interval with 0 <= low < high <= 1 on the 1/K_DIS grid.
H_SPACE = []
for i in range(1, K_DIS+1):
    for j in range(i):
        RNG = (j/K_DIS, i/K_DIS)
        H_SPACE.append(RNG)
# I changed the state representation in the new env
# Adapting this to make the new env repr backward compatible
def backfit_cas_obs(cas_obs):
    """Expand per-arm (heads, tails) count pairs into explicit 1/0 outcome lists."""
    return [[1] * arm_ob[0] + [0] * arm_ob[1] for arm_ob in cas_obs]
def backfit_ob(obs):
    """Apply backfit_cas_obs to every casino's observation list."""
    return [backfit_cas_obs(cas_obs) for cas_obs in obs]
class EntPolicy(NaivePolicy):
    """Information-greedy bandit policy.

    Maintains a Bayesian posterior over each casino's hypothesis range
    (from H_SPACE) and always pulls the (casino, arm) whose outcome is
    expected to reduce the entropy of the posterior over the casino's
    optimum the most.
    """
    # given observatio of a casino, i.e. [[1,0,0],[1,1,1]]
    # produce the posterior of hypothesis Ranges given these observations
    def get_posterior_R(self, cas_obs):
        # cas_obs = backfit_ob(cas_obs)
        loglikelihoood = []
        # for all the discrete range hypothesis of a casino
        for R in H_SPACE:
            R_loglikelihood = 0
            # Uniform density of theta within the hypothesis range R.
            P_theta_R = 1 / (R[1] - R[0])
            # for all the arm obserrvations in that casino
            for arm_obs in cas_obs:
                arm_heads = sum(arm_obs)
                arm_tails = len(arm_obs) - arm_heads
                def likelihoood_armobs_theta(theta):
                    # Bernoulli likelihood of this arm's outcomes given theta.
                    return theta**arm_heads * (1-theta)**arm_tails
                # Marginalize theta over R (numerical integration).
                likelihood_armobs_omega = P_theta_R * integrate.quad(likelihoood_armobs_theta, R[0], R[1])[0]
                R_loglikelihood += np.log(likelihood_armobs_omega)
            loglikelihoood.append(R_loglikelihood)
        # exponentiate and normalise into normal probabilities
        loglikelihoood = np.array(loglikelihoood)
        return exp_normalize(loglikelihoood)
    # given distribution of R, get the distribution of max
    # we do so normalizing away the a in (a, OPT)
    def get_distribution_Opt(self, distr_R):
        buckets = [0.0 for _ in range(K_DIS)]
        for prob, R in zip(distr_R, H_SPACE):
            # The optimum of range (low, high) is its upper bound.
            buckets[round(R[1] * K_DIS) - 1] += prob
        return buckets
    # get the distribution of theta given a distribution of ranges R
    def get_distribution_theta(self, distr_R):
        buckets = [0.0 for _ in range(K_DIS)]
        for prob, R in zip(distr_R, H_SPACE):
            low_idx = round(R[0] * K_DIS)
            high_idx = round(R[1] * K_DIS)
            # Spread each range's probability uniformly over its buckets.
            for idx in range(low_idx, high_idx):
                buckets[idx] += prob * (1 / (R[1] - R[0]))
        pdf_info = np.array(buckets)
        def theta_distr(theta):
            # Piecewise-constant pdf over the K_DIS buckets.
            bucket_id = int(theta * K_DIS)
            return pdf_info[bucket_id]
        return theta_distr
    # get the predition of whether an arm in a casino would be 1 or 0
    def get_arm_pred(self, arm_id, cas_obs):
        # cas_obs = backfit_cas_obs(cas_obs)
        # arm_id == -1 means "a brand-new arm with no observations yet".
        assert arm_id == -1 or arm_id in range(len(cas_obs))
        # self arm's outcomes
        self_arm_outcomes = [] if arm_id == -1 else cas_obs[arm_id]
        # other arm's outcomes
        other_arm_obs = [cas_obs[j] for j in range(len(cas_obs)) if j != arm_id]
        # compute the posterior of R given other arm's observation
        R_posterior = self.get_posterior_R(other_arm_obs)
        # compute the theta posterior given other_arms_obs
        theta_posterior = self.get_distribution_theta(R_posterior)
        # print ("mark 1")
        # print (self_arm_outcomes, other_arm_obs)
        # print (sum(R_posterior))
        # print (theta_posterior)
        # print (integrate.quad(theta_posterior, 0, 1))
        # if we're pulling from a new arm, V2 is already computed
        V2 = theta_posterior if arm_id == -1 else None
        if V2 is None:
            # if we're pulling an existing arm, we need to encorporate in existing arm's outcomes
            def V1(theta):
                arm_heads = sum(self_arm_outcomes)
                arm_tails = len(self_arm_outcomes) - arm_heads
                return theta**arm_heads * (1-theta)**arm_tails * theta_posterior(theta)
            # the Z1 normalization constant
            Z1 = integrate.quad(V1, 0, 1)[0]
            # the V2 all put together
            def V22(theta):
                return V1(theta) / Z1
            V2 = V22
        def H_likelihood(theta):
            return V2(theta) * theta
        def T_likelihood(theta):
            return V2(theta) * (1-theta)
        # quiote useful debugging, do not remove
        # if abs(integrate.quad(V2, 0, 1)[0] - 1.0) > 0.1:
        #     print (integrate.quad(V2, 0, 1))
        #     print (cas_obs)
        #     print (arm_id)
        #     assert 0, "something wrong with my posterior . . ."
        H_prob = integrate.quad(H_likelihood, 0, 1)[0]
        # T_prob = integrate.quad(T_likelihood, 0, 1)[0]
        T_prob = 1.0 - H_prob
        return H_prob, T_prob
    def act(self, observations):
        """Pick the (casino, arm) whose expected posterior-entropy reduction
        over that casino's optimum is largest."""
        observations = backfit_ob(observations)
        # print ("acting ")
        # print ("observations")
        # print (observations)
        # ret = super().act(observations)
        actions = []
        entropy_reductions = []
        for cas_ids, cas_obs in enumerate(observations):
            posterior_R = self.get_posterior_R(cas_obs)
            posterior_Opt = self.get_distribution_Opt(posterior_R)
            opt_entropy = entropy(posterior_Opt)
            # print (f"casino {cas_ids} obs {cas_obs}")
            for arm_id in [-1] + [_ for _ in range(len(cas_obs))]:
                H_prob, T_prob = self.get_arm_pred(arm_id, cas_obs)
                # entropy on opt if head is tossed
                hallucinate_head = deepcopy(cas_obs)
                if arm_id == -1:
                    hallucinate_head.append([1])
                else:
                    hallucinate_head[arm_id].append(1)
                opt_distr_if_head = self.get_distribution_Opt(self.get_posterior_R(hallucinate_head))
                head_ent = entropy(opt_distr_if_head)
                # entropy on opt if tail is tossed
                hallucinate_tail = deepcopy(cas_obs)
                if arm_id == -1:
                    hallucinate_tail.append([0])
                else:
                    hallucinate_tail[arm_id].append(0)
                opt_distr_if_tail = self.get_distribution_Opt(self.get_posterior_R(hallucinate_tail))
                tail_ent = entropy(opt_distr_if_tail)
                # Expected posterior entropy after observing this arm once.
                opt_cond_entropy = H_prob * head_ent + T_prob * tail_ent
                actions.append((cas_ids, arm_id))
                entropy_reductions.append(opt_entropy - opt_cond_entropy)
        # return the action with the least conditional entropy of opt
        chosen_action = actions[np.argmax(entropy_reductions)]
        # print ("chosen action ", chosen_action)
        return chosen_action
    def guess(self, observations):
        """For each casino, guess the arm with the highest predicted heads probability."""
        observations = backfit_ob(observations)
        ret = []
        for cas_ids, cas_obs in enumerate(observations):
            arm_probs = [self.get_arm_pred(arm_id, cas_obs)[0] for arm_id in range(len(cas_obs))]
            #print (arm_probs)
            #assert 0
            ret.append(np.argmax(arm_probs))
        return ret
if __name__ == '__main__':
    # Benchmark: run every policy on 1000 random casino environments and
    # track the mean/std of the cumulative regret after each iteration.
    policies = [NaivePolicy(), TilePolicy(), JankPolicy(), EntPolicy()]
    cums = [[] for _ in policies]
    for jj in range(1000):
        # do a roll out
        cas_par = make_casino_params()
        env = CasEnv(cas_par)
        for j in range(len(cums)):
            policy = policies[j]
            regret = roll_out(env, policy)
            cums[j].append(regret)
        stats = [(np.mean(x), np.std(x)) for x in cums]
print (f"iteration {jj} regret_stats {stats}") | StarcoderdataPython |
#!/usr/bin/python
# Decode an F5 BIG-IP persistence cookie of the form
# BIGipServer<pool>=<host>.<port>.0000 into the backend IP and port.
# NOTE: this is Python 2 code (print statements).
# example string: BIGip<ervername>110536896.20480.0000
import struct
import sys
import re
if len(sys.argv) != 2:
    print "Usage: %s cookie" % sys.argv[0]
    exit(1)
cookie = sys.argv[1]
print "\n[*] Cookie to decode: %s\n" % cookie
(cookie_name, cookie_value) = cookie.split('=')
# Pool name follows the "BIGipServer" prefix in the cookie name.
pool = re.search('^BIGipServer([.\w\.]*)', cookie_name)
(host, port, end) = cookie_value.split('.')
# The host is a little-endian packed IPv4 address; unpack its four octets.
(a, b, c, d) = [ord(i) for i in struct.pack("<I", int(host))]
# The port is a little-endian 16-bit integer.
(e) = [ord(e) for e in struct.pack("<H", int(port))]
port = "0x%02X%02X" % (e[0],e[1])
print "[*] Pool name: %s" % (pool.group(1))
print "[*] Decoded IP and Port: %s.%s.%s.%s:%s\n" % (a,b,c,d, int(port,16))
| StarcoderdataPython |
9606517 | """
Description:
Test argparser.
Author: lincoln12w
Github: https://github.com/Lincoln12w
Module:
argparse
Doc: https://docs.python.org/2/howto/argparse.html &
https://docs.python.org/3/library/argparse.html
APIs:
ArgumentParser.add_argument()
action - The basic type of action to be taken when this
argument is encountered.
store - This just stores the argument's value. This
is the default action.
store_const - This stores the value specified by the
const keyword argument.
store_true/store_false -
append - Appends each argument value to a list.
append_const - Appends the value specified by the const
keyword argument to a list.
count - This counts the number of times a keyword argument occurs.
nargs - The number of command-line arguments that should be consumed.
N - N arguments from the command line will be gathered
together into a list.
? - One argument will be consumed from the command line if possible.
* - All command-line arguments present are gathered into a list.
+ - Just like '*'. And, an error message will be generated if
there was none.
argparse.REMAINDER - All the remaining command-line arguments are
gathered into a list.
const - A constant value required by some action and nargs selections.
default - The value produced if the argument is absent from
the command line.
type - The type to which the command-line argument should be converted.
can take any callable that takes a single string and returns the
converted value.
choices - A container of the allowable values for the argument.
required - Whether or not the command-line option may be omitted
(optionals only).
dest - The name of the attribute to be added to the object returned by
parse_args().
ArgumentParser.add_subparsers()
ArgumentParser.add_argument_group()
ArgumentParser.add_mutually_exclusive_group()
Modify History
--------------
00a 16apr17 lzw create
01a 26apr17 lzw update
"""
import argparse
def form_parser():
    """Build the calculator-style demo argument parser.

    Positional arguments: ``lhs`` (int), ``op`` (one of + - * / %),
    ``rhs`` (int).  Optional: ``--name`` (defaults to "test") and the
    ``-v/--verbose`` boolean flag.

    Returns:
        argparse.ArgumentParser: the fully configured parser.
    """
    parser = argparse.ArgumentParser()
    # Positional arguments
    parser.add_argument("lhs", help="left hand side value", type=int)
    # Restrict the operator to a known set of choices
    parser.add_argument("op",
                        help="operations", choices=['+', '-', '*', '/', '%'])
    parser.add_argument("rhs", help="right hand side value", type=int)
    # Optional arguments
    # Set default value
    parser.add_argument("--name", help="name of the actions", default="test")
    # Set as a flag
    parser.add_argument("-v", "--verbose",
                        help="turn on verbosity", action="store_true")
    # BUG FIX: the parser was constructed but never returned, so every
    # caller received None.
    return parser
| StarcoderdataPython |
6569899 | <filename>algorithms/mDSDI/src/models/model_factory.py
from algorithms.mDSDI.src.models.mnistnet import MNIST_CNN, Color_MNIST_CNN
from algorithms.mDSDI.src.models.resnet import ResNet
# Registry mapping CLI model names to their network classes.
nets_map = {"mnistnet": MNIST_CNN, "cmnistnet": Color_MNIST_CNN, "resnet50": ResNet}


def get_model(name):
    """Return a factory callable that instantiates the network registered
    under *name*, forwarding any keyword arguments to its constructor.

    Raises:
        ValueError: if *name* is not a key of ``nets_map``.
    """
    if name not in nets_map:
        raise ValueError("Name of model unknown %s" % name)

    def build(**kwargs):
        return nets_map[name](**kwargs)

    return build
| StarcoderdataPython |
1704022 | <gh_stars>0
import boto3
import os
import time
# Configuration is read from the Lambda environment; a missing variable
# fails fast at import time with a KeyError.
AUTOSCAL = os.environ['AUTOSCAL']  # Auto Scaling group name
CLUSTER = os.environ['ECS']        # ECS cluster name
TASK = os.environ['TASKDEF']       # ECS task definition name/ARN
def lambda_handler(event, context):
    """Scale the Auto Scaling group up to one instance, wait, then launch
    the ECS task on the cluster.

    The *event* and *context* parameters are the standard AWS Lambda
    arguments and are unused here.
    """
    # BUG FIX: the previous bare ``try/except: raise`` caught everything
    # (including KeyboardInterrupt) only to re-raise it — a no-op that
    # obscured intent.  Let exceptions propagate so Lambda records the
    # failure naturally.
    set_autoscaling(AUTOSCAL)
    # Give the new instance time to boot and register with the cluster.
    # NOTE(review): a fixed 300 s sleep burns billed Lambda time and can
    # still race the instance start — consider a waiter/Step Functions.
    time.sleep(300)
    run_task(CLUSTER, TASK)
def set_autoscaling(Name):
    """Set the desired capacity of the Auto Scaling group *Name* to 1."""
    client = boto3.client('autoscaling')
    client.set_desired_capacity(AutoScalingGroupName=Name, DesiredCapacity=1)
def run_task(Name, task):
    """Start the ECS task definition *task* on the cluster *Name*."""
    client = boto3.client('ecs')
    client.run_task(cluster=Name, taskDefinition=task)
| StarcoderdataPython |
11227245 | import numpy as np
from numpy.testing import run_module_suite
from skimage import filter, data, color
from skimage import img_as_uint, img_as_ubyte
class TestTvDenoise():
    """Tests for skimage's total-variation denoising filter.

    BUG FIXES in this revision: the removed NumPy alias ``np.float``
    (gone since NumPy 1.24; it was just the builtin ``float``, i.e.
    float64) is replaced with ``np.float64``, and the final
    wrong-dimension check now fails when no exception is raised instead
    of passing silently.
    """

    def test_tv_denoise_2d(self):
        """
        Apply the TV denoising algorithm on the lena image provided
        by scipy
        """
        # lena image
        lena = color.rgb2gray(data.lena())[:256, :256]
        # add noise to lena
        lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
        # clip noise so that it does not exceed allowed range for float images.
        lena = np.clip(lena, 0, 1)
        # denoise
        denoised_lena = filter.tv_denoise(lena, weight=60.0)
        # output must be a floating-point image
        assert denoised_lena.dtype in [np.float32, np.float64]
        from scipy import ndimage
        grad = ndimage.morphological_gradient(lena, size=((3, 3)))
        grad_denoised = ndimage.morphological_gradient(
            denoised_lena, size=((3, 3)))
        # test if the total variation has decreased
        assert grad_denoised.dtype == np.float64
        assert (np.sqrt((grad_denoised**2).sum())
                < np.sqrt((grad**2).sum()) / 2)

    def test_tv_denoise_float_result_range(self):
        # lena image
        lena = color.rgb2gray(data.lena())[:256, :256]
        int_lena = np.multiply(lena, 255).astype(np.uint8)
        assert np.max(int_lena) > 1
        denoised_int_lena = filter.tv_denoise(int_lena, weight=60.0)
        # test if the value range of output float data is within [0.0:1.0]
        assert denoised_int_lena.dtype == np.float64
        assert np.max(denoised_int_lena) <= 1.0
        assert np.min(denoised_int_lena) >= 0.0

    def test_tv_denoise_3d(self):
        """
        Apply the TV denoising algorithm on a 3D image representing
        a sphere.
        """
        x, y, z = np.ogrid[0:40, 0:40, 0:40]
        mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
        mask = 100 * mask.astype(np.float64)
        mask += 60
        mask += 20 * np.random.randn(*mask.shape)
        mask[mask < 0] = 0
        mask[mask > 255] = 255
        res = filter.tv_denoise(mask.astype(np.uint8), weight=100)
        assert res.dtype == np.float64
        assert res.std() * 255 < mask.std()
        # test wrong number of dimensions
        # BUG FIX: the old try/except passed silently when NO exception
        # was raised; assert that ValueError actually occurs.
        a = np.random.random((8, 8, 8, 8))
        raised = False
        try:
            filter.tv_denoise(a)
        except ValueError:
            raised = True
        assert raised
# Allow running this test module directly via numpy's test runner.
if __name__ == "__main__":
    run_module_suite()
| StarcoderdataPython |
3443514 | <filename>PythonCode/Iteration1.py
# Demonstrate that Python's for statement iterates a list directly,
# no index bookkeeping required.
nums = [2, 3, 4, 5, 6, 7]
for value in nums:
    print(value)
9612772 | import argparse
import numpy as np
import csv
import pandas as pd
import json
import scipy.sparse as sp
from sparsebm import (
SBM,
LBM,
ModelSelection,
generate_LBM_dataset,
generate_SBM_dataset,
)
from sparsebm.utils import reorder_rows, ARI
import logging
logger = logging.getLogger(__name__)

# GPU support is opt-out: default to using the GPU only when cupy imports.
try:
    import cupy

    _DEFAULT_USE_GPU = True
except ImportError:
    _DEFAULT_USE_GPU = False


def _add_model_selection_parser(subparsers):
    """Register the 'modelselection' sub-command on *subparsers*."""
    ms_parser = subparsers.add_parser(
        "modelselection", help="use the model selection with LBM or SBM"
    )
    input_grp = ms_parser.add_argument_group("mandatory arguments")
    input_grp.add_argument(
        "ADJACENCY_MATRIX", help="List of edges in CSV format"
    )
    input_grp.add_argument(
        "-t",
        "--type",
        help="model to use. Either 'lbm' or 'sbm'",
        required=True,
    )
    input_grp = ms_parser.add_argument_group("optional arguments")
    input_grp.add_argument(
        "-sep",
        "--sep",
        default=",",
        help="CSV delimiter to use. Default is ',' ",
    )
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True ("False" -> True); kept for interface compatibility, but
    # str2bool would be the correct converter here.
    input_grp.add_argument(
        "-gpu",
        "--use_gpu",
        help="specify if a GPU should be used.",
        default=_DEFAULT_USE_GPU,
        type=bool,
    )
    input_grp.add_argument(
        "-idgpu",
        "--gpu_index",
        help="specify the gpu index if needed.",
        default=None,
        type=bool,
    )
    input_grp.add_argument(
        "-s",
        "--symmetric",
        help="specify if the adajacency matrix is symmetric. For sbm only",
        default=False,
    )
    input_grp.add_argument(
        "-p", "--plot", help="display model exploration plot", default=True
    )
    output_grp = ms_parser.add_argument_group("output")
    output_grp.add_argument(
        "-o",
        "--output",
        help="File path for the json results.",
        default="results.json",
    )


def _add_generate_parsers(subparsers):
    """Register the 'generate' sub-command and its sbm/lbm sub-parsers."""
    generate_sbm_parser = subparsers.add_parser(
        "generate", help="use sparsebm to generate a data matrix"
    )
    subparsers_generate = generate_sbm_parser.add_subparsers(
        help="model to generate data with", dest="subparsers_generate_name"
    )
    sbm_generation_parser = subparsers_generate.add_parser(
        "sbm", help="use the stochastic block model to generate data"
    )
    lbm_generation_parser = subparsers_generate.add_parser(
        "lbm", help="use the latent block model to generate data"
    )
    help_example_base = """A json configuration file that specify the parameters
    of the data to generate. If no file is given a random graph is generated."""
    help_sbm_gen = """\n Example of json configuration file for SBM: \n{\n
    "type": "sbm",\n "number_of_nodes": 1000,\n "number_of_clusters": 4,\n
    "symmetric": true,\n "connection_probabilities": [\n [\n 0.1,\n
    0.036,\n 0.012,\n 0.0614\n ],\n [\n 0.036,\n
    0.074,\n 0,\n 0\n ],\n [\n 0.012,\n 0,\n
    0.11,\n 0.024\n ],\n [\n 0.0614,\n 0,\n
    0.024,\n 0.086\n ]\n ],\n "cluster_proportions": [\n 0.25
    ,\n 0.25,\n 0.25,\n 0.25\n ]\n}"""
    sbm_generation_parser.add_argument(
        "-f",
        "--file",
        default=None,
        help=help_example_base + help_sbm_gen,
        required=False,
    )
    lbm_generation_parser.add_argument(
        "-f", "--file", default=None, help=help_example_base, required=False
    )


def _configure_fit_parser(parser, is_lbm):
    """Attach the shared fit arguments to an 'sbm' or 'lbm' sub-parser."""
    input_grp = parser.add_argument_group("mandatory arguments")
    input_grp.add_argument(
        "ADJACENCY_MATRIX", help="List of edges in CSV format"
    )
    if is_lbm:
        input_grp.add_argument(
            "-k1",
            "--n_row_clusters",
            help="number of row clusters",
            default=4,
            type=int,
            required=True,
        )
        # BUG FIX: the help text for -k2 wrongly said "row clusters".
        input_grp.add_argument(
            "-k2",
            "--n_column_clusters",
            help="number of column clusters",
            default=4,
            type=int,
            required=True,
        )
    else:
        input_grp.add_argument(
            "-k",
            "--n_clusters",
            help="number of clusters",
            default=4,
            type=int,
            required=True,
        )
    output_grp = parser.add_argument_group("output")
    output_grp.add_argument(
        "-o",
        "--output",
        help="File path for the json results.",
        default="results.json",
    )
    param_grp = parser.add_argument_group("optional arguments")
    param_grp.add_argument(
        "-sep",
        "--sep",
        default=",",
        help="CSV delimiter to use. Default is ',' ",
    )
    if not is_lbm:
        # Accepted as a raw string and normalised later with str2bool.
        param_grp.add_argument(
            "-s",
            "--symmetric",
            help="Specify if the adajacency matrix is symmetric",
            default=False,
        )
    param_grp.add_argument(
        "-niter",
        "--max_iter",
        help="Maximum number of EM step",
        default=10000,
        type=int,
    )
    param_grp.add_argument(
        "-ninit",
        "--n_init",
        help="Number of initializations that will be run",
        default=100,
        type=int,
    )
    param_grp.add_argument(
        "-early",
        "--n_iter_early_stop",
        help="Number of EM steps to perform for each initialization.",
        default=10,
        type=int,
    )
    param_grp.add_argument(
        "-ninitt",
        "--n_init_total_run",
        help="Number of the best initializations that will be run\
        until convergence.",
        default=2,
        type=int,
    )
    param_grp.add_argument(
        "-t",
        "--tol",
        help="Tolerance of likelihood to declare convergence.",
        default=1e-4,
        type=float,
    )
    param_grp.add_argument(
        "-v",
        "--verbosity",
        help="Degree of verbosity. Scale from 0 (no message displayed)\
        to 3.",
        default=1,
        type=int,
    )
    # NOTE(review): type=bool is a known argparse pitfall (any non-empty
    # string is truthy); kept for interface compatibility.
    param_grp.add_argument(
        "-gpu",
        "--use_gpu",
        help="Specify if a GPU should be used.",
        default=_DEFAULT_USE_GPU,
        type=bool,
    )
    param_grp.add_argument(
        "-idgpu",
        "--gpu_index",
        help="Specify the gpu index if needed.",
        default=None,
        type=bool,
    )


def define_parsers():
    """Build the full `sparsebm` command-line interface.

    Sub-commands: ``sbm``, ``lbm`` (fit a model), ``modelselection``
    (explore the number of classes) and ``generate`` (create synthetic
    data).  Returns the top-level :class:`argparse.ArgumentParser`.
    """
    main = argparse.ArgumentParser(prog="sparsebm")
    subparsers = main.add_subparsers(
        help="algorithm to use", dest="subparser_name"
    )
    sbm_parser = subparsers.add_parser(
        "sbm", help="use the stochastic block model"
    )
    lbm_parser = subparsers.add_parser(
        "lbm", help="use the latent block model"
    )
    _add_model_selection_parser(subparsers)
    _add_generate_parsers(subparsers)
    for parser in [sbm_parser, lbm_parser]:
        _configure_fit_parser(parser, is_lbm=parser is lbm_parser)
    return main
def graph_from_csv(file, type, sep=","):
    """Load an edge-list CSV into a sparse adjacency matrix.

    Args:
        file: path to a CSV whose first two columns are source/target ids.
        type: "sbm" for a single shared node-id space; anything else is
            treated as bipartite (independent row/column id spaces).
        sep: CSV delimiter.

    Returns:
        tuple: ``(graph, i_mapping, j_mapping)`` where *graph* is a
        ``scipy.sparse.coo_matrix`` and the mappings translate original
        ids to contiguous indices; *j_mapping* is None for "sbm".
    """
    try:
        pda = pd.read_csv(file, sep=sep, header=None)
        npa = pda[[0, 1]].to_numpy()
        if type == "sbm":
            # One shared universe: every id seen in either column maps to
            # a single contiguous index, giving a square matrix.
            node_i_from = np.unique(npa)
            node_i_to = np.arange(node_i_from.size)
            i_mapping = {
                f: t for f, t in np.stack((node_i_from, node_i_to), 1)
            }
            rows = pda[0].map(i_mapping)
            cols = pda[1].map(i_mapping)
            graph = sp.coo_matrix(
                (np.ones(npa.shape[0]), (rows, cols)),
                shape=(node_i_from.size, node_i_from.size),
            )
            return graph, i_mapping, None
        else:
            # Bipartite case: rows and columns have independent id spaces.
            node_i_from = np.unique(npa[:, 0])
            node_i_to = np.arange(node_i_from.size)
            i_mapping = {
                f: t for f, t in np.stack((node_i_from, node_i_to), 1)
            }
            rows = pda[0].map(i_mapping)
            node_j_from = np.unique(npa[:, 1])
            node_j_to = np.arange(node_j_from.size)
            j_mapping = {
                f: t for f, t in np.stack((node_j_from, node_j_to), 1)
            }
            cols = pda[1].map(j_mapping)
            graph = sp.coo_matrix(
                (np.ones(npa.shape[0]), (rows, cols)),
                shape=(node_i_from.size, node_j_from.size),
            )
            return graph, i_mapping, j_mapping
    except Exception as e:
        logger.error(e)
        # Idiomatic bare re-raise (previously `raise e`, which adds this
        # frame to the traceback for no benefit).
        raise
def str2bool(v):
    """Coerce a CLI-supplied flag value (bool or string) to a bool.

    Raises:
        argparse.ArgumentTypeError: if *v* is not a recognised truthy or
        falsy spelling.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def process_sbm(args):
    """Run the 'sbm' sub-command: fit an SBM on an edge-list CSV.

    Loads the graph, fits a Stochastic Block Model with the CLI
    hyper-parameters and writes a JSON summary (ICL, group
    probabilities, clusters mapped back to the original node ids) to
    ``args["output"]``.  Returns None either way; failure is logged.
    """
    graph, row_from_to, _ = graph_from_csv(
        args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
    )
    model = SBM(
        max_iter=args["max_iter"],
        n_clusters=args["n_clusters"],
        n_init=args["n_init"],
        n_iter_early_stop=args["n_iter_early_stop"],
        n_init_total_run=args["n_init_total_run"],
        verbosity=args["verbosity"],
        atol=args["tol"],
        use_gpu=args["use_gpu"],
        gpu_index=args["gpu_index"],
    )
    # --symmetric arrives as a raw string from argparse; normalise to bool.
    symmetric = str2bool(args["symmetric"])
    logger.info(
        "Runing with symmetric adjacency matrix : {}".format(symmetric)
    )
    model.fit(graph, symmetric=symmetric)
    if not model.trained_successfully:
        logger.error("FAILED, model has not been trained successfully.")
        return None
    logger.info("Model has been trained successfully.")
    logger.info(
        "Value of the Integrated Completed Loglikelihood is {:.4f}".format(
            model.get_ICL()
        )
    )
    labels = model.labels
    # One array of contiguous node indices per cluster.
    groups = [
        np.argwhere(labels == q).flatten() for q in range(args["n_clusters"])
    ]
    # Invert the id mapping to report the caller's original node ids.
    row_to_from = {v: k for k, v in row_from_to.items()}
    groups = [pd.Series(g).map(row_to_from).tolist() for g in groups]
    # NOTE(review): the key "ILC" looks like a typo for "ICL" — kept
    # unchanged for compatibility with existing consumers of the JSON.
    results = {
        "ILC": model.get_ICL(),
        "edge_probability_between_groups": model.pi_.tolist(),
        "group_membership_probability": model.group_membership_probability.flatten().tolist(),
        "node_ids_clustered": groups,
    }
    with open(args["output"], "w") as outfile:
        json.dump(results, outfile)
    logger.info("Results saved in {}".format(args["output"]))
def process_lbm(args):
    """Run the 'lbm' sub-command: fit an LBM on a bipartite edge list.

    Fits a Latent Block Model with the CLI hyper-parameters and writes a
    JSON summary (ICL, row/column group probabilities, row and column
    clusters mapped back to original ids) to ``args["output"]``.
    """
    graph, row_from_to, col_from_to = graph_from_csv(
        args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
    )
    model = LBM(
        max_iter=args["max_iter"],
        n_row_clusters=args["n_row_clusters"],
        n_column_clusters=args["n_column_clusters"],
        n_init=args["n_init"],
        n_iter_early_stop=args["n_iter_early_stop"],
        n_init_total_run=args["n_init_total_run"],
        verbosity=args["verbosity"],
        atol=args["tol"],
        use_gpu=args["use_gpu"],
        gpu_index=args["gpu_index"],
    )
    model.fit(graph)
    if not model.trained_successfully:
        logger.error("FAILED, model has not been trained successfully.")
        return None
    logger.info("Model has been trained successfully.")
    logger.info(
        "Value of the Integrated Completed Loglikelihood is {:.4f}".format(
            model.get_ICL()
        )
    )
    # Row clusters, translated back to the caller's original row ids.
    row_labels = model.row_labels
    row_groups = [
        np.argwhere(row_labels == q).flatten()
        for q in range(args["n_row_clusters"])
    ]
    row_to_from = {v: k for k, v in row_from_to.items()}
    row_groups = [pd.Series(g).map(row_to_from).tolist() for g in row_groups]
    # Same for the column clusters.
    col_labels = model.column_labels
    col_groups = [
        np.argwhere(col_labels == q).flatten()
        for q in range(args["n_column_clusters"])
    ]
    col_to_from = {v: k for k, v in col_from_to.items()}
    col_groups = [pd.Series(g).map(col_to_from).tolist() for g in col_groups]
    # NOTE(review): "ILC" looks like a typo for "ICL" — kept unchanged
    # for compatibility with existing consumers of the JSON output.
    results = {
        "ILC": model.get_ICL(),
        "edge_probability_between_groups": model.pi_.tolist(),
        "row_group_membership_probability": model.row_group_membership_probability.flatten().tolist(),
        "column_group_membership_probability": model.column_group_membership_probability.flatten().tolist(),
        "node_type_1_ids_clustered": row_groups,
        "node_type_2_ids_clustered": col_groups,
    }
    with open(args["output"], "w") as outfile:
        json.dump(results, outfile)
    logger.info("Results saved in {}".format(args["output"]))
def generate_sbm(args):
    """Run `sparsebm generate sbm`: create a synthetic SBM graph.

    Reads an optional JSON configuration (the CLI's -f/--file option),
    generates the dataset and writes ``./groups.json`` (ground-truth
    clusters and parameters) plus ``./edges.csv`` (the edge list).
    """
    # BUG FIX: the CLI registers the configuration file under dest
    # "file" (-f/--file), so the old check `"JSON_FILE" in args` was
    # always False and the configuration was silently ignored.  Also
    # guard against the key being present but None (no -f given).
    config_file = args.get("file")
    if config_file:
        with open(config_file) as f:
            conf = json.load(f)
    else:
        conf = {}
    number_of_nodes = conf.get("number_of_nodes")
    number_of_clusters = conf.get("number_of_clusters")
    connection_probabilities = (
        np.array(conf["connection_probabilities"])
        if "connection_probabilities" in conf
        else None
    )
    cluster_proportions = (
        np.array(conf["cluster_proportions"])
        if "cluster_proportions" in conf
        else None
    )
    symmetric = conf.get("symmetric", False)
    dataset = generate_SBM_dataset(
        number_of_nodes,
        number_of_clusters,
        connection_probabilities,
        cluster_proportions,
        symmetric=symmetric,
    )
    graph = dataset["data"]
    # Store the sparse graph as an (n_edges, 2) array of (row, col) pairs.
    graph = np.stack((graph.row, graph.col), 1)
    cluster_indicator = dataset["cluster_indicator"]
    labels = cluster_indicator.argmax(1)
    number_of_clusters = cluster_indicator.shape[1]
    groups = [
        np.argwhere(labels == q).flatten().tolist()
        for q in range(number_of_clusters)
    ]
    # BUG FIX: `if connection_probabilities` raises ValueError for a
    # numpy array with more than one element — compare against None.
    results = {
        "node_ids_grouped": groups,
        "number_of_nodes": number_of_nodes,
        "number_of_clusters": number_of_clusters,
        "connection_probabilities": connection_probabilities.flatten().tolist()
        if connection_probabilities is not None
        else None,
        "cluster_proportions": cluster_proportions.tolist()
        if cluster_proportions is not None
        else None,
    }
    file_groups = "./groups.json"
    file_edges = "./edges.csv"
    with open(file_groups, "w") as outfile:
        json.dump(results, outfile)
    logger.info("\n Groups and params saved in {}".format(file_groups))
    np.savetxt(file_edges, graph, delimiter=",")
    logger.info("Edges saved in {}".format(file_edges))
def generate_lbm(args):
    """Run `sparsebm generate lbm`: create a synthetic LBM dataset.

    Reads an optional JSON configuration (the CLI's -f/--file option),
    generates the bipartite dataset and writes ``./groups.json``
    (ground-truth row/column clusters and parameters) plus
    ``./edges.csv`` (the edge list).
    """
    # BUG FIX: the CLI registers the configuration file under dest
    # "file" (-f/--file), so the old check `"JSON_FILE" in args` was
    # always False and the configuration was silently ignored.
    config_file = args.get("file")
    if config_file:
        with open(config_file) as f:
            conf = json.load(f)
    else:
        conf = {}
    number_of_rows = conf.get("number_of_rows")
    number_of_columns = conf.get("number_of_columns")
    nb_row_clusters = conf.get("nb_row_clusters")
    nb_column_clusters = conf.get("nb_column_clusters")
    connection_probabilities = (
        np.array(conf["connection_probabilities"])
        if "connection_probabilities" in conf
        else None
    )
    row_cluster_proportions = (
        np.array(conf["row_cluster_proportions"])
        if "row_cluster_proportions" in conf
        else None
    )
    column_cluster_proportions = (
        np.array(conf["column_cluster_proportions"])
        if "column_cluster_proportions" in conf
        else None
    )
    dataset = generate_LBM_dataset(
        number_of_rows,
        number_of_columns,
        nb_row_clusters,
        nb_column_clusters,
        connection_probabilities,
        row_cluster_proportions,
        column_cluster_proportions,
    )
    graph = dataset["data"]
    number_of_rows, number_of_columns = graph.shape
    # Store the sparse graph as an (n_edges, 2) array of (row, col) pairs.
    graph = np.stack((graph.row, graph.col), 1)
    row_cluster_indicator = dataset["row_cluster_indicator"]
    column_cluster_indicator = dataset["column_cluster_indicator"]
    row_labels = row_cluster_indicator.argmax(1)
    col_labels = column_cluster_indicator.argmax(1)
    nb_row_clusters = row_cluster_indicator.shape[1]
    nb_column_clusters = column_cluster_indicator.shape[1]
    row_groups = [
        np.argwhere(row_labels == q).flatten().tolist()
        for q in range(nb_row_clusters)
    ]
    col_groups = [
        np.argwhere(col_labels == q).flatten().tolist()
        for q in range(nb_column_clusters)
    ]
    # BUG FIX: truth-testing a multi-element numpy array raises
    # ValueError — compare against None instead.
    results = {
        "row_ids_grouped": row_groups,
        "column_ids_grouped": col_groups,
        "number_of_rows": number_of_rows,
        "number_of_columns": number_of_columns,
        "nb_row_clusters": nb_row_clusters,
        "nb_column_clusters": nb_column_clusters,
        "connection_probabilities": connection_probabilities.flatten().tolist()
        if connection_probabilities is not None
        else None,
        "row_cluster_proportions": row_cluster_proportions.tolist()
        if row_cluster_proportions is not None
        else None,
        "column_cluster_proportions": column_cluster_proportions.tolist()
        if column_cluster_proportions is not None
        else None,
    }
    file_groups = "./groups.json"
    file_edges = "./edges.csv"
    with open(file_groups, "w") as outfile:
        json.dump(results, outfile)
    logger.info("\nGroups and params saved in {}".format(file_groups))
    np.savetxt(file_edges, graph, delimiter=",")
    logger.info("Edges saved in {}".format(file_edges))
def process_model_selection(args):
    """Run the 'modelselection' sub-command.

    Explores the number of classes with sparsebm's ModelSelection for
    either model type, then writes the best model's summary to
    ``args["output"]``.
    """
    if args["type"].upper() not in ["SBM", "LBM"]:
        raise Exception("Invalid type argument. Must be 'SBM' or 'LBM'")
    graph, row_from_to, col_from_to = graph_from_csv(
        args["ADJACENCY_MATRIX"], args["type"].lower(), sep=args["sep"]
    )
    model_selection = ModelSelection(
        model_type=args["type"].upper(),
        use_gpu=args["use_gpu"],
        gpu_index=args["gpu_index"],
        plot=args["plot"],
    )
    # NOTE(review): args["symmetric"] is the raw CLI value (a string when
    # passed on the command line) — confirm ModelSelection.fit coerces it.
    model = model_selection.fit(graph, symmetric=args["symmetric"])
    if not model.trained_successfully:
        logger.error("FAILED, model has not been trained successfully.")
        return None
    logger.info("Model has been trained successfully.")
    logger.info(
        "Value of the Integrated Completed Loglikelihood is {:.4f}".format(
            model.get_ICL()
        )
    )
    # BUG FIX: --type is accepted case-insensitively above, but the old
    # code compared `args["type"] == "lbm"` exactly, so e.g. "LBM" fell
    # into the SBM branch and crashed on missing attributes.
    if args["type"].lower() == "lbm":
        logger.info(
            "The model selection picked {} row classes".format(
                model.n_row_clusters
            )
        )
        logger.info(
            "The model selection picked {} column classes".format(
                model.n_column_clusters
            )
        )
        nb_row_clusters = model.n_row_clusters
        nb_column_clusters = model.n_column_clusters
        # Row clusters, translated back to the original row ids.
        row_labels = model.row_labels
        row_groups = [
            np.argwhere(row_labels == q).flatten()
            for q in range(nb_row_clusters)
        ]
        row_to_from = {v: k for k, v in row_from_to.items()}
        row_groups = [
            pd.Series(g).map(row_to_from).tolist() for g in row_groups
        ]
        # Same for the column clusters.
        col_labels = model.column_labels
        col_groups = [
            np.argwhere(col_labels == q).flatten()
            for q in range(nb_column_clusters)
        ]
        col_to_from = {v: k for k, v in col_from_to.items()}
        col_groups = [
            pd.Series(g).map(col_to_from).tolist() for g in col_groups
        ]
        # NOTE(review): "ILC" looks like a typo for "ICL" — kept for
        # output compatibility.
        results = {
            "ILC": model.get_ICL(),
            "nb_row_clusters": nb_row_clusters,
            "nb_column_clusters": nb_column_clusters,
            "edge_probability_between_groups": model.pi_.tolist(),
            "row_group_membership_probability": model.row_group_membership_probability.flatten().tolist(),
            "column_group_membership_probability": model.column_group_membership_probability.flatten().tolist(),
            "node_type_1_ids_clustered": row_groups,
            "node_type_2_ids_clustered": col_groups,
        }
    else:
        logger.info(
            "The model selection picked {} classes".format(model.n_clusters)
        )
        nb_clusters = model.n_clusters
        labels = model.labels
        groups = [
            np.argwhere(labels == q).flatten() for q in range(nb_clusters)
        ]
        row_to_from = {v: k for k, v in row_from_to.items()}
        groups = [pd.Series(g).map(row_to_from).tolist() for g in groups]
        results = {
            "ILC": model.get_ICL(),
            "nb_clusters": nb_clusters,
            "edge_probability_between_groups": model.pi_.tolist(),
            "group_membership_probability": model.group_membership_probability.flatten().tolist(),
            "node_ids_clustered": groups,
        }
    with open(args["output"], "w") as outfile:
        json.dump(results, outfile)
    logger.info("Results saved in {}".format(args["output"]))
def main():
    """CLI entry point: parse arguments and dispatch on the sub-command."""
    args = vars(define_parsers().parse_args())
    command = args["subparser_name"]
    if command == "sbm":
        process_sbm(args)
    elif command == "lbm":
        process_lbm(args)
    elif command == "modelselection":
        process_model_selection(args)
    elif command == "generate":
        generator = args["subparsers_generate_name"]
        model_kind = generator.upper() if generator else None
        if model_kind == "SBM":
            generate_sbm(args)
        elif model_kind == "LBM":
            generate_lbm(args)
        else:
            raise Exception(
                "Specify positional argument 'sbm' or 'lbm' to generate data"
            )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3323563 | import numpy as np
import pandas as pd
import DataAnalysisRoutines as dar
def create_voyager_file_list_years(start, end):
    """Collect the sorted Voyager-1 2s magnetometer CDF file paths for
    every year in the inclusive range [start, end]."""
    import glob

    # archive directory holding the voyager files
    basedir = '/archive/cdaweb.gsfc.nasa.gov/pub/data/voyager/voyager1/'
    filelist = []
    # one sorted batch of *_v01.cdf files per year, appended in order
    for year in range(start, end + 1):
        pattern = (basedir + 'magnetic_fields_cdaweb/mag_2s/'
                   + str(year) + '/*_v01.cdf')
        filelist.extend(sorted(glob.iglob(pattern)))
    return filelist
def create_voyager_file_list_for_velocity_years(start, end):
    """Collect the sorted Voyager-1 coho1hr magplasma CDF file paths for
    every year in the inclusive range [start, end]."""
    import glob

    basedir = '/archive/cdaweb.gsfc.nasa.gov/pub/data/voyager/voyager1/coho1hr_magplasma/'
    filelist = []
    for year in range(start, end + 1):
        filelist.extend(sorted(glob.iglob(basedir + str(year) + '/*_v01.cdf')))
    return filelist
def create_voyager_file_list_days(year, day):
    """Return the Voyager-1 2s magnetometer CDF path(s) matching a single
    day, where *day* is the 'mmdd' part of the file name."""
    import glob

    # archive directory holding the voyager files
    basedir = '/archive/cdaweb.gsfc.nasa.gov/pub/data/voyager/voyager1/'
    pattern = (basedir + 'magnetic_fields_cdaweb/mag_2s/' + str(year)
               + '/voyager1_2s_mag_' + str(year) + day + '_v01.cdf')
    return sorted(glob.iglob(pattern))
def voyager_kurtosis(series, lags, au, delta_t, scaling):
    """
    Routine to take the kurtosis of voyager data specifically of 1 component of the magnetic field
    Input:
        series: the series to calculate the kurtosis of
        lags: the array of lags with units of scaling
        au: the AU that the series is within the range of au to au+1
        delta_t: the time difference between each point in series
        scaling: either 'di' or 'lambda' — a constant multiple of di or a
            constant fraction of lambda
    Output:
        k: the calculated kurtosis of the series at the given lags
        lags: the lags as passed in (see NOTE below)
    """
    var = pd.read_pickle('/data/DATA-CODES/processed_data/Data/VY1DailyAverageVelocityDensityDistance.pkl')
    v = var.v
    n = var.n
    d = var.dist
    # First daily index at which the spacecraft passes `au` / `au+1`.
    start = d[d > au].reset_index()['index'][0]
    end = d[d > au + 1].reset_index()['index'][0]
    v_arr = v[start:end]
    n_arr = n[start:end]
    # BUG FIX: the old code compared `x == np.NaN`, which is ALWAYS False
    # (NaN never compares equal), so the fallback values were never used.
    # Use np.isnan instead.
    v_arr_mean = v_arr.mean()
    if np.isnan(v_arr_mean):
        v_arr_mean = 450  # typical solar-wind speed fallback (km/s)
    n_arr_mean = n_arr.mean()
    if np.isnan(n_arr_mean):
        # Fall back to the inner-heliosphere density scaled by 1/r^2.
        n_arr_mean = var.n[var.dist < 2].mean() / (au ** 2)
    series = series.copy()
    lags = np.asarray(lags)
    if scaling == 'di':
        ptlag = dar.diToDeltaT(lags, v_arr_mean, n_arr_mean) / delta_t
    elif scaling == 'lambda':
        au1 = d[d > 1].reset_index()['index'][0]
        au2 = d[d > 2].reset_index()['index'][0]
        ptlag = dar.lambdaToDeltaT(lags, v_arr_mean, n[au1:au2].mean(), au) / delta_t
    else:
        # Previously an unknown `scaling` crashed later with a NameError
        # on ptlag; fail early with a clear message instead.
        raise ValueError("scaling must be 'di' or 'lambda', got %r" % (scaling,))
    k, ptlag = dar.kurtosis(series, ptlag)
    # NOTE(review): the docstring of the original promised the di-varied
    # lags, but the input `lags` is returned unchanged — confirm against
    # dar.kurtosis before relying on the second return value.
    return k, lags
| StarcoderdataPython |
1629694 | <filename>blog/forms.py
from xml.etree.ElementTree import Comment
from django import forms
from blog.models import commnet
from captcha.fields import CaptchaField
class commentform (forms.ModelForm):
    """ModelForm used to submit a comment on a blog post."""
    # CAPTCHA validation is currently disabled.
    #captcha = CaptchaField()
    class Meta:
        # NOTE(review): the model class is spelled `commnet` in
        # blog.models (looks like a typo of `comment`); renaming would
        # require a coordinated model + migration change.
        model = commnet
        fields = ['post','name','subject','message','email']
3243596 | <gh_stars>0
import datetime
import json
from typing import List, Dict, Union
import dateutil.parser
import requests
from requests import Response
class BotmanApi:
    """Thin HTTP client for a "Botman" bot-management REST API.

    Every call authenticates with an ``X-API-KEY`` header.  Network
    failures (connection errors, timeouts) are swallowed: request
    helpers return an empty dict, and the public getters then surface
    ``False`` instead of raising.
    """

    def __init__(self, base_url: str, token: str, cert=False, timeout: int = 5):
        """Store connection settings; *base_url* gains a ``{path}`` slot
        that each method fills with its endpoint path."""
        self.base_url = base_url + "{path}"
        self.token = token
        # Raw requests.Response of the most recent call (None before any).
        self.last_response = None
        # Forwarded to requests' ``verify=``; False disables TLS checks.
        self.cert = cert
        self.timeout = timeout

    def _headers(self) -> dict:
        """Return the authentication headers sent with every request."""
        return {"X-API-KEY": self.token}

    def _parse_response(self, response: Response):
        """Decode a JSON response body; any other content yields {}."""
        if "application/json" in response.headers.get('content-type', "") and response.content:
            content = json.loads(response.content.decode('utf-8'))
            return content
        else:
            return {}

    def _post(self, endpoint: str, data: dict = None) -> Union[dict, bool]:
        """POST *data* as JSON to *endpoint*; network failures yield {}."""
        try:
            response = requests.post(endpoint, json=data, headers=self._headers(), verify=self.cert, timeout=self.timeout)
            self.last_response = response
            result = self._parse_response(response)
            return result
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            return {}

    def _get(self, endpoint: str, params: dict = None) -> dict:
        """GET *endpoint* with query *params*; network failures yield {}."""
        try:
            response = requests.get(endpoint, params=params, headers=self._headers(), verify=self.cert, timeout=self.timeout)
            self.last_response = response
            result = self._parse_response(response)
            return result
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            return {}

    def get_bot_status(self) -> bool:
        """Return the bot's reported status, or False on failure."""
        endpoint = self.base_url.format(path='bot/status')
        content = self._get(endpoint)
        if content.get('success', False):
            return content['data']['status']
        else:
            return False

    def get_api_status(self) -> bool:
        """Return the API's reported status, or False on failure."""
        endpoint = self.base_url.format(path='status')
        content = self._get(endpoint)
        if content.get('success', False):
            return content['data']['status']
        else:
            return False

    def start_bot(self) -> bool:
        """Ask the API to start the bot; True on success."""
        endpoint = self.base_url.format(path='bot/start')
        content = self._post(endpoint)
        return content.get('success', False)

    def stop_bot(self) -> bool:
        """Ask the API to stop the bot; True on success."""
        endpoint = self.base_url.format(path='bot/stop')
        content = self._post(endpoint)
        return content.get('success', False)

    def restart_bot(self) -> bool:
        """Ask the API to restart the bot; True on success."""
        endpoint = self.base_url.format(path='bot/restart')
        content = self._post(endpoint)
        return content.get('success', False)

    def uptime(self) -> Union[datetime.timedelta, bool]:
        """Return the bot's uptime as a timedelta, or False on failure."""
        endpoint = self.base_url.format(path='bot/uptime')
        content = self._get(endpoint)
        if content.get('success', False):
            return datetime.timedelta(seconds=content['data']['uptime'])
        else:
            return False

    def start_date(self) -> Union[datetime.datetime, bool]:
        """Return the bot's start datetime, None if it has not been
        started, or False on failure."""
        endpoint = self.base_url.format(path='bot/start-date')
        content = self._get(endpoint)
        if content.get('success', False):
            if content['data']['start_date'] is None:
                return None
            return dateutil.parser.parse(content['data']['start_date'])
        else:
            return False

    def get_data(self, table: str, columns: List[str] = None, limit: int = 300, offset: int = 0, ordering: int = 1) -> Union[List[Dict], bool]:
        """Fetch rows of *table* with optional column selection and
        paging; False on failure."""
        endpoint = self.base_url.format(path='bot/data/{table}'.format(table=table))
        params = {
            # API expects a comma-separated column list, or None for all.
            "columns": ", ".join(columns) if columns else None,
            "limit": limit,
            "offset": offset,
            "ordering": ordering
        }
        content = self._get(endpoint, params=params)
        if content.get('success', False):
            return content['data']
        else:
            return False

    def get_specific_data(self, table: str, data_id: Union[int, str], columns: List[str] = None) -> Union[dict, bool]:
        """Fetch a single row of *table* by id; False on failure."""
        endpoint = self.base_url.format(path='bot/data/{table}/{data_id}'.format(table=table, data_id=data_id))
        params = {
            "columns": ", ".join(columns) if columns else None,
        }
        content = self._get(endpoint, params=params)
        if content.get('success', False):
            return content['data']
        else:
            return False

    def count_data(self, table: str) -> int:
        """Return the row count of *table*, or False on failure."""
        endpoint = self.base_url.format(path='bot/data/{table}/count'.format(table=table))
        content = self._get(endpoint, params={})
        if content.get('success', False):
            return content['data']
        else:
            return False

    def distribution(self, text: str, when: datetime.datetime = None, web_preview: bool = True, notification: bool = True, parse_mode: Union[str, None] = None) -> bool:
        """Broadcast *text* to all bot users, optionally scheduled at
        *when* (sent as ISO-8601); True on success."""
        endpoint = self.base_url.format(path='bot/distribution')
        data = {
            "text": text,
            "web_preview": web_preview,
            "notification": notification,
            "parse_mode": parse_mode
        }
        if when:
            data.update(when=when.isoformat())
        content = self._post(endpoint, data=data)
        return content.get('success', False)

    def send_message(self, chat_id: Union[int, str], text: str) -> bool:
        """Send *text* to a single chat; True on success."""
        endpoint = self.base_url.format(path='bot/messages/send')
        data = {
            "chat_id": chat_id,
            "text": text,
        }
        content = self._post(endpoint, data=data)
        return content.get('success', False)

    def get_users(self, limit: int = 300, offset: int = 0, ordering: int = 1) -> Union[List[Dict], bool]:
        """Fetch the bot's users with paging; False on failure."""
        endpoint = self.base_url.format(path='bot/users')
        params = {
            "limit": limit,
            "offset": offset,
            "ordering": ordering
        }
        content = self._get(endpoint, params=params)
        if content.get('success', False):
            return content['data']
        else:
            return False

    def get_user(self, user_id: Union[int, str]) -> Union[dict, bool]:
        """Fetch a single user by id; False on failure."""
        endpoint = self.base_url.format(path='bot/users/{user_id}'.format(user_id=user_id))
        content = self._get(endpoint, params={})
        if content.get('success', False):
            return content['data']
        else:
            return False

    def get_stats(self) -> Union[Dict, bool]:
        """Fetch the bot's aggregate statistics; False on failure."""
        endpoint = self.base_url.format(path='bot/stats')
        content = self._get(endpoint, params={})
        if content.get('success', False):
            return content['data']
        else:
            return False
| StarcoderdataPython |
8005894 | <gh_stars>1-10
from bson.binary import OLD_UUID_SUBTYPE
from pymongo.common import WriteConcern
from database import Database
from bson.json_util import dumps, loads
from bson.objectid import ObjectId
import json
class Connection():
database_path = None
def __init__(self, database_path, tz_aware=False, **kwargs):
self.slave_okay = True
self.read_preference = True
self.tag_sets = None
self.secondary_acceptable_latency_ms = 1
self.safe = True
self.uuid_subtype = OLD_UUID_SUBTYPE
self.write_concern = WriteConcern()
self.document_class = None
self.max_wire_version = 2
self.__tz_aware = tz_aware
self.database_path = database_path
def __getitem__(self, name):
return self.__getattr__(name)
def __getattr__(self, name):
return Database(self, name, self.database_path)
def __repr__(self):
return self.database_path
def _filter(self, content, spec):
for key, value in spec.iteritems():
if isinstance(value, ObjectId):
value = json.loads(dumps(ObjectId(oid=value)))
remove = list()
for item in content:
if item[key] != value:
remove.append(item)
content = [it for it in content if it not in remove]
return content
def start_request(self):
return self
def end_request(self):
pass
def insert(self, collection, database, to_save):
if database.get(collection) is None:
database[collection] = list()
json_to_save = json.loads(dumps(to_save))
for obj in json_to_save:
exists = [item for item in database[collection] if item.get('_id') == obj['_id']]
if len(exists) == 0:
database[collection].append(obj)
elif len(exists) > 1:
raise Exception('There cannot be two elements with the same id')
else:
exists[0] = obj
return json.dumps(database, indent=4)
def query(self, collection, database, query=None):
response = list()
col = database.get(collection)
if col is not None:
if query:
subcol = list(col)
response = loads(json.dumps(self._filter(subcol, query)))
else:
response = loads(json.dumps(col))
return response
def update(self, collection, database, document, spec):
content = json.loads(dumps(document))
col = database.get(collection)
if col is not None:
for doc in col:
if doc['_id'] == content['_id']:
for key, value in spec.iteritems():
if key == '$set':
for field, fvalue in value.iteritems():
if isinstance(fvalue, ObjectId):
fvalue = json.loads(dumps(fvalue))
doc[field] = fvalue
content = doc
break
else:
raise Exception('Cannot update a document from an inexistent collection')
return json.dumps(database, indent=4), loads(json.dumps(content))
def delete(self, collection, database, spec):
    """Remove from *collection* every document matching *spec*.

    Raises if the collection does not exist.  Returns the whole database
    serialized as pretty-printed JSON.
    """
    stored = database.get(collection)
    if stored is None:
        raise Exception('Cannot delete a document from an inexistent collection')
    # Filter a shallow copy; _filter returns the documents to remove.
    doomed = self._filter(list(stored), spec)
    if doomed:
        database[collection] = [doc for doc in stored if doc not in doomed]
    return json.dumps(database, indent=4)
| StarcoderdataPython |
3449786 | """
Autor: <NAME>
Formula de Kamenetsky
Complexidade: O(1)
A formula de kamenetsky permite saber quantos
digitos tem o fatorial de um numero qualquer > 0
sem precisar calcular seu fatorial.
"""
def kamenetsky(number):
    """Return the number of digits of number! without computing the factorial.

    Uses Kamenetsky's formula, derived from Stirling's approximation:

        digits(n!) = floor(n*log10(n/e) + log10(2*pi*n)/2) + 1

    Runs in O(1).

    :param number: the number whose factorial's digit count is wanted
    :return: digit count of number!, or 0 for negative input (undefined)
    """
    import math

    if number < 0:
        # Factorial is not defined for negative numbers.
        return 0
    if number <= 1:
        # 0! = 1! = 1, which has a single digit.
        return 1
    stirling_term = number * math.log10(number / math.e)
    correction = math.log10(2 * math.pi * number) / 2
    return math.floor(stirling_term + correction) + 1
| StarcoderdataPython |
8040888 | <reponame>mrmotallebi/pytorch_geometric
from math import pi as PI
import torch
class Spherical(object):
    r"""Saves the spherical coordinates of linked nodes in its edge attributes.

    Each edge (i, j) is annotated with the spherical coordinates
    (rho, theta, phi) of the offset vector ``pos[j] - pos[i]``.

    Args:
        norm (bool, optional): If set to :obj:`False`, the output will not be
            normalized to the interval :math:`{[0, 1]}^3`.
            (default: :obj:`True`)
        max_value (float, optional): If set and :obj:`norm=True`, normalization
            will be performed based on this value instead of the maximum value
            found in the data. (default: :obj:`None`)
        cat (bool, optional): If set to :obj:`False`, all existing edge
            attributes will be replaced. (default: :obj:`True`)
    """

    def __init__(self, norm=True, max_value=None, cat=True):
        self.norm = norm
        self.max = max_value
        self.cat = cat

    def __call__(self, data):
        src, dst = data.edge_index
        pos, old_attr = data.pos, data.edge_attr
        assert pos.dim() == 2 and pos.size(1) == 3
        # Cartesian offset of every edge.
        offset = pos[dst] - pos[src]
        # Radius, azimuth and inclination, each as a column vector.
        radius = torch.norm(offset, p=2, dim=-1).view(-1, 1)
        azimuth = torch.atan2(offset[..., 1], offset[..., 0]).view(-1, 1)
        # Shift atan2's (-pi, pi] range into [0, 2*pi).
        azimuth = azimuth + (azimuth < 0).type_as(azimuth) * (2 * PI)
        inclination = torch.acos(offset[..., 2] / radius.view(-1)).view(-1, 1)
        if self.norm:
            divisor = radius.max() if self.max is None else self.max
            radius = radius / divisor
            azimuth = azimuth / (2 * PI)
            inclination = inclination / PI
        spherical = torch.cat([radius, azimuth, inclination], dim=-1)
        if old_attr is not None and self.cat:
            # Promote 1-D attributes to a column so they concatenate cleanly.
            if old_attr.dim() == 1:
                old_attr = old_attr.view(-1, 1)
            data.edge_attr = torch.cat([old_attr, spherical.type_as(pos)], dim=-1)
        else:
            data.edge_attr = spherical
        return data

    def __repr__(self):
        return '{}(norm={}, max_value={})'.format(self.__class__.__name__,
                                                  self.norm, self.max)
| StarcoderdataPython |
9728547 | <filename>mri-2d-resnet18-cam-norm.py
"""
Class Activation Map for ResNet-18 with 2D Tensor of MRI slices.
Normalize the response over small subset of patches from all classes.
author: <NAME>
email: <EMAIL>
date: 10-03-2019
"""
import torch
from torch import nn
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from torchvision import models
from torch import optim
from torch.optim import lr_scheduler
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torch.nn import functional as F
from torch import topk
import numpy as np
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
import nibabel
import os
import random
import pandas as pd
import math
import scipy.misc
import time
import copy
import skimage.transform
# Patch size cropped from each MRI slice before resizing to 224x224.
patch_h = 112
patch_w = 112
# Alternate CSV lists for a different machine, kept for reference:
# train_csv = 'utils/train-cam-tesla-0.csv'
# val_csv = 'utils/val-cam-tesla-0.csv'
train_csv = 'utils/train-cam-office-0.csv'
val_csv = 'utils/val-cam-office-0.csv'
checkpoint_dir = './checkpoints/'
ckpt_path = checkpoint_dir+'mri-dqa-2d-resnet-18-epoch-350.pth'
# Prefer GPU when available; all tensors/models are moved to this device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# >> DataSet Class >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
class MRIData(Dataset):
    """Dataset of random patches cut from random slices of MRI volumes.

    Volume paths are read from a one-column CSV selected by *phase*
    ('train' or 'val').  Each __getitem__ call picks a random slice and a
    random crop, so repeated access to the same index yields different
    patches.
    """

    def __init__(self, phase='train'):
        # phase selects which CSV of volume paths to load.
        self.phase = phase
        if self.phase == 'train':
            self.data_list_path = train_csv
        elif self.phase == 'val':
            self.data_list_path = val_csv
        else:
            assert False, 'Invalid argument for phase. Choose from (train, val)'
        data_list_df = pd.read_csv(self.data_list_path, header=None)
        data_list_df.columns = ['path']
        self.image_path_list = list(data_list_df['path'])

    def __getitem__(self, index):
        """
        Returns a 3-channel tensor patch of a slice from an MRI volume.
        The volume is selected by the input argument index. The slice is randomly selected.
        The cropped patch is randomly selected.
        """
        nii = nibabel.load(self.image_path_list[index])
        nii = nii.get_fdata()
        [img_h, img_w, img_d] = nii.shape
        # drop the bottom 25% and top 10% of the slices
        nii = nii[:, :, int(img_d / 4):int(9 * img_d / 10)]
        [img_h, img_w, img_d] = nii.shape
        # Shrink the requested crop if the volume is smaller than the patch.
        _patch_h = patch_h
        _patch_w = patch_w
        if img_h < patch_h:
            _patch_h = img_h
        if img_w < patch_w:
            _patch_w = img_w
        # extract random slice and random patch
        h_l = int(random.randint(0, img_h - _patch_h))
        h_u = int(h_l + _patch_h - 1)
        w_l = int(random.randint(0, img_w - _patch_w))
        w_u = int(w_l + _patch_w - 1)
        d = int(random.randint(0, img_d - 1))
        # NOTE(review): h_u/w_u are *exclusive* slice bounds, so the crop is
        # (_patch_h - 1) x (_patch_w - 1) -- confirm this off-by-one is intended.
        nii = nii[h_l:h_u, w_l:w_u, d]
        # resize to the 224x224 input expected by ResNet-18
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
        # running this requires an old SciPy (with Pillow installed).
        nii = scipy.misc.imresize(nii, (224, 224))
        # convert to pytorch tensor
        nii = torch.tensor(nii)
        nii.unsqueeze_(0)
        # Replicate the grayscale slice to 3 channels (RGB-shaped input).
        nii = nii.repeat(3, 1, 1)
        # return the mri patch and associated label
        return nii

    def __len__(self):
        # One dataset item per MRI volume path.
        return len(self.image_path_list)
# << DataSet Class <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
class SaveFeatures():
    """Forward-hook helper that captures a layer's output as a numpy array.

    Register it on a module; after each forward pass ``features`` holds a
    CPU numpy copy of that module's most recent output.  Call ``remove()``
    to detach the hook when done.
    """

    # Last captured activation (numpy array); None before any forward pass.
    features = None

    def __init__(self, m):
        # Keep the handle so the hook can be detached later.
        self.hook = m.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        # Move to CPU and convert to numpy for the downstream CAM math.
        self.features = output.cpu().data.numpy()

    def remove(self):
        # Detach the forward hook from the module.
        self.hook.remove()
def getCAM(feature_conv, weight_fc, class_idx):
    """Compute a class activation map from conv features and fc weights.

    Args:
        feature_conv: activations of shape (1, nc, h, w).
        weight_fc: fully-connected weight matrix, rows indexable by class.
        class_idx: class index (int or length-1 tensor) to map.

    Returns:
        (cam_img, cam): the min-shifted map normalized to [0, 1], and the
        min-shifted unnormalized map, both of shape (h, w).
    """
    _, nc, h, w = feature_conv.shape
    # Weighted sum of channel maps: (nc,) . (nc, h*w) -> (h*w,)
    cam = weight_fc[class_idx].dot(feature_conv.reshape((nc, h*w)))
    cam = cam.reshape(h, w)
    cam = cam - np.min(cam)
    peak = np.max(cam)
    # Bug fix: a constant feature map made np.max(cam) zero, so the
    # division produced an all-NaN image; return an all-zero map instead.
    cam_img = cam / peak if peak > 0 else np.zeros_like(cam)
    return cam_img, cam
def grad_cam(image, model, count):
    """Render a CAM overlay and histogram for one patch; save it as a PNG.

    Args:
        image: 3xHxW float tensor (one MRI slice patch).
        model: ResNet-18 in eval mode (moved to the CUDA device).
        count: running index used to name the output figure file.
    """
    prediction_var = Variable((image.unsqueeze(0)).cuda(), requires_grad=True)
    # reference to the final layers, depends on the model class
    final_layer = model._modules.get('layer4')
    activated_features = SaveFeatures(final_layer)
    # put the flattened input image through the model
    prediction = model(prediction_var)
    # NOTE(review): F.softmax without dim= is deprecated; for these
    # (1, num_classes) logits it softmaxes over the class dimension.
    pred_probabilities = F.softmax(prediction).data.squeeze()
    activated_features.remove()
    topk_pred = topk(pred_probabilities, 1)  # currently unused
    # fc-layer weights, used to weight the conv channel maps in getCAM.
    weight_softmax_params = list(model._modules.get('fc').parameters())
    weight_softmax = np.squeeze(weight_softmax_params[0].cpu().data.numpy())
    class_idx = topk(pred_probabilities, 1)[1].int()
    overlay, cam = getCAM(activated_features.features, weight_softmax, class_idx)
    img = image[0, :, :].cpu().numpy()
    # fig, ax = plt.subplots(nrows=1, ncols=3)
    ax0 = plt.subplot(221)
    ax0.imshow(img, cmap=plt.cm.bone)
    ax0.set_xticks([], [])
    ax0.set_yticks([], [])
    ax0.set_title('MRI Slice Patch')
    ax1 = plt.subplot(222)
    ax1.imshow(img, cmap=plt.cm.gray)
    # NOTE(review): overlay[0] is a single *row* of the 2-D CAM; resizing
    # the whole `overlay` looks intended -- confirm before changing.
    ax1.imshow(skimage.transform.resize(overlay[0], image.shape[1:3]), alpha=0.3, cmap='jet')
    ax1.set_xticks([], [])
    ax1.set_yticks([], [])
    ax1.set_title('Grad-CAM MRI')
    ax2 = plt.subplot(212)
    # Histogram of the unnormalized CAM values (one series per column).
    ax2.hist(cam, bins=256)
    ax2.set_xlabel('gradient value')
    ax2.set_ylabel('frequency')
    ax2.set_title('GRADCAM Histogram')
    plt.suptitle('Grad-CAM MRI-DQA-Augmented ResNet-18-ABIDE-1')
    fig_path = f'gradcam_rot_norm/gradcam-{count+1}.png'
    plt.savefig(fig_path)
    print(fig_path)
def main():
    """Load the trained ResNet-18 checkpoint and emit one Grad-CAM figure
    per training patch."""
    # Get the pretrained model from checkpoint
    model = models.resnet18(pretrained=False)
    num_ftrs = model.fc.in_features
    # Replace the ImageNet head with a 2-class output layer.
    model.fc = nn.Linear(num_ftrs, 2)
    checkpoint = torch.load(ckpt_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    model = model.to(device)
    # put model in evaluation mode
    model.eval()
    phase = 'train'
    dataset = MRIData(phase)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1, drop_last=True)
    for count, inputs in enumerate(dataloader):
        # Drop the batch dimension; grad_cam re-adds it before the forward.
        image = inputs[0,:,:,:]
        image = image.to(device, dtype=torch.float)
        grad_cam(image, model, count)


if __name__=='__main__':
    main()
| StarcoderdataPython |
# Script to explore the processed PA course database: plots rating and
# length distributions and rating against several course features.
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np

# NOTE(review): hard-coded absolute path; parameterize before reuse.
file_name = '/home/jon/PycharmProjects/jon-insight-project/data/processed/pa_course_database_processed.plk'
df = pd.read_pickle(file_name)
#print(df.head())

# Distribution of round ratings.
sns.distplot(df.rating.dropna(), bins=10)
plt.show()
# Print mean rating of all rounds
print(df.rating.dropna().mean().round(2))
# Rating against categorical course features.
sns.catplot(x="hilliness", y = "rating", data = df)
plt.show()
sns.catplot(x="woodedness", y = "rating", data = df)
plt.show()
sns.catplot(x = "multiple_layouts", y = "rating", data = df)
plt.show()
# Course length distribution.
sns.distplot(df.length.dropna(), bins=10)
plt.show()
# Joint distributions of rating with difficulty and course age.
sns.jointplot(x = 'rating', y = 'par-sse', data = df)
plt.show()
sns.jointplot(x = 'year_established', y = 'rating', data = df)
plt.show()
print('Finished.')
5182179 | from .align import align
import docopt
import re
import collections
import os
import sys
import numpy as np
import pandas as pd
from Bio import SeqIO
import seaborn as sns
import matplotlib.pyplot as plt
def determine_thresholds():
    """
    Run routine to determine the positive pair thresholds for all given opening and extension
    penalty scores with the BLOSUM50 matrix. The 0.7 threshold score for each opening/extension
    score is saved to a pandas dataframe and exported to a csv so I only need to run this once...
    """
    # Record in... a dataframe? Everything in dataframes.
    record_thresholds = pd.DataFrame(columns=['Gap_opening', 'Gap_extension', 'Threshold_Score'])
    # Sweep gap opening 1..20 and gap extension 1..5.
    for gap_opening in range(1, 21, 1):
        for gap_extension in range(1,6,1):
            # I was having a problem where the run_alignments() function was only being run once in the for loop...
            # Not entirely sure why that was, but reinitializing seq_align every time is a solution
            # Not a good solution, but a solution for the time being...
            seq_align = align()
            # Import pospairs and try all gap opening and extention penalties
            seq_align.working_pairs = open('./Pospairs.txt')
            # Set gap opening and extension penalties
            seq_align.gap_opening_penalty = -(gap_opening)
            seq_align.gap_extension_penalty = -(gap_extension)
            # Set substitution matrix to BLOSUM50
            seq_align.substitution_matrix = pd.read_table(open('./BLOSUM50'), delim_whitespace=True, header=6)
            seq_align.substitution_matrix = seq_align.substitution_matrix.set_index(seq_align.substitution_matrix.columns.values)
            print('\n\n\n\n\n')
            print("TESTING THE FOLLOWING GAP OPENING AND EXTENSION PENALTIES:")
            print("GAP OPENING: {}".format(seq_align.gap_opening_penalty))
            print("GAP EXTENSION: {}".format(seq_align.gap_extension_penalty))
            print('\n\n\n\n\n')
            run_alignments(seq_align)
            # Sort max scores for all pospairs and take 15th lowest score as 0.7 threshold
            # (15 of the 50 positive pairs fall below it, so 70% score above).
            threshold_score = sorted(seq_align.max_alignment_score)[14]
            print(sorted(seq_align.max_alignment_score))
            new_row = pd.Series({'Gap_opening': gap_opening,
                                 'Gap_extension': gap_extension,
                                 'Threshold_Score': threshold_score
                                 })
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # newer pandas needs pd.concat here.
            record_thresholds = record_thresholds.append(new_row, ignore_index=True)
    record_thresholds.to_csv('Thresholds.csv')
def determine_gap_penalties():
    """
    Use the threshold values in Thresholds.csv to find the number of false positives in Negpairs.txt
    using the associated gap penalty values.

    For each (gap opening, gap extension) row, re-align all negative pairs
    with BLOSUM50 and record the fraction that scores above that row's 0.7
    threshold.  Results are written to False_Positives.csv.
    """
    gap_thresholds = pd.read_csv('Thresholds.csv')
    for index, row in gap_thresholds.iterrows():
        # Re-initializing align() each iteration avoids state carried
        # between runs (same workaround as determine_thresholds).
        seq_align = align()
        # Score the *negative* pairs this time.
        seq_align.working_pairs = open('./Negpairs.txt')
        # Set gap opening and extension penalties
        seq_align.gap_opening_penalty = -(row['Gap_opening'])
        seq_align.gap_extension_penalty = -(row['Gap_extension'])
        # Set substitution matrix to BLOSUM50
        seq_align.substitution_matrix = pd.read_table(open('./BLOSUM50'), delim_whitespace=True, header=6)
        seq_align.substitution_matrix = seq_align.substitution_matrix.set_index(seq_align.substitution_matrix.columns.values)
        print('\n\n\n\n\n')
        print("TESTING THE FOLLOWING GAP OPENING AND EXTENSION PENALTIES:")
        print("GAP OPENING: {}".format(seq_align.gap_opening_penalty))
        print("GAP EXTENSION: {}".format(seq_align.gap_extension_penalty))
        print('\n\n\n\n\n')
        run_alignments(seq_align)
        # Get counts for elements that get scores above threshold
        above_threshold = [element for element in seq_align.max_alignment_score if element > row['Threshold_Score']]
        print("CURRENT THRESHOLD: {}".format(row['Threshold_Score']))
        print(seq_align.max_alignment_score)
        print(above_threshold)
        false_positive_rate = len(above_threshold)/50
        # Bug fix: DataFrame.ix was deprecated and then removed from
        # pandas; .loc is the label-based equivalent for this assignment.
        gap_thresholds.loc[index, 'False_Positive_Rate'] = false_positive_rate
    gap_thresholds.to_csv('False_Positives.csv')
def compare_matrices(normalize):
    """
    Compare the different substitution matrices in terms of false positives

    For each matrix: find the score threshold keeping 70% of positive
    pairs, then measure the false positive rate on the negative pairs at
    that threshold.  Produces an ROC plot and Compare_matrices.csv.

    Parameters
    ----------
    normalize : bool
        When True, the align() instance length-normalizes alignment scores.
    """
    # Initialize variables and stuff
    # Mapping: matrix file name -> number of header lines to skip on read.
    substitution_matrices = {'BLOSUM50': 6,
                             'BLOSUM62': 6,
                             'MATIO': 2,
                             'PAM100': 9,
                             'PAM250': 9
                             }
    matrix_false_pos = pd.DataFrame(columns=['Matrix', 'False_Positive_Rate'])
    score_dict = {}
    # Loop through matricies and save score lists for true_pos and false_pos
    # Generate ROC and plot
    for sub_matrix in substitution_matrices:
        score_dict[sub_matrix] = {}
        # Find 0.7 threshold for given Matrix
        if normalize == True:
            seq_align = align(normalize=True)
            print('\n\n\n\n\n')
            print("NORMALIZING SCORES")
            print(seq_align.normalize)
        else:
            seq_align = align()
        seq_align.working_pairs = open('./Pospairs.txt')
        print('\n\n\n\n\n')
        print("OBTAINING THRESHOLD SCORE FOR {}".format(sub_matrix))
        print('\n\n\n\n\n')
        # Set substitution matrix
        seq_align.substitution_matrix = pd.read_table(open('./{}'.format(sub_matrix)), delim_whitespace=True, header=substitution_matrices[sub_matrix])
        seq_align.substitution_matrix = seq_align.substitution_matrix.set_index(seq_align.substitution_matrix.columns.values)
        run_alignments(seq_align)
        # Save pospair scores
        # Sort max scores for all pospairs and take 15th lowest score as 0.7 threshold
        score_dict[sub_matrix]['tp'] = sorted(seq_align.max_alignment_score)
        score_dict[sub_matrix]['threshold'] = score_dict[sub_matrix]['tp'][14]
        print('\n\n\n\n\n')
        print("{} THRESHOLD: {}".format(sub_matrix, score_dict[sub_matrix]['threshold']))
        # Find false positive rate using previously found threshold value
        if normalize == True:
            seq_align = align(normalize=True)
        else:
            seq_align = align()
        seq_align.working_pairs = open('./Negpairs.txt')
        print('\n\n\n\n\n')
        print("DETERMINING FALSE POSITIVE RATE FOR {}".format(sub_matrix))
        print('\n\n\n\n\n')
        # Set substitution matrix
        seq_align.substitution_matrix = pd.read_table(open('./{}'.format(sub_matrix)), delim_whitespace=True, header=substitution_matrices[sub_matrix])
        seq_align.substitution_matrix = seq_align.substitution_matrix.set_index(seq_align.substitution_matrix.columns.values)
        run_alignments(seq_align)
        # Save negpair scores
        score_dict[sub_matrix]['fp'] = sorted(seq_align.max_alignment_score)
        # Get counts for elements that get scores above threshold
        above_threshold = [element for element in seq_align.max_alignment_score if element > score_dict[sub_matrix]['threshold']]
        false_positive_rate = len(above_threshold)/50
        print("False Positive Rate: {}".format(false_positive_rate))
        new_row = pd.Series({'Matrix': sub_matrix,
                             'False_Positive_Rate': false_positive_rate
                             })
        # NOTE(review): DataFrame.append was removed in pandas 2.0.
        matrix_false_pos = matrix_false_pos.append(new_row, ignore_index=True)
    generate_ROC(score_dict)
    matrix_false_pos.to_csv("Compare_matrices.csv")
def compare_optimized(matrix):
    """
    Compare:
    1. default matrix with default alignments
    2. optimized scoring matrix against default alignments
    3. optimized scoring matrix against optimized alignments
    Import alignments from align output and use those to generate score_dicts

    *matrix* names the base matrix file (e.g. 'MATIO'); the '-Optimized'
    variant is expected to exist alongside the saved alignment files.
    """
    # Initialize
    gap_opening = -7
    gap_extension = -3
    # Import sequence alignments to list of lists
    default_pos = _crappy_parser('Alignments-{}-pos.txt'.format(matrix))
    default_neg = _crappy_parser('Alignments-{}-neg.txt'.format(matrix))
    optimized_pos = _crappy_parser('Alignments-{}_Optimized-pos.txt'.format(matrix))
    optimized_neg = _crappy_parser('Alignments-{}_Optimized-neg.txt'.format(matrix))
    # Import default and optmized matrices
    # Values = header lines to skip when loading each matrix file.
    substitution_matrices = {'BLOSUM50': 6,
                             'BLOSUM62': 6,
                             'BLOSUM62-Optimized': 0,
                             'MATIO': 2,
                             'MATIO-Optimized': 0,
                             'PAM100': 9,
                             'PAM100-Optimized': 0,
                             'PAM250': 9
                             }
    default_matrix = np.loadtxt(matrix, skiprows=(substitution_matrices[matrix] + 1))
    optimized_matrix = np.loadtxt("{}-Optimized".format(matrix), skiprows=(substitution_matrices["{}-Optimized".format(matrix)] + 1))
    # Calculate FPR/TPR (Ripped from matrix optmization code)
    # Residue -> row/column index into the numpy matrices.
    mat_dict = {'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4, 'Q': 5,
                'E': 6, 'G': 7, 'H': 8, 'I': 9, 'L': 10, 'K': 11,
                'M': 12, 'F': 13, 'P': 14, 'S': 15, 'T': 16, 'W': 17,
                'Y': 18, 'V': 19, 'B': 20, 'Z': 21, 'X': 22, '*': 23
                }
    alignments = {'default_pos': default_pos,
                  'default_neg': default_neg,
                  'optimized_pos': optimized_pos,
                  'optimized_neg': optimized_neg
                  }
    matricies = {'optimized_matrix': optimized_matrix,
                 'default_matrix': default_matrix}
    score_lists = {}
    # NOTE(review): this loop variable shadows the `matrix` parameter,
    # which is not used again below -- confusing but harmless here.
    for matrix in matricies:
        for alignment in alignments:
            prealigned_sequences = alignments[alignment]
            scores = []
            for prealigned_sequence in prealigned_sequences:
                current_score = []
                extending = False
                print(prealigned_sequence[0])
                print(prealigned_sequence[1])
                print(prealigned_sequence[2])
                # Score each pair in alignment
                for seq_A, seq_B in zip(prealigned_sequence[0], prealigned_sequence[2]):
                    if seq_A == '-' or seq_B == '-':
                        # Gap: opening penalty first, extension thereafter.
                        if extending == False:
                            current_score.append(gap_opening)
                            extending = True
                        else:
                            current_score.append(gap_extension)
                            pass
                    else:
                        current_score.append(matricies[matrix][mat_dict[seq_A], mat_dict[seq_B]])
                        extending = False
                print(current_score)
                print(sum(current_score))
                scores.append(sum(current_score))
            score_lists['-'.join([matrix, alignment])] = sorted(scores)
    # Construct score_dict
    # Index 14 is the 15th lowest positive score = the 0.7 TPR threshold.
    score_dict = {'default_matrix-default_alignments': {'tp': score_lists['default_matrix-default_pos'],
                                                        'fp': score_lists['default_matrix-default_neg'],
                                                        'threshold': score_lists['default_matrix-default_pos'][14]
                                                        },
                  'optimized_matrix-default_alignments': {'tp': score_lists['optimized_matrix-default_pos'],
                                                          'fp': score_lists['optimized_matrix-default_neg'],
                                                          'threshold': score_lists['optimized_matrix-default_pos'][14]
                                                          },
                  'optimized_matrix-optimized_alignments': {'tp': score_lists['optimized_matrix-optimized_pos'],
                                                            'fp': score_lists['optimized_matrix-optimized_neg'],
                                                            'threshold': score_lists['optimized_matrix-optimized_pos'][14]
                                                            }
                  }
    import pprint
    pprint.pprint(score_dict)
    generate_ROC(score_dict, optimized=True)
def generate_ROC(score_dict, optimized=False):
    """
    Plot false_pos vs. true_pos in ROC
    For a given substitution matrix:
    * Combine both true_pos and false_pos score lists in to set, list,
      reverse sort (highest to lowest)
    * Look at true_pos and false_pos lists individually and determine
      the total number of elements above score threshold. Store values in
      total_above_threshold_pos/neg
    * Iterate through sorted list of all scores (ruler)
    * For each value in ruler, look at proportion of scores below value
      considered hits
    * Append counts/total_above_threshold_pos/neg to x-axis and y-axis lists
    Plotting code lazily ripped from sklearn ROC example...
    """
    fig = plt.figure(figsize=(8, 8), facecolor='white')
    lw = 2
    # Fixed colors per curve so repeated runs are comparable.
    if optimized:
        colors = {'default_matrix-default_alignments': sns.xkcd_rgb["pale red"],
                  'optimized_matrix-optimized_alignments': sns.xkcd_rgb["grass"],
                  'optimized_matrix-default_alignments': sns.xkcd_rgb["golden yellow"]
                  }
    else:
        colors = {'BLOSUM50': sns.xkcd_rgb["pale red"],
                  'BLOSUM62': sns.xkcd_rgb["grass"],
                  'MATIO': sns.xkcd_rgb["cerulean"],
                  'PAM100': sns.xkcd_rgb["purplish"],
                  'PAM250': sns.xkcd_rgb["golden yellow"]
                  }
    for matrix in score_dict:
        # Combine true_pos and false_pos score lists
        ruler_set = set()
        for asdf in score_dict[matrix]['tp']:
            ruler_set.add(asdf)
        for asdf in score_dict[matrix]['fp']:
            ruler_set.add(asdf)
        print(score_dict[matrix]['tp'])
        print(score_dict[matrix]['fp'])
        print(ruler_set)
        # Sweep score cutoffs from highest to lowest.
        ruler = sorted(list(ruler_set), reverse=True)
        # Get counts of values above threshold in tp and fp lists
        tp_threshold_count = len([element for element in score_dict[matrix]['tp'] if element > score_dict[matrix]['threshold']])
        fp_threshold_count = len([element for element in score_dict[matrix]['fp'] if element > score_dict[matrix]['threshold']])
        x = [] # False positive
        y = [] # True positive
        # Count and append hits to x-axis and y-axis lists
        for tick in ruler:
            if fp_threshold_count == 0:
                # Avoid dividing by zero when no negatives pass the threshold.
                x.append(0)
            else:
                x.append(len([element for element in score_dict[matrix]['fp'] if element >= tick and element >= score_dict[matrix]['threshold']])/fp_threshold_count)
            y.append(len([element for element in score_dict[matrix]['tp'] if element >= tick and element >= score_dict[matrix]['threshold']])/tp_threshold_count)
        plt.plot(x, y, color=colors[matrix], lw=lw, label=matrix)
    # Diagonal = random-classifier reference line.
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    if optimized:
        plt.title('Receiver operating characteristic - Optimized vs. Default MATIO')
    else:
        plt.title('Receiver operating characteristic - Normalized')
    plt.legend(loc="lower right")
    plt.show()
    fig.savefig('MATIO-Optimized_vs_Default.pdf', dpi=300)
def run_alignments(seq_align, save_output=None):
    """
    Align every sequence pair listed in seq_align.working_pairs.

    Parameters
    ----------
    seq_align : align
        Configured aligner (penalties, substitution matrix, working_pairs).
    save_output : str, optional
        When given, every alignment is written to this file as a ``>n``
        header followed by the two aligned sequences and the match bars.

    Returns
    -------
    The (seq_A, bars, seq_B) strings of the *last* alignment performed.
    """
    output = None
    if save_output:
        output = open(save_output, 'w+')
        write_counter = 1
    try:
        for working_pair in seq_align.working_pairs:
            seq_align.seq_A = SeqIO.read(working_pair.split()[0], 'fasta').upper()
            seq_align.seq_B = SeqIO.read(working_pair.split()[1], 'fasta').upper()
            print(seq_align.seq_A.seq)
            print(seq_align.seq_B.seq)
            # Do work
            seq_align.initialize_scoring_matrix()
            seq_align.fill_in_matrix(working_pair)
            seq_A_string, bar_things, seq_B_string = seq_align.traceback_II()
            if output is not None:
                output.write('>{}\n'.format(write_counter))
                write_counter += 1
                output.write(seq_A_string + '\n')
                output.write(bar_things + '\n')
                output.write(seq_B_string + '\n')
    finally:
        # Bug fix: the output file was never closed, so buffered alignment
        # text could be missing when the file was re-read downstream.
        if output is not None:
            output.close()
    # NOTE(review): raises UnboundLocalError when working_pairs is empty --
    # preserved from the original behavior.
    return seq_A_string, bar_things, seq_B_string
def _crappy_parser(file_name):
    """Parse a saved-alignments file into [seq_A, bars, seq_B] triples.

    Records look like a ``>n`` header line followed by the two aligned
    sequences with the match-bar line between them.

    Parameters
    ----------
    file_name : str
        Path to an Alignments-*.txt file written by run_alignments().

    Returns
    -------
    list of [seq_A, match_bars, seq_B] lists, one per record.
    """
    seq_list = []
    triple = []
    # Bug fixes: use a context manager so the file handle is closed (the
    # original leaked it), and stop shadowing the module-level `align` name.
    with open(file_name) as handle:
        for line in handle:
            if line[0] != '>':
                triple.append(line.strip())
            if len(triple) == 3:
                seq_list.append(triple)
                triple = []
    print("\n{} imported".format(file_name))
    return seq_list
def matrix_optimization(matrix_to_optimize):
    """
    Optimize the matrix... somehow...
    Objective function: sum of TPR for FPRs 0, 0.1, 0.2, 0.3
    SIMULATED ANNEALING MONTE CARLO WITH METROPOLIS HASTINGS CRITERIA
    Sample weighted for AA frequency in alignment sequences
    Select random AA pairs, change value in matrix
    Calculate objective function value
    If current < previous, accept new matrix
    Else, calculate boltzmann probability (e^-(d(score / kT))), accept if > ( 0 > randint > 1 )
    Increment temperature every 2000 or so iterations
    This is going to be super slow... (nevermind, fast enough)
    Matricies optimized so far:
    BLOSUM62: 4.0
    MATIO: 2.06
    """
    # Values = header lines to skip when loading each matrix file.
    substitution_matrices = {'BLOSUM50': 6,
                             'BLOSUM62': 6,
                             'MATIO': 2,
                             'PAM100': 9,
                             'PAM250': 9
                             }
    # Set substitution matrix (this is used at the very end)
    substitution_matrix = pd.read_table(open('./{}'.format(matrix_to_optimize)), delim_whitespace=True, header=substitution_matrices[matrix_to_optimize])
    substitution_matrix_indicies = substitution_matrix.columns.values
    # Set gap penalties
    gap_opening = -7
    gap_extension = -3
    # Import saved positive and negative alignments and save to list of lists
    # [Seq_A, bar_things, Seq_B]
    neg_seq_list = _crappy_parser('Alignments-{}-neg.txt'.format(matrix_to_optimize))
    pos_seq_list = _crappy_parser('Alignments-{}-pos.txt'.format(matrix_to_optimize))
    # Calculate AA frequencies in Positive alignments
    # (sampling from this string weights residue picks by frequency)
    all_pos_align_sequences = [element[0] + element[2] for element in pos_seq_list]
    one_big_pos_sequence = re.sub('[^ARNDCQEGHILKMFPSTWYVBZX*]', '', ''.join(all_pos_align_sequences))
    # Residue -> row/column index into the numpy matrices.
    mat_dict = {'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4, 'Q': 5,
                'E': 6, 'G': 7, 'H': 8, 'I': 9, 'L': 10, 'K': 11,
                'M': 12, 'F': 13, 'P': 14, 'S': 15, 'T': 16, 'W': 17,
                'Y': 18, 'V': 19, 'B': 20, 'Z': 21, 'X': 22, '*': 23
                }
    # MC!
    accepted_matrix = None
    current_matrix = np.loadtxt(matrix_to_optimize, skiprows=(substitution_matrices[matrix_to_optimize] + 1))
    print(current_matrix)
    accepted_TPR = 0
    # Annealing schedule: each temperature is 1/5 of the previous one.
    temperatures = [0.01, 0.002, 0.0004, 0.00008, 0.000016, 0.0000032]
    import random
    # Run MC
    for temperature in temperatures:
        print("Current Temp: {}".format(temperature))
        for increment in range(2500): #2000
            # Generate new matrix
            first_AA = mat_dict[random.choice(one_big_pos_sequence)]
            second_AA = mat_dict[random.choice(one_big_pos_sequence)]
            perturb = random.uniform(-1, 1)
            previous_value = current_matrix[first_AA, second_AA]
            # Maintain symmetry
            current_matrix.itemset((first_AA, second_AA), (previous_value + perturb))
            current_matrix.itemset((second_AA, first_AA), (previous_value + perturb))
            # Get FP thresholds from saved alignment
            FP_scores = []
            current_score = []
            extending = False
            for alignment in neg_seq_list:
                for seq_A, seq_B in zip(alignment[0], alignment[2]):
                    if seq_A == '-' or seq_B == '-':
                        # Gap: opening penalty first, extension thereafter.
                        if extending == False:
                            current_score.append(gap_opening)
                            extending = True
                        else:
                            current_score.append(gap_extension)
                            pass
                    else:
                        current_score.append(current_matrix[mat_dict[seq_A], mat_dict[seq_B]])
                        extending = False
                FP_scores.append(sum(current_score))
                current_score = []
            # Calculate TP scores and get TPR for each FP threshold
            # Indices 49/44/39/34 correspond to FPR 0, 0.1, 0.2, 0.3 over 50 pairs.
            FP_thresholds = [sorted(FP_scores)[49], sorted(FP_scores)[44], sorted(FP_scores)[39], sorted(FP_scores)[34]]
            # print('FP_Thresholds: {}'.format(FP_thresholds))
            TP_scores = []
            current_score = []
            extending = False
            for alignment in pos_seq_list:
                for seq_A, seq_B in zip(alignment[0], alignment[2]):
                    if seq_A == '-' or seq_B == '-':
                        if extending == False:
                            current_score.append(gap_opening)
                            extending = True
                        else:
                            current_score.append(gap_extension)
                            pass
                    else:
                        current_score.append(current_matrix[mat_dict[seq_A], mat_dict[seq_B]])
                        extending = False
                TP_scores.append(sum(current_score))
                current_score = []
            sum_TPR = sum([len([element for element in TP_scores if element > threshold])/50 for threshold in FP_thresholds])
            # print('TP_scores:', TP_scores)
            # print('sum_TPR: {}'.format(sum_TPR))
            # Reject/Accept
            if sum_TPR > accepted_TPR:
                print('\nNEW MATRIX ACCEPTED (Score: {})\n'.format(sum_TPR))
                accepted_TPR = sum_TPR
                accepted_matrix = current_matrix
                np.savetxt('best_matrix.csv', accepted_matrix, delimiter=',')
            else:
                # Metropolis criterion: accept a worse matrix with
                # Boltzmann probability at the current temperature.
                pass_limit = random.uniform(0, 1)
                boltz = np.exp(-((accepted_TPR - sum_TPR)/temperature))
                if boltz > pass_limit:
                    accepted_TPR = sum_TPR
                    accepted_matrix = current_matrix
                    np.savetxt('best_matrix.csv', accepted_matrix, delimiter=',')
                else:
                    print('\nNEW MATRIX REJECTED\n')
                    current_matrix = np.loadtxt('best_matrix.csv', delimiter=',')
                    # Note: I'm super aware that this is kind of stupid since I'm writing the matrix to file every time
                    # I accept the matrix and reload it every time it is rejected, but for some reason the line
                    # below doesn't seem to work... I think it just makes a copy and doesn't let me edit it. It's
                    # a nasty hack but I'm short on time. :/
                    #
                    # current_matrix = accepted_matrix
    # Hacky method to save optimized matrix in same format as the ones provided
    np.savetxt("Optimized_matrix-temp.csv", accepted_matrix, delimiter=",")
    mat_dict_rev = {mat_dict[res]: res for res in mat_dict}
    # NOTE(review): pd.DataFrame.from_csv was removed from pandas; newer
    # pandas needs pd.read_csv here.
    temp = pd.DataFrame.from_csv("Optimized_matrix-temp.csv", header=None, index_col=None).set_index(substitution_matrix_indicies)
    temp.rename(columns=mat_dict_rev, inplace=True)
    temp.to_csv("{}-Optimized".format(matrix_to_optimize), sep='\t', index=None)
6651214 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
import cached_auth
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
get_user_model = lambda: User
try:
# Python 3.4+ includes reload in importlib
from importlib import reload
except ImportError:
try:
# Python 3.3 includes reload in imp
from imp import reload
except ImportError:
# Python 2 includes reload as a builtin
pass
class MiddlewareTest(TestCase):
    """Tests for the cached_auth middleware: the user cache must be
    populated on authenticated requests and invalidated on save/delete."""

    def setUp(self):
        # Superuser + staff so the admin index view is accessible.
        user_model = get_user_model()
        self.user = user_model.objects.create_user(username='test', password='a')
        self.user.is_superuser = True
        self.user.is_staff = True
        self.user.save()
        cache.clear()

    def test_anonymous(self):
        # Anonymous user doesn't cause cache to be set
        client = Client()
        key = cached_auth.CACHE_KEY % self.user.id
        client.get(reverse('admin:index'))
        self.assertEqual(cache.get(key), None)

    def test_cached_middleware(self):
        client = Client()
        key = cached_auth.CACHE_KEY % self.user.id
        self.assertEqual(cache.get(key), None)
        # Visiting admin causes the cache to be populated
        client.login(username='test', password='a')
        client.get(reverse('admin:index'))
        self.assertEqual(cache.get(key), self.user)
        # Changing user model invalidates cache
        self.user.save()
        self.assertEqual(cache.get(key), None)
        # Deleting user invalidates cache
        client.get(reverse('admin:index'))
        self.assertEqual(cache.get(key), self.user)
        self.user.delete()
        self.assertEqual(cache.get(key), None)

    @override_settings(CACHED_AUTH_PREPROCESSOR='test_project.utils.auth_preprocessor')
    def test_cached_auth_preprocessor_function(self):
        # Reload so the module picks up the overridden preprocessor setting.
        reload(cached_auth)
        client = Client()
        key = cached_auth.CACHE_KEY % self.user.id
        self.assertEqual(cache.get(key), None)
        client.login(username='test', password='a')
        client.get(reverse('admin:index'))
        user = cache.get(key)
        # The configured preprocessor is expected to have rewritten the
        # cached username from 'test' to 'test_auth'.
        self.assertEqual(user.username, 'test_auth')
| StarcoderdataPython |
9712454 | <reponame>suhasksv/py-ground<filename>syn_py.py<gh_stars>1-10
# defining a function
def greet(friends):
    """Print a 'Hi <name>' greeting for every name in *friends*."""
    for name in friends:
        print("Hi", name)
# Demo: greet a list of language names, then print a blank separator line.
friend = ["Python", "Go", "HTML5", "Java", "C", "C++"]
greet(friend)
print("\n")
def price(price):
    """Return a message describing the pre-tax price, 6% tax, and total.

    :param price: item price before tax (int or float)
    :return: formatted summary string
    """
    sales_tax = price * 0.06
    total = price + sales_tax
    return "The price without tax is ${:.2f}, tax is ${:.2f} and final price with tax is {:.2f}".format(price, sales_tax, total)
# Interactive loop: price one item per iteration until the user declines.
x = 1
while x == 1:
    try:
        prices = float(input("Enter the price of the item :"))
        print(price(prices))
    except ValueError:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt and EOFError, making the loop impossible to
        # break out of when input ends; only bad numbers are retried now.
        print("Please enter an integer or decimal value")
        continue
    ask = input("Do you want to find total price and tax? Enter 'y' to continue...! :").lower()
    if ask != 'y':
        x += 1
5033989 | <filename>src/d02_intermediate/preprocess_4_travel_time.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
import pickle
# system path
import sys
import os
# util path
utility_path = os.path.join(os.getcwd(),'src/d00_utils/')
sys.path.append(utility_path)
import utilities as util
# data path
# sw: define the path based on the root project directory.
raw_data_path = os.path.join(os.getcwd(),'data/01_raw/')
intermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')
# mount_path = "/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia"
# read files
# Load the raw Google Maps API origin-destination results.
with open(raw_data_path+"OD_Google_API_raw.pickle", 'rb') as w:
    OD_time_raw=pickle.load(w)
with open(raw_data_path+"OD_Google_API_With_Map_Info.pickle", 'rb') as w:
    OD_time_SA2=pickle.load(w)
# rename
# Standardize column names to origin (O), destination (D), and edge metrics.
OD_time_SA2.rename(columns={'o_sa2_idx':'O',
                            'd_sa2_idx':'D',
                            'od_duration_value':'od_duration',
                            'od_distance_value':'od_distance'}, inplace=True)
# save
# Persist only the four edge columns for downstream travel-time analysis.
OD_time_SA2[['O','D','od_duration','od_distance']].to_pickle(intermediate_data_path+'sa2_edge_travel_time.pickle')
| StarcoderdataPython |
11296960 | import traceback
__author__ = '<NAME> (<EMAIL>)'
from virtualisation.wrapper.history.abstractreader import AbstractReader
from virtualisation.misc.buffer import Buffer
from virtualisation.misc.lists import TimestampedList, BufferedTimestampedList
from virtualisation.misc.log import Log
import csv
import os.path
class CSVHistoryReader(AbstractReader):
    """Replays historic sensor readings from an open CSV file handle.

    When a timestamp column can be identified, rows are served through a
    lazily-filled BufferedTimestampedList keyed by parsed timestamps;
    otherwise the whole file is read eagerly into a plain list.

    NOTE(review): uses the Python-2-only ``reader.next()`` iterator
    protocol; needs ``next(self.reader)`` under Python 3.
    """

    def __init__(self, wrapper, filehandle, firstLineHeader=True, headers=None, delimiter=',', timestampfield=None):
        # if not os.path.exists(filename):
        #     raise Exception("Historic data for" + str(self) + "not available")
        super(CSVHistoryReader, self).__init__(wrapper, timestampfield)
        try:
            self.filehandle = filehandle
            self.reader = csv.reader(self.filehandle, delimiter=delimiter)
            # Column names come from the first CSV row and/or the caller;
            # an explicit *headers* argument overrides the file's own header.
            if firstLineHeader:
                self.headers = self.reader.next()
            if headers:
                self.headers = headers
            # read and sort the data
            sd = self.wrapper.getSensorDescription()
            tsdata = timestampfield
            if not tsdata and sd.isTimestampedStream():
                tsdata = sd.timestamp.inField
            if tsdata:
                self.__loadtimestamped(tsdata)
            else:
                self.__loadnormal()
        except IOError as error:
            raise error

    def __loadtimestamped(self, tsdata):
        """Set up buffered, timestamp-indexed access; fall back to eager
        loading when *tsdata* is not among the known headers."""
        try:
            self.timestampindex = self.headers.index(tsdata)
            self.data = BufferedTimestampedList(200, self.fillBuffer, 20)
        except ValueError as e:
            # Timestamp column not found -> plain list mode.
            self.__loadnormal()

    def __loadnormal(self):
        # Eagerly read every remaining CSV row into memory.
        self.data = []
        for row in self.reader:
            self.data.append(row)

    def fillBuffer(self, aList):
        """Refill callback for BufferedTimestampedList: read up to
        aList.bufferLength rows, parse their timestamps, and add them."""
        sd = self.wrapper.getSensorDescription()
        for x in range(0, aList.bufferLength):
            try:
                row = self.reader.next()
                ts = sd.parseTimestamp(row[self.timestampindex])
                aList.add(ts, row)
            except StopIteration:
                # print sd.sensorID, "no more data"
                # CSV exhausted: tell the list to stop requesting refills.
                aList.stopFillFunction()
                break
        aList.fillFunctionComplete()

    def tick(self, clock):
        """Return the next row for the clock's current time, or None when a
        plain (non-timestamped) list is exhausted."""
        if isinstance(self.data, list):
            return self.data.pop(0) if len(self.data) > 0 else None
        else:
            # clock.pause()
            x = self.data.next(clock.now())
            # clock.continue_running()
            return x

    def setTimeframe(self, startdate, enddate):
        """Scroll the buffered data to *startdate* before delegating to the
        base class; plain lists cannot be scrolled."""
        if not isinstance(self.data, list):
            Log.i("Searching start date in historic data for", self.wrapper.getSensorDescription().sensorID, "...")
            if self.data.scrollTo(startdate):
                Log.i("done")
            else:
                Log.w("no historic data beginning at", startdate, "found")
        super(CSVHistoryReader, self).setTimeframe(startdate, enddate)
# class CSVHistoryReader(AbstractReader):
# def __init__(self, wrapper, filename, firstLineHeader=True, headers=[]):
# super(CSVHistoryReader, self).__init__(wrapper)
# try:
# self.filehandle = open(filename, "rb")
# r = csv.reader(self.filehandle, delimiter=',')
# self.headers = headers
# if firstLineHeader:
# self.headers = r.next()
#
# # read and sort the data
# sd = self.wrapper.getSensorDescription()
# if sd.isTimestampedStream():
# self.__loadtimestamped(r, sd)
# else:
# self.__loadnormal(r)
# except IOError as error:
# print "Historic data for", self, "not available"
# print error
# raise error
#
# def __loadtimestamped(self, reader, sd):
# self.data = TimestampedList()
# try:
# i = self.headers.index(sd.timestamp.inField)
# for row in reader:
# ts = sd.parseTimestamp(row[self.timestampindex])
# self.data.add(ts, row)
# self.data.sort()
# except ValueError:
# self.__loadnormal(reader)
#
# def __loadnormal(self, reader):
# self.data = []
# for row in reader:
# self.data.append(row)
#
# def tick(self, clock):
# if isinstance(self.data, list):
# return self.data.pop(0)
# else:
# return self.data.next(clock.now())
# class CSVHistoryReader(AbstractReader):
# def __init__(self, wrapper, filename, firstLineHeader=True, headers=[], bufferLength=20):
# super(CSVHistoryReader, self).__init__(wrapper)
# self.buffer = Buffer(bufferLength, fillBelow=3, fillFunction=self.fillBuffer)
# try:
# self.filehandle = open(filename, "rb")
# self.r = csv.reader(self.filehandle, delimiter=',')
# self.headers = headers
# if firstLineHeader:
# self.headers = self.r.next()
# self.fillBuffer()
# except IOError as error:
# print "Historic data for", self, "not available"
# print error
# raise error
#
# def fillBuffer(self):
# tmp = []
# for i in range(0, self.buffer.length):
# tmp.append(self.r.next())
# self.buffer.append(tmp)
#
# def tick(self):
# return self.buffer.pick()
#
# def setTimeframe(self, startdate, enddate):
# super(CSVHistoryReader, self).setTimeframe(startdate, enddate)
# sd = self.wrapper.getSensorDescription()
# if "timestamp" in sd:
# # foreward to the timestamp after the startdate
# tsAfter = startdate + datetime.timedelta(seconds=sd.updateInterval)
# # find the position of the timestamp
# i = -1
# for x in range(0, len(self.headers)):
# if self.headers[x] == sd.timestamp.inField:
# i = x
# break
# if i < 0:
# return
# while self.buffer.size():
# data = self.buffer.peek()
# ts = datetime.datetime.strptime(data[i], sd.timestamp.format)
# if ts >= tsAfter:
# break
# else:
# self.buffer.pick()
#
| StarcoderdataPython |
6672095 | <filename>utils.py<gh_stars>0
from math import sqrt
import numpy as np
import shutil
import os
# =============================================================================
# Confidence Interval for Sensitivity and Specificity
# The Confidence Interval use Wilson Score interval formula to calculate
# the upper and lower bound
# The arguments to pass are the following:
#
# TP = True positive: Sick people correctly identified as sick
# FP = False positive: Healthy people incorrectly identified as sick
# TN = True negative: Healthy people correctly identified as healthy
# FN = False negative: Sick people incorrectly identified as healthy
# =============================================================================
def confidence_interval_sen_spe(tp, tn, fp, fn):
    """Wilson-score 95% confidence intervals for sensitivity and specificity.

    :param tp: true positives (sick correctly identified as sick)
    :param tn: true negatives (healthy correctly identified as healthy)
    :param fp: false positives (healthy incorrectly identified as sick)
    :param fn: false negatives (sick incorrectly identified as healthy)
    :return: (adjusted sensitivity, its lower bound, its upper bound,
              adjusted specificity, its lower bound, its upper bound)
    """
    z = 1.96  # z-score for a 95% confidence level

    def wilson(p, m):
        # Wilson score interval for proportion p observed over m trials.
        denominator = 1 + ((z*z)/m)
        adjusted = (p + (z*z)/(2*m)) / denominator
        half_width = z * (sqrt(((p*(1-p))/m) + ((z*z)/(4*(m*m)))))
        upper = ((p + (z*z)/(2*m)) + half_width) / denominator
        lower = ((p + (z*z)/(2*m)) - half_width) / denominator
        return adjusted, lower, upper

    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    adj_sen, ll_sen, ul_sen = wilson(sensitivity, tp + fn)
    adj_spe, ll_spe, ul_spe = wilson(specificity, tn + fp)
    return (adj_sen, ll_sen, ul_sen, adj_spe, ll_spe, ul_spe)
# =============================================================================
# Confidence Interval for AUC (Area under the Receiver Operating Characteristic)
# The confidence interval uses the formula for SE(AUC) given by Hanley and
# McNeil (1982) to calculate the upper and lower bounds
#
# The arguments to pass are the following:
# AUC = Area under Receiver operating characteristic
# N1 = Total number of Positive sample in dataset
# N2 = Total number of Negative samples in dataset
# =============================================================================
def confidence_interval_auc(auc, n1, n2):
    """95% confidence interval for an AUC via Hanley & McNeil (1982).

    :param auc: area under the ROC curve
    :param n1: number of positive samples in the dataset
    :param n2: number of negative samples in the dataset
    :return: (lower bound, upper bound) of the 95% CI
    """
    z = 1.96  # z-score for a 95% confidence level
    q1 = auc / (2 - auc)
    q2 = 2 * (auc * auc) / (1 + auc)
    variance = ((auc * (1 - auc))
                + ((n1 - 1) * (q1 - auc * auc))
                + ((n2 - 1) * (q2 - auc * auc))) / (n1 * n2)
    margin = z * sqrt(variance)  # standard error scaled to 95%
    return (auc - margin, auc + margin)
# =============================================================================
# Split the given data frame into train 80%, validation 10% and holdout test 10%
# The algorithm uses permutation to make sure your dataset is split completely randomly
#
# The argumemnt to pass is data frame
# =============================================================================
def train_validate_test_split(df, train_percent=.800000, validate_percent=.1, seed=42):
    """Randomly partition *df* into train / validate / test subsets.

    The split is 80% / 10% / 10% by default; the test subset receives
    whatever remains after the first two. A fixed *seed* makes the random
    permutation reproducible.

    :param df: pandas DataFrame to split
    :param train_percent: fraction of rows for the training set
    :param validate_percent: fraction of rows for the validation set
    :param seed: numpy random seed used for the permutation
    :return: (train, validate, test) DataFrames
    """
    np.random.seed(seed)  # fixed seed -> reproducible permutation
    shuffled = np.random.permutation(df.index)
    n_rows = len(df.index)
    n_train = int(train_percent * n_rows)
    n_validate = n_train + int(validate_percent * n_rows)
    # NOTE(review): positions come from df.index, so this assumes the default
    # RangeIndex (iloc positions == index labels) — confirm for callers.
    return (df.iloc[shuffled[:n_train]],
            df.iloc[shuffled[n_train:n_validate]],
            df.iloc[shuffled[n_validate:]])
# =============================================================================
# Move the files listed in a data frame from one directory to another
#
# The arguments to pass are:
# data frame = frame where all file names are available under the "fname" heading
# in_path = location of your ROI extracted CXR files
# dest_path = location where you want to move your data
#
# For example, given your data frame for the holdout test set of TB patients,
# your CXRs are in the segmentation_result dir (in_path) and the test files are
# moved to in_test/tb (dest_path)
# =============================================================================
def move_files(df, in_path, dest_path):
    """Move every file listed in *df* from *in_path* into *dest_path*.

    :param df: frame-like object whose 'fname' entry lists the file names
    :param in_path: directory currently containing the files
    :param dest_path: directory the files are moved into
    """
    # Iterate the name column directly; the old `image[0:]` slice was a no-op.
    for fname in df['fname']:
        shutil.move(os.path.join(in_path, fname), dest_path)
| StarcoderdataPython |
3230323 | <reponame>niksell/phenotypes-prediction-using-genotypes-Master-Thesis
import os.path
import time
import numpy as np
from DataStructure.PatientPhenotype import PatientPhenotype
from DataStructure.Snp import Snp
class Output:
    """Writes patient/SNP lists, genotype-code matrices, and log files for
    the phenotype-prediction pipeline under a common output-path prefix."""
    def __init__(self,path,numberOfChromosomes):
        """
        :param path: directory/file prefix every output file is written under
        :param numberOfChromosomes: number of chrN files/lists to iterate over
        """
        self.__path = path
        self.__numberOfChromosomes = numberOfChromosomes
    def writePatientsList(self,patients,kind):
        """Write one patient id per line to <path><kind>.

        :param patients: mapping whose keys are patient ids
        :param kind: file-name suffix appended to the output path
        """
        path = self.__path + kind
        try:
            write = open(path,'w')
            for patient in patients.keys():
                write.write(patient.strip() + '\n')
            write.close()
        except Exception as x:
            # NOTE(review): if open() itself failed, `write` is unbound and
            # this close() raises NameError — confirm intended handling.
            print("error = ",x)
            write.close()
    def writeSnpsList(self,chromosomes):
        """Write one SNP id per line to <path>chr<N>snpList.txt per chromosome."""
        for i in range(self.__numberOfChromosomes):
            chro = 'chr'+str(i+1)
            try:
                path = self.__path + chro + 'snpList.txt'
                write = open(path,'w')
                for snp in chromosomes[chro].keys():
                    write.write(snp.strip() + '\n')
                write.close()
            except Exception as x:
                print("error = ",x)
                write.close()
    def writeSnpsUsed(self,snpsIds,idToName,chromosomes,name = None):
        """Write the SNPs actually used, grouped by chromosome, to a
        date-stamped text file (a numeric suffix avoids overwriting).

        :param snpsIds: iterable of internal SNP ids that were used
        :param idToName: mapping from internal SNP id to SNP name
        :param chromosomes: per-chromosome dict of SNP name -> allele pair
        :param name: base file name; required (function returns if missing)
        """
        if not name:
            print("give a name to file")
            return
        path = self.__path + name + " ( " + time.strftime("%d-%m-%Y") + " ).txt "
        i=1
        # Append _1, _2, ... until an unused file name is found.
        while os.path.exists(path):
            path = self.__path + name + " ( " + time.strftime("%d-%m-%Y") + " ) " + '_' + str(i)+".txt"
            i += 1
        snps = []
        for i in snpsIds:
            snps.append(idToName[i])
        print("snpsIds = ",len(snpsIds))
        print("idToName = ",len(idToName))
        write = open(path,'w')
        try:
            for i in range(1,23):
                chro = 'chr'+str(i)
                chromList = chromosomes[chro]
                # Only emit a chromosome section when at least one of its
                # SNPs appears in the used set.
                if len(list(set(chromList) - set(snps))) < len(chromList):
                    write.write("chromosome"+str(i)+'\n')
                    for j in snps:
                        if j in chromosomes[chro]:
                            write.write(j + '\t' + chromosomes[chro][j][0] + '\t' + chromosomes[chro][j][1] + '\n')
                    write.write('\n')
            write.close()
        except Exception as x:
            print("error = ",x)
            write.close()
    def saveData(self,ids,patients,data,chroms = {}):
        """Persist the genotype-code matrix through the private snp-code log."""
        self.__snpCodeLog(ids['patients']['idToName'],ids['snps']['idToName'],patients,data)
    def writeDf(self,n,m,chromosomes,ids,patients):
        """Build the n x m patient-by-SNP genotype-code matrix from the
        per-chromosome .lgen files and dump it as snpCodeTest1.csv with a
        trailing case/control label column.

        :param n: number of patients (matrix rows)
        :param m: number of SNPs (matrix columns)
        """
        X = np.zeros((n,m),dtype = int)
        for i in range(self.__numberOfChromosomes):
            chro = 'chr'+str(i+1)
            path = self.__path + chro +'.lgen'
            if os.path.exists(path):
                try:
                    f = open(path,'r')
                    for line in f:
                        try:
                            # .lgen fields: patient, <unused>, snp, allele1, allele2
                            patient = line.split()[0].strip()
                            snp = line.split()[2].strip()
                            allele1 = line.split()[3].strip()
                            allele2 = line.split()[4].strip()
                            snpp = Snp(snp,allele1,allele2)
                            snpp.setSnpCode(chromosomes[chro][snp][0],chromosomes[chro][snp][1])
                            code = snpp.getSnpCode()
                            p = ids['patients']['nameToId'][patient]
                            s = ids['snps']['nameToId'][snp]
                            X[p,s] = code
                        except Exception as x:
                            # NOTE(review): closing f here while the outer loop
                            # keeps iterating it will raise on the next line,
                            # which the outer except then reports — confirm.
                            print("error1 = ",x)
                            f.close()
                    f.close()
                except Exception as x:
                    print("error2 = ",x)
                    f.close()
        print("x shape is ", X.shape)
        write = open(self.__path + 'snpCodeTest1.csv','w')
        # Header row: patient column, one column per SNP, then the label.
        write.write('patients,')
        for i in range(len(X.T)):
            s = ids['snps']['idToName'][i]
            write.write(s + ',')
        write.write('label' + '\n')
        for i in range(len(X)):
            p = ids['patients']['idToName'][i]
            write.write(p + ',')
            for j in range(len(X.T)):
                s = ids['snps']['idToName'][j]
                write.write(str(X[i,j]) + ',')
            write.write(str(patients[p].getCase()) + '\n')
        write.close()
    def __patientsLogFile(self,ids,patientKind):
        """Log the patient name -> internal id mapping (count on first line)."""
        write = open(self.__path + patientKind + 'Ids.txt','w')
        write.write(str(len(ids['nameToId'])) + '\n')
        for patient in ids['nameToId'].keys():
            write.write(patient.strip() + '\t' + str(ids['nameToId'][patient]).strip() + '\n')
        write.close()
    def __snpsLogFile(self,ids,chroms):
        """Log the SNP name -> internal id mapping grouped by chromosome."""
        if len(chroms.keys()) > 0:
            write = open(self.__path + 'SnpsIds.txt','w')
            write.write(str(len(ids['nameToId'])) + '\n')
            for chro in chroms.keys():
                for snp in chroms[chro].keys():
                    write.write(snp.strip() + '\t' + str(ids['nameToId'][snp.strip()]).strip() + '\n')
            write.close()
    def __snpCodeLog(self,patientsIds,snpsIds,patients,data):
        """Dump every (patient, snp, code, allele1, allele2) tuple of the
        genotype matrix to snpCode.txt, preceded by the two dimensions."""
        write = open(self.__path + 'snpCode.txt','w')
        write.write(str(len(patientsIds)) + '\n')
        write.write(str(len(snpsIds)) + '\n')
        for i in range(len(data)):
            for j in range(len(data.T)):
                allele1 = patients[patientsIds[i]].getAllele1(snpsIds[j])
                allele2 = patients[patientsIds[i]].getAllele2(snpsIds[j])
                write.write(patientsIds[i].strip() + '\t' + snpsIds[j].strip() + '\t' + str(data[i,j]).strip() + '\t'
                            + allele1.strip() + '\t' + allele2.strip() + '\n')
        write.close()
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
import time
# Physical (BOARD-numbered) pins driving the two motors' H-bridge inputs.
Motor1A = 16
Motor1B = 18
Motor2A = 11
Motor2B = 15
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
def backward():
    """Drive both motors in reverse for one second, then stop them."""
    print("Going Back")
    for pin, level in ((Motor1A, GPIO.HIGH), (Motor1B, GPIO.LOW),
                       (Motor2A, GPIO.HIGH), (Motor2B, GPIO.LOW)):
        GPIO.output(pin, level)
    time.sleep(1)
    print("Stopping")
    for pin in (Motor1A, Motor1B, Motor2A, Motor2B):
        GPIO.output(pin, GPIO.LOW)
def forward():
    """Drive both motors forward for one second, then stop them."""
    print("Going Forward")
    for pin, level in ((Motor1A, GPIO.LOW), (Motor1B, GPIO.HIGH),
                       (Motor2A, GPIO.LOW), (Motor2B, GPIO.HIGH)):
        GPIO.output(pin, level)
    time.sleep(1)
    print("Stopping")
    for pin in (Motor1A, Motor1B, Motor2A, Motor2B):
        GPIO.output(pin, GPIO.LOW)
def turnLeft():
    """Spin the motors in opposite directions for 0.3 s to turn left, then stop."""
    print("Going Left")
    for pin, level in ((Motor1A, GPIO.HIGH), (Motor1B, GPIO.LOW),
                       (Motor2A, GPIO.LOW), (Motor2B, GPIO.HIGH)):
        GPIO.output(pin, level)
    time.sleep(0.3)
    print("Stopping")
    for pin in (Motor1A, Motor1B, Motor2A, Motor2B):
        GPIO.output(pin, GPIO.LOW)
def turnRight():
    """Spin the motors in opposite directions for 0.3 s to turn right, then stop."""
    print("Going Right")
    for pin, level in ((Motor1A, GPIO.LOW), (Motor1B, GPIO.HIGH),
                       (Motor2A, GPIO.HIGH), (Motor2B, GPIO.LOW)):
        GPIO.output(pin, level)
    time.sleep(0.3)
    print("Stopping")
    for pin in (Motor1A, Motor1B, Motor2A, Motor2B):
        GPIO.output(pin, GPIO.LOW)
def stop():
    """Cut power to all four motor inputs immediately."""
    print("Stopping")
    for pin in (Motor1A, Motor1B, Motor2A, Motor2B):
        GPIO.output(pin, GPIO.LOW)
| StarcoderdataPython |
6432332 | <reponame>avilcheslopez/geopm
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Example frequency sweep experiment using geopmbench.
'''
from experiment.frequency_sweep import frequency_sweep
from apps.geopmbench import geopmbench
if __name__ == '__main__':
    # Run the frequency-sweep experiment over a tiny geopmbench workload;
    # cool_off_time=0 skips the delay between runs since the app is short.
    app_conf = geopmbench.TinyAppConf()
    frequency_sweep.main(app_conf, cool_off_time=0)
| StarcoderdataPython |
1975239 | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for RunningProcessEvent event."""
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from typing import TYPE_CHECKING
from ...event import Event
if TYPE_CHECKING:
from ...actions import ExecuteProcess # noqa: F401
class RunningProcessEvent(Event):
    """Event base class for events tied to some running process."""

    name = 'launch.events.process.RunningProcessEvent'

    def __init__(
        self,
        *,
        action: 'ExecuteProcess',
        name: Text,
        cmd: List[Text],
        cwd: Optional[Text],
        env: Optional[Dict[Text, Text]],
        pid: int
    ) -> None:
        """
        Create a RunningProcessEvent.

        :param: action is the ExecuteProcess action associated with the event
        :param: name is the final, unique name of the process instance
        :param: cmd is the final command after substitution expansion
        :param: cwd is the final working directory after substitution expansion
        :param: env is the final environment variables after substitution expansion
        :param: pid is the operating-system id of the running process
        """
        super().__init__()
        self._action = action
        self._process_name = name
        self._cmd = cmd
        self._cwd = cwd
        self._env = env
        self._pid = pid

    @property
    def action(self) -> 'ExecuteProcess':
        """Return the associated ExecuteProcess action."""
        return self._action

    @property
    def execute_process_action(self) -> 'ExecuteProcess':
        """Return the associated ExecuteProcess action (alias of action)."""
        return self._action

    @property
    def process_name(self) -> Text:
        """Return the unique name of the process instance."""
        return self._process_name

    @property
    def cmd(self) -> List[Text]:
        """Return the expanded command."""
        return self._cmd

    @property
    def cwd(self) -> Optional[Text]:
        """Return the expanded working directory."""
        return self._cwd

    @property
    def env(self) -> Optional[Dict[Text, Text]]:
        """Return the expanded environment variables."""
        return self._env

    @property
    def pid(self) -> int:
        """Return the process id."""
        return self._pid
| StarcoderdataPython |
1872725 | <reponame>oscarbatori/doltpy
from typing import List, Callable, Any, Mapping, Iterable
import pandas as pd
from doltpy.core.dolt import Dolt, DEFAULT_HOST, DEFAULT_PORT
import logging
import io
import tempfile
from datetime import datetime, date, time
from sqlalchemy import String, DateTime, Date, Integer, Float, Table, MetaData, Column
import math
import os
logger = logging.getLogger(__name__)
DEFAULT_BATCH_SIZE = 300000
CREATE, FORCE_CREATE, REPLACE, UPDATE = 'create', 'force_create', 'replace', 'update'
IMPORT_MODES_TO_FLAGS = {CREATE: ['-c'],
FORCE_CREATE: ['-f', '-c'],
REPLACE: ['-r'],
UPDATE: ['-u']}
def import_df(repo: Dolt,
              table_name: str,
              data: pd.DataFrame,
              primary_keys: List[str],
              import_mode: str = None):
    """
    Import a DataFrame into the given table, subject to *import_mode*
    (defaults to CREATE when the table is absent, UPDATE otherwise).
    Rows that are missing any primary-key value are dropped before import,
    so the first occurrence of each key is preserved.

    :param repo: Dolt repository to import into
    :param table_name: name of the target table
    :param data: DataFrame whose rows are imported
    :param primary_keys: key columns used to de-duplicate and key the table
    :param import_mode: one of IMPORT_MODES_TO_FLAGS' keys, or None to infer
    :return:
    """
    def _write_csv(filepath: str):
        # Rows lacking a primary-key value cannot be imported; drop them.
        data.dropna(subset=primary_keys).to_csv(filepath, index=False)

    _import_helper(repo, table_name, _write_csv, primary_keys, import_mode)
def bulk_import(repo: Dolt,
                table_name: str,
                data: io.StringIO,
                primary_keys: List[str] = None,
                import_mode: str = None) -> None:
    """
    Import a file-like CSV object into the specified table. The import mode
    is one of IMPORT_MODES_TO_FLAGS' keys; a mismatched mode (e.g. CREATE on
    an existing table) raises. When omitted, the mode defaults to CREATE if
    the table does not exist and UPDATE otherwise.

    :param repo: Dolt repository to import into
    :param table_name: name of the target table
    :param data: file-like object containing CSV data
    :param primary_keys: key columns, required when creating the table
    :param import_mode: one of IMPORT_MODES_TO_FLAGS' keys, or None to infer
    :return:
    """
    def _write_csv(filepath: str):
        with open(filepath, 'w') as out:
            out.writelines(data.readlines())

    _import_helper(repo, table_name, _write_csv, primary_keys, import_mode)
def _import_helper(repo: Dolt,
                   table_name: str,
                   write_import_file: Callable[[str], None],
                   primary_keys: List[str],
                   import_mode: str) -> None:
    """Shared import machinery: resolve the import mode, write a temp CSV via
    *write_import_file*, and run `dolt table import` with matching flags.

    :param repo: Dolt repository to import into
    :param table_name: name of the target table
    :param write_import_file: callback that writes the CSV to a given path
    :param primary_keys: key columns, required for CREATE mode
    :param import_mode: one of IMPORT_MODES_TO_FLAGS' keys, or None to infer
    :raises ValueError: when CREATE mode is used without primary keys
    """
    import_modes = IMPORT_MODES_TO_FLAGS.keys()
    if import_mode is not None:
        assert import_mode in import_modes, 'update_mode must be one of: {}'.format(import_modes)
    else:
        # Infer the mode: update an existing table, otherwise create it.
        if table_name in [table.name for table in repo.ls()]:
            logger.info('No import mode specified, table exists, using "{}"'.format(UPDATE))
            import_mode = UPDATE
        else:
            # Fixed: this message previously claimed "table exists" even
            # though this branch is taken when the table does NOT exist.
            logger.info('No import mode specified, table does not exist, using "{}"'.format(CREATE))
            import_mode = CREATE
    if import_mode == CREATE and primary_keys is None:
        raise ValueError('Import mode CREATE requires a primary key to be specified')
    import_flags = IMPORT_MODES_TO_FLAGS[import_mode]
    logger.info('Importing to table {} in dolt directory located in {}, import mode {}'.format(table_name,
                                                                                               repo.repo_dir(),
                                                                                               import_mode))
    # NOTE: mktemp only reserves a name (inherently race-prone); kept for
    # behavior parity since dolt needs a path, not an open handle.
    fname = tempfile.mktemp(suffix='.csv')
    try:
        write_import_file(fname)
        args = ['table', 'import', table_name] + import_flags
        if import_mode == CREATE:
            args += ['--pk={}'.format(','.join(primary_keys))]
        repo.execute(args + [fname])
    finally:
        # Always remove the temp CSV, even when the import fails.
        if os.path.exists(fname):
            os.remove(fname)
def import_dict(repo: Dolt,
                table_name: str,
                data: Mapping[str, List[Any]],
                primary_keys: List[str] = None,
                import_mode: str = None,
                batch_size: int = DEFAULT_BATCH_SIZE):
    """
    Provides a column major interface for writing Python data structures to Dolt, specifically data should be a dict
    where the keys are column names and the values are equal length lists of values to be written to Dolt. The lists
    must consist of:
        - values that match the type of the table in the schema of the table being written to
        - values of the same type that can be coalesced to a Python type by the (very limited) type inference logic
          for generating a schema from a data structure
    Note it is necessary for all list to be of the same length since we must coalesce the lists into rows, and that
    doesn't really make sense when the lists are not of the same length.
    Let's proceed with the example of creating a simple table and showing how to write some data structures:
        CREATE TABLE players (id INT, name VARCHAR(16), PRIMARY KEY (id))
    Now write in update mode:
    >>> dict_of_lists = {'id': [1, 2], 'name': ['Roger', 'Rafael']}
    >>> import_dict(repo, 'players', dict_of_lists, import_mode='update')
    Alternatively we can let the Python code infer a schema:
    >>> import_dict(repo, 'players', dict_of_lists, ['id'], import_mode='create')
    Assertions:
        - all list values are of equal length
        - when inferring a schema each list value has elements of a type that can be mapped to a SQL type, the logic is
          currently very limited
        - when inferring a schema
    This function requires the Dolt SQL server to be running on the host and port provided, defaulting to
    127.0.0.1:3306.
    :param repo:
    :param table_name:
    :param data:
    :param primary_keys:
    :param import_mode:
    :param batch_size:
    :return:
    """
    assert import_mode in [UPDATE, CREATE]
    # Grab some basic information about the data
    assert data, 'Cannot provide an empty dictionary'
    row_count = len(list(data.values())[0])
    assert row_count > 0, 'Must provide at least a single row'
    assert all(len(val_list) == row_count for val_list in data.values()), 'Must provide value lists of uniform length'
    # If the table does not exist, create it using type inference to build a create statement
    if import_mode == CREATE:
        assert primary_keys, 'primary_keys need to be provided when inferring a schema'
        _create_table_inferred(repo, table_name, data, primary_keys)
    # Pivot the column-major input into row dicts for the SQL insert.
    rows = []
    for i in range(row_count):
        rows.append({col: data[col][i] for col in data.keys()})
    # Plain dates must become datetimes before hitting the SQL driver.
    clean_rows = coerce_dates(rows)
    logger.info('Inserting {row_count} rows into table {table_name}'.format(row_count=row_count,
                                                                            table_name=table_name))
    # Reflect the live table definition so the insert matches its schema.
    metadata = MetaData(bind=repo.get_engine())
    metadata.reflect()
    table = metadata.tables[table_name]
    # Insert in batches of batch_size rows to bound memory/statement size.
    for i in range(max(1, math.ceil(len(clean_rows) / batch_size))):
        batch_start = i * batch_size
        batch_end = min((i+1) * batch_size, len(clean_rows))
        batch = clean_rows[batch_start:batch_end]
        logger.info('Writing records {} through {} of {} rows to Dolt'.format(batch_start, batch_end, len(clean_rows)))
        with repo.get_engine().connect() as conn:
            conn.execute(table.insert(), batch)
def _create_table_inferred(repo: Dolt, table_name: str, data: Mapping[str, List[Any]], primary_keys: List[str]):
    """Create *table_name* in *repo* with a schema inferred from *data*.

    Each column's SQL type is inferred from its first non-null value.

    :raises ValueError: when a column contains only nulls, since no type can
        be inferred for it
    """
    # generate and execute a create table statement
    cols_to_types = {}
    for col_name, list_of_values in data.items():
        # Just take the first non-null value to infer the type.
        first_non_null = None
        for val in list_of_values:
            if val is not None:
                first_non_null = val
                break
        # Fixed: this raise was previously unconditional, which made every
        # call fail and left the type-assignment line unreachable. Only raise
        # when the column had no non-null value to infer from.
        if first_non_null is None:
            raise ValueError('Cannot provide an empty list, types cannot be inferred')
        cols_to_types[col_name] = _get_col_type(first_non_null, list_of_values)
    metadata = MetaData(bind=repo.get_engine())
    table = _get_table_def(metadata, table_name, cols_to_types, primary_keys)
    table.create()
def _get_col_type(sample_value: Any, values: Any):
    """Map a sample Python value to a SQLAlchemy column type.

    :param sample_value: exemplar value whose exact type drives the mapping
    :param values: all values of the column (used to size string columns)
    :raises ValueError: for unsupported value types
    """
    value_type = type(sample_value)
    if value_type == str:
        # Size the VARCHAR at twice the longest observed string for headroom.
        return String(2 * max(len(val) for val in values))
    if value_type == int:
        return Integer
    if value_type == float:
        return Float
    if value_type == datetime:
        return DateTime
    if value_type == date:
        return Date
    raise ValueError('Value of type {} is unsupported'.format(value_type))
def _get_table_def(metadata, table_name: str, cols_with_types: Mapping[str, str], pks: List[str]):
    """Build a SQLAlchemy Table definition; columns named in *pks* become
    primary keys."""
    columns = []
    for col_name, col_type in cols_with_types.items():
        columns.append(Column(col_name, col_type, primary_key=col_name in pks))
    return Table(table_name, metadata, *columns)
def import_list(repo: Dolt,
                table_name: str,
                data: List[Mapping[str, Any]],
                primary_keys: List[str] = None,
                import_mode: str = None,
                batch_size: int = DEFAULT_BATCH_SIZE):
    """
    Row-major write interface for Python data structures: *data* is a list of
    dicts, one per row, every dict carrying the same set of columns. Values
    must either match the target table's schema or be of types the (limited)
    schema-inference logic can map to SQL types.

    Example, for a table created as
        CREATE TABLE players (id INT, name VARCHAR(16), PRIMARY KEY (id))

    write in update mode:
    >>> list_of_dicts = [{'id': 1, 'name': 'Roger'}, {'id': 2, 'name': 'Rafael'}]
    >>> import_list(repo, 'players', list_of_dicts, import_mode='update')
    or let the schema be inferred:
    >>> import_list(repo, 'players', list_of_dicts, ['id'], import_mode='create')

    Restrictions: all rows must share the same columns (a strict subset of the
    table's columns); when inferring a schema, each column's values must share
    one type and a column cannot be all nulls. Requires the Dolt SQL server to
    be running (default 127.0.0.1:3306).

    :param repo:
    :param table_name:
    :param data:
    :param primary_keys:
    :param import_mode:
    :param batch_size:
    :return:
    """
    assert data, 'Cannot provide empty dict'
    expected_cols = set(data[0].keys())
    logger.info('Reshaping data into columns')
    # Pivot row dicts into the column-major shape import_dict expects.
    column_major = {}
    for row in data:
        assert set(row.keys()) == expected_cols, 'Two rows with different keys found'
        for col_name, value in row.items():
            column_major.setdefault(col_name, []).append(value)
    import_dict(repo, table_name, column_major, primary_keys, import_mode, batch_size)
def coerce_dates(data: Iterable[dict]) -> List[dict]:
    """Return copies of the rows in *data* with plain ``date`` values
    promoted to midnight ``datetime`` objects.

    Only values whose type is exactly ``date`` are converted; ``datetime``
    values (a ``date`` subclass) pass through untouched.

    :param data: iterable of row dicts
    :return: new list of new row dicts
    """
    coerced = []
    for row in data:
        coerced.append({
            col: datetime.combine(val, time()) if type(val) == date else val
            for col, val in row.items()
        })
    return coerced
5092299 | <gh_stars>1-10
"""
Interface for using a web3 provider
"""
from typing import Optional
from src.utils import helpers
from src.utils.config import shared_config
from src.utils.multi_provider import MultiProvider
from web3 import HTTPProvider, Web3
web3: Optional[Web3] = None
def get_web3():
    """Return the module-level Web3 provider, constructing it on first use."""
    # pylint: disable=W0603
    global web3
    if not web3:
        web3 = Web3(HTTPProvider(helpers.get_web3_endpoint(shared_config)))
    return web3
eth_web3: Optional[Web3] = None
def get_eth_web3():
    """Return the module-level Ethereum Web3 provider, constructing it on
    first use from the configured eth provider URL(s)."""
    # pylint: disable=W0603
    global eth_web3
    if not eth_web3:
        eth_web3 = Web3(MultiProvider(shared_config["web3"]["eth_provider_url"]))
    return eth_web3
| StarcoderdataPython |
8157644 | <filename>mmdet/datasets/icdar/tools/__init__.py
from .script import icdar_eval_tool | StarcoderdataPython |
3409141 | <gh_stars>0
import re
from basebot import BaseBot
HQ_expression = re.compile(r"HQ:(\d+)Id:(\d+)")
UNIT_expression = re.compile(r"U:(\d+)Id:(\d+)")
BLOCK_expression = re.compile(r"B")
class InvalidActionException(Exception):
    """Raised when a bot action targets an invalid tile (off-map, blocked,
    empty when attacking, or occupied when moving)."""
    pass
class PointInMap(object):
    """A 2-D coordinate on the game map supporting vector addition."""

    def __init__(self, coord_x, coord_y):
        self.x = coord_x
        self.y = coord_y

    def __add__(self, other):
        """Return a new point translated by *other* (component-wise sum)."""
        new_x, new_y = self.x + other.x, self.y + other.y
        return PointInMap(new_x, new_y)

    def as_tuple(self):
        """Return the point as an (x, y) tuple, e.g. for use as a map key."""
        return self.x, self.y
class PlayerUnit(PointInMap):
    """A unit owned by the player: a map position plus the unit's id."""
    def __init__(self, unit_id, coord_x, coord_y):
        # unit_id comes from the server's tile string (U:<player>Id:<unit>).
        self.unit_id = unit_id
        super(PlayerUnit, self).__init__(coord_x=coord_x, coord_y=coord_y)
class Tile(PointInMap):
    """One map cell: the player's units, enemy counts, HQ flags and
    walkability, parsed from the server's tile string."""
    def __init__(self, player_id, content, coord_x, coord_y):
        """
        :param player_id: this player's id as a string, for ownership checks
        :param content: raw tile string from the server feedback
        """
        super(Tile, self).__init__(coord_x, coord_y)
        self.units = []  # this player's units standing on the tile
        self.enemies_count = 0  # number of opposing units on the tile
        self.enemy_hq = False  # True when an enemy HQ occupies the tile
        self.own_hq = False  # True when our own HQ occupies the tile
        self.reachable = False  # True unless the tile is blocked ("B")
        self._parse_tile_string(player_id, content)
    def _parse_tile_string(self, player_id, content_str):
        """Populate the tile attributes from *content_str* using the
        module-level HQ/UNIT/BLOCK regular expressions."""
        for p_id, _ in HQ_expression.findall(content_str):
            # NOTE(review): with several HQ matches the last one wins.
            self.own_hq = p_id == player_id
            self.enemy_hq = p_id != player_id
        for p_id, unit_id in UNIT_expression.findall(content_str):
            if p_id == player_id:
                self.units.append(PlayerUnit(unit_id=unit_id, coord_x=self.x, coord_y=self.y))
            else:
                self.enemies_count += 1
        self.reachable = BLOCK_expression.match(content_str) is None
class Map(dict):
    """The game map: a dict keyed by (x, y) tuples mapping to Tile objects."""
    pass
class GameBot(BaseBot):
    """Turn-based game bot: parses the arena feedback into a Map of Tiles
    and queues MOVE/ATTACK actions. The per-turn strategy is delegated to
    self.play, which concrete subclasses are expected to provide."""
    # The eight compass directions expressed as coordinate offsets.
    NW = PointInMap(-1, -1)
    N = PointInMap(-1, 0)
    NE = PointInMap(-1, 1)
    E = PointInMap(0, 1)
    SE = PointInMap(1, 1)
    S = PointInMap(1, 0)
    SW = PointInMap(1, -1)
    W = PointInMap(0, -1)
    # Fixed: this list previously read [NW, N, NE, W, SE, S, SW, W] — W
    # appeared twice and E was missing, so iterating DIRECTIONS could never
    # yield the east direction.
    DIRECTIONS = [NW, N, NE, E, SE, S, SW, W]

    def parse(self, feedback):
        """:feedback: <dict> that has
        {
            'player_num': <player_id>,
            'map': <arena_map> [
                [<tile_str>, .... ],
            ],
        }
        Builds self.game_map, a Map keyed by (x, y), and returns
        (player_id, game_map).
        """
        game_map = Map()
        player_id = str(feedback['player_num'])
        for y, row in enumerate(feedback['map']):
            for x, tile_str in enumerate(row):
                game_map[x, y] = Tile(
                    player_id=player_id,
                    content=tile_str,
                    coord_x=x,
                    coord_y=y
                )
        self.game_map = game_map
        return player_id, game_map

    def on_turn(self, feedback):
        """Entry point called once per turn: parse the feedback, let the
        strategy queue actions, and hand them back to the engine."""
        self.actions = []
        player_id, game_map = self.parse(feedback)
        # self.play is the strategy hook provided by concrete subclasses.
        self.play(player_id, game_map)
        return {'ACTIONS': self.actions}

    def attack(self, tile, direction):
        """Queue an ATTACK from *tile* toward the adjacent tile in *direction*.

        Raises InvalidActionException when the target is off-map, blocked,
        or holds no enemy units.
        """
        target_point = (tile + direction).as_tuple()
        # .get() tolerates off-map points; validate_target below raises
        # before a possibly-None target_tile is dereferenced.
        target_tile = self.game_map.get(target_point)
        self.validate_target(tile + direction)
        if not target_tile.enemies_count:
            raise InvalidActionException("Target tile is empty")
        self.actions.append({
            'action_type': 'ATTACK',
            'from': tile.as_tuple(),
            'to': target_point,
        })

    def validate_target(self, target_point):
        """Validates that a tile is inside the map and reachable"""
        coordinates = target_point.as_tuple()
        if coordinates not in self.game_map:
            raise InvalidActionException("Out of map")
        if not self.game_map[coordinates].reachable:
            raise InvalidActionException("Unreacheable")

    def move(self, unit, direction):
        """Queue a MOVE of *unit* one step in *direction*.

        Raises InvalidActionException when the destination is off-map,
        blocked, or occupied by enemy units.
        """
        target_point = (unit + direction)
        self.validate_target(target_point)
        if self.game_map[target_point.as_tuple()].enemies_count:
            raise InvalidActionException("Target not empty")
        self.actions.append({
            'action_type': 'MOVE',
            'unit_id': unit.unit_id,
            'direction': direction.as_tuple(),
        })
11371882 | <reponame>uktrade/auditinater
import requests
def strip_bearer(token):
    """Strip the bearer keyword (and surrounding whitespace) from the token.

    :param token: raw token string, with or without a ``bearer`` prefix
    :returns: The token string without the bearer keyword.
        E.g.::

            'bearer 1234' -> '1234'
            '1234' -> '1234'
    """
    # Split on spaces and drop empty fragments so leading/trailing/repeated
    # spaces are tolerated; the token itself is the last fragment. (The old
    # implementation computed and discarded token.strip(), then rebuilt the
    # last fragment via "".join(parts[-1:]) — dead code removed.)
    parts = [part for part in token.split(' ') if part]
    return parts[-1] if parts else ''
def client(api_token):
    """Build a requests session ready for API calls.

    :param api_token: The token string.  It may or may not start with the
        'bearer' keyword; either form is accepted.
    :returns: A requests.session instance with AUTHORIZATION and ACCEPT
        headers already set.
    """
    bare_token = strip_bearer(api_token)
    session = requests.session()
    headers = {
        'AUTHORIZATION': f'bearer {bare_token}',
        'ACCEPT': 'application/json',
    }
    session.headers.update(headers)
    return session
| StarcoderdataPython |
6622879 | <reponame>jkunimune15/kodi-analysis
# Demo: scatter a million points uniformly inside the unit disk, perturb
# them with several octaves of Perlin noise, and show the resulting
# density as a 2-D histogram.
import matplotlib.pyplot as plt
import numpy as np
import perlin
# Uniform sampling of the unit disk: the sqrt on the radius compensates
# for the r*dr area element so the point density is uniform.
r = np.sqrt(np.random.random(1000000))
t = 2*np.pi*np.random.random(1000000)
x, y = r*np.cos(t), r*np.sin(t)
dx, dy = np.zeros(x.shape), np.zeros(y.shape)
# Sum three noise octaves; each octave halves the spatial scale and
# quarters the amplitude.  (Assumes perlin.perlin(x, y, scale, amplitude)
# -- TODO confirm against the local perlin module's signature.)
for n in range(0, 3):
	dx += perlin.perlin(x, y, 2**(-n), 0.1*2**(-2*n))
	dy += perlin.perlin(x, y, 2**(-n), 0.1*2**(-2*n))
plt.hist2d(x + dx, y + dy, bins=72, range=[[-1.1, 1.1], [-1.1, 1.1]])
plt.axis('square')
plt.show()
| StarcoderdataPython |
3569076 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 20 2016
@author: <EMAIL>
Poles and zeros were calculated in Maxima from circuit component values which
are listed in:
https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.468-4-198607-I!!PDF-E.pdf
http://www.beis.de/Elektronik/AudioMeasure/WeightingFilters.html#CCIR
https://en.wikipedia.org/wiki/ITU-R_468_noise_weighting
"""
from numpy import pi
from scipy.signal import zpk2tf, zpk2sos, freqs, sosfilt
from waveform_analysis.weighting_filters._filter_design import _zpkbilinear
__all__ = ['ITU_R_468_weighting_analog', 'ITU_R_468_weighting',
'ITU_R_468_weight']
def ITU_R_468_weighting_analog():
    """
    Return zeros, poles, and gain of the analog ITU-R 468 weighting
    filter, normalized so the magnitude response at 6.3 kHz is +12.2 dB.
    """
    zeros = [0]
    # Conjugate pole pairs written explicitly as real + imag.
    poles = [
        -25903.70104781628,
        -23615.53521363528 + 36379.90893732929j,
        -23615.53521363528 - 36379.90893732929j,
        -18743.74669072136 + 62460.15645250649j,
        -18743.74669072136 - 62460.15645250649j,
        -62675.1700584679,
    ]
    # Normalize numerically: evaluate the unity-gain response at 6.3 kHz
    # and scale it up to +12.2 dB.
    # TODO: Derive exact value with sympy
    num, den = zpk2tf(zeros, poles, 1)
    _, resp = freqs(num, den, 2*pi*6300)
    gain = 10**(+12.2/20) / abs(resp[0])
    return zeros, poles, gain
def ITU_R_468_weighting(fs, output='ba'):
    """
    Return the ITU-R 468 weighting filter digitized at sample rate *fs*.

    Parameters
    ----------
    fs : float
        Sampling frequency
    output : {'ba', 'tf', 'zpk', 'sos'}
        Desired representation of the digital filter.

    Examples
    --------
    >>> from scipy.signal import freqz
    >>> import matplotlib.pyplot as plt
    >>> fs = 200000
    >>> b, a = ITU_R_468_weighting(fs)
    >>> f = np.logspace(np.log10(10), np.log10(fs/2), 1000)
    >>> w = 2*pi * f / fs
    >>> w, h = freqz(b, a, w)
    >>> plt.semilogx(w*fs/(2*pi), 20*np.log10(abs(h)))
    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
    >>> plt.axis([10, 100e3, -50, 20])
    """
    analog_z, analog_p, analog_k = ITU_R_468_weighting_analog()
    # Map the analog prototype into the digital domain with the bilinear
    # transform.
    dig_z, dig_p, dig_k = _zpkbilinear(analog_z, analog_p, analog_k, fs)
    if output == 'zpk':
        return dig_z, dig_p, dig_k
    if output in {'ba', 'tf'}:
        return zpk2tf(dig_z, dig_p, dig_k)
    if output == 'sos':
        return zpk2sos(dig_z, dig_p, dig_k)
    raise ValueError("'%s' is not a valid output form." % output)
def ITU_R_468_weight(signal, fs):
    """
    Filter *signal* through the ITU-R 468 weighting filter.

    signal : array_like
        Input signal
    fs : float
        Sampling frequency
    """
    # Second-order sections are numerically robust for this filter order.
    return sosfilt(ITU_R_468_weighting(fs, output='sos'), signal)
if __name__ == '__main__':
    # Run this module's test suite directly; --capture=sys lets pytest
    # show captured output.
    import pytest
    pytest.main(['../tests/test_ITU_R_468_weighting.py', "--capture=sys"])
| StarcoderdataPython |
5123748 | import sys
import os
import errno
import time
import json
import glob
import argparse
from base64 import b64decode
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    """Watchdog handler that extracts certificates from a traefik acme
    JSON storage file whenever the file is created or modified."""

    def __init__(self, args):
        self.traefik_version = args.traefikVersion
        self.challenge = args.challenge

    def on_created(self, event):
        self.handle_event(event)

    def on_modified(self, event):
        self.handle_event(event)

    def handle_event(self, event):
        # Check if it's a JSON file
        if not event.is_directory and event.src_path.endswith('.json'):
            print('Certificate storage changed (' + os.path.basename(event.src_path) + ')')
            self.handle_file(event.src_path)

    def handle_file(self, file):
        """Extract certificates from *file*; log (never raise) errors so
        the watcher keeps running."""
        try:
            self.extract_certs(file)
        except Exception as error:
            print('Error while handling file ' + file + ': ' + repr(error))

    def extract_certs(self, file):
        """Parse the acme JSON *file* and write out every certificate.

        Writes per-domain directories under certs/ (created on demand)
        and flat files under certs_flat/ (assumed to already exist).
        """
        # Read JSON file; a context manager ensures the handle is closed
        # (the previous version leaked it).
        with open(file) as fp:
            data = json.load(fp)
        # Determine challenge
        if self.traefik_version == 2:
            if self.challenge:
                challengeData = data[self.challenge]
            elif len(list(data.keys())) == 1:
                self.challenge = list(data.keys())[0]
                print('Using challenge: ' + self.challenge)
                challengeData = data[self.challenge]
            else:
                print('Available challenges: ' + (', '.join([str(x) for x in list(data.keys())])))
                raise ValueError('Multiple challenges found, please choose one with --challenge option')
        else:
            challengeData = data
        # Determine ACME version from the registration URI; fall back to
        # structure sniffing when the Account entry is absent/None.
        try:
            acme_version = 2 if 'acme-v02' in challengeData['Account']['Registration']['uri'] else 1
        except TypeError:
            if 'DomainsCertificate' in challengeData:
                acme_version = 1
            else:
                acme_version = 2
        # Find certificates
        if acme_version == 1:
            certs = challengeData['DomainsCertificate']['Certs']
        elif acme_version == 2:
            certs = challengeData['Certificates']
        print('Certificate storage contains ' + str(len(certs)) + ' certificates')
        # Loop over all certificates
        for c in certs:
            if acme_version == 1:
                name = c['Certificate']['Domain']
                privatekey = c['Certificate']['PrivateKey']
                fullchain = c['Certificate']['Certificate']
                sans = c['Domains']['SANs']
            elif acme_version == 2 and self.traefik_version == 1:
                name = c['Domain']['Main']
                privatekey = c['Key']
                fullchain = c['Certificate']
                sans = c['Domain']['SANs']
            elif acme_version == 2 and self.traefik_version == 2:
                name = c['domain']['main']
                privatekey = c['key']
                fullchain = c['certificate']
                sans = c['domain'].get('sans')
            # Decode private key, certificate and chain
            privatekey = b64decode(privatekey).decode('utf-8')
            fullchain = b64decode(fullchain).decode('utf-8')
            # The fullchain PEM starts with the leaf cert; everything from
            # the second BEGIN marker onward is the issuer chain.
            start = fullchain.find('-----BEGIN CERTIFICATE-----', 1)
            cert = fullchain[0:start]
            chain = fullchain[start:]
            # Create domain directory if it doesn't exist
            directory = 'certs/' + name + '/'
            try:
                os.makedirs(directory)
            except OSError as error:
                if error.errno != errno.EEXIST:
                    raise
            # Write private key, certificate and chain to file
            with open(directory + 'privkey.pem', 'w') as f:
                f.write(privatekey)
            with open(directory + 'cert.pem', 'w') as f:
                f.write(cert)
            with open(directory + 'chain.pem', 'w') as f:
                f.write(chain)
            with open(directory + 'fullchain.pem', 'w') as f:
                f.write(fullchain)
            # Write private key, certificate and chain to flat files
            directory = 'certs_flat/'
            with open(directory + name + '.key', 'w') as f:
                f.write(privatekey)
            with open(directory + name + '.crt', 'w') as f:
                f.write(fullchain)
            with open(directory + name + '.chain.pem', 'w') as f:
                f.write(chain)
            if sans:
                # BUG FIX: this loop used to reuse 'name' as its loop
                # variable, so the print below reported the last SAN
                # instead of the main domain.
                for san in sans:
                    with open(directory + san + '.key', 'w') as f:
                        f.write(privatekey)
                    with open(directory + san + '.crt', 'w') as f:
                        f.write(fullchain)
                    with open(directory + san + '.chain.pem', 'w') as f:
                        f.write(chain)
            print('Extracted certificate for: ' + name + (', ' + ', '.join(sans) if sans else ''))
if __name__ == "__main__":
# Determine args
parser = argparse.ArgumentParser(description='Traefik certificate extractor')
parser.add_argument('path', nargs='?', default='./data', help='Path to traefik acme file')
parser.add_argument('-tv', '--traefikVersion', type=int, choices=[1, 2], default=1, help='Traefik version')
parser.add_argument('-c', '--challenge', help='Traefik challenge to use (only for traefik v2)')
args = parser.parse_args()
print('Path: ' + args.path)
print('Traefik version: ' + str(args.traefikVersion))
if args.traefikVersion >= 2 and args.challenge:
print('Traefik challenge: ' + args.challenge)
# Create output directories if it doesn't exist
try:
os.makedirs('certs')
except OSError as error:
if error.errno != errno.EEXIST:
raise
try:
os.makedirs('certs_flat')
except OSError as error:
if error.errno != errno.EEXIST:
raise
# Create event handler and observer
event_handler = Handler(args)
observer = Observer()
# Extract certificates from current file(s) before watching
files = glob.glob(os.path.join(args.path, '*.json'))
for file in files:
print('Certificate storage found (' + os.path.basename(file) + ')')
event_handler.handle_file(file)
# Register the directory to watch
observer.schedule(event_handler, args.path)
# Main loop to watch the directory
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| StarcoderdataPython |
129801 | from logging import getLogger
from typing import Tuple, Union
import torch
from torch import nn
from torch.nn.utils import spectral_norm
from ..modules import init_xavier_uniform
from ..modules.lightweight import SimpleDecoderBlock
# Module-level logger; the debug message below is Japanese for
# "script loaded" (runtime string left untranslated).
logger = getLogger(__name__)
logger.debug('スクリプトを読み込みました。')
class SimpleDecoder(nn.Module):
    """Small decoder that upsamples a 1x1 feature map to an 8x8 image.

    Used as an auxiliary reconstruction head; the final Sigmoid maps the
    output into [0, 1].
    """

    def __init__(self, in_channels: int, nc: int):
        super().__init__()
        # Spatial sizes per stage: 1 -> 2 -> 4 -> 8 -> 8.
        layers = [
            SimpleDecoderBlock(in_channels, 64),
            SimpleDecoderBlock(64, 32),
            SimpleDecoderBlock(32, 16),
            spectral_norm(
                nn.Conv2d(16, nc, 1, stride=1, padding=0, bias=False)),
            nn.Sigmoid(),
        ]
        self.decode = nn.Sequential(*layers)

    def forward(self, x):
        """Decode feature map *x* into an image tensor."""
        return self.decode(x)
class DBlock(nn.Module):
    """Discriminator block: spectrally-normalized Conv2d + LeakyReLU.

    NOTE(review): num_classes and prob_dropout are accepted but unused in
    this block -- presumably kept for signature compatibility; confirm
    before removing.
    """

    def __init__(
        self, in_channels: int, out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        num_classes: int = 0, prob_dropout: float = 0.3
    ):
        super().__init__()
        conv = spectral_norm(
            nn.Conv2d(
                in_channels, out_channels,
                kernel_size, stride, padding, bias=True))
        self.main = nn.Sequential(conv, nn.LeakyReLU(0.2))
        # Weight initialization (xavier uniform).
        self.main.apply(init_xavier_uniform)

    def forward(self, inputs):
        """Apply the conv block to *inputs*."""
        return self.main(inputs)
class Discriminator(nn.Module):
    """GAN discriminator with an auxiliary reconstruction decoder and an
    optional projection-discriminator class embedding.

    nc: number of image channels.
    num_classes: when > 0, a class embedding is added for the projection
        term in forward(); otherwise sn_embedding is None.
    """
    def __init__(
        self, nc: int,
        num_classes: int = 0
    ):
        super().__init__()
        logger.debug('Discriminatorのインスタンスを作成します。')
        self.blocks = nn.Sequential(
            # 32 -> 16
            DBlock(nc, 32, 3, stride=2, padding=1),
            # 16 -> 8
            DBlock(32, 64, 3, stride=2, padding=1),
            # 8 -> 4
            DBlock(64, 128, 3, stride=2, padding=1),
            # 4 -> 1
            DBlock(128, 256, 4, stride=1, padding=0),
        )
        # Reconstruct an 8x8 image from the block output.
        self.recons = SimpleDecoder(256, nc)
        self.real_fake = nn.Linear(256, 1)
        if num_classes > 0:
            self.sn_embedding = spectral_norm(
                nn.Embedding(num_classes, 256))
            self.sn_embedding.apply(init_xavier_uniform)
        else:
            self.sn_embedding = None
    def forward(
        self, x, classes=None,
        detach: bool = False, reconstruct: bool = False,
    ):
        """Return (real/fake logits, reconstruction or None).

        detach: stop gradients from flowing back through *x*.
        classes: optional class indices for the projection term.
        reconstruct: when True, also decode an image from the features.
        """
        if detach:
            x = x.detach()
        x = self.blocks(x)
        # Reconstruction head (only when requested).
        recons = self.recons(x) if reconstruct else None
        h = x.view(-1, x.size(1))
        real_fake = self.real_fake(h)
        # cGANs with Projection Discriminator
        if classes is not None:
            real_fake += torch.sum(
                h * self.sn_embedding(classes),
                1, keepdim=True)
        return real_fake, recons
| StarcoderdataPython |
4889617 | <filename>data/pakistan-data/crawler.py
# Scrapes Pakistan COVID-19 per-region statistics from a Google Data
# Studio dashboard via a remote Selenium Firefox and appends new rows to
# a CSV file.  (Fix: the final line was fused with dataset junk, which
# made the script a syntax error.)
from selenium import webdriver
from datetime import datetime, timedelta
import requests
import time
opts = webdriver.FirefoxOptions()
opts.add_argument("--headless")
driver = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
                          desired_capabilities=opts.to_capabilities())
#url = 'https://covid.gov.pk/stats/pakistan'
#html_txt = requests.get(url=url).text
#data_url = html_txt.split('datastudio.google.com')[1]
#data_url = data_url.split('"')[0]
#data_url = 'https://datastudio.google.com' + data_url
data_url = 'https://datastudio.google.com/embed/reporting/1PLVi5amcc_R5Gh928gTE8-8r8-fLXJQF/page/R24IB'
file_name = 'data/pakistan-data/pakistan.csv'
# 'r+' so we can both read existing content and append new rows.
f = open(file_name, 'r+')
csv_content = f.read()
try:
    driver.get(data_url)
    # Give the dashboard time to render its tables.
    time.sleep(20)
    tables = driver.find_elements_by_css_selector('lego-table.table')
    # NOTE(review): 'date' is only bound once a table containing 'GMT'
    # has been seen; the code relies on that table preceding the 'AJK'
    # data table -- confirm the dashboard layout guarantees this.
    for table in tables:
        table_text = table.text.strip().replace(',', '')
        if 'GMT' in table_text:
            # update time
            date = table_text.split('\n')[1]
            date = date.split('-')[0].strip()
            date = datetime.strptime(date, '%d %b %Y')
            date -= timedelta(days=1)
            date = date.strftime('%Y-%m-%d')
        elif table_text.startswith('AJK'):
            if (date in csv_content):
                print('Pakistan data on ' + date + ' already exist!')
            else:
                # Rows come flattened in groups of 5 cells per region.
                data = table_text.split('\n')
                csv_lines = []
                for i in range(0, len(data), 5):
                    region = data[i]
                    counts = [data[idx] for idx in [i + 1, i + 3, i + 4]]
                    csv_lines.append(date + ',' + region + ',' +
                                     ','.join(counts))
                f.write('\n')
                f.write('\n'.join(csv_lines))
except Exception as e:
    print('Error ocurred when scraping Pakistan data!')
    print(e)
driver.quit()
f.close()
6496477 |
from functools import wraps
from threading import Timer
def delayed(seconds):
    """Decorator factory: run the decorated function *seconds* later.

    The wrapped call returns immediately; the real call happens on a
    threading.Timer thread.  The Timer object is returned so callers may
    cancel() or join() it (previously None was returned, so this is
    backward compatible).
    """
    def decorator(f):
        @wraps(f)  # preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            t = Timer(seconds, f, args, kwargs)
            t.start()
            return t
        return wrapper
    return decorator
@delayed(3)
def timer_print():
    # print as a function call: identical output under Python 2 for a
    # single string, and valid under Python 3 (the old bare print
    # statement was a SyntaxError there).
    print("timer_print")
# Schedule three delayed prints; each fires ~3 seconds after its call.
for i in range(3):
    timer_print()
4889236 | <filename>demo/demo.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
    """Runs a Mesh R-CNN predictor on single images and saves, per
    detection, a mask/box overlay PNG and the predicted mesh as .obj."""
    def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
        """
        Args:
            cfg (CfgNode):
            vis_highest_scoring (bool): If set to True visualizes only
                the highest scoring prediction
            output_dir (str): directory where overlays and meshes are
                written; created if missing.
        """
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
        self.colors = self.metadata.thing_colors
        self.cat_names = self.metadata.thing_classes
        self.cpu_device = torch.device("cpu")
        self.vis_highest_scoring = vis_highest_scoring
        self.predictor = DefaultPredictor(cfg)
        os.makedirs(output_dir, exist_ok=True)
        self.output_dir = output_dir
    def run_on_image(self, image, focal_length=10.0):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.
            focal_length (float): the focal_length of the image
        Returns:
            predictions (dict): the output of the model.
        """
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        # camera matrix
        imsize = [image.shape[0], image.shape[1]]
        # focal <- focal * image_width / 32
        # (rescales the relative focal length to pixels; presumably 32 is
        # the reference width used at training time -- TODO confirm)
        focal_length = image.shape[1] / 32 * focal_length
        K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]
        if "instances" in predictions:
            instances = predictions["instances"].to(self.cpu_device)
            scores = instances.scores
            boxes = instances.pred_boxes
            labels = instances.pred_classes
            masks = instances.pred_masks
            meshes = Meshes(
                verts=[mesh[0] for mesh in instances.pred_meshes],
                faces=[mesh[1] for mesh in instances.pred_meshes],
            )
            # Predicted depth extent, scaled by box height.
            pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
            tc = pred_dz.abs().max() + 1.0
            # Per-detection near/far z range around tc.
            zranges = torch.stack(
                [
                    torch.stack(
                        [
                            tc - tc * pred_dz[i] / 2.0 / focal_length,
                            tc + tc * pred_dz[i] / 2.0 / focal_length,
                        ]
                    )
                    for i in range(len(meshes))
                ],
                dim=0,
            )
            Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
            meshes = transform_meshes_to_camera_coord_system(
                meshes, boxes.tensor, zranges, Ks, imsize
            )
            # Either only the best detection or all of them.
            if self.vis_highest_scoring:
                det_ids = [scores.argmax().item()]
            else:
                det_ids = range(len(scores))
            for det_id in det_ids:
                self.visualize_prediction(
                    det_id,
                    image,
                    boxes.tensor[det_id],
                    labels[det_id],
                    scores[det_id],
                    masks[det_id],
                    meshes[det_id],
                )
        return predictions
    def visualize_prediction(
        self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
    ):
        """Save a mask/box/label overlay PNG and the mesh .obj for one
        detection.  alpha controls the mask blend; dpi is unused here."""
        mask_color = np.array(self.colors[label], dtype=np.float32)
        cat_name = self.cat_names[label]
        thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
        box_color = (0, 255, 0)  # '#00ff00', green
        text_color = (218, 227, 218)  # gray
        composite = image.copy().astype(np.float32)
        # overlay mask (alpha-blend the class color over masked pixels)
        idx = mask.nonzero()
        composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
        composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
        # overlay box
        (x0, y0, x1, y1) = (int(x + 0.5) for x in box)
        composite = cv2.rectangle(
            composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
        )
        composite = composite.astype(np.uint8)
        # overlay text
        font_scale = 0.001 * image.shape[0]
        font_thickness = thickness
        font = cv2.FONT_HERSHEY_TRIPLEX
        text = "%s %.3f" % (cat_name, score)
        ((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
        # Place text background (clamped so the label stays inside the
        # image).
        if x0 + text_w > composite.shape[1]:
            x0 = composite.shape[1] - text_w
        if y0 - int(1.2 * text_h) < 0:
            y0 = int(1.2 * text_h)
        back_topleft = x0, y0 - int(1.3 * text_h)
        back_bottomright = x0 + text_w, y0
        cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
        # Show text
        text_bottomleft = x0, y0 - int(0.2 * text_h)
        cv2.putText(
            composite,
            text,
            text_bottomleft,
            font,
            font_scale,
            text_color,
            thickness=font_thickness,
            lineType=cv2.LINE_AA,
        )
        # Write overlay (back to BGR for OpenCV) and mesh.
        save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
        cv2.imwrite(save_file, composite[:, :, ::-1])
        save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
        verts, faces = mesh.get_mesh_verts_faces(0)
        save_obj(save_file, verts, faces)
def setup_cfg(args):
    """Build a frozen detectron2 config from parsed CLI arguments.

    Order matters: Mesh R-CNN defaults are registered first, then the
    YAML config file, then command-line overrides.
    """
    config = get_cfg()
    get_meshrcnn_cfg_defaults(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    return config
def get_parser():
    """Build the command-line argument parser for the Mesh R-CNN demo."""
    p = argparse.ArgumentParser(description="MeshRCNN Demo")
    # Model configuration file (detectron2 YAML).
    p.add_argument(
        "--config-file",
        default="configs/pix3d/meshrcnn_R50_FPN.yaml",
        metavar="FILE",
        help="path to config file",
    )
    # Input/output locations.
    p.add_argument("--input", help="A path to an input image")
    p.add_argument("--output", help="A directory to save output visualizations")
    # Camera parameter for back-projecting the mesh.
    p.add_argument(
        "--focal-length", type=float, default=20.0, help="Focal length for the image"
    )
    p.add_argument(
        "--onlyhighest", action="store_true", help="will return only the highest scoring detection"
    )
    # Anything left over is forwarded as config overrides.
    p.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return p
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
| StarcoderdataPython |
6493679 | import discord
from discord.ext import commands
import datetime
import traceback
import time
class CommandLogging(commands.Cog):
    """Cog that logs completion and errors of moderator commands to a
    fixed info channel as embeds.

    NOTE(review): these on_command* coroutines carry no
    @commands.Cog.listener() decorator; whether they are actually invoked
    depends on the discord.py version/registration mechanism in use --
    confirm against the bot's setup.
    """
    def __init__(self, bot):
        self.bot = bot
        # Channel that receives the log embeds.
        self.bot.info_channel_id = 527373033568993282
        # Contexts of commands flagged for logging (filled elsewhere).
        self.bot.mod_commands = []
    async def on_command(self, ctx):
        # Stamp the start time so the completion/error hooks can report
        # elapsed time.
        ctx.start_time = time.perf_counter()
    async def on_command_completion(self, ctx):
        """Post a green embed when a tracked command completes."""
        if ctx not in self.bot.mod_commands:
            return  # not a command decorated with @checks.mod_commands()
        if ctx.command.name == 'help':
            return  # the help command slips through the filter; skip it
        time_now = time.perf_counter()
        time_taken = round(time_now - ctx.start_time, 4)
        e = discord.Embed(colour=discord.Colour.green())
        e.set_author(name=f'{ctx.author.name}#{ctx.author.discriminator}',
                     icon_url=ctx.author.avatar_url)
        e.description = ':white_check_mark: Command Completed'
        e.add_field(name='Command:',
                    value=ctx.command.qualified_name)
        e.add_field(name='Location:',
                    value=f'Channel: #{ctx.channel.name} ({ctx.channel.id})')
        e.add_field(name='Time taken:',
                    value=f'**{time_taken}** seconds')
        e.timestamp = datetime.datetime.utcnow()
        await self.bot.get_channel(self.bot.info_channel_id).send(embed=e)
    async def on_command_error(self, ctx, error):
        """Post a red embed with the traceback when a command fails."""
        # we dont want logs for this stuff which isnt our problem
        ignored = (commands.NoPrivateMessage, commands.DisabledCommand, commands.CheckFailure,
                   commands.CommandNotFound, commands.UserInputError, discord.Forbidden)
        # Unwrap CommandInvokeError to the underlying exception.
        error = getattr(error, 'original', error)
        if isinstance(error, ignored):
            return
        # NOTE(review): ctx.start_time is set in on_command; if the error
        # occurs before that hook runs this raises AttributeError --
        # confirm ordering.
        time_now = time.perf_counter()
        time_taken = round(time_now - ctx.start_time, 4)
        e = discord.Embed(colour=discord.Colour.red())
        e.set_author(name=f'{ctx.author.name}#{ctx.author.discriminator}',
                     icon_url=ctx.author.avatar_url)
        e.title = ':x: Command Error'
        exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
        e.description = f'```py\n{exc}\n```'  # format legible traceback
        e.add_field(name='Command:',
                    value=ctx.command.qualified_name)
        e.add_field(name='Location:',
                    value=f'Channel: #{ctx.channel.name} ({ctx.channel.id})')
        e.add_field(name='Time taken:',
                    value=f'**{time_taken}** seconds')
        e.timestamp = datetime.datetime.utcnow()
        e.set_footer(text="Please ping maths if the solution is not obvious or you don't understand")
        # await self.bot.get_channel(self.bot.info_channel_id).send(embed=e)
        await ctx.send(embed=e)
def setup(bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(CommandLogging(bot))
| StarcoderdataPython |
11209496 | """
Trace object space traces operations and bytecode execution
in frames.
"""
from pypy.tool import pydis
from pypy.rlib.rarithmetic import intmask
# __________________________________________________________________________
#
# Tracing Events
# __________________________________________________________________________
#
class ExecBytecode(object):
    """ bytecode trace. """
    def __init__(self, frame):
        self.frame = frame
        self.code = frame.pycode
        # Index of the last executed instruction (masked to a machine int).
        self.index = intmask(frame.last_instr)
class EnterFrame(object):
    # Event: evaluation of *frame* is (re)starting.
    def __init__(self, frame):
        self.frame = frame
class LeaveFrame(object):
    # Event: evaluation of *frame* was suspended or finished.
    def __init__(self, frame):
        self.frame = frame
class CallInfo(object):
    """ encapsulates a function call with its arguments. """
    def __init__(self, name, func, args, kwargs):
        self.name = name
        self.func = func
        self.args = args
        self.kwargs = kwargs
class CallBegin(object):
    # Event: a traced space operation is about to run.
    def __init__(self, callinfo):
        self.callinfo = callinfo
class CallFinished(object):
    # Event: a traced space operation returned *res*.
    def __init__(self, callinfo, res):
        self.callinfo = callinfo
        self.res = res
class CallException(object):
    # Event: a traced space operation raised *e*.
    def __init__(self, callinfo, e):
        self.callinfo = callinfo
        self.ex = e
class TraceResult(object):
    """ This is the state of tracing-in-progress.

    Collects trace events and forwards each one to a printer built from
    printer_options['result_printer_clz'].
    """
    def __init__(self, tracespace, **printer_options):
        self.events = []
        # Guards against the printer itself generating trace events.
        self.reentrant = True
        self.tracespace = tracespace
        result_printer_clz = printer_options["result_printer_clz"]
        self.printer = result_printer_clz(**printer_options)
        # pydis results cached per code object (see getdisresult).
        self._cache = {}
    def append(self, event):
        # Record and print an event, suppressing re-entrant appends that
        # the printer may trigger.
        if self.reentrant:
            self.reentrant = False
            self.events.append(event)
            self.printer.print_event(self.tracespace, self, event)
            self.reentrant = True
    def getbytecodes(self):
        # Yield the disassembled bytecode for every ExecBytecode event.
        for event in self.events:
            if isinstance(event, ExecBytecode):
                disres = self.getdisresult(event.frame)
                yield disres.getbytecode(event.index)
    def getoperations(self):
        # Yield only the call-related events.
        for event in self.events:
            if isinstance(event, (CallBegin, CallFinished, CallException)):
                yield event
    def getevents(self):
        # Yield every recorded event in order.
        for event in self.events:
            yield event
    def getdisresult(self, frame):
        """ return (possibly cached) pydis result for the given frame. """
        try:
            return self._cache[id(frame.pycode)]
        except KeyError:
            res = self._cache[id(frame.pycode)] = pydis.pydis(frame.pycode)
            assert res is not None
            return res
# __________________________________________________________________________
#
# Tracer Proxy objects
# __________________________________________________________________________
#
class ExecutionContextTracer(object):
    """Proxy around an execution context that records frame enter/leave
    and bytecode events into *result* before delegating."""
    def __init__(self, result, ec):
        self.ec = ec
        self.result = result
    def __getattr__(self, name):
        """ generically pass through everything else ... """
        return getattr(self.ec, name)
    def enter(self, frame):
        """ called just before (continuing to) evaluating a frame. """
        self.result.append(EnterFrame(frame))
        self.ec.enter(frame)
    def leave(self, frame):
        """ called just after evaluating of a frame is suspended/finished. """
        self.result.append(LeaveFrame(frame))
        self.ec.leave(frame)
    def bytecode_trace(self, frame):
        """ called just before execution of a bytecode. """
        self.result.append(ExecBytecode(frame))
        self.ec.bytecode_trace(frame)
class CallableTracer(object):
    """Proxy around a callable that records CallBegin/CallFinished (or
    CallException) events in *result* around each invocation.

    NOTE: this file uses Python 2 syntax ('except Exception, e' below).
    """
    def __init__(self, result, name, func):
        self.result = result
        self.name = name
        self.func = func
    def __call__(self, *args, **kwargs):
        callinfo = CallInfo(self.name, self.func, args, kwargs)
        self.result.append(CallBegin(callinfo))
        try:
            res = self.func(*args, **kwargs)
        except Exception, e:
            # Record the failure, then let the exception propagate.
            self.result.append(CallException(callinfo, e))
            raise
        else:
            self.result.append(CallFinished(callinfo, res))
            return res
    def __getattr__(self, name):
        """ generically pass through everything we don't intercept. """
        return getattr(self.func, name)
    def __str__(self):
        return "%s - CallableTracer(%s)" % (self.name, self.func)
    __repr__ = __str__
# __________________________________________________________________________
#
# Tracer factory
# __________________________________________________________________________
#
def create_trace_space(space):
    """ Will turn the supplied into a traceable space by extending its class.

    Dynamically subclasses the space's class with a Trace mixin that
    wraps configured operations in CallableTracer proxies; the original
    class is kept on __oldclass__ so reset_trace() can restore it.
    """
    # Don't trace an already traceable space
    if hasattr(space, "__pypytrace__"):
        return space
    class Trace(space.__class__):
        def __getattribute__(self, name):
            obj = super(Trace, self).__getattribute__(name)
            # Internal bookkeeping attributes are never wrapped (would
            # recurse otherwise).
            if name in ["_result", "_in_cache", "_ect_cache",
                        "_tracing", "_config_options"]:
                return obj
            if not self._tracing or self._in_cache:
                return obj
            # Wrap configured space operations so calls get recorded.
            if name in self._config_options["operations"]:
                assert callable(obj)
                obj = CallableTracer(self._result, name, obj)
            return obj
        def __pypytrace__(self):
            # Marker used above to detect already-traceable spaces.
            pass
        def enter_cache_building_mode(self):
            self._in_cache += 1
        def leave_cache_building_mode(self, val):
            self._in_cache -= 1
        def settrace(self):
            # Start a fresh trace: new result sink, empty context cache.
            self._result = TraceResult(self, **self._config_options)
            self._ect_cache = {}
            self._tracing = True
        def unsettrace(self):
            self._tracing = False
        def getresult(self):
            return self._result
        def getexecutioncontext(self):
            # Wrap the execution context in a tracer, cached per context.
            ec = super(Trace, self).getexecutioncontext()
            if not self._in_cache:
                try:
                    ect = self._ect_cache[ec]
                except KeyError:
                    assert not isinstance(ec, ExecutionContextTracer)
                    ect = ExecutionContextTracer(self._result, ec)
                    self._ect_cache[ec] = ect
                return ect
            return ec
        # XXX Rename
        def reset_trace(self):
            """ Returns the class to its original form. """
            space.__class__ = space.__oldclass__
            del space.__oldclass__
            for k in ["_result", "_in_cache", "_ect_cache",
                      "_config_options", "_operations"]:
                if hasattr(self, k):
                    delattr(self, k)
    # Give the trace class a distinctive name and swap it in.
    trace_clz = type("Trace%s" % repr(space), (Trace,), {})
    space.__oldclass__, space.__class__ = space.__class__, trace_clz
    # Do config
    from pypy.tool.traceconfig import config
    space._tracing = False
    space._result = None
    space._ect_cache = {}
    space._in_cache = 0
    space._config_options = config
    space.settrace()
    return space
# ______________________________________________________________________
# End of trace.py
| StarcoderdataPython |
273067 | import configargparse
def config_parser():
    """Build the configargparse parser for NeRF/VolSDF-style training.

    Arguments may come from the command line or from the --config file.
    Returns the (unparsed) parser.
    """
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True,
                        help='config file path')
    parser.add_argument("--expname", type=str,
                        help='experiment name')
    parser.add_argument("--basedir", type=str, default='./logs/',
                        help='where to store ckpts and logs')
    parser.add_argument("--datadir", type=str, default='./data/llff/fern',
                        help='input data directory')
    parser.add_argument("--scene", type=str, default='1',
                        help="the scene id of blendermvs dataset")
    # sampling options
    parser.add_argument("--N_rand", type=int, default=1024,
                        help='batch size (number of random rays per gradient step)')
    parser.add_argument("--N_views", type=int, default=1,
                        help='number of view used in PerViewRaySampler')
    parser.add_argument("--no_batching", action='store_true',
                        help='only take random rays from 1 image at a time')
    parser.add_argument("--full_rays", action='store_true', help='used for PerViewRaySampler')
    parser.add_argument("--precrop_iters", type=int, default=0,
                        help='number of steps to train on central crops')
    parser.add_argument("--precrop_frac", type=float,
                        default=.5, help='fraction of img taken for central crops')
    # point sampling options
    parser.add_argument("--N_samples", type=int, default=64,
                        help='number of coarse samples per ray')
    parser.add_argument("--N_samples_eval", type=int, default=128)
    parser.add_argument("--N_samples_extra", type=int, default=32)
    parser.add_argument("--near", type=float, default=0.0)
    parser.add_argument("--eps", type=float, default=0.1)
    parser.add_argument("--beta_iters", type=int, default=10)
    parser.add_argument("--max_total_iters", type=int, default=5)
    parser.add_argument("--N_samples_inverse_sphere", type=int, default=32)
    parser.add_argument("--add_tiny", type=float, default=1e-6)
    parser.add_argument("--perturb", type=float, default=1.,
                        help='set to 0. for no jitter, 1. for jitter')
    # training options
    parser.add_argument("--N_iters", type=int, default=200000,
                        help='the number of training iterations')
    parser.add_argument("--lrate", type=float, default=5e-4,
                        help='learning rate')
    parser.add_argument("--lrate_decay", type=int, default=250,
                        help='exponential learning rate decay (in 1000 steps)')
    parser.add_argument("--chunk", type=int, default=1024*8,
                        help='number of rays processed in parallel, decrease if running out of memory')
    parser.add_argument("--no_reload", action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument("--ft_path", type=str, default=None,
                        help='specific weights file to reload for network')
    # model options
    parser.add_argument("--feature_vector_size", type=int, default=256)
    parser.add_argument("--scene_bounding_sphere", type=float, default=3.0)
    ## sdf net
    parser.add_argument("--D_sdf", type=int, default=8,
                        help='layers in network')
    parser.add_argument("--W_sdf", type=int, default=256,
                        help='channels per layer')
    parser.add_argument("--multires", type=int, default=6,
                        help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument("--sdf_geo_init", action='store_true')
    parser.add_argument("--sdf_weight_norm", action='store_true')
    parser.add_argument("--sdf_skip_in", nargs='+', type=int, default=[4])
    parser.add_argument("--sdf_bias", type=float, default=0.6)
    ## rgb net
    parser.add_argument("--D_rgb", type=int, default=4)
    parser.add_argument("--W_rgb", type=int, default=256)
    parser.add_argument("--rgb_mode", type=str, choices=['idr', 'nerf'], default='idr')
    parser.add_argument("--rgb_weight_norm", action='store_true')
    parser.add_argument("--multires_views", type=int, default=4,
                        help='log2 of max freq for positional encoding (2D direction)')
    ## density
    parser.add_argument("--beta", type=float, default=0.1)
    parser.add_argument("--beta_min", type=float, default=0.0001)
    ## bg net
    parser.add_argument("--inverse_sphere_bg", action='store_true')
    parser.add_argument("--bg_feature_vector_size", type=int, default=256)
    parser.add_argument("--multires_bg", type=int, default=10,
                        help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument("--D_bg_sdf", type=int, default=8,
                        help='layers in network')
    parser.add_argument("--W_bg_sdf", type=int, default=256,
                        help='channels per layer')
    parser.add_argument("--bg_sdf_skip_in", nargs='+', type=int, default=[4])
    ## Not used.
    # parser.add_argument("--bg_sdf_geo_init", action='store_true')
    # parser.add_argument("--bg_sdf_weight_norm", action='store_true')
    # parser.add_argument("--bg_sdf_bias", type=float, default=0.0)
    parser.add_argument("--D_bg_rgb", type=int, default=1)
    parser.add_argument("--W_bg_rgb", type=int, default=128)
    parser.add_argument("--bg_rgb_mode", type=str, choices=['idr', 'nerf'], default='nerf')
    # parser.add_argument("--bg_rgb_weight_norm", action='store_true')
    # loss options
    parser.add_argument("--with_eikonal_samples", action='store_true')
    parser.add_argument("--eikonal_weight", type=float, default=0.1)
    # rendering options
    parser.add_argument("--render_only", action='store_true',
                        help='do not optimize, reload weights and render out render_poses path')
    parser.add_argument("--render_test", action='store_true',
                        help='render the test set instead of render_poses path')
    parser.add_argument("--render_factor", type=int, default=0,
                        help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    # distributed options
    parser.add_argument("--local_rank", type=int, default=-1,
                        help='node rank for distributed training')
    # dataset options
    parser.add_argument("--dataset_type", type=str, default='llff',
                        help='options: llff / blender / deepvoxels')
    parser.add_argument("--testskip", type=int, default=8,
                        help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
    ## blender flags
    parser.add_argument("--white_bkgd", action='store_true',
                        help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument("--half_res", action='store_true',
                        help='load blender synthetic data at 400x400 instead of 800x800')
    ## llff flags
    parser.add_argument("--factor", type=int, default=8,
                        help='downsample factor for LLFF images')
    parser.add_argument("--no_ndc", action='store_true',
                        help='do not use normalized device coordinates (set for non-forward facing scenes)')
    parser.add_argument("--lindisp", action='store_true',
                        help='sampling linearly in disparity rather than depth')
    parser.add_argument("--spherify", action='store_true',
                        help='set for spherical 360 scenes')
    parser.add_argument("--llffhold", type=int, default=8,
                        help='will take every 1/N images as LLFF test set, paper uses 8')
    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100,
                        help='frequency of console printout and metric loggin')
    parser.add_argument("--i_img", type=int, default=1000,
                        help='frequency of tensorboard image logging')
    parser.add_argument("--i_weights", type=int, default=10000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("--i_testset", type=int, default=50000,
                        help='frequency of testset saving')
    parser.add_argument("--i_video", type=int, default=50000,
                        help='frequency of render_poses video saving')
    return parser
| StarcoderdataPython |
9721350 | import io
import time
import picamera
import asyncio
import file_naming_manager
import os
import time
from subprocess import call
import base64
#make sure the video_detections folder exists
class CameraHandler:
    """Owns the Raspberry Pi camera and the detection-video workflow.

    Records short clips via picamera into the ``video_detections`` folder,
    converts them to mp4 with the external MP4Box tool and exposes the
    result as raw or base64-encoded bytes.
    """
    def __init__(self, parent):
        self.parent = parent
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        # Callers check this flag instead of handling camera exceptions.
        self.setup_was_successful = True
        self.naming_manager = file_naming_manager.NamingManager()
        try:
            self.camera = picamera.PiCamera(framerate = 30)
            self.camera.vflip = True
            self.camera.resolution = (1024, 768)
            # Warm the sensor up for two seconds so exposure settles.
            self.camera.start_preview()
            time.sleep(2)
            self.camera.stop_preview()
            self.stream = io.BytesIO()
        except Exception:
            # No camera attached or picamera failed to initialise.
            self.setup_was_successful = False
    def record_for_x_seconds(self, x):
        """Record a clip of ``x`` seconds under a managed file name."""
        self.naming_manager.remove_the_oldest()
        camera = picamera.PiCamera()
        try:
            camera.resolution = (640, 480)
            camera.start_recording(self.naming_manager.new_file_name())
            camera.wait_recording(x)
            camera.stop_recording()
        finally:
            # Release the device even on failure, otherwise every later
            # PiCamera() call raises "camera is already in use".
            camera.close()
    #convert video to mp4
    def convert_video_to_mp4(self, file_name):
        """Wrap ``file_name`` into ``temp_file.mp4`` using MP4Box.

        Bug fix: the previous code passed ``[cd_command, command]`` with
        shell=True, which makes the shell execute only the ``cd`` and
        silently skips MP4Box.  Running MP4Box directly with ``cwd=`` also
        avoids shell injection through ``file_name``.
        """
        call(["MP4Box", "-add", file_name, "temp_file.mp4"],
             cwd=f"{self.dir_path}/video_detections")
    def get_video_bytes_in_mp4_format(self, file_name):
        """Convert ``file_name`` to mp4 and return the mp4 file's bytes."""
        self.convert_video_to_mp4(file_name)
        # Give MP4Box a moment to flush the output file to disk.
        time.sleep(1)
        with open(f"{self.dir_path}/video_detections/temp_file.mp4", "rb") as File:
            return File.read()
    def base64_encoded_video(self, byte_data):
        """Return ``byte_data`` base64-encoded (as bytes)."""
        return base64.b64encode(byte_data)
    def gather_all_detections(self):
        """List the file names currently in the video_detections folder."""
        return os.listdir(self.dir_path + "/video_detections")
8187935 | """
<http://ircv3.net/specs/extensions/tls-3.1.html>
"""
from irctest import cases
from irctest.basecontrollers import NotImplementedByController
class StarttlsFailTestCase(cases.BaseServerTestCase):
    """Checks the failure path of STARTTLS: the server advertises the
    ``tls`` capability but is started *without* SSL configured, so the
    STARTTLS attempt must be rejected with numeric 691 (ERR_STARTTLS)."""
    @cases.SpecificationSelector.requiredBySpecification('IRCv3.1')
    def testStarttlsRequestTlsFail(self):
        """<http://ircv3.net/specs/extensions/tls-3.1.html>
        """
        self.addClient()
        # TODO: check also without this
        self.sendLine(1, 'CAP LS')
        capabilities = self.getCapLs(1)
        if 'tls' not in capabilities:
            # Server does not support STARTTLS at all; nothing to test.
            raise NotImplementedByController('starttls')
        # TODO: check also without this
        self.sendLine(1, 'CAP REQ :tls')
        m = self.getRegistrationMessage(1)
        # TODO: Remove this once the trailing space issue is fixed in Charybdis
        # and Mammon:
        #self.assertMessageEqual(m, command='CAP', params=['*', 'ACK', 'tls'],
        #        fail_msg='Did not ACK capability `tls`: {msg}')
        self.sendLine(1, 'STARTTLS')
        m = self.getRegistrationMessage(1)
        self.assertMessageEqual(m, command='691',
                fail_msg='Did not respond to STARTTLS with 691 whereas '
                'SSL is not configured: {msg}.')
class StarttlsTestCase(cases.BaseServerTestCase):
    """Checks the success path of STARTTLS: server started with SSL, the
    client upgrades the connection after the 670 reply and finishes
    registration over TLS."""
    # Ask the controller to start the server with SSL support enabled.
    ssl = True
    # NOTE(review): unlike the failure test above, this method carries no
    # @cases.SpecificationSelector decorator — possibly an oversight; confirm.
    def testStarttlsRequestTls(self):
        """<http://ircv3.net/specs/extensions/tls-3.1.html>
        """
        self.addClient()
        # TODO: check also without this
        self.sendLine(1, 'CAP LS')
        capabilities = self.getCapLs(1)
        if 'tls' not in capabilities:
            raise NotImplementedByController('starttls')
        # TODO: check also without this
        self.sendLine(1, 'CAP REQ :tls')
        m = self.getRegistrationMessage(1)
        # TODO: Remove this one the trailing space issue is fixed in Charybdis
        # and Mammon:
        #self.assertMessageEqual(m, command='CAP', params=['*', 'ACK', 'tls'],
        #        fail_msg='Did not ACK capability `tls`: {msg}')
        self.sendLine(1, 'STARTTLS')
        m = self.getRegistrationMessage(1)
        self.assertMessageEqual(m, command='670',
                fail_msg='Did not respond to STARTTLS with 670: {msg}.')
        # 670 received: perform the TLS handshake, then register normally.
        self.clients[1].starttls()
        self.sendLine(1, 'USER f * * :foo')
        self.sendLine(1, 'NICK foo')
        self.sendLine(1, 'CAP END')
        self.getMessages(1)
| StarcoderdataPython |
1993634 | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.simple_test
class GpuDirectAccCheck(rfm.RegressionTest):
    """ReFrame check for GPU-direct (GPU-aware MPI) with Fortran OpenACC.

    Builds gpu_direct_acc.F90 and verifies via the sanity patterns that the
    run reports 'GPU with OpenACC' and 'Result : OK'.  All job geometry and
    environment variables are chosen per target system in __init__.
    """
    def __init__(self):
        self.descr = 'tests gpu-direct for Fortran OpenACC'
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu',
                              'arolla:cn', 'tsa:cn']
        self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-pgi']
        # Per-system setup: GPU-direct needs different env toggles on
        # Cray MPICH (daint/dom/tiger) vs MVAPICH2 (kesch) vs arolla/tsa.
        if self.current_system.name in ['daint', 'dom', 'tiger']:
            self.modules = ['craype-accel-nvidia60']
            self.variables = {
                'MPICH_RDMA_ENABLED_CUDA': '1',
            }
            if self.current_system.name in ['tiger']:
                # Make the Cray binutils visible ahead of the system ones.
                craypath = '%s:$PATH' % os.environ['CRAY_BINUTILS_BIN']
                self.variables['PATH'] = craypath
            self.num_tasks = 2
            self.num_gpus_per_node = 1
            self.num_tasks_per_node = 1
        elif self.current_system.name == 'kesch':
            self.exclusive_access = True
            self.modules = ['cudatoolkit/8.0.61']
            self.variables = {
                'CRAY_ACCEL_TARGET': 'nvidia35',
                'MV2_USE_CUDA': '1',
                'G2G': '1'
            }
            self.num_tasks = 8
            self.num_gpus_per_node = 8
            self.num_tasks_per_node = 8
        elif self.current_system.name in ['arolla', 'tsa']:
            self.exclusive_access = True
            self.variables = {
                'G2G': '1'
            }
            self.num_tasks = 8
            self.num_gpus_per_node = 8
            self.num_tasks_per_node = 8
        self.sourcepath = 'gpu_direct_acc.F90'
        self.build_system = 'SingleSource'
        self.prebuild_cmds = ['module list -l']
        self.sanity_patterns = sn.all([
            sn.assert_found(r'GPU with OpenACC', self.stdout),
            sn.assert_found(r'Result :\s+OK', self.stdout)
        ])
        self.launch_options = []
        self.maintainers = ['AJ', 'MKr']
        self.tags = {'production', 'mch', 'craype'}
    @rfm.run_before('compile')
    def setflags(self):
        # Compiler-specific OpenACC flags; PGI additionally needs the
        # correct compute-capability target per system.
        if self.current_environ.name.startswith('PrgEnv-cray'):
            self.build_system.fflags = ['-hacc', '-hnoomp']
        elif self.current_environ.name.startswith('PrgEnv-pgi'):
            self.build_system.fflags = ['-acc']
            if self.current_system.name in ['daint', 'dom']:
                self.build_system.fflags += ['-ta=tesla:cc60', '-Mnorpath']
            elif self.current_system.name == 'kesch':
                self.build_system.fflags += ['-ta=tesla:cc35']
            elif self.current_system.name in ['arolla', 'tsa']:
                self.build_system.fflags += ['-ta=tesla:cc70']
    @rfm.run_before('compile')
    def cray_linker_workaround(self):
        # NOTE: Workaround for using CCE < 9.1 in CLE7.UP01.PS03 and above
        # See Patch Set README.txt for more details.
        if (self.current_system.name == 'dom' and
            self.current_environ.name == 'PrgEnv-cray'):
            self.variables['LINKER_X86_64'] = '/usr/bin/ld'
| StarcoderdataPython |
136926 | <gh_stars>0
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Registration(models.Model):
    """A news entry with text, a video URL and social-media links.

    NOTE(review): the fields suggest this models a news item rather than a
    registration, despite the class name — confirm with the views using it.
    """
    title = models.CharField(max_length=200)
    newstext = models.CharField(max_length=200)
    newsvideourl = models.CharField(max_length=200)
    fburl = models.CharField(max_length=200)
    twiterurl = models.CharField(max_length=200)
    youtubeurl = models.CharField(max_length=200)
    def __unicode__(self):
        # Unicode representation (Python 2 style, cf. the __future__ import):
        # show the entry's title, e.g. in the Django admin.
        return self.title
| StarcoderdataPython |
3523685 | <reponame>elizabethandrews/llvm<gh_stars>100-1000
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Conditional Controller Class for DExTer.-"""
import os
import time
from collections import defaultdict
from itertools import chain
from dex.debugger.DebuggerControllers.ControllerHelpers import in_source_file, update_step_watches
from dex.debugger.DebuggerControllers.DebuggerControllerBase import DebuggerControllerBase
from dex.debugger.DebuggerBase import DebuggerBase
from dex.utils.Exceptions import DebuggerException
class ConditionalBpRange:
    """A DexLimitSteps breakpoint range: lines ``range_from`` down to
    ``range_to`` inside ``path``, guarded by ``expression`` matching one
    of the given values."""

    def __init__(self, expression: str, path: str, range_from: int, range_to: int, values: list):
        self.expression = expression
        self.path = path
        self.range_from = range_from
        self.range_to = range_to
        self.conditional_values = values

    def get_conditional_expression_list(self):
        """Build one debugger condition string per accepted value, each of
        the form ``(<expression>) == (<value>)``."""
        template = '({}) == ({})'
        return [template.format(self.expression, accepted)
                for accepted in self.conditional_values]
class ConditionalController(DebuggerControllerBase):
    """Drives the debugger for DexLimitSteps: breakpoints are only planted
    on the first line of each declared range, and the rest of a range is
    instrumented on the fly once the range's condition is observed to hold.
    """
    def __init__(self, context, step_collection):
        self.context = context
        self.step_collection = step_collection
        self._conditional_bps = None
        # Expressions to evaluate at every recorded step.
        self._watches = set()
        self._step_index = 0
        self._build_conditional_bps()
        # (path, line) -> [ConditionalBpRange] for quick lookup on break.
        self._path_and_line_to_conditional_bp = defaultdict(list)
        self._pause_between_steps = context.options.pause_between_steps
        self._max_steps = context.options.max_steps
    def _build_conditional_bps(self):
        # Translate every DexLimitSteps command into a ConditionalBpRange;
        # the absence of any such command is a configuration error.
        commands = self.step_collection.commands
        self._conditional_bps = []
        try:
            limit_commands = commands['DexLimitSteps']
            for lc in limit_commands:
                conditional_bp = ConditionalBpRange(
                    lc.expression,
                    lc.path,
                    lc.from_line,
                    lc.to_line,
                    lc.values)
                self._conditional_bps.append(conditional_bp)
        except KeyError:
            raise DebuggerException('Missing DexLimitSteps commands, cannot conditionally step.')
    def _set_conditional_bps(self):
        # When we break in the debugger we need a quick and easy way to look
        # up which conditional bp we've stopped on.
        for cbp in self._conditional_bps:
            conditional_bp_list = self._path_and_line_to_conditional_bp[(cbp.path, cbp.range_from)]
            conditional_bp_list.append(cbp)
        # Set break points only on the first line of any conditional range; we'll set
        # more break points for a range when the condition is satisfied.
        for cbp in self._conditional_bps:
            for cond_expr in cbp.get_conditional_expression_list():
                self.debugger.add_conditional_breakpoint(cbp.path, cbp.range_from, cond_expr)
    def _conditional_met(self, cbp):
        # True if any of the range's accepted values matches right now.
        for cond_expr in cbp.get_conditional_expression_list():
            valueIR = self.debugger.evaluate_expression(cond_expr)
            if valueIR.type_name == 'bool' and valueIR.value == 'true':
                return True
        return False
    def _run_debugger_custom(self):
        # TODO: Add conditional and unconditional breakpoint support to dbgeng.
        if self.debugger.get_name() == 'dbgeng':
            raise DebuggerException('DexLimitSteps commands are not supported by dbgeng')
        self.step_collection.clear_steps()
        self._set_conditional_bps()
        for command_obj in chain.from_iterable(self.step_collection.commands.values()):
            self._watches.update(command_obj.get_watches())
        self.debugger.launch()
        time.sleep(self._pause_between_steps)
        # Main event loop: record a step at every stop, and expand/collapse
        # breakpoint ranges as their conditions come true.
        while not self.debugger.is_finished:
            while self.debugger.is_running:
                pass
            step_info = self.debugger.get_step_info(self._watches, self._step_index)
            if step_info.current_frame:
                self._step_index += 1
                update_step_watches(step_info, self._watches, self.step_collection.commands)
                self.step_collection.new_step(self.context, step_info)
            loc = step_info.current_location
            conditional_bp_key = (loc.path, loc.lineno)
            if conditional_bp_key in self._path_and_line_to_conditional_bp:
                conditional_bps = self._path_and_line_to_conditional_bp[conditional_bp_key]
                for cbp in conditional_bps:
                    if self._conditional_met(cbp):
                        # Unconditional range should ignore first line as that's the
                        # conditional bp we just hit and should be inclusive of final line
                        for line in range(cbp.range_from + 1, cbp.range_to + 1):
                            self.debugger.add_conditional_breakpoint(cbp.path, line, condition='')
                # Clear any unconditional break points at this loc.
                self.debugger.delete_conditional_breakpoint(file_=loc.path, line=loc.lineno, condition='')
            self.debugger.go()
            time.sleep(self._pause_between_steps)
| StarcoderdataPython |
3248271 | #!/usr/bin/env python
import sys,os,time
def send_angles(j1, j2, j3, j5, j6, wait):
    """Publish one set of joint angles to the servo daemon, then pause.

    The angles are written to a temp file in /run/shm and atomically
    renamed onto /run/shm/angles so the reader never sees a partial line.
    """
    tmp_path = "/run/shm/angles.tmp"
    line = "%d,%d,%d,%d,%d\n" % (j1, j2, j3, j5, j6)
    with open(tmp_path, "w") as out:
        out.write(line)
        out.flush()
    os.rename(tmp_path, "/run/shm/angles")
    time.sleep(wait)
if __name__ == "__main__":
    # Demo: go to a home pose, then sweep joint 2 from 0 to 60 degrees in
    # 10-degree steps while keeping the end effector level (j2+j3+j5 = 180).
    send_angles(0, 0, 90, 90, 0, 2.0)
    for step in range(7):
        shoulder = step * 10
        elbow = 90
        wrist = 180 - shoulder - elbow
        send_angles(0, shoulder, elbow, wrist, 0, 0.5)
| StarcoderdataPython |
11390487 | # -*- encoding: utf-8 -*-
"""
Basic account handling. Most of the openid methods are adapted from the
Flask-OpenID documentation.
"""
from flask import Blueprint, session, redirect, render_template, request,\
flash, g, url_for
import pygraz_website as site
import uuid
import hashlib
from pygraz_website import forms, decorators, models, db, signals, email
# Blueprint collecting all account/authentication views of the site.
module = Blueprint('account', __name__)
@module.route('/register', methods=['POST', 'GET'])
def register():
    """Complete the registration of a freshly authenticated OpenID.

    Expects ``session['openid']`` to have been set by the OpenID login
    handler.  On a valid POST the user is created together with the
    associated OpenID record; on GET the form is pre-filled with values
    forwarded by the OpenID provider.
    """
    if request.method == 'POST':
        form = forms.RegisterForm.from_flat(request.form)
        if form.validate():
            data = dict(form.flatten())
            openid = models.OpenID(id=session['openid'])
            db.session.add(openid)
            user = models.User()
            # Strip surrounding whitespace from the submitted values.
            user.username = data['username'].lstrip().rstrip()
            user.email = data['email'].lstrip().rstrip()
            user.openids.append(openid)
            db.session.add(user)
            db.session.commit()
            return redirect(site.oid.get_next_url())
    else:
        default = {}
        # The provider's 'name' attribute maps onto our 'username' field;
        # everything else is passed through as-is.
        for k, v in request.args.items():
            if k == 'name':
                default['username'] = v
            else:
                default[k] = v
        form = forms.RegisterForm.from_flat(default)
    return render_template('account/register.html',
            form=form, next=site.oid.get_next_url())
@module.route('/account/login', methods=['GET', 'POST'])
@site.oid.loginhandler
def login():
    """Render the login form and start OpenID authentication on POST."""
    if request.method == 'POST':
        form = forms.LoginForm.from_flat(request.form)
        if form.validate():
            # Ask the provider for fullname/email so a first-time user's
            # registration form can be pre-filled.
            return site.oid.try_login(form['openid'].u,
                    ask_for=['fullname', 'email'])
    else:
        form = forms.LoginForm()
    return render_template('account/login.html',
            form=form, next=request.args.get('next', '/'))
@site.oid.after_login
def login_or_register(response):
    """OpenID callback: log known identities in, send new ones to register."""
    session['openid'] = response.identity_url
    user = db.session.query(models.User).join(models.OpenID)\
            .filter(models.OpenID.id == session['openid']).first()
    if user is None:
        # Unknown identity: pre-fill the registration form with the data
        # handed back by the provider.
        return redirect(url_for('.register',
                name=response.fullname,
                email=response.email, next=site.oid.get_next_url()))
    else:
        g.user = user
        return redirect(site.oid.get_next_url())
@module.route('/logout')
def logout():
    """Remove the OpenID from the session and redirect to ``next``."""
    # pop() without a default still raises KeyError when nobody is logged
    # in, matching the previous ``del`` semantics.
    session.pop('openid')
    return redirect(request.args.get('next', '/'))
@module.route('/profile', methods=['GET', 'POST'])
@decorators.login_required
def edit_profile():
    """Let the logged-in user edit username, email and notification flags."""
    # Fake the email status for now
    if request.method == 'POST':
        form = forms.EditProfileForm.from_flat(request.form)
        if form.validate():
            old_email = g.user.email
            g.user.username = form['username'].u
            g.user.email = form['email'].u
            g.user.email_notify_new_meetup = form['email_notify_new_meetup'].value
            g.user.email_notify_new_sessionidea = form['email_notify_new_sessionidea'].value
            # A changed address must be re-verified before we mail it again.
            if old_email != g.user.email:
                g.user.email_status = 'not_verified'
            db.session.add(g.user)
            db.session.commit()
            flash("Benutzerdaten gespeichert")
            return redirect(url_for('.edit_profile'))
    else:
        form = forms.EditProfileForm.from_object(g.user)
    return render_template('account/edit_profile.html', form=form)
@module.route('/email-activation/start', methods=['GET'])
@decorators.login_required
def start_email_activation():
    """
    Sends out an activation email to the address currently associated
    with the user and sets the current email as inactive.
    """
    if g.user.email_status == 'active':
        flash("Ihre E-Mail-Adresse wurde bereits aktiviert")
        return redirect(url_for('.edit_profile'))
    code = _generate_activation_code(g.user)
    # Build an absolute URL: the link is delivered by email.
    activation_url = request.url_root.rstrip('/') + url_for('.activate_email', code=code)
    # Persist the code so activate_email() can compare against it later.
    g.user.email_activation_code = code
    db.session.add(g.user)
    db.session.commit()
    email.send_email(g.user.email, "Aktivieren Sie Ihre E-Mail-Adresse",
            'account/emails/activation', {'user': g.user,
                'url': activation_url})
    flash(u"Es wurde eine Aktivierungsemail an {} versandt".format(g.user.email))
    return redirect(url_for('.edit_profile'))
@module.route('/email-activation/finalize')
@decorators.login_required
def activate_email():
    """Check the activation code from the emailed link and, if it matches,
    mark the user's email address as active."""
    code = request.args.get('code', None)
    if code is not None:
        if code == g.user.email_activation_code:
            g.user.email_status = 'active'
            # Codes are single-use: clear it once redeemed.
            g.user.email_activation_code = None
            db.session.add(g.user)
            db.session.commit()
            flash("Ihre E-Mail-Adresse wurde aktiviert")
            return redirect(url_for('.edit_profile'))
    flash(u"Der von Ihnen angegebene Code ist leider ungültig")
    return render_template('account/email_activation_finalize.html')
def handle_meetup_created(meetup):
    """
    Is called when a new meetup is created and notifies all users that requested such
    a notification.
    """
    # Only users with a verified address who opted in are notified.
    users = db.session.query(models.User)\
            .filter_by(email_status='active')\
            .filter_by(email_notify_new_meetup=True)
    # Don't notify the user who triggered the event (if any is logged in).
    if hasattr(g, "user"):
        users = users.filter(models.User.id != g.user.id)
    emails = [user.email for user in users]
    email.send_mass_email(emails, 'Neues Stammtisch', 'emails/new-meetup', ctx=dict(meetup=meetup))
def handle_sessionidea_created(sessionidea):
    """Notify opted-in users when a new session idea is posted.

    Mirrors handle_meetup_created, but additionally builds a deep link to
    the idea's anchor on its meetup page.
    """
    users = db.session.query(models.User)\
            .filter_by(email_status='active')\
            .filter_by(email_notify_new_sessionidea=True)
    # Don't notify the user who triggered the event (if any is logged in).
    if hasattr(g, "user"):
        users = users.filter(models.User.id != g.user.id)
    emails = [user.email for user in users]
    url = '{}{}#idea-{}'.format(request.url_root.rstrip('/'), sessionidea.meetup.get_absolute_url(), sessionidea.id)
    email.send_mass_email(emails, '<NAME>', 'emails/new-sessionidea',
            ctx=dict(idea=sessionidea, url=url))
# Wire the notification handlers to the domain signals at import time.
signals.meetup_created.connect(handle_meetup_created)
signals.sessionidea_created.connect(handle_sessionidea_created)
def _generate_activation_code(user):
raw = "{}.{}.{}".format(user.id, user.email, str(uuid.uuid4()))
return hashlib.sha1(raw).hexdigest()[:5]
| StarcoderdataPython |
144547 | #
# -*- coding: utf-8 -*-
#
# This file is part of reclass
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import itertools as it
import operator
import pyparsing as pp
from six import iteritems
from six import string_types
from reclass.values import item
from reclass.values import parser_funcs
from reclass.settings import Settings
from reclass.utils.dictpath import DictPath
from reclass.errors import ExpressionError, ParseError, ResolveError
# TODO: generalize expression handling.
class BaseTestExpression(object):
    """Common state shared by the inventory-query test expressions.

    Subclasses populate ``known_operators`` with a mapping from operator
    token to the callable implementing it.
    """

    known_operators = {}

    def __init__(self, delimiter):
        """Store the path delimiter and start with empty reference lists."""
        self._delimiter = delimiter
        self.refs, self.inv_refs = [], []
class EqualityTest(BaseTestExpression):
    """An ``==`` / ``!=`` comparison of an exported value against either a
    ``self:``-relative parameter or a literal value."""
    known_operators = { parser_funcs.EQUAL: operator.eq,
                        parser_funcs.NOT_EQUAL: operator.ne}
    def __init__(self, expression, delimiter):
        # expression is a list of at least three tuples, of which first element
        # is a string tag, second is subelement value; other tuples apparently
        # are not used.
        # expression[0][1] effectively contains export path and apparently must
        # be treated as such, also left hand operand in comparison
        # expression[1][1] apparently holds the comparison operator == or !=
        # expression[2][1] is the right-hand operand
        super(EqualityTest, self).__init__(delimiter)
        # TODO: this double sommersault must be cleaned
        # Classify both operands in one pass; exactly one must be an export.
        _ = self._get_vars(expression[2][1], *self._get_vars(expression[0][1]))
        self._export_path, self._parameter_path, self._parameter_value = _
        try:
            self._export_path.drop_first()
        except AttributeError:
            # Neither operand was an exports: path.
            raise ExpressionError('No export')
        try:
            self._compare = self.known_operators[expression[1][1]]
        except KeyError as e:
            msg = 'Unknown test {0}'.format(expression[1][1])
            raise ExpressionError(msg, tbFlag=False)
        self.inv_refs = [self._export_path]
        if self._parameter_path is not None:
            self._parameter_path.drop_first()
            self.refs = [str(self._parameter_path)]
    def value(self, context, items):
        """Evaluate the comparison for one node's exports (*items*).

        A parameter operand is resolved against *context* first; a missing
        export simply yields False rather than an error.
        """
        if self._parameter_path is not None:
            self._parameter_value = self._resolve(self._parameter_path,
                                                  context)
        if self._parameter_value is None:
            raise ExpressionError('Failed to render %s' % str(self),
                                  tbFlag=False)
        if self._export_path.exists_in(items):
            export_value = self._resolve(self._export_path, items)
            return self._compare(export_value, self._parameter_value)
        return False
    def _resolve(self, path, dictionary):
        # Translate a failed lookup into the domain-level ResolveError.
        try:
            return path.get_value(dictionary)
        except KeyError as e:
            raise ResolveError(str(path))
    def _get_vars(self, var, export=None, parameter=None, value=None):
        # Classify one operand by its first path segment: an exports: path,
        # a self: parameter path, a boolean literal, or a plain value.
        # Already-classified results are threaded through unchanged.
        if isinstance(var, string_types):
            path = DictPath(self._delimiter, var)
            if path.path[0].lower() == 'exports':
                export = path
            elif path.path[0].lower() == 'self':
                parameter = path
            elif path.path[0].lower() == 'true':
                value = True
            elif path.path[0].lower() == 'false':
                value = False
            else:
                value = var
        else:
            value = var
        return export, parameter, value
class LogicTest(BaseTestExpression):
    """A chain of EqualityTests joined by and/or operators.

    *expr* is the parser's token list: groups of three tokens form an
    equality test, and every fourth token is the logic operator joining
    consecutive tests.
    """
    known_operators = { parser_funcs.AND: operator.and_,
                        parser_funcs.OR: operator.or_}
    def __init__(self, expr, delimiter):
        super(LogicTest, self).__init__(delimiter)
        # Keep three of every four tokens: the equality-test triplets.
        subtests = list(it.compress(expr, it.cycle([1, 1, 1, 0])))
        self._els = [EqualityTest(subtests[j:j+3], self._delimiter)
                     for j in range(0, len(subtests), 3)]
        for x in self._els:
            self.refs.extend(x.refs)
            self.inv_refs.extend(x.inv_refs)
        # Every fourth token is the operator joining adjacent tests.
        try:
            self._ops = [self.known_operators[x[1]] for x in expr[3::4]]
        except KeyError as e:
            # Bug fix: this formerly read ``e.messsage`` (misspelt; KeyError
            # also has no ``message`` attribute on Python 3), so an unknown
            # operator raised AttributeError instead of the intended error.
            msg = 'Unknown operator {0} {1}'.format(e.args[0], self._els)
            raise ExpressionError(msg, tbFlag=False)
    def value(self, context, items):
        """Evaluate the chained tests strictly left to right (no operator
        precedence, no short-circuiting); an empty chain is True."""
        if len(self._els) == 0: # NOTE: possible logic error
            return True
        result = self._els[0].value(context, items)
        for op, next_el in zip(self._ops, self._els[1:]):
            result = op(result, next_el.value(context, items))
        return result
class InvItem(item.Item):
    """A parameter item holding an inventory query ($[ ... ] expression).

    Parses the query once at construction time and renders it later
    against the full node inventory: either collecting an exported value
    per node, filtering nodes by a test, or listing matching node names.
    """
    type = item.ItemTypes.INV_QUERY
    def __init__(self, newitem, settings):
        super(InvItem, self).__init__(newitem.render(None, None), settings)
        self.needs_all_envs = False
        self.ignore_failed_render = (
            self._settings.inventory_ignore_failed_render)
        self._parse_expression(self.contents)
    def _parse_expression(self, expr):
        # Parse the query text into (options, expression) token groups and
        # set up the value path / question pair for the expression type.
        parser = parser_funcs.get_expression_parser()
        try:
            tokens = parser.parseString(expr).asList()
        except pp.ParseException as e:
            raise ParseError(e.msg, e.line, e.col, e.lineno)
        if len(tokens) == 2: # options are set
            passed_opts = [x[1] for x in tokens.pop(0)]
            self.ignore_failed_render = parser_funcs.IGNORE_ERRORS in passed_opts
            self.needs_all_envs = parser_funcs.ALL_ENVS in passed_opts
        elif len(tokens) > 2:
            raise ExpressionError('Failed to parse %s' % str(tokens),
                                  tbFlag=False)
        self._expr_type = tokens[0][0]
        self._expr = list(tokens[0][1])
        if self._expr_type == parser_funcs.VALUE:
            # Plain value query: no condition, just an export path.
            self._value_path = DictPath(self._settings.delimiter,
                                        self._expr[0][1]).drop_first()
            self._question = LogicTest([], self._settings.delimiter)
            self.refs = []
            self.inv_refs = [self._value_path]
        elif self._expr_type == parser_funcs.TEST:
            # "value if condition" query: path plus a logic test.
            self._value_path = DictPath(self._settings.delimiter,
                                        self._expr[0][1]).drop_first()
            self._question = LogicTest(self._expr[2:], self._settings.delimiter)
            self.refs = self._question.refs
            self.inv_refs = self._question.inv_refs
            self.inv_refs.append(self._value_path)
        elif self._expr_type == parser_funcs.LIST_TEST:
            # Node-list query: only a condition, no value path.
            self._value_path = None
            self._question = LogicTest(self._expr[1:], self._settings.delimiter)
            self.refs = self._question.refs
            self.inv_refs = self._question.inv_refs
        else:
            msg = 'Unknown expression type: %s'
            raise ExpressionError(msg % self._expr_type, tbFlag=False)
    @property
    def has_inv_query(self):
        return True
    @property
    def has_references(self):
        return len(self._question.refs) > 0
    def get_references(self):
        return self._question.refs
    def assembleRefs(self, context):
        # References are fixed at parse time; nothing to assemble.
        return
    def get_inv_references(self):
        return self.inv_refs
    def _resolve(self, path, dictionary):
        # Translate a failed lookup into the domain-level ResolveError.
        try:
            return path.get_value(dictionary)
        except KeyError as e:
            raise ResolveError(str(path))
    def _value_expression(self, inventory):
        # {node: deep copy of the exported value} for nodes that export it.
        results = {}
        for (node, items) in iteritems(inventory):
            if self._value_path.exists_in(items):
                results[node] = copy.deepcopy(self._resolve(self._value_path,
                                                            items))
        return results
    def _test_expression(self, context, inventory):
        # Same as _value_expression, but only for nodes passing the test.
        if self._value_path is None:
            msg = 'Failed to render %s'
            raise ExpressionError(msg % str(self), tbFlag=False)
        results = {}
        for node, items in iteritems(inventory):
            if (self._question.value(context, items) and
                self._value_path.exists_in(items)):
                results[node] = copy.deepcopy(
                    self._resolve(self._value_path, items))
        return results
    def _list_test_expression(self, context, inventory):
        # Names of the nodes whose exports satisfy the test.
        results = []
        for (node, items) in iteritems(inventory):
            if self._question.value(context, items):
                results.append(node)
        return results
    def render(self, context, inventory):
        """Evaluate the query against *inventory*, resolving parameter
        references from *context*."""
        if self._expr_type == parser_funcs.VALUE:
            return self._value_expression(inventory)
        elif self._expr_type == parser_funcs.TEST:
            return self._test_expression(context, inventory)
        elif self._expr_type == parser_funcs.LIST_TEST:
            return self._list_test_expression(context, inventory)
        raise ExpressionError('Failed to render %s' % str(self), tbFlag=False)
    def __str__(self):
        return ' '.join(str(j) for i,j in self._expr)
    def __repr__(self):
        # had to leave it here for now as the behaviour differs from basic
        return 'InvItem(%r)' % self._expr
| StarcoderdataPython |
6653148 | <filename>src/lstm.py
#!/usr/bin/env python3 -u
import chainer
import chainer.links as L
import chainer.functions as F
from general_model import GeneralModel
import numpy as np
def sequence_embed(embed, xs):
    """Embed a batch of variable-length id sequences in one embed call.

    Concatenates all sequences, embeds them at once, and splits the
    result back into per-sequence chunks.
    """
    # Offsets at which the flat, concatenated batch is split back apart.
    boundaries = np.cumsum([len(seq) for seq in xs[:-1]])
    flat = embed(F.concat(xs, axis=0))
    return F.split_axis(flat, boundaries, 0)
class LSTM(GeneralModel):
    """Bi-directional multi-layer LSTM text classifier (chainer).

    Embeds token-id sequences, runs an NStepBiLSTM over them and
    classifies from the concatenated final hidden states of the last
    forward and backward layers.
    """
    def __init__(self, n_vocab, n_emb, n_layers, n_dim, num_classes, dropout=0.5, **kwargs):
        super(LSTM, self).__init__(**kwargs)
        self.n_vocab = n_vocab
        self.num_class = num_classes
        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_emb)
            self.bilstm = L.NStepBiLSTM(n_layers, n_emb, n_dim, dropout)
            # n_dim * 2: forward and backward final states are concatenated.
            self.l1 = L.Linear(n_dim * 2, num_classes)
            # self.l1 = L.Linear(n_dim, num_classes)
    def __call__(self, xs, y, enable_double_backprop=False):
        """Return softmax cross-entropy loss; reports loss and accuracy."""
        logit = self.predict(xs, softmax=False, argmax=False)
        loss = F.softmax_cross_entropy(logit, y, enable_double_backprop=enable_double_backprop)
        accuracy = F.accuracy(logit, y)
        chainer.report({'loss': loss, 'acc': accuracy}, observer=self)
        return loss
    def predict(self, xs, softmax=True, argmax=False):
        """Return class scores; ``softmax``/``argmax`` pick the output form."""
        exs = sequence_embed(self.embed, xs)
        hy, cy, ys = self.bilstm(None, None, exs)
        # hy[-2:] are the final states of the last forward/backward layers.
        h = F.concat(hy[-2:])
        logit = self.l1(h)
        # logit = self.l1(h)
        if argmax:
            return F.argmax(logit, axis=1)
        elif softmax:
            return F.softmax(logit)
        else:
            return logit
    def get_x(self, xs, y):
        """Return a (batch, n_vocab) token-count matrix on the CPU."""
        zeros = self.xp.zeros((len(xs), self.n_vocab), dtype=self.xp.int32)
        for i, x in enumerate(xs):
            for _x in x:
                zeros[i, _x] += 1
        return chainer.cuda.to_cpu(zeros)
    def get_top_h(self, xs, y):
        """Return the concatenated last-layer hidden states (CPU array)."""
        exs = sequence_embed(self.embed, xs)
        hy, cy, ys = self.bilstm(None, None, exs)
        h = F.concat(hy[-2:])
        return chainer.cuda.to_cpu(h.data)
    def get_all_h(self, xs, y):
        """Return hidden states of all layers/directions, concatenated (CPU)."""
        exs = sequence_embed(self.embed, xs)
        hy, cy, ys = self.bilstm(None, None, exs)
        h = F.concat(hy)
        return chainer.cuda.to_cpu(h.data)
| StarcoderdataPython |
9704166 | begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'fields'
newline|'\n'
nl|'\n'
nl|'\n'
op|'@'
name|'base'
op|'.'
name|'NovaObjectRegistry'
op|'.'
name|'register'
newline|'\n'
DECL|class|VirtCPUTopology
name|'class'
name|'VirtCPUTopology'
op|'('
name|'base'
op|'.'
name|'NovaObject'
op|')'
op|':'
newline|'\n'
comment|'# Version 1.0: Initial version'
nl|'\n'
DECL|variable|VERSION
indent|' '
name|'VERSION'
op|'='
string|"'1.0'"
newline|'\n'
nl|'\n'
DECL|variable|fields
name|'fields'
op|'='
op|'{'
nl|'\n'
string|"'sockets'"
op|':'
name|'fields'
op|'.'
name|'IntegerField'
op|'('
name|'nullable'
op|'='
name|'True'
op|','
name|'default'
op|'='
number|'1'
op|')'
op|','
nl|'\n'
string|"'cores'"
op|':'
name|'fields'
op|'.'
name|'IntegerField'
op|'('
name|'nullable'
op|'='
name|'True'
op|','
name|'default'
op|'='
number|'1'
op|')'
op|','
nl|'\n'
string|"'threads'"
op|':'
name|'fields'
op|'.'
name|'IntegerField'
op|'('
name|'nullable'
op|'='
name|'True'
op|','
name|'default'
op|'='
number|'1'
op|')'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
comment|'# NOTE(jaypipes): for backward compatibility, the virt CPU topology'
nl|'\n'
comment|'# data is stored in the database as a nested dict.'
nl|'\n'
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|from_dict
name|'def'
name|'from_dict'
op|'('
name|'cls'
op|','
name|'data'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cls'
op|'('
name|'sockets'
op|'='
name|'data'
op|'.'
name|'get'
op|'('
string|"'sockets'"
op|')'
op|','
nl|'\n'
name|'cores'
op|'='
name|'data'
op|'.'
name|'get'
op|'('
string|"'cores'"
op|')'
op|','
nl|'\n'
name|'threads'
op|'='
name|'data'
op|'.'
name|'get'
op|'('
string|"'threads'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|to_dict
dedent|''
name|'def'
name|'to_dict'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
nl|'\n'
string|"'sockets'"
op|':'
name|'self'
op|'.'
name|'sockets'
op|','
nl|'\n'
string|"'cores'"
op|':'
name|'self'
op|'.'
name|'cores'
op|','
nl|'\n'
string|"'threads'"
op|':'
name|'self'
op|'.'
name|'threads'
nl|'\n'
op|'}'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| StarcoderdataPython |
6663325 | <filename>WorldTime/test.py
###
# Copyright (c) 2014, spline
# Copyright (c) 2020, oddluck <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class WorldTimeTestCase(PluginTestCase):
    """Supybot plugin tests for WorldTime lookups and the per-user
    location database (set/unset)."""
    plugins = ("WorldTime",)
    # Disable colour codes so the regexp assertions match plain text.
    config = {"supybot.plugins.WorldTime.disableANSI": True}
    def setUp(self):
        PluginTestCase.setUp(self)
        self.prefix = "foo!bar@baz"
    def testWorldTime(self):
        # New York, NY, USA :: Current local time is: Thu, 12:02 (Eastern Daylight Time)
        self.assertRegexp("worldtime New York, NY", "New York\, NY\, USA")
        self.assertRegexp("worldtime Chicago", "Current local time is")
    def testWorldTimeDb(self):
        self.assertError("worldtime") # Fail if location not set & none is given
        self.assertNotError("set Vancouver, BC")
        self.assertRegexp("worldtime", "Vancouver") # Should work if location is set
        self.assertNotError("unset") # Unsetting location should work,
        self.assertError("unset") # But only once.
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| StarcoderdataPython |
308554 | <reponame>usegalaxy-no/usegalaxy
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactDiffBase:
    """Base holder for the state a fact-diff action plugin needs."""

    def __init__(self, task_args, task_vars, debug):
        """Capture the task arguments, task variables and debug flag."""
        self._task_args = task_args
        self._task_vars = task_vars
        self._debug = debug
| StarcoderdataPython |
11231437 | <reponame>lenguyenthanh/heltour
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-30 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9) migration: redefines the choices and default
    # for Alternate.status in the tournament app.
    # NOTE(review): presumably adds or renames one of the status choices —
    # compare against migration 0127 to confirm.

    dependencies = [
        ('tournament', '0127_auto_20161230_1947'),
    ]

    operations = [
        migrations.AlterField(
            model_name='alternate',
            name='status',
            field=models.CharField(blank=True, choices=[('waiting', 'Waiting'), ('contacted', 'Contacted'), ('accepted', 'Accepted'), ('declined', 'Declined'), ('unresponsive', 'Unresponsive')], default='waiting', max_length=31),
        ),
    ]
| StarcoderdataPython |
1947314 | <filename>fb_scrape_public.py
'''
This script can download posts and comments from public Facebook pages. It requires Python 3.
INSTRUCTIONS
1. This script is written for Python 3 and won't work with previous Python versions.
2. The main function in this module is scrape_fb. It is the only function most users will need to run directly.
3. You need to create your own Facebook app, which you can do here: https://developers.facebook.com/apps . Doesn't matter what you call it, you just need to pull the unique client ID (app ID) and app secret for your new app.
4. Once you create your app, you can insert the client ID and secret AS STRINGS into the appropriate scrape_fb fields.
5. This function accepts text FB user IDs ('barackobama'), numerical user IDs, and post IDs. You can load them into the ids field using a comma-delimited string or by creating a plain text file in the same folder as the script containing one or more names of the Facebook pages you want to scrape, one ID per line. For example, if you wanted to scrape Barack Obama's official FB page (http://facebook.com/barackobama/), your first line would simply be 'barackobama' without quotes. I suggest starting with only one ID to make sure it works. You'll only be able to collect data from public pages.
6. The only required fields for the main function are client_id, client_secret, and ids. I recommend not changing the other defaults (except for maybe outfile) unless you know what you're doing.
7. If you did everything correctly, the command line should show you some informative status messages. Eventually it will save a CSV full of data to the same folder where this script was run. If something went wrong, you'll see an error.
'''
import copy
import csv
import datetime
import json
import socket
import time
import urllib.request
socket.setdefaulttimeout(30)
def load_data(data, enc='utf-8'):
    """Load CSV rows from a file path, or deep-copy an in-memory dataset.

    Args:
        data: Either a path to a CSV file (str) or an already-loaded
            dataset (e.g. list of lists), which is returned as a deep copy.
        enc: Text encoding used when reading from a file.

    Returns:
        A list of non-empty CSV rows, or a deep copy of ``data``.
    """
    if isinstance(data, str):  # was ``type(data) is str`` — isinstance is the idiom
        csv_data = []
        with open(data, 'r', encoding=enc, errors='replace') as f:
            # Strip NULL bytes, which otherwise make csv.reader raise.
            reader = csv.reader(line.replace('\0', '') for line in f)
            for row in reader:
                if row:  # skip blank lines
                    csv_data.append(row)
        return csv_data
    return copy.deepcopy(data)
def save_csv(filename, data, use_quotes=True, file_mode='w', enc='utf-8'):
    """Append/write a list of rows (lists of cells) to *filename* as CSV.

    Assumes cell values contain no commas. When ``use_quotes`` is True,
    every cell is wrapped in double quotes and any embedded double quotes
    are replaced with single quotes.
    """
    with open(filename, file_mode, encoding=enc) as out:
        for record in data:
            if use_quotes == True:
                cells = [str(cell).replace('"', "'") for cell in record]
                row = '"{}"\n'.format('","'.join(cells))
            else:
                row = ','.join(str(cell) for cell in record) + "\n"
            out.write(row)
def url_retry(url):
    """Fetch *url* and decode the response body as JSON, retrying on failure.

    Retries once per second indefinitely on transient errors. Gives up and
    returns False on HTTP 4xx errors (bad request / permissions / missing).
    """
    while True:
        try:
            raw = urllib.request.urlopen(url).read()
            return json.loads(raw.decode(encoding="utf-8"))
        except Exception as err:
            print(str(err))
            if 'HTTP Error 4' in str(err):
                return False
            time.sleep(1)
def optional_field(dict_item, dict_key):
    """Return ``dict_item[dict_key]``, or '' when the key is absent.

    The 'shares' and 'likes' fields are unwrapped to their numeric counts
    ('shares' -> count, 'likes' -> summary total_count). A missing key at
    any level of the lookup yields the empty string.
    """
    try:
        value = dict_item[dict_key]
        if dict_key == 'shares':
            value = value['count']
        elif dict_key == 'likes':
            value = value['summary']['total_count']
    except KeyError:
        value = ''
    return value
def make_csv_chunk(fb_json_page, scrape_mode, thread_starter='', msg=''):
    """Flatten one page of Graph API JSON into a list of CSV rows.

    ``scrape_mode`` 'feed'/'posts' yields post rows; 'comments' yields
    comment rows annotated with the original poster and message. Any other
    mode yields an empty list.
    """
    csv_chunk = []
    if scrape_mode == 'feed' or scrape_mode == 'posts':
        for post in fb_json_page['data']:
            csv_chunk.append([
                post['from']['name'],
                '_' + post['from']['id'],
                optional_field(post, 'message'),
                optional_field(post, 'picture'),
                optional_field(post, 'link'),
                optional_field(post, 'name'),
                optional_field(post, 'description'),
                optional_field(post, 'type'),
                post['created_time'],
                optional_field(post, 'shares'),
                optional_field(post, 'likes'),
                optional_field(post, 'LOVE'),
                optional_field(post, 'WOW'),
                optional_field(post, 'HAHA'),
                optional_field(post, 'SAD'),
                optional_field(post, 'ANGRY'),
                post['id'],
            ])
    if scrape_mode == 'comments':
        for comment in fb_json_page['data']:
            csv_chunk.append([
                comment['from']['name'],
                '_' + comment['from']['id'],
                optional_field(comment, 'message'),
                comment['created_time'],
                optional_field(comment, 'like_count'),
                comment['id'],
                thread_starter,
                msg,
            ])
    return csv_chunk
'''
# The first five fields of scrape_fb are fairly self-explanatory or are explained above.
# scrape_mode can take three values: "feed," "posts," or "comments." The first two are identical in most cases and pull the main posts from a public wall. "comments" pulls the comments from a given permalink for a post. Only use "comments" if your IDs are post permalinks.
# You can use end_date to specify a date around which you'd like the program to stop. It won't stop exactly on that date, but rather a little after it. If present, it needs to be a string in yyyy-mm-dd format. If you leave the field blank, it will extract all available data.
'''
def scrape_fb(client_id,client_secret,ids,outfile="fb_data.csv",version="2.7",scrape_mode="feed",end_date=""):
    """Scrape posts or comments from public Facebook pages via the Graph API.

    :param client_id: Facebook app ID (string), exchanged for an app token.
    :param client_secret: Facebook app secret (string).
    :param ids: comma-delimited string of page/post IDs (see NOTE below).
    :param outfile: CSV path; rows are appended page by page as they arrive.
    :param version: Graph API version string.
    :param scrape_mode: 'feed'/'posts' for wall posts, 'comments' for the
        comments under each post permalink in ``ids``.
    :param end_date: optional 'yyyy-mm-dd' string; paging stops shortly
        after passing this date. Empty/invalid disables the cutoff.
    :return: the last chunk of CSV rows written (not the whole dataset).
    """
    time1 = time.time()
    # App-token OAuth handshake; the raw response body is appended to every
    # request URL below as the auth querystring.
    fb_urlobj = urllib.request.urlopen('https://graph.facebook.com/oauth/access_token?grant_type=client_credentials&client_id=' + client_id + '&client_secret=' + client_secret)
    fb_token = fb_urlobj.read().decode(encoding="latin1")
    # NOTE(review): ``type(ids) == 'str'`` compares a type object against the
    # string 'str' and is therefore always False, so the load-from-file branch
    # never runs and ``ids`` is always treated as a comma-delimited string.
    # Probably meant ``isinstance(ids, str)`` plus a filename check — confirm.
    if type(ids) == 'str':
        fb_ids = [i[0].strip() for i in load_data(ids)]
    else:
        fb_ids = [i.strip() for i in ids.split(",")]
    try:
        end_dateobj = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
    except ValueError:
        end_dateobj = ''  # empty/malformed end_date disables the date cutoff
    if scrape_mode == 'feed' or scrape_mode == 'posts':
        header = ['from','from_id','message','picture','link','name','description','type','created_time','shares','likes','loves','wows','hahas','sads','angrys','post_id']
    else:
        header = ['from','from_id','comment','created_time','likes','post_id','original_poster','original_message']
    csv_data = []
    csv_data.insert(0,header)
    save_csv(outfile,csv_data,file_mode="a")  # write the header row first
    for x,fid in enumerate(fb_ids):
        if scrape_mode == 'comments':
            # For comments, first fetch the parent post's author/message so
            # every comment row can carry them.
            msg_url = 'https://graph.facebook.com/v' + version + '/' + fid + '?fields=from,message&' + fb_token
            msg_json = url_retry(msg_url)
            if msg_json == False:
                print("URL not available. Continuing...", fid)
                continue
            msg_user = msg_json['from']['name']
            msg_content = optional_field(msg_json,'message')
            field_list = 'from,message,created_time,like_count'
        else:
            msg_user = ''
            msg_content = ''
            field_list = 'from,message,picture,link,name,description,type,created_time,shares,likes.summary(total_count).limit(0)'
        data_url = 'https://graph.facebook.com/v' + version + '/' + fid.strip() + '/' + scrape_mode + '?fields=' + field_list + '&limit=100&' + fb_token
        # Reaction counts need one extra request per reaction type; build the
        # URLs up front.
        data_rxns = []
        new_rxns = ['LOVE','WOW','HAHA','SAD','ANGRY']
        for i in new_rxns:
            data_rxns.append('https://graph.facebook.com/v' + version + '/' + fid.strip() + '/' + scrape_mode + '?fields=reactions.type(' + i + ').summary(total_count).limit(0)&limit=100&' + fb_token)
        next_item = url_retry(data_url)
        if next_item != False:
            # Merge the per-reaction totals into the main post dicts by list
            # position (assumes both requests return items in the same order).
            for n,i in enumerate(data_rxns):
                tmp_data = url_retry(i)
                for z,j in enumerate(next_item['data']):
                    try:
                        j[new_rxns[n]] = tmp_data['data'][z]['reactions']['summary']['total_count']
                    except (KeyError,IndexError):
                        j[new_rxns[n]] = 0
            csv_data = make_csv_chunk(next_item,scrape_mode,msg_user,msg_content)
            save_csv(outfile,csv_data,file_mode="a")
        else:
            print("Skipping ID " + fid + " ...")
            continue
        n = 0
        # Follow the Graph API paging cursor until exhausted (or the optional
        # end_date is passed).
        while 'paging' in next_item and 'next' in next_item['paging']:
            next_item = url_retry(next_item['paging']['next'])
            try:
                for i in new_rxns:
                    # Rebuild the paged URL with the reaction fields spliced in
                    # between the 'from' cursor and the rest of the querystring.
                    start = next_item['paging']['next'].find("from")
                    end = next_item['paging']['next'].find("&",start)
                    next_rxn_url = next_item['paging']['next'][:start] + 'reactions.type(' + i + ').summary(total_count).limit(0)' + next_item['paging']['next'][end:]
                    tmp_data = url_retry(next_rxn_url)
                    for z,j in enumerate(next_item['data']):
                        try:
                            j[i] = tmp_data['data'][z]['reactions']['summary']['total_count']
                        except (KeyError,IndexError):
                            j[i] = 0
            except KeyError:
                continue
            csv_data = make_csv_chunk(next_item,scrape_mode,msg_user,msg_content)
            save_csv(outfile,csv_data,file_mode="a")
            try:
                print(n+1,"page(s) of data archived for ID",fid,"at",next_item['data'][-1]['created_time'],".",round(time.time()-time1,2),'seconds elapsed.')
            except IndexError:
                break  # empty page: nothing left to archive
            n += 1
            time.sleep(1)  # be polite to the API between pages
            if end_dateobj != '' and end_dateobj > datetime.datetime.strptime(next_item['data'][-1]['created_time'][:10],"%Y-%m-%d").date():
                break
    print(x+1,'Facebook ID(s) archived.')
    print('Script completed in',time.time()-time1,'seconds.')
    return csv_data
| StarcoderdataPython |
1871135 | <reponame>lfx/honeybadger-python<filename>honeybadger/contrib/flask.py<gh_stars>0
from __future__ import absolute_import
import logging
from honeybadger import honeybadger
from honeybadger.plugins import Plugin, default_plugin_manager
from honeybadger.utils import filter_dict
from six import iteritems
logger = logging.getLogger(__name__)
class FlaskPlugin(Plugin):
    """
    Honeybadger plugin that extracts error-report payload data from the
    active Flask request.
    """

    def __init__(self):
        super(FlaskPlugin, self).__init__('Flask')

    def supports(self, config, context):
        """
        Check whether we are in a Flask request context.
        :param config: honeybadger configuration.
        :param context: current honeybadger context.
        :return: True if this is a Flask request context, False else.
        """
        try:
            from flask import request
        except ImportError:
            return False
        else:
            # ``request`` is a context-local proxy; it is falsy when no
            # request context is active.
            return bool(request)

    def generate_payload(self, config, context):
        """
        Generate payload by checking Flask request object.
        :param context: current context.
        :param config: honeybadger configuration.
        :return: a dict with the generated payload.
        """
        from flask import current_app, session, request as _request
        current_view = current_app.view_functions[_request.endpoint]
        # Class-based views report "module.ClassName"; plain view functions
        # report just their module.
        if hasattr(current_view, 'view_class'):
            component = '.'.join((current_view.__module__, current_view.view_class.__name__))
        else:
            component = current_view.__module__
        cgi_data = {
            k: v
            for k, v in iteritems(_request.headers)
        }
        cgi_data.update({
            'REQUEST_METHOD': _request.method
        })
        payload = {
            'url': _request.base_url,
            'component': component,
            'action': _request.endpoint,
            'params': {},
            'session': filter_dict(dict(session), config.params_filters),
            'cgi_data': cgi_data,
            'context': context
        }
        # Add query params
        # flat=False keeps repeated query/form keys as lists; sensitive keys
        # are dropped according to config.params_filters.
        params = filter_dict(_request.args.to_dict(flat=False), config.params_filters)
        params.update(filter_dict(_request.form.to_dict(flat=False), config.params_filters))
        payload['params'] = params
        return payload
class FlaskHoneybadger(object):
    """
    Flask extension for Honeybadger. Initializes Honeybadger and adds request information to the payload.
    """

    CONFIG_PREFIX = 'HONEYBADGER_'

    def __init__(self, app=None, report_exceptions=False, reset_context_after_request=False):
        """
        Initialize Honeybadger.
        :param flask.Application app: the application to wrap for the exception.
        :param bool report_exceptions: whether to automatically report exceptions raised by Flask on requests
                                       (i.e. by calling abort) or not.
        :param bool reset_context_after_request: whether to reset honeybadger context after each request.
        """
        self.app = app
        # Fixed: these were hard-coded to False, silently discarding the
        # constructor arguments when using the deferred init_app() pattern.
        self.report_exceptions = report_exceptions
        self.reset_context_after_request = reset_context_after_request
        default_plugin_manager.register(FlaskPlugin())
        if app is not None:
            self.init_app(app,
                          report_exceptions=report_exceptions,
                          reset_context_after_request=reset_context_after_request)

    def init_app(self, app, report_exceptions=False, reset_context_after_request=False):
        """
        Initialize honeybadger and listen for errors.
        :param Flask app: the Flask application object.
        :param bool report_exceptions: whether to automatically report exceptions raised by Flask on requests
                                       (i.e. by calling abort) or not.
        :param bool reset_context_after_request: whether to reset honeybadger context after each request.
        """
        from flask import request_tearing_down, got_request_exception

        self.app = app
        self.app.logger.info('Initializing Honeybadger')
        self.report_exceptions = report_exceptions
        self.reset_context_after_request = reset_context_after_request
        self._initialize_honeybadger(app.config)

        # Add hooks
        if self.report_exceptions:
            self._register_signal_handler('auto-reporting exceptions',
                                          got_request_exception,
                                          self._handle_exception)
        if self.reset_context_after_request:
            self._register_signal_handler('auto clear context on request end',
                                          request_tearing_down,
                                          self._reset_context)
        logger.info('Honeybadger helper installed')

    def _register_signal_handler(self, description, signal, handler):
        """
        Registers a handler for the given signal.
        :param description: a short description of the signal to handle.
        :param signal: the signal to handle.
        :param handler: the function to use for handling the signal.
        """
        from flask import signals
        if not signals.signals_available:
            # Fixed: the original mixed a %s placeholder with str.format, so
            # the description was never interpolated; logger.warn is also a
            # deprecated alias of logger.warning.
            self.app.logger.warning(
                'blinker needs to be installed in order to support {}'.format(description))
        self.app.logger.info('Enabling {}'.format(description))
        # Weak references won't work if handlers are methods rather than functions.
        signal.connect(handler, sender=self.app, weak=False)

    def _initialize_honeybadger(self, config):
        """
        Initializes honeybadger using the given config object.
        :param dict config: a dict or dict-like object that contains honeybadger configuration properties.
        """
        if config.get('DEBUG', False):
            honeybadger.configure(environment='development')
        # Pick up every HONEYBADGER_* key and pass it through, lowercased and
        # with the prefix stripped.
        honeybadger_config = {}
        for key, value in iteritems(config):
            if key.startswith(self.CONFIG_PREFIX):
                honeybadger_config[key[len(self.CONFIG_PREFIX):].lower()] = value
        honeybadger.configure(**honeybadger_config)
        honeybadger.config.set_12factor_config()  # environment should override Flask settings

    def _reset_context(self, *args, **kwargs):
        """
        Resets context when request is done.
        """
        honeybadger.reset_context()

    def _handle_exception(self, sender, exception=None):
        """
        Actual code handling the exception and sending it to honeybadger if it's enabled.
        :param T sender: the object sending the exception event.
        :param Exception exception: the exception to handle.
        """
        honeybadger.notify(exception)
        if self.reset_context_after_request:
            self._reset_context()
| StarcoderdataPython |
8129038 | <gh_stars>10-100
# Copyright (c) 2018 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import graphene
from gtmcore.files import FileOperations
from lmsrvcore.api.interfaces import GitRepository
from lmsrvcore.auth.user import get_logged_in_username
class LabbookFile(graphene.ObjectType):
    """A type representing a file or directory inside the labbook file system."""

    class Meta:
        interfaces = (graphene.relay.Node, GitRepository)

    # Loaded file info (lazy cache of FileOperations.get_file_info output)
    _file_info = None

    # Section in the LabBook (code, input, output)
    section = graphene.String(required=True)

    # Relative path from labbook section.
    key = graphene.String(required=True)

    # True indicates that path points to a directory
    is_dir = graphene.Boolean()

    # Modified at contains timestamp of last modified - NOT creation - in epoch time with nanosecond resolution if
    # supported by the underlying filesystem of the host
    modified_at = graphene.Float()

    # Size in bytes encoded as a string.
    # NOTE(review): presumably a string because GraphQL's Int type cannot
    # represent 64-bit file sizes — confirm.
    size = graphene.String()

    def _load_file_info(self, dataloader):
        """Private method to retrieve file info for a given key"""
        if not self._file_info:
            # Load file info from LabBook
            if not self.section or not self.key:
                raise ValueError("Must set `section` and `key` on object creation to resolve file info")

            # Load labbook instance
            # Dataloader keys are "username&owner&labbook-name".
            lb = dataloader.load(f"{get_logged_in_username()}&{self.owner}&{self.name}").get()

            # Retrieve file info
            self._file_info = FileOperations.get_file_info(lb, self.section, self.key)

        # Set class properties
        self.is_dir = self._file_info['is_dir']
        self.modified_at = self._file_info['modified_at']
        self.size = f"{self._file_info['size']}"

    @classmethod
    def get_node(cls, info, id):
        """Method to resolve the object based on it's Node ID"""
        # Parse the key
        # Node IDs are "&"-delimited: owner&name&section&key.
        owner, name, section, key = id.split("&")

        return LabbookFile(id=f"{owner}&{name}&{section}&{key}", name=name, owner=owner, section=section, key=key)

    def resolve_id(self, info):
        """Resolve the unique Node id for this object"""
        if not self.id:
            if not self.owner or not self.name or not self.section or not self.key:
                raise ValueError("Resolving a LabbookFile Node ID requires owner, name, section, and key to be set")

            self.id = f"{self.owner}&{self.name}&{self.section}&{self.key}"

        return self.id

    def resolve_is_dir(self, info):
        """Resolve the is_dir field, lazily loading file info if needed"""
        if self.is_dir is None:
            self._load_file_info(info.context.labbook_loader)
        return self.is_dir

    def resolve_modified_at(self, info):
        """Resolve the modified_at field, lazily loading file info if needed"""
        if self.modified_at is None:
            self._load_file_info(info.context.labbook_loader)
        return self.modified_at

    def resolve_size(self, info):
        """Resolve the size field, lazily loading file info if needed"""
        if self.size is None:
            self._load_file_info(info.context.labbook_loader)
        return self.size
| StarcoderdataPython |
1638037 | # main script 1 - robin_crypto_info.py
import robin_stocks
from robin_stocks import *
import robin_stocks.robinhood as r
import time
import os
from twilio.rest import Client
# Fixed: was ``from math import log10, flooR`` — ``flooR`` does not exist in
# the math module (ImportError at startup); round_sig() below needs ``floor``.
from math import log10, floor

# Twilio credentials come from the environment so they stay out of the source.
account_sid = os.environ['account_sid']
auth_token = os.environ['auth_token']
client = Client(account_sid, auth_token)
# NOTE(review): replace with real Robinhood credentials (ideally env vars)
# before running.
login = r.login('USERNAME','PASSWORD')
# function below rounds a value to 2 sig figs
def round_sig(x, sig=2):
    """Round *x* to *sig* significant figures (e.g. 123.456 -> 120.0).

    Returns 0 unchanged: log10 is undefined at zero, and the original
    implementation raised ValueError there.
    """
    if x == 0:
        return 0
    return round(x, sig - int(floor(log10(abs(x)))) - 1)
symbol = 'DOGE'  # input whatever crypto you want

# Poll the price every 15 seconds and text an alert on any move larger than
# 0.5%. ``n += 0`` is intentional: the loop runs forever as configured, but
# the bound makes it trivial to terminate by changing the increment.
n = 0
while n < 100:  # in case I want to terminate loop
    n += 0  # current setup intentionally runs forever
    price_0 = float(robin_stocks.robinhood.crypto.get_crypto_quote(symbol, info=None)['mark_price'])
    time.sleep(15)  # 15 second delay b/w gathering price values
    price_f = float(robin_stocks.robinhood.crypto.get_crypto_quote(symbol, info=None)['mark_price'])
    percentage_change = (price_f - price_0) / price_0 * 100
    if percentage_change > 0.5:
        # Fixed: the keyword arguments were missing commas, and ``from`` is a
        # Python keyword — the Twilio helper library spells it ``from_``.
        client.api.account.messages.create(
            to='+17777777777',
            from_='+18888888888',
            body='DOGE TO THE MOON! Up ' + str(round_sig(percentage_change)) + '%. Current price: $' + str(round_sig(price_f)))
    elif percentage_change < -0.5:
        # Fixed: was ``< 0.5``, which reported every small gain as "down";
        # the "down" alert should only fire on a drop of more than 0.5%.
        client.api.account.messages.create(
            to='+17777777777',
            from_='+18888888888',
            body='DOGE AWAY FROM MOON! Down ' + str(round_sig(percentage_change)) + '%. Current price: $' + str(round_sig(price_f)))
    else:
        pass  # change within +/-0.5%: no alert
| StarcoderdataPython |
3442769 | <gh_stars>0
from django.db import models
from django.urls import reverse, reverse_lazy
class ReferenceQuestion(models.Model):
    # A single reference question captured from a chat transcript.
    # NOTE(review): lh3ChatID suggests these come from a LibraryH3lp (lh3)
    # chat service — confirm against the ingestion code.

    message = models.TextField(blank=True, null=True)
    # Whether identifying details have been scrubbed from the message.
    is_anonymized = models.BooleanField(blank=True, null=True, default=False)
    # Numeric ID of the originating chat in the external system.
    lh3ChatID = models.PositiveIntegerField(blank=True, null=True)
    chat_date = models.DateTimeField(blank=True, null=True)
    # Line offset of the question within the chat transcript.
    reference_question_line_number = models.PositiveIntegerField(
        blank=True, null=True, default=0
    )

    def __str__(self):
        # Display label is simply the primary key.
        return str(self.id)

    def get_absolute_url(self):
        # Canonical detail page for this question.
        return reverse("rq__detail", kwargs={"pk": self.pk})

    class Meta:
        ordering = ("-id",)  # newest first
        indexes = [
            models.Index(fields=["lh3ChatID"]),  # speed lookups by chat ID
        ]
| StarcoderdataPython |
11308678 | <filename>aria/modeling/service_changes.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ARIA modeling service changes module
"""
# pylint: disable=no-self-argument, no-member, abstract-method
from collections import namedtuple
from sqlalchemy import (
Column,
Text,
DateTime,
Enum,
)
from sqlalchemy.ext.declarative import declared_attr
from .types import (List, Dict)
from .mixins import ModelMixin
from . import relationship
class ServiceUpdateBase(ModelMixin):
    """
    Deployment update model representation.
    """

    __tablename__ = 'service_update'

    # Foreign-key columns are implementation details, hidden from to_dict().
    __private_fields__ = ('service_fk',
                          'execution_fk')

    created_at = Column(DateTime, nullable=False, index=True)
    service_plan = Column(Dict, nullable=False)
    service_update_nodes = Column(Dict)
    service_update_service = Column(Dict)
    service_update_node_templates = Column(List)
    modified_entity_ids = Column(Dict)
    state = Column(Text)

    # region association proxies

    @declared_attr
    def execution_name(cls):
        return relationship.association_proxy('execution', cls.name_column_name())

    @declared_attr
    def service_name(cls):
        return relationship.association_proxy('service', cls.name_column_name())

    # endregion

    # region one_to_one relationships

    # endregion

    # region one_to_many relationships

    @declared_attr
    def steps(cls):
        # The individual add/remove/modify steps making up this update.
        return relationship.one_to_many(cls, 'service_update_step')

    # endregion

    # region many_to_one relationships

    @declared_attr
    def execution(cls):
        return relationship.one_to_one(cls, 'execution', back_populates=relationship.NO_BACK_POP)

    @declared_attr
    def service(cls):
        return relationship.many_to_one(cls, 'service', back_populates='updates')

    # endregion

    # region foreign keys

    @declared_attr
    def execution_fk(cls):
        return relationship.foreign_key('execution', nullable=True)

    @declared_attr
    def service_fk(cls):
        return relationship.foreign_key('service')

    # endregion

    def to_dict(self, suppress_error=False, **kwargs):
        # Steps are mapped objects rather than plain columns, so they are
        # serialized explicitly on top of the base to_dict() output.
        dep_update_dict = super(ServiceUpdateBase, self).to_dict(suppress_error)  #pylint: disable=no-member
        # Taking care of the fact the DeploymentSteps are _BaseModels
        dep_update_dict['steps'] = [step.to_dict() for step in self.steps]
        return dep_update_dict
class ServiceUpdateStepBase(ModelMixin):
    """
    Deployment update step model representation.
    """

    __tablename__ = 'service_update_step'
    __private_fields__ = ('service_update_fk',)

    # Actions a step may apply to an entity.
    _action_types = namedtuple('ACTION_TYPES', 'ADD, REMOVE, MODIFY')
    ACTION_TYPES = _action_types(ADD='add', REMOVE='remove', MODIFY='modify')

    # Kinds of entities a step may touch.
    _entity_types = namedtuple(
        'ENTITY_TYPES',
        'NODE, RELATIONSHIP, PROPERTY, OPERATION, WORKFLOW, OUTPUT, DESCRIPTION, GROUP, PLUGIN')
    ENTITY_TYPES = _entity_types(
        NODE='node',
        RELATIONSHIP='relationship',
        PROPERTY='property',
        OPERATION='operation',
        WORKFLOW='workflow',
        OUTPUT='output',
        DESCRIPTION='description',
        GROUP='group',
        PLUGIN='plugin'
    )

    action = Column(Enum(*ACTION_TYPES, name='action_type'), nullable=False)
    entity_id = Column(Text, nullable=False)
    entity_type = Column(Enum(*ENTITY_TYPES, name='entity_type'), nullable=False)

    # region association proxies

    @declared_attr
    def service_update_name(cls):
        return relationship.association_proxy('service_update', cls.name_column_name())

    # endregion

    # region one_to_one relationships

    # endregion

    # region one_to_many relationships

    # endregion

    # region many_to_one relationships

    @declared_attr
    def service_update(cls):
        return relationship.many_to_one(cls, 'service_update', back_populates='steps')

    # endregion

    # region foreign keys

    @declared_attr
    def service_update_fk(cls):
        return relationship.foreign_key('service_update')

    # endregion

    def __hash__(self):
        # Hash on (primary key, entity id) so steps can live in sets/dicts.
        return hash((getattr(self, self.id_column_name()), self.entity_id))

    def __lt__(self, other):
        """
        the order is 'remove' < 'modify' < 'add'
        :param other:
        :return:
        """
        # NOTE(review): for a non-step ``other`` this delegates to
        # ``self >= other``; since no __ge__ is defined that comparison will
        # raise TypeError rather than returning NotImplemented — confirm this
        # is the intended behaviour.
        if not isinstance(other, self.__class__):
            return not self >= other

        if self.action != other.action:
            if self.action == 'remove':
                return_value = True
            elif self.action == 'add':
                return_value = False
            else:
                return_value = other.action == 'add'
            return return_value

        # Equal actions fall back to entity type: nodes are added before
        # their relationships, and relationships removed before their nodes.
        if self.action == 'add':
            return self.entity_type == 'node' and other.entity_type == 'relationship'
        if self.action == 'remove':
            return self.entity_type == 'relationship' and other.entity_type == 'node'
        return False
class ServiceModificationBase(ModelMixin):
    """
    Deployment modification model representation.
    """

    __tablename__ = 'service_modification'
    __private_fields__ = ('service_fk',)

    # Lifecycle states of a modification.
    STARTED = 'started'
    FINISHED = 'finished'
    ROLLEDBACK = 'rolledback'

    STATES = [STARTED, FINISHED, ROLLEDBACK]
    END_STATES = [FINISHED, ROLLEDBACK]  # terminal states

    context = Column(Dict)
    created_at = Column(DateTime, nullable=False, index=True)
    ended_at = Column(DateTime, index=True)
    modified_node_templates = Column(Dict)
    nodes = Column(Dict)
    status = Column(Enum(*STATES, name='service_modification_status'))

    # region association proxies

    @declared_attr
    def service_name(cls):
        return relationship.association_proxy('service', cls.name_column_name())

    # endregion

    # region one_to_one relationships

    # endregion

    # region one_to_many relationships

    # endregion

    # region many_to_one relationships

    @declared_attr
    def service(cls):
        return relationship.many_to_one(cls, 'service', back_populates='modifications')

    # endregion

    # region foreign keys

    @declared_attr
    def service_fk(cls):
        return relationship.foreign_key('service')

    # endregion
| StarcoderdataPython |
1739043 | <reponame>saadidrees/dynRet
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 14:53:08 2022
@author: <NAME>, <NAME>'s Lab, York University
<EMAIL>
This is a custom keras layer that converts light stimulus (R*/rod/s) into photoreceptor currents by using a biophysical model
of the photoreceptors by Rieke's lab https://www.biorxiv.org/content/10.1101/2021.02.13.431101v1.full
"""
import tensorflow as tf
# Solve the differential equations using Euler method
@tf.function(autograph=True,experimental_relax_shapes=True)
def riekeModel(X_fun,TimeStep,sigma,phi,eta,cgmp2cur,cgmphill,cdark,beta,betaSlow,hillcoef,hillaffinity,gamma,gdark):
    """Forward-Euler integration of the Rieke-lab phototransduction model.

    Converts a light stimulus tensor into photoreceptor current by stepping
    the cascade's difference equations through time.

    Args:
        X_fun: stimulus tensor; assumed (batch, time, units) given the
            ``X_fun[:,pnt,:]`` indexing below — TODO confirm with callers.
        TimeStep: integration step in seconds.
        sigma, phi, eta, beta, betaSlow, gamma: cascade rate constants.
        cgmp2cur, cgmphill: cGMP-to-current conversion constant and Hill
            exponent.
        cdark, gdark: dark calcium and dark cGMP concentrations.
        hillcoef, hillaffinity: cyclase Hill cooperativity and affinity.

    Returns:
        Photocurrent tensor shaped like ``X_fun`` (values are negative).
    """
    darkCurrent = gdark**cgmphill * cgmp2cur/2
    gdark = (2 * darkCurrent / cgmp2cur) **(1/cgmphill)
    cur2ca = beta * cdark / darkCurrent  # get q using steady state
    smax = eta/phi * gdark * (1 + (cdark / hillaffinity) **hillcoef)  # get smax using steady state
    tme = tf.range(0,X_fun.shape[1],dtype='float32')*TimeStep
    NumPts = tme.shape[0]
    # initial conditions (broadcast to the batch/unit shape via X_fun[:,0,:]*0)
    g_prev = gdark+(X_fun[:,0,:]*0)
    s_prev = (gdark * eta/phi)+(X_fun[:,0,:]*0)
    c_prev = cdark+(X_fun[:,0,:]*0)
    r_prev = X_fun[:,0,:] * gamma / sigma
    p_prev = (eta + r_prev)/phi
    g = tf.TensorArray(tf.float32,size=NumPts)
    # NOTE(review): TensorArray.write returns the updated array; the result
    # is discarded here (unlike inside the loop, which uses ``g = g.write``),
    # so element 0 may remain unwritten in graph mode — confirm.
    g.write(0,X_fun[:,0,:]*0)
    # solve difference equations
    for pnt in tf.range(1,NumPts):
        r_curr = r_prev + TimeStep * (-1 * sigma * r_prev)
        r_curr = r_curr + gamma * X_fun[:,pnt-1,:]
        p_curr = p_prev + TimeStep * (r_prev + eta - phi * p_prev)
        c_curr = c_prev + TimeStep * (cur2ca * (cgmp2cur * g_prev **cgmphill)/2 - beta * c_prev)
        s_curr = smax / (1 + (c_curr / hillaffinity) **hillcoef)
        g_curr = g_prev + TimeStep * (s_prev - p_prev * g_prev)
        g = g.write(pnt,g_curr)
        # update prev values to current
        g_prev = g_curr
        s_prev = s_curr
        c_prev = c_curr
        p_prev = p_curr
        r_prev = r_curr
    g = g.stack()
    # stack() yields (time, batch, units); transpose back to (batch, time, units).
    g = tf.transpose(g,(1,0,2))
    outputs = -(cgmp2cur * g **cgmphill)/2
    return outputs
class photoreceptor_RIEKE(tf.keras.layers.Layer):
"""
This is a custom keras layer that converts light stimulus (R*/rod/s) into photoreceptor currents by using a biophysical model
of the photoreceptors by Rieke's lab https://www.biorxiv.org/content/10.1101/2021.02.13.431101v1.full
"""
def __init__(self,units=1):
super(photoreceptor_RIEKE,self).__init__()
self.units = units
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
})
return config
def build(self,input_shape):
sigma_init = tf.keras.initializers.Constant(2.2)
self.sigma = tf.Variable(name='sigma',initial_value=sigma_init(shape=(1,self.units),dtype='float32'),trainable=True)
sigma_scaleFac = tf.keras.initializers.Constant(10.)
self.sigma_scaleFac = tf.Variable(name='sigma_scaleFac',initial_value=sigma_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
phi_init = tf.keras.initializers.Constant(2.2)
self.phi = tf.Variable(name='phi',initial_value=phi_init(shape=(1,self.units),dtype='float32'),trainable=True)
phi_scaleFac = tf.keras.initializers.Constant(10.)
self.phi_scaleFac = tf.Variable(name='phi_scaleFac',initial_value=phi_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
eta_init = tf.keras.initializers.Constant(2.)
self.eta = tf.Variable(name='eta',initial_value=eta_init(shape=(1,self.units),dtype='float32'),trainable=True)
eta_scaleFac = tf.keras.initializers.Constant(1000.)
self.eta_scaleFac = tf.Variable(name='eta_scaleFac',initial_value=eta_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
beta_init = tf.keras.initializers.Constant(0.9)
self.beta = tf.Variable(name='beta',initial_value=beta_init(shape=(1,self.units),dtype='float32'),trainable=True)
beta_scaleFac = tf.keras.initializers.Constant(10.)
self.beta_scaleFac = tf.Variable(name='beta_scaleFac',initial_value=beta_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
cgmp2cur_init = tf.keras.initializers.Constant(0.01)
self.cgmp2cur = tf.Variable(name='cgmp2cur',initial_value=cgmp2cur_init(shape=(1,self.units),dtype='float32'),trainable=False)
cgmphill_init = tf.keras.initializers.Constant(3.)
self.cgmphill = tf.Variable(name='cgmphill',initial_value=cgmphill_init(shape=(1,self.units),dtype='float32'),trainable=False)
cgmphill_scaleFac = tf.keras.initializers.Constant(1.)
self.cgmphill_scaleFac = tf.Variable(name='cgmphill_scaleFac',initial_value=cgmphill_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
cdark_init = tf.keras.initializers.Constant(1.)
self.cdark = tf.Variable(name='cdark',initial_value=cdark_init(shape=(1,self.units),dtype='float32'),trainable=False)
betaSlow_init = tf.keras.initializers.Constant(1.)
self.betaSlow = tf.Variable(name='betaSlow',initial_value=betaSlow_init(shape=(1,self.units),dtype='float32'),trainable=False)
betaSlow_scaleFac = tf.keras.initializers.Constant(1.)
self.betaSlow_scaleFac = tf.Variable(name='betaSlow_scaleFac',initial_value=betaSlow_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
hillcoef_init = tf.keras.initializers.Constant(4.)
self.hillcoef = tf.Variable(name='hillcoef',initial_value=hillcoef_init(shape=(1,self.units),dtype='float32'),trainable=True)
hillcoef_scaleFac = tf.keras.initializers.Constant(1.)
self.hillcoef_scaleFac = tf.Variable(name='hillcoef_scaleFac',initial_value=hillcoef_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
hillaffinity_init = tf.keras.initializers.Constant(0.5)
self.hillaffinity = tf.Variable(name='hillaffinity',initial_value=hillaffinity_init(shape=(1,self.units),dtype='float32'),trainable=False)
hillaffinity_scaleFac = tf.keras.initializers.Constant(1.)
self.hillaffinity_scaleFac = tf.Variable(name='hillaffinity_scaleFac',initial_value=hillaffinity_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
gamma_init = tf.keras.initializers.Constant(1.)
self.gamma = tf.Variable(name='gamma',initial_value=gamma_init(shape=(1,self.units),dtype='float32'),trainable=True)
gamma_scaleFac = tf.keras.initializers.Constant(10.)
self.gamma_scaleFac = tf.Variable(name='gamma_scaleFac',initial_value=gamma_scaleFac(shape=(1,self.units),dtype='float32'),trainable=False)
gdark_init = tf.keras.initializers.Constant(0.28)
self.gdark = tf.Variable(name='gdark',initial_value=gdark_init(shape=(1,self.units),dtype='float32'),trainable=False)
self.timeBin = 8 # find a way to fix this in the model
    def call(self,inputs):
        """Run the Rieke phototransduction model on a stimulus batch.

        Scales each stored parameter by its companion scale factor, then
        hands everything to the external ``riekeModel`` solver.

        Args:
            inputs: photon-rate stimulus; time appears to be on axis 1
                (see the repeat_elements call) — TODO confirm shape with
                callers.

        Returns:
            Model response tensor from ``riekeModel``, downsampled back to
            the original frame rate if the stimulus was upsampled.
        """
        X_fun = inputs
        timeBin = float(self.timeBin) # ms
        # NOTE(review): frameTime == timeBin == 8 here, so upSamp_fac is
        # always 1 and both up/down-sampling branches below never execute.
        frameTime = 8 # ms
        upSamp_fac = int(frameTime/timeBin)
        TimeStep = 1e-3*timeBin  # integration step in seconds
        if upSamp_fac>1:
            X_fun = tf.keras.backend.repeat_elements(X_fun,upSamp_fac,axis=1)
            X_fun = X_fun/upSamp_fac # appropriate scaling for photons/ms
        # Each physical parameter is a trainable (or frozen) variable times a
        # fixed scale factor chosen to keep the trainable values near 1.
        sigma = self.sigma * self.sigma_scaleFac # rhodopsin activity decay rate (1/sec)
        phi = self.phi * self.phi_scaleFac # phosphodiesterase activity decay rate (1/sec)
        eta = self.eta * self.eta_scaleFac # phosphodiesterase activation rate constant (1/sec)
        cgmp2cur = self.cgmp2cur # constant relating cGMP to current
        cgmphill = self.cgmphill * self.cgmphill_scaleFac # cooperativity for cGMP->current
        cdark = self.cdark # dark calcium concentration
        beta = self.beta * self.beta_scaleFac # rate constant for calcium removal in 1/sec
        betaSlow = self.betaSlow * self.betaSlow_scaleFac # slow calcium-removal rate
        hillcoef = self.hillcoef * self.hillcoef_scaleFac # cooperativity for cyclase, hill coef
        hillaffinity = self.hillaffinity * self.hillaffinity_scaleFac # hill affinity for cyclase
        gamma = (self.gamma*self.gamma_scaleFac)/timeBin # (this is rate of increase in opsin activity per R*/sec)
        gdark = self.gdark*100 # concentration of cGMP in darkness
        outputs = riekeModel(X_fun,TimeStep,sigma,phi,eta,cgmp2cur,cgmphill,cdark,beta,betaSlow,hillcoef,hillaffinity,gamma,gdark)
        if upSamp_fac>1:
            # Keep every upSamp_fac-th sample so the output matches the
            # original (pre-upsampling) frame rate.
            outputs = outputs[:,upSamp_fac-1::upSamp_fac]
        return outputs
class Normalize(tf.keras.layers.Layer):
    """
    Global min-max normalization followed by mean-centering.

    BatchNorm computes normalization factors for each dimension separately
    from the batch data; LayerNorm computes them from channels and
    dimensions. Normalize combines both: a single min/max/mean is computed
    over channels, dimensions AND the batch.
    """
    def __init__(self, units=1, **kwargs):
        # Forward **kwargs (name, dtype, trainable, ...) to the base Layer.
        # Without this, reloading a saved model fails: get_config() below
        # also emits the base-class keys, and from_config() calls
        # cls(**config), which the old signature could not accept.
        super().__init__(**kwargs)
        self.units = units
    def get_config(self):
        """Return the layer config so the layer is serializable."""
        config = super().get_config()
        config.update({
            "units": self.units,
        })
        return config
    def call(self, inputs):
        """Scale inputs to [0, 1] globally, then subtract the global mean."""
        value_min = tf.math.reduce_min(inputs)
        value_max = tf.math.reduce_max(inputs)
        # NOTE(review): divides by zero when all inputs are equal
        # (value_max == value_min) — confirm upstream inputs preclude this.
        R_norm = (inputs - value_min)/(value_max-value_min)
        R_mean = tf.math.reduce_mean(R_norm)
        R_norm = R_norm - R_mean
        return R_norm
| StarcoderdataPython |
6543643 | <filename>motion_planning.py
import numpy as np
from utils_3D import Motion_Planer
if __name__ == "__main__":
    # Each obstacle row describes one rectangular building:
    #   [z, a_x, a_y, b_x, b_y, c_x, c_y, d_x, d_y, h]
    #   z      -- bottom of the building on the z-axis
    #   a..d   -- the four rectangle vertices, e.g. a = (a_x, a_y)
    #   h      -- the building height
    obstacles = [
        [1, 10, 10, 0, 20, 20, 20, 10, 30, 15],
        [0, 1, 1, 4, 4, 1, 4, 4, 1, 17],
        [0, 18, -4, 24, 2, 18, 8, 12, 2, 17],
    ]
    planner = Motion_Planer(obstacles)
    # Generate a random start point and end point inside the voxel map.
    voxmap_start, voxmap_goal = planner.rand_points()
    # Run A* to find a path from start to goal.
    paths_r = planner.find_paths(voxmap_start, voxmap_goal, flag_virtual=1)
| StarcoderdataPython |
9621958 | # -*- coding: utf-8 -*-
"""Conservation urls."""
from django.urls import path
from . import views
# Namespace for reversing, e.g. reverse("conservation:conservationthreat-list").
app_name = 'conservation'
urlpatterns = [
    # Conservation threats: list / detail / update / create.
    path('threats/',
         views.ConservationThreatListView.as_view(),
         name="conservationthreat-list"),
    path('threats/<int:pk>/',
         views.ConservationThreatDetailView.as_view(),
         name="conservationthreat-detail"),
    path('threats/<int:pk>/update/',
         views.ConservationThreatUpdateView.as_view(),
         name="conservationthreat-update"),
    path('threats/create/',
         views.ConservationThreatCreateView.as_view(),
         name="conservationthreat-create"),
    # Conservation actions: list / detail / update / create.
    path('actions/',
         views.ConservationActionListView.as_view(),
         name="conservationaction-list"),
    path('actions/<int:pk>/',
         views.ConservationActionDetailView.as_view(),
         name="conservationaction-detail"),
    path('actions/<int:pk>/update/',
         views.ConservationActionUpdateView.as_view(),
         name="conservationaction-update"),
    path('actions/create/',
         views.ConservationActionCreateView.as_view(),
         name="conservationaction-create"),
    # Activities (progress reports) are nested under an action.
    path('actions/<int:pk>/report-progress/',
         views.ConservationActivityCreateView.as_view(),
         name="conservationactivity-create"),
    path('actions/activities/<int:pk>/update/',
         views.ConservationActivityUpdateView.as_view(),
         name="conservationactivity-update"),
    # Conservation-listing nominations for taxa and communities.
    path('nominate-taxon/',
         views.TaxonConservationListingCreateView.as_view(),
         name="taxonconservationlisting-create"),
    path('nominate-community/',
         views.CommunityConservationListingCreateView.as_view(),
         name="communityconservationlisting-create"),
    # Supporting documents: list / create / detail / update.
    path('documents/',
         views.DocumentListView.as_view(),
         name="document-list"),
    path('documents/create/',
         views.DocumentCreateView.as_view(),
         name="document-create"),
    path('documents/<int:pk>/',
         views.DocumentDetailView.as_view(),
         name="document-detail"),
    path('documents/<int:pk>/update/',
         views.DocumentUpdateView.as_view(),
         name="document-update"),
]
| StarcoderdataPython |
9777818 | <filename>prosit/constants.py
# File-system layout (container paths).
DATA_PATH = "/root/data.hdf5"
MODEL_SPECTRA = "/root/model_spectra/"
MODEL_IRT = "/root/model_irt/"
OUT_DIR = "/root/prediction/"
# Training / prediction hyperparameters.
VAL_SPLIT = 0.8
TRAIN_EPOCHS = 500
TRAIN_BATCH_SIZE = 1024
PRED_BATCH_SIZE = 1024
PRED_BAYES = False
PRED_N = 100
# Mass tolerances per instrument type. Units: ppm for FTMS, Da for ITMS.
TOLERANCE_FTMS = 25
TOLERANCE_ITMS = 0.35
TOLERANCE_TRIPLETOF = 0.5
# NOTE(review): TOLERANCE["TripleTOF"] is (50, "ppm") but
# TOLERANCE_TRIPLETOF above is 0.5 — these disagree; confirm which value
# (and unit) is intended for TripleTOF instruments.
TOLERANCE = {"FTMS": (25, "ppm"), "ITMS": (0.35, "da"), "TripleTOF": (50, "ppm")}
# Amino-acid alphabet: integer codes start at 1, presumably leaving 0 for
# sequence padding — TODO confirm against the encoder.
ALPHABET = {
    "A": 1,
    "C": 2,
    "D": 3,
    "E": 4,
    "F": 5,
    "G": 6,
    "H": 7,
    "I": 8,
    "K": 9,
    "L": 10,
    "M": 11,
    "N": 12,
    "P": 13,
    "Q": 14,
    "R": 15,
    "S": 16,
    "T": 17,
    "V": 18,
    "W": 19,
    "Y": 20,
    "M(ox)": 21,  # oxidized methionine
}
# Reverse lookup: integer code -> amino-acid symbol.
ALPHABET_S = {integer: char for char, integer in ALPHABET.items()}
# Precursor charges considered by the model.
CHARGES = [1, 2, 3, 4, 5, 6]
DEFAULT_MAX_CHARGE = len(CHARGES)
MAX_FRAG_CHARGE = 3
# A peptide of MAX_SEQUENCE residues has MAX_SEQUENCE - 1 fragmentation sites.
MAX_SEQUENCE = 30
MAX_ION = MAX_SEQUENCE - 1
ION_TYPES = ["y", "b"]
# Neutral losses considered ("" = no loss).
NLOSSES = ["", "H2O", "NH3"]
# N-terminal (forward) vs C-terminal (backward) ion series.
FORWARD = {"a", "b", "c"}
BACKWARD = {"x", "y", "z"}
# Amino acids
# Modification monoisotopic mass deltas (Da).
MODIFICATION = {
    "CAM": 57.0214637236,  # Carbamidomethylation (CAM)
    "OX": 15.99491,  # Oxidation
}
# Monoisotopic residue masses (Da). C carries a fixed CAM modification.
AMINO_ACID = {
    "G": 57.021464,
    "R": 156.101111,
    "V": 99.068414,
    "P": 97.052764,
    "S": 87.032028,
    "U": 150.95363,
    "L": 113.084064,
    "M": 131.040485,
    "Q": 128.058578,
    "N": 114.042927,
    "Y": 163.063329,
    "E": 129.042593,
    "C": 103.009185 + MODIFICATION["CAM"],
    "F": 147.068414,
    "I": 113.084064,
    "A": 71.037114,
    "T": 101.047679,
    "W": 186.079313,
    "H": 137.058912,
    "D": 115.026943,
    "K": 128.094963,
}
AMINO_ACID["M(ox)"] = AMINO_ACID["M"] + MODIFICATION["OX"]
# Atomic elements
# Monoisotopic masses (Da).
PROTON = 1.007276467
ELECTRON = 0.00054858
H = 1.007825035
C = 12.0
O = 15.99491463
N = 14.003074
# Tiny molecules
N_TERMINUS = H
C_TERMINUS = O + H
CO = C + O
CHO = C + H + O
NH2 = N + H * 2
H2O = H * 2 + O
NH3 = N + H * 3
NEUTRAL_LOSS = {"NH3": NH3, "H2O": H2O}
# Mass offset added to the summed residue masses for each ion series.
ION_OFFSET = {
    "a": N_TERMINUS - CHO,
    "b": N_TERMINUS - H,
    "c": N_TERMINUS + NH2,
    "x": C_TERMINUS + CO - H,
    "y": C_TERMINUS + H,
    "z": C_TERMINUS - NH2,
}
| StarcoderdataPython |
9729862 | """Testing the metrics developed to asses performance of a ride."""
# Authors: <NAME> <<EMAIL>>
# <NAME>
# License: MIT
import pytest
import pandas as pd
import numpy as np
from sksports.metrics import normalized_power_score
from sksports.metrics import intensity_factor_score
from sksports.metrics import training_stress_score
from sksports.metrics import training_load_score
from sksports.metrics import mpa2ftp
from sksports.metrics import ftp2mpa
# Shared fixtures: rider power profile and power data.
mpa = 400.  # maximal aerobic power (watts)
ftp = 304.  # functional threshold power (watts)
# Synthetic 10-minute ride: 200 s at 300 W, 200 s at 0 W, 200 s at 200 W.
ride = np.array([300.] * 200 + [0.] * 200 + [200.] * 200)
ride = pd.Series(ride,
                 index=pd.date_range('1/1/2011',
                                     periods=ride.size,
                                     freq='1S'),
                 name='power')
# Stepped ride used for the training-load score (increasing power blocks).
ride_2 = np.array([140.] * 20 + [220.] * 20 + [250.] * 20 + [310.] * 20 +
                  [350.] * 20 + [410.] * 20 + [800.] * 20)
ride_2 = pd.Series(ride_2,
                   index=pd.date_range('1/1/2011',
                                       periods=ride_2.size,
                                       freq='1S'),
                   name='power')
@pytest.mark.parametrize(
    "score_func, params, expected_score",
    [(normalized_power_score, (ride, mpa), 260.7611),
     (intensity_factor_score, (ride, mpa), 0.857766),
     (training_stress_score, (ride, mpa), 12.26273),
     (training_load_score, (ride_2, mpa), 11.16666)]
)
def test_scores(score_func, params, expected_score):
    # Regression test: each metric must reproduce its known reference value
    # (approx comparison absorbs floating-point noise).
    assert score_func(*params) == pytest.approx(expected_score)
def test_convert_mpa_ftp():
    # MPA -> FTP and FTP -> MPA must be inverses of each other.
    assert mpa2ftp(ftp2mpa(ftp)) == pytest.approx(ftp)
| StarcoderdataPython |
92727 | <filename>inferelator_prior/motifs/fimo.py
import io
import subprocess
import pandas as pd
import numpy as np
import pandas.errors as pde
from inferelator_prior import FIMO_EXECUTABLE_PATH
from inferelator_prior.motifs import meme, chunk_motifs, SCAN_SCORE_COL, SCORE_PER_BASE
from inferelator_prior.motifs._motif import MotifScanner
# Suffix for FIMO scan-result files.
FIMO_DATA_SUFFIX = ".fimo.tsv"
# Column names of FIMO's TSV output (FIMO >= 5.0.5 header names).
FIMO_MOTIF = 'motif_id'
FIMO_MOTIF_COMMON = 'motif_alt_id'
FIMO_CHROMOSOME = 'sequence_name'
FIMO_STRAND = 'strand'
FIMO_START = 'start'
FIMO_STOP = 'stop'
FIMO_SCORE = 'p-value'
FIMO_SEQUENCE = 'matched_sequence'
# Base command line; --text streams TSV to stdout instead of writing files.
FIMO_COMMAND = [FIMO_EXECUTABLE_PATH, "--text"]
class FIMOScanner(MotifScanner):
    """Motif scanner backed by the MEME-suite `fimo` executable."""
    scanner_name = "FIMO"
    def _preprocess(self, min_ic=None):
        # Load motifs from the MEME file (if given) and split them into
        # per-worker chunks for parallel scanning.
        if self.motif_file is not None:
            self.motifs = meme.read(self.motif_file)
        return chunk_motifs(meme, self.motifs, num_workers=self.num_workers, min_ic=min_ic)
    def _postprocess(self, motif_peaks):
        # Workers may report the same hit more than once; keep one row per
        # (motif, location, strand).
        if motif_peaks is not None:
            motif_peaks = motif_peaks.drop_duplicates(subset=[FIMO_MOTIF, FIMO_START, FIMO_STOP, FIMO_CHROMOSOME,
                                                              FIMO_STRAND])
        return motif_peaks
    def _get_motifs(self, fasta_file, motif_file, threshold=None, parse_genomic_coord=True):
        # Run fimo on one (motif file, fasta) pair and parse its stdout.
        # Ternary binds loosely: (FIMO_COMMAND + [...]) if ... else FIMO_COMMAND.
        fimo_command = FIMO_COMMAND + ["--parse-genomic-coord"] if parse_genomic_coord else FIMO_COMMAND
        if threshold is None:
            fimo_command = fimo_command + [motif_file, fasta_file]
        else:
            fimo_command = fimo_command + ["--thresh", str(threshold)] + [motif_file, fasta_file]
        proc = subprocess.run(fimo_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE(review): on failure this logs and still parses whatever stdout
        # produced (usually yielding None) — confirm that best-effort
        # behaviour is intended rather than raising.
        if int(proc.returncode) != 0:
            print(proc.stderr.decode("utf-8"))
            print("fimo motif scan failed for {meme}, {fa} ({cmd})".format(meme=motif_file,
                                                                           fa=fasta_file,
                                                                           cmd=" ".join(fimo_command)))
        return self._parse_output(io.StringIO(proc.stdout.decode("utf-8")))
    def _parse_output(self, output_handle):
        # Parse fimo's TSV into a DataFrame and attach per-match scores.
        try:
            motifs = pd.read_csv(output_handle, sep="\t", index_col=None)
            motifs.dropna(subset=[FIMO_START, FIMO_STOP], inplace=True, how='any')
            motifs[FIMO_START], motifs[FIMO_STOP] = motifs[FIMO_START].astype(int), motifs[FIMO_STOP].astype(int)
            # Old FIMO (< 5.0.5) used a different header; refuse to guess.
            if "#pattern name" in motifs.columns:
                raise RuntimeError("FIMO version not supported; update to 5.0.5")
            # Re-score each matched sequence with the in-memory motif models.
            motifs[SCAN_SCORE_COL] = [self.motifs[x].score_match(y) for x, y in
                                      zip(motifs[FIMO_MOTIF], motifs[FIMO_SEQUENCE])]
            motifs[SCORE_PER_BASE] = [np.array(self.motifs[x]._info_match(y)) for x, y in
                                      zip(motifs[FIMO_MOTIF], motifs[FIMO_SEQUENCE])]
            return motifs
        except pde.EmptyDataError:
            # fimo produced no hits at all.
            return None
| StarcoderdataPython |
3485462 | # coding: utf-8
"""
VNS3 Controller API
Cohesive networks VNS3 API providing complete control of your network's addresses, routes, rules and edge # noqa: E501
The version of the OpenAPI document: 4.8
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
from cohesivenet.api_builder import VersionRouter
def get_license(api_client, **kwargs):  # noqa: E501
    """get_license  # noqa: E501

    Get license details  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.get_license(async_req=True)

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # BUG FIX: request options arrive via **kwargs, but locals() stores them
    # under the single "kwargs" key, so .get("async_req") etc. below always
    # returned the default. Flatten them into the params dict.
    local_var_params.update(local_var_params.pop("kwargs"))

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/license",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def put_license_upgrade(api_client, body=None, **kwargs):  # noqa: E501
    """put_license_upgrade  # noqa: E501

    Upload new license to controller  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.put_license_upgrade(body, async_req=True)

    :param async_req bool: execute request asynchronously
    :param body str: License file (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # BUG FIX: request options arrive via **kwargs, but locals() stores them
    # under the single "kwargs" key, so .get("async_req") etc. below always
    # returned the default. Flatten them into the params dict.
    local_var_params.update(local_var_params.pop("kwargs"))

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = body
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["text/plain"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/license/upgrade",
        "PUT",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def put_set_license_parameters(
    api_client,
    subnet=None,
    managers=None,
    asns=None,
    clients=None,
    my_manager_vip=None,
    default=None,
    **kwargs
):  # noqa: E501
    """put_set_license_parameters  # noqa: E501

    Set and accept license parameters. Triggers reboot. Irreversible operation.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.put_set_license_parameters(client, default=True, async_req=True)

    :param subnet str:
    :param managers str:
    :param asns str: space delimited ASNs
    :param clients str:
    :param my_manager_vip str:
    :param default bool: use default topology
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # BUG FIX: request options arrive via **kwargs, but locals() stores them
    # under the single "kwargs" key, so .get("async_req") etc. below always
    # returned the default. Flatten them into the params dict. Note that
    # request_params (defined after the locals() snapshot) still restricts
    # the JSON body to the named parameters only.
    local_var_params.update(local_var_params.pop("kwargs"))

    request_params = [
        "subnet",
        "managers",
        "asns",
        "clients",
        "my_manager_vip",
        "default",
    ]

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # Only named, non-None parameters are sent in the JSON body.
    body_params = {}
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        body_params[param] = local_var_params[param]

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/license/parameters",
        "PUT",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def upload_license(api_client, body=None, **kwargs):  # noqa: E501
    """upload_license  # noqa: E501

    License a VNS3 Controller to be a part of a specific topology. Must not be licensed already.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.upload_license(client, body, async_req=True)

    :param body str: License file (required)
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # BUG FIX: request options arrive via **kwargs, but locals() stores them
    # under the single "kwargs" key, so .get("async_req") etc. below always
    # returned the default. Flatten them into the params dict.
    local_var_params.update(local_var_params.pop("kwargs"))

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = body
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["text/plain"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/license",
        "PUT",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
class LicensingApiRouter(VersionRouter):
    """Routes licensing API calls to the implementation for the controller's
    VNS3 version (all four endpoints cover versions 4.8.4 through 5.1.5)."""
    # Maps public method name -> {supported version range: implementation}.
    function_library = {
        "get_license": {"4.8.4-5.1.5": get_license},
        "put_license_upgrade": {"4.8.4-5.1.5": put_license_upgrade},
        "put_set_license_parameters": {"4.8.4-5.1.5": put_set_license_parameters},
        "upload_license": {"4.8.4-5.1.5": upload_license},
    }
| StarcoderdataPython |
8041781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: kangliang
date: 2019-12-25
"""
import configparser
from poseidon.core.output import *
def get_ini_info(file, section, key):
    """Read and return a single value from an INI file.

    :param file: path to the INI file (read as UTF-8)
    :param section: section name to look in
    :param key: option name within the section
    :return: the option value as a string
    """
    parser = configparser.ConfigParser()
    parser.read(file, encoding="utf-8")
    return parser.get(section, key)
def update_ini_info(file, section, key, value):
    """Set ``section/key`` to ``value`` in an INI file, rewriting it in place.

    Errors are reported through ``err`` (project logger) rather than raised,
    preserving the module's existing best-effort behaviour.

    :param file: path to the INI file (read as UTF-8)
    :param section: section name (must already exist)
    :param key: option name within the section
    :param value: new value (string)
    """
    try:
        config = configparser.ConfigParser()
        config.read(file, encoding="utf-8")
        config.set(section, key, value)
        # BUG FIX: the original did config.write(open(file, "w")), leaking
        # the handle; a context manager closes and flushes deterministically.
        with open(file, "w") as fh:
            config.write(fh)
    except Exception as e:
        # NOTE(review): failures are swallowed after logging — callers get no
        # signal that the update did not happen. Confirm this is intended.
        err(e)
| StarcoderdataPython |
6562862 | <reponame>doanthevu1910/ams-real-estate
import matplotlib.pyplot as plt
# Plot relative feature importances of a fitted random forest.
# NOTE(review): `rf`, `np` and `feature_list` are not defined in this
# snippet — presumably a fitted sklearn RandomForest, numpy, and the list
# of feature names from an earlier cell; confirm before reuse.
y = rf.feature_importances_
# Keep only importances above a 0.005 threshold.
list_y = [a for a in y if a > 0.005]
print(list_y)
list_of_index = []
for i in list_y:
    # np.where returns a tuple of index arrays, not a scalar index.
    a = np.where(y==i)
    list_of_index.append(a)
print(list_of_index)
# NOTE(review): the computed indices above are immediately discarded and
# replaced by a hard-coded [0, 1] — looks like a debugging leftover that
# silently pins the plot to the first two features; confirm intent.
list_of_index = [0, 1]
col = []
for i in feature_list:
    col.append(i)
# Labels for the selected (hard-coded) feature indices.
labels = []
for i in list_of_index:
    b = col[i]
    labels.append(b)
y = list_y
fig, ax = plt.subplots()
width = 0.8
ind = np.arange(len(y))
# Horizontal bar chart: one bar per retained importance value.
ax.barh(ind, y,width, color="orange")
ax.set_yticks(ind+width/10)
ax.set_yticklabels(labels, minor=False)
plt.title('Feature importance in Random Forest Regression')
plt.xlabel('Relative importance')
plt.ylabel('Feature')
plt.figure(figsize=(10,8.5))
fig.set_size_inches(10, 8.5, forward=True)
4926691 | <gh_stars>0
###############################################
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
> File Name: urls.py
> Author: lixuebin
> Mail: <EMAIL>
> Created Time: 2019年08月04日 星期日 07时17分51秒
"""
################################################
from django.urls import path, re_path
from . import views, user, group, power
#from views import UserView, UserDetailView, RoleView
app_name = "users"
urlpatterns = [
path('', views.IndexView.as_view(), name = 'index'),
path('login/', views.LoginViews.as_view(), name = 'login'),
path('logout/', views.LogoutView.as_view(), name = 'logout'),
path('group/', group.RoleView.as_view(), name = "role_list"),
path('groupUser/', group.RoleView.as_view(), name = "role_user"),
re_path('groupdetail/(?P<pk>[0-9]+)?/$', group.RoleDetailView.as_view(), name = "role_detail"),
path('user/', user.UserView.as_view(), name = "user_list"),
re_path('userdetail/(?P<pk>[0-9]+)?/$', user.UserDetailView.as_view(), name= 'user_detail')
]
| StarcoderdataPython |
1725752 | # SPDX-License-Identifier: MIT
import sys, json, os, re, requests
import nvd, scancode
from glob import glob
from os import path, remove
from subprocess import call
from collections import OrderedDict
from shutil import rmtree
def main(argv):
    """Scan a GitHub repo for license and vulnerability risk metrics.

    Pipeline: parse the repo URL, look up the latest release version, check
    the NVD for matching CPEs, download and license-scan the source with
    scancode, then print a JSON summary.

    Args:
        argv: command-line argument list; argv[1] must be a GitHub repo URL.
    """
    if len(argv) <= 1:
        print("Usage: ./riskmetrics.py <github-repo-url>")
        exit(2)
    repo_url = argv[1]
    repo_info = re.search(r'github\.com/([^/]+)/([^/]+)', repo_url)
    if repo_info is None:
        print("Unable to parse url: %s" % repo_url)
        exit(1)
    owner = repo_info.group(1)
    repo = repo_info.group(2)
    version = get_latest_version(owner, repo)
    # Try two CPE spellings: vendor may be the GitHub owner or the project name.
    cpe1 = construct_cpe(owner, repo, version)
    cpe2 = construct_cpe(repo, repo, version)
    nvd.update()
    vulnerable = nvd.search(cpe1)
    if vulnerable is False:
        vulnerable = nvd.search(cpe2)
    download_repo(owner, repo)
    # GitHub tarballs unpack to "<owner>-<repo>-<sha>"; pick the first match.
    repo_dir = glob(owner + '-' + repo + '*')[0]
    scancode.install()
    scancode.scan(repo_dir)
    rmtree(repo_dir)
    # Sentinel value; overwritten if a scancode info.json is found below.
    data_json = json.loads('{"results" : "none"}')
    parent_dir = path.dirname(path.realpath(__file__))
    directory = str(path.dirname(parent_dir))
    print (directory)
    # Find the scancode output anywhere below the project root.
    for subdir, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith('info.json'):
                with open(path.join(subdir, file),'r') as f:
                    data_json = json.load(f)
    all_licenses = data_json['results']
    if all_licenses == "none":
        print ('Failed to scan package. Exiting.')
        exit(1)
    # Count files with an empty license list.
    count_empty_file = 0
    for element in all_licenses:
        if (element['licenses'] == []):
            count_empty_file = count_empty_file + 1
    count_file_with_license = data_json['resource_count'] - count_empty_file
    # Collect every license short_name found anywhere in the scan output.
    list_license = []
    for _ in item_generator(data_json,"short_name"):
        list_license.append(_)
    list_license.sort()
    data = {
        "Number of files with a license" : count_file_with_license,
        "Number of files without a license" : count_empty_file,
        # OrderedDict.fromkeys deduplicates while preserving sorted order.
        "Types of licenses" : list(OrderedDict.fromkeys(list_license)),
        "CPE located in NVD" : vulnerable
    }
    metrics = json.dumps(data, indent = 2)
    remove('info.json')
    display(metrics)
def item_generator(json_input, lookup_key):
    """
    Recursively yield every value stored under ``lookup_key`` in a nested
    JSON-like structure (dicts and lists, arbitrarily nested).

    Args:
        json_input: Structure (dict, list, or leaf) in which to find the key.
        lookup_key (str): The key to look for.

    Yields:
        The value associated with each occurrence of ``lookup_key``.
        Matched values are yielded whole and not recursed into.
    """
    if isinstance(json_input, dict):
        # BUG FIX: dict.iteritems() is Python 2 only and raised
        # AttributeError under Python 3 (the rest of this module is Py3).
        for k, v in json_input.items():
            if k == lookup_key:
                yield v
            else:
                yield from item_generator(v, lookup_key)
    elif isinstance(json_input, list):
        for item in json_input:
            yield from item_generator(item, lookup_key)
def download_repo(owner, repo):
    """
    Download and unpack the specified github repo into the working directory.

    Args:
        owner (str): The owner of the repo.
        repo (str): The name of the repo.
    """
    # Fetch the default-branch tarball from the GitHub API.
    r = requests.get("https://api.github.com/repos/" +
                     owner + "/" + repo + "/tarball")
    tarball = repo + '.tar.gz'
    # Stream the response to disk in 1 KiB chunks.
    with open(tarball, 'wb') as fd:
        for chunk in r.iter_content(chunk_size=1024):
            fd.write(chunk)
    # NOTE(review): shells out to the external `tar` binary and ignores its
    # exit status; the stdlib `tarfile` module would avoid both issues.
    call(['tar', '-xzvf', tarball])
    remove(tarball)
def get_latest_version(owner, repo):
    """
    Find the version of the repo's latest published release.

    Args:
        owner (str): The owner of the repo.
        repo (str): The name of the repo.

    Returns:
        str: The version of the latest published release of the repo, or the
        empty string if no version is found.
    """
    response = requests.get("https://api.github.com/repos/" +
                            owner + "/" + repo + "/releases/latest")
    try:
        release = response.json()
        # Prefer a numeric version in the tag name, fall back to the title.
        found = re.search(r'([\d.]+)', release['tag_name'])
        if found is None:
            found = re.search(r'([\d.]+)', release['name'])
        if found is None:
            # BUG FIX: the original fell through to found.group(1) on None,
            # raising AttributeError that the bare except then misreported
            # as a retrieval failure. Return the documented fallback here.
            print("Unable to find version of latest release...")
            return ""
        return found.group(1)
    except Exception:
        # Bad JSON, missing keys (e.g. no releases), or network decode issues.
        print("Unable to retrieve latest published release...")
        return ""
def construct_cpe(vendor, product, version):
    """
    Construct a Common Platform Enumeration (CPE) for a given software.

    Args:
        vendor (str): The vendor name of the software.
        product (str): The product name of the software.
        version (str): The software version.

    Returns:
        str: The constructed CPE (URI-binding "cpe:/a:" application form).
    """
    parts = ("cpe:/a", vendor, product, version)
    return ":".join(parts)
def display(metrics):
    """
    Display the risk metrics created on the repo to stdout.

    Args:
        metrics (str): JSON-formatted risk metrics on the repo.
    """
    print ('\nResults:\n')
    print (metrics)
    print ('\nNotes:\n')
    # NOTE(review): "indicitive" below is a typo for "indicative", but it is
    # user-facing output text — left unchanged here; fix deliberately.
    print ('* A Common Platform Enumeration (CPE) in the National Vulnerability Database (NVD)')
    print ('  is not necessarily indicitive of a vulnerable project, nor is a CPE not in the NVD')
    print ('  indicitive of a secure project.\n')
    print ('* License information gathered using the scancode-toolkit-1.6.0 from nexB.\n')
# Script entry point.
if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3520237 | import random
import math
import sqlite3
import pytest
import genomicsqlite
def test_twobit_random():
    """Round-trip random DNA/RNA through the twobit codec and check substr
    semantics against SQLite's built-in substr (including negative args)."""
    con = genomicsqlite.connect(":memory:")
    random.seed(42)  # deterministic sequence lengths/content across runs
    for seqlen in (random.randint(2, 1250) for _ in range(5000)):
        rna = random.choice((False, True))
        # Alphabet excludes T for RNA and U for DNA, mixed case.
        nucs = (
            ("a", "A", "g", "G", "c", "C", "u", "U")
            if rna
            else ("a", "A", "g", "G", "c", "C", "t", "T")
        )
        seq = "".join(random.choice(nucs) for _ in range(seqlen))
        assert (rna and ("t" not in seq and "T" not in seq)) or (
            not rna and ("u" not in seq and "U" not in seq)
        )
        crumbs = next(con.execute("SELECT nucleotides_twobit(?)", (seq,)))[0]
        assert isinstance(crumbs, bytes)
        # 2 bits per base = 4 bases per byte, plus one header byte.
        assert len(crumbs) == math.ceil(len(seq) / 4) + 1
        assert next(con.execute("SELECT twobit_length(nucleotides_twobit(?))", (seq,)))[0] == len(
            seq
        )
        # Decoding uppercases; case information is not preserved.
        query = f"SELECT {'twobit_rna' if rna else 'twobit_dna'}(nucleotides_twobit(?))"
        decoded = next(con.execute(query, (seq,)))[0]
        assert decoded == seq.upper()
        # test built-in substr
        sub_ofs = random.randint(0, seqlen)
        sub_len = random.randint(0, seqlen)
        decoded = next(con.execute("SELECT twobit_dna(?,?,?)", (seq, sub_ofs, sub_len)))[0]
        control = next(con.execute("SELECT substr(twobit_dna(?),?,?)", (seq, sub_ofs, sub_len)))[0]
        assert decoded == control
        # test with negative offset/length -- https://sqlite.org/lang_corefunc.html#substr
        decoded = next(con.execute("SELECT twobit_dna(?,?,?)", (seq, 0 - sub_ofs, sub_len)))[0]
        control = next(
            con.execute("SELECT substr(twobit_dna(?),?,?)", (seq, 0 - sub_ofs, sub_len))
        )[0]
        assert decoded == control
        decoded = next(con.execute("SELECT twobit_dna(?,?,?)", (seq, sub_ofs, 0 - sub_len)))[0]
        control = next(
            con.execute("SELECT substr(twobit_dna(?),?,?)", (seq, sub_ofs, 0 - sub_len))
        )[0]
        assert decoded == control
def test_twobit_corner_cases():
    """Exercise single-base, empty, non-nucleotide passthrough, and every
    substr offset/length combination around the boundaries."""
    con = genomicsqlite.connect(":memory:")
    for nuc in "AGCTagct":
        assert next(con.execute("SELECT length(nucleotides_twobit(?))", (nuc,)))[0] == 1
        assert (
            next(con.execute("SELECT twobit_dna(nucleotides_twobit(?))", (nuc,)))[0] == nuc.upper()
        )
    assert next(con.execute("SELECT nucleotides_twobit('')"))[0] == ""
    # Strings with non-ACGT characters pass through unencoded (as TEXT).
    assert next(con.execute("SELECT nucleotides_twobit('acgt 1')"))[0] == "acgt 1"
    assert next(con.execute("SELECT twobit_dna('acgt 1')"))[0] == "acgt 1"
    assert next(con.execute("SELECT twobit_dna('acgt 1',1,6)"))[0] == "acgt 1"
    assert next(con.execute("SELECT twobit_dna('acgt 1',3,3)"))[0] == "gt "
    assert next(con.execute("SELECT twobit_dna('acgt 1',-2,-3)"))[0] == "cgt"
    # exhaustively test offset/length corner cases
    for xtest in range(-9, 9):
        for ytest in range(-9, 9):
            decoded = next(
                con.execute("SELECT twobit_rna(nucleotides_twobit('gattaca'),?,?)", (xtest, ytest))
            )[0]
            control = next(con.execute("SELECT substr('GAUUACA',?,?)", (xtest, ytest)))[0]
            assert decoded == control, str((xtest, ytest))
def test_twobit_column():
    # test populating a column with mixed BLOB and TEXT values
    con = genomicsqlite.connect(":memory:")
    con.executescript("CREATE TABLE test(test_twobit BLOB)")
    # Encodable sequences are stored as BLOBs; "foo"/"bar" stay TEXT.
    for elt in list("Tu") + ["foo", "bar", "gATuaCa"]:
        con.execute("INSERT INTO test(test_twobit) VALUES(nucleotides_twobit(?))", (elt,))
    column = list(con.execute("SELECT test_twobit FROM test"))
    assert isinstance(column[0][0], bytes), str([type(x[0]) for x in column])
    assert isinstance(column[-1][0], bytes)
    assert isinstance(column[-2][0], str)
    assert column[-2][0] == "bar"
    # twobit_dna decodes BLOBs and passes TEXT through; note mixed T/U input
    # ("gATuaCa") decodes as DNA to GATTACA.
    assert list(con.execute("SELECT twobit_dna(test_twobit) FROM test")) == [
        ("T",),
        ("T",),
        ("foo",),
        ("bar",),
        ("GATTACA",),
    ]
def test_dna_revcomp():
    """dna_revcomp: case-preserving reverse complement; rejects any
    non-ACGTacgt character and non-TEXT arguments."""
    con = genomicsqlite.connect(":memory:")
    assert next(con.execute("SELECT dna_revcomp('AGCTagct')"))[0] == "agctAGCT"
    assert next(con.execute("SELECT dna_revcomp('gATtaCa')"))[0] == "tGtaATc"
    assert next(con.execute("SELECT dna_revcomp('')"))[0] == ""
    assert next(con.execute("SELECT dna_revcomp(NULL)"))[0] is None
    with pytest.raises(sqlite3.OperationalError):
        con.execute("SELECT dna_revcomp('GATTACAb')")
    with pytest.raises(sqlite3.OperationalError):
        con.execute("SELECT dna_revcomp('GATTACA ')")
    with pytest.raises(sqlite3.OperationalError):
        con.execute("SELECT dna_revcomp(42)")
| StarcoderdataPython |
8157288 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 1.0
@author: li
@file: cnn_model.py
@time: 2020/1/8 2:51 下午
"""
import tensorflow as tf
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class TCNNConfig(object):
    """Hyperparameters for the text-classification CNN."""
    embedding_dim = 64  # word-embedding dimension
    seq_length = 600  # input sequence length (tokens)
    num_classes = 10  # number of target classes
    num_filters = 256  # number of convolution filters per kernel size
    kernel_size = 5  # convolution kernel size
    # NOTE(review): the multi-kernel graph iterates kernel_size_list;
    # kernel_size above appears unused by it — confirm before removing.
    kernel_size_list = [3, 4, 5]
    vocab_size = 5000  # vocabulary size
    hidden_dim = 128  # fully-connected layer units
    dropout_keep_prob = 0.5  # dropout keep probability
    learning_rate = 1e-3  # learning rate
    batch_size = 64  # training batch size
    num_epochs = 10  # total training epochs
    print_per_batch = 100  # log results every N batches
    save_per_batch = 10  # write to tensorboard every N batches
class TextCNN(object):
    """Text-classification CNN (TensorFlow 1.x graph-mode model).

    Construction builds the full computation graph: input placeholders,
    embedding lookup, multi-width convolution + max-pooling branches,
    a fully-connected classifier head, the Adam optimizer, and an
    accuracy metric.
    """

    def __init__(self, config):
        # config: a TCNNConfig-like object supplying all hyper-parameters
        self.config = config
        self._build_graph()

    def _build_graph(self):
        """Assemble the TF1 computation graph for the CNN classifier."""
        with tf.variable_scope("Input_data"):
            # Three run-time inputs: token ids, one-hot labels, dropout keep prob.
            self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
            self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
            self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')

        # Word-embedding lookup (pinned to CPU).
        with tf.device('/cpu:0'), tf.name_scope('Embedding'):
            embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])
            embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
            # Add a channel axis: (batch, seq, embed) -> (batch, seq, embed, 1) for conv2d.
            self.embedding_inputs_expanded = tf.expand_dims(embedding_inputs, -1)

        # with tf.name_scope("CNN"):
        #     # CNN layer
        #     conv = tf.layers.conv1d(embedding_inputs, self.config.num_filters, self.config.kernel_size, name='conv')
        #     # global max pooling layer
        #     gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')

        with tf.name_scope('CNN'):
            # One conv + max-pool branch per kernel width in kernel_size_list.
            pooled_outputs = []
            for i, filter_size in enumerate(self.config.kernel_size_list):
                with tf.name_scope("conv-maxpool-%s" % filter_size):
                    # Filter: filter_size tokens high, full embedding wide, 1 input channel.
                    filter_shape = [filter_size, self.config.embedding_dim, 1, self.config.num_filters]
                    W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                    b = tf.Variable(tf.constant(0.1, shape=[self.config.num_filters]), name="b")
                    conv = tf.nn.conv2d(self.embedding_inputs_expanded, W,
                                        strides=[1, 1, 1, 1],
                                        padding="VALID",
                                        name="conv")
                    h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                    # Max-pool over every valid position -> one value per filter.
                    pooled = tf.nn.max_pool(h, ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],
                                            strides=[1, 1, 1, 1],
                                            padding='VALID',
                                            name="pool")
                    pooled_outputs.append(pooled)
            # Concatenate the branch outputs and flatten to (batch, num_filter_total).
            num_filter_total = self.config.num_filters * len(self.config.kernel_size_list)
            self.h_pool = tf.concat(pooled_outputs, 3)
            gmp = tf.reshape(self.h_pool, [-1, num_filter_total])

        with tf.name_scope("Score"):
            # Fully-connected layer, then dropout and ReLU.
            fc = tf.layers.dense(gmp, self.config.hidden_dim, name='fc1')
            # NOTE(review): tf.contrib.layers.dropout's second argument is the
            # keep probability, consistent with the placeholder's meaning.
            fc = tf.contrib.layers.dropout(fc, self.dropout_keep_prob)
            fc = tf.nn.relu(fc)
            # Classifier head: raw logits and the argmax predicted class id.
            self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class

        with tf.name_scope("Optimize"):
            # Softmax cross-entropy, averaged over the batch.
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)
            # Adam optimizer minimizing the mean loss.
            self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)

        with tf.name_scope("Accuracy"):
            # Fraction of predictions agreeing with the one-hot labels.
            correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
            self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
| StarcoderdataPython |
1640359 | <reponame>stevebob/camkes-cli<gh_stars>0
import multiprocessing
import toml
from . import common
def make_subparser(subparsers):
    """Register the ``init`` sub-command on the given argparse subparsers."""
    init_parser = subparsers.add_parser(
        'init', description="Initialize an existing project")
    init_parser.add_argument(
        '--jobs',
        type=int,
        default=multiprocessing.cpu_count(),
        help="Number of threads to use when downloading code")
    init_parser.set_defaults(func=handle_init)
def handle_init(args):
    """Handle the ``init`` sub-command.

    Loads the project's TOML markup file, downloads the manifest/code with
    ``args.jobs`` parallel downloads, instantiates the build templates, and
    creates the build-system symlinks — all relative to the current directory.
    """
    with open(common.markup_path()) as info_file:
        info = toml.load(info_file)
    args.logger.info("Downloading dependencies...")
    common.get_code(".", info["manifest_url"], info["manifest_name"], args.jobs)
    args.logger.info("Instantiating build templates...")
    common.instantiate_build_templates(".", info)
    args.logger.info("Creating build system symlinks...")
    common.make_symlinks(".", info)
| StarcoderdataPython |
6659308 | <gh_stars>0
"""
given an encoded relation, and an example of this relation (or not), can we classify
it correctly?
means, no need for any nlp bits, or REINFORCE.
let's us test various ways of using the encoded relation to classify the image
a few ways we can try:
- concatenate the encoding with that of the image
- distance of encoding from image encoding (eg "dot product is positive or negative?" perhaps?)
- attention over output feature planes
- attention over filter planes at each layer
- mapping from encoded relation to the filter weights of each layer :P
(this means we wont backprop so much onto the weights, as through the weights)
"""
import time
import json
import torch
from torch import nn, optim
from ulfs.stats import Stats
from ulfs.runner_base_v1 import RunnerBase
from ulfs import utils
from ref_task import params_groups
from ref_task.models import receiver_model as receiver_model_lib
from ref_task import dataset_family_lib
class Runner(RunnerBase):
    """Supervised training loop for a receiver model that classifies
    examples given a pre-encoded relation (no sender, no REINFORCE)."""

    def __init__(self):
        super().__init__(
            save_as_statedict_keys=['model', 'opt'],
            step_key='episode'
        )

    def setup(self, p):
        """One-time setup from parsed params *p*: build the dataset family,
        the receiver model, the optimizer, the loss, and running stats."""
        params_groups.process_args(args=p)
        print('p', self.params)
        print('')
        print('ref', p.ref)
        print('')
        # The dataset family is configured from every 'ds_'-prefixed param.
        self.dataset = dataset_family_lib.build_family(**utils.filter_dict_by_prefix(p.__dict__, prefix='ds_'))
        self.ds_meta = self.dataset.meta
        print('ds_meta', json.dumps(self.ds_meta.__dict__, indent=2))
        self.vocab_size = self.ds_meta.vocab_size
        self.grid_planes = self.ds_meta.grid_planes
        # Propagate dataset geometry back onto the params object so the
        # model builder can read it.
        p.grid_size = self.ds_meta.grid_size
        p.max_colors = self.ds_meta.num_colors
        p.max_shapes = self.ds_meta.num_shapes
        self.in_seq_len = self.ds_meta.utt_len
        print('in seq len', self.in_seq_len)
        print('params', self.params)
        print('vocab_size', self.vocab_size)
        self.model = receiver_model_lib.build_receiver_model(
            params=p,
            ds_meta=self.ds_meta,
            vocab_size=self.ds_meta.vocab_size,
            utt_len=self.ds_meta.utt_len
        )
        if p.enable_cuda:
            self.model = self.model.cuda()
        print('receiver model', self.model)
        self.opt = optim.Adam(lr=0.001, params=self.model.parameters())
        # reduction='none' keeps per-example losses; they are averaged below.
        self.crit = nn.CrossEntropyLoss(reduction='none')
        self.stats = Stats([
            'episodes_count',
            'loss_sum',
            'acc_sum'
        ])

    def step(self, p):
        """Run one training episode; periodically evaluate and log."""
        episode = self.episode
        render = self.should_render()

        def forward(split_name: str, render=False):
            # Sample a batch from the given split (no sender utterances),
            # one-hot the hypothesis tokens, run the receiver model, and
            # return predictions plus accuracy and mean loss.
            batch_size = p.batch_size
            b = self.dataset.sample(batch_size=batch_size, split_name=split_name, no_sender=True)
            receiver_examples_t, labels_t, hypotheses_t = map(b.__getitem__, [
                'inner_test_examples_t', 'inner_test_labels_t', 'hypotheses_t'
            ])
            M, N = hypotheses_t.size()
            batch_size = N
            if p.enable_cuda:
                labels_t = labels_t.cuda()
                receiver_examples_t = receiver_examples_t.cuda()
                hypotheses_t = hypotheses_t.cuda()
            labels_t = labels_t.long()
            M, N = hypotheses_t.size()
            # One-hot encode hypotheses: (M, N) ids -> (M, N, vocab_size).
            relations_onehot_t = torch.zeros(M, N, self.vocab_size, device=hypotheses_t.device)
            relations_onehot_t.scatter_(-1, hypotheses_t.view(M, N, 1), 1.0)
            out = self.model(relations_onehot_t, receiver_examples_t)
            _, pred = out.max(-1)
            correct = pred.view(-1) == labels_t.view(-1)
            acc = correct.long().sum().item() / labels_t.numel()
            crit_loss = self.crit(out.view(-1, out.size(-1)), labels_t.view(-1))
            loss = crit_loss.mean()
            return labels_t, receiver_examples_t, pred, correct, acc, loss

        # --- training step ---
        self.model.train()
        labels_t, receiver_examples_t, pred, correct, acc, loss = forward(
            split_name='train')
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        self.stats.loss_sum += loss.item()
        self.stats.acc_sum += acc
        self.stats.episodes_count += 1

        # --- termination checks (step limit and wall-clock limit) ---
        terminate_reason = ''
        if p.max_steps is not None and p.max_steps > 0 and episode >= p.max_steps:
            print(f'reached max steps {p.max_steps} => terminating')
            self.finish = True
            terminate_reason = 'max_steps'
        if p.max_mins is not None and time.time() - self.start_time >= p.max_mins * 60:
            print('reached terminate time => terminating')
            self.finish = True
            terminate_reason = 'timeout'

        # --- periodic evaluation + logging (and final result on finish) ---
        if render or self.finish:
            stats = self.stats
            log_dict = {
                'loss': stats.loss_sum / stats.episodes_count,
                'acc': stats.acc_sum / stats.episodes_count,
                'eps': stats.episodes_count / (time.time() - self.last_print)
            }
            format_str = (
                'e={episode} '
                'eps={eps:.1f} '
                'loss={loss:.3f} '
                'acc={acc:.3f} '
            )
            # Evaluate on each held-out split (single pass each).
            for split_name in ['val_same', 'val_new', 'test_same', 'test_new']:
                eval_acc_l = []
                for it in range(1):
                    self.model.eval()
                    labels_t, receiver_examples_t, pred, correct, eval_acc, eval_loss = forward(
                        split_name=split_name, render=(it == 0))
                    eval_acc_l.append(eval_acc)
                log_dict[f'{split_name}_acc'] = sum(eval_acc_l) / len(eval_acc_l)
                format_str += f'{split_name}_acc={{{split_name}_acc:.3f}}'
            self.print_and_log(
                log_dict,
                format_str
            )
            stats.reset()
            if self.finish:
                # Record the final summary for downstream consumers.
                print('log_dict', log_dict)
                self.res = dict(log_dict)
                self.res.update({
                    'batch': episode,
                    'elapsed': (time.time() - self.start_time),
                    'terminate_reason': terminate_reason
                })
                print('self.res', self.res)
if __name__ == '__main__':
    runner = Runner()
    # Runner-specific command-line parameters.
    runner.add_param('--crit-loss-rewards', action='store_true')
    runner.add_param('--max-steps', type=int, default=0)
    runner.add_param('--max-mins', type=float, help='finish running if reach this elapsed time')
    # Shared parameter groups: dataset, common, receiver model, conv stack.
    params_groups.add_ds_args(runner)
    params_groups.add_common_args(runner)
    params_groups.add_receiver_args(runner)
    params_groups.add_conv_args(runner)
    runner.parse_args()
    runner.setup_base()
    runner.run_base()
| StarcoderdataPython |
6416734 | # Django Imports
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.template.context_processors import csrf
from django.urls import reverse
# HTK Imports
from htk.apps.forums.forms import MessageCreationForm
from htk.apps.forums.forms import ThreadCreationForm
from htk.apps.forums.helpers import wrap_data_forum
from htk.apps.forums.models import Forum
from htk.apps.forums.models import ForumThread
from htk.view_helpers import render_custom as _r
@login_required
def index(request):
data = wrap_data_forum(request)
site = data['site']
forums = site.forums.all()
data['forums'] = forums
response = _r(request, 'forum/index.html', data)
return response
@login_required
def forum(request, fid):
data = wrap_data_forum(request)
forum = get_object_or_404(Forum, id=fid)
data.update(csrf(request))
data['forum'] = forum
thread_creation_form = ThreadCreationForm()
data['thread_creation_form'] = thread_creation_form
data['threads'] = forum.threads.order_by('sticky', '-updated')
response = _r(request, 'forum/forum.html', data)
return response
@login_required
def thread(request, tid=None):
thread = get_object_or_404(ForumThread, id=tid)
data = wrap_data_forum(request)
data.update(csrf(request))
message_creation_form = MessageCreationForm()
data['message_creation_form'] = message_creation_form
data['thread'] = thread
response = _r(request, 'forum/thread.html', data)
return response
@login_required
def thread_create(request, fid=None):
forum = get_object_or_404(Forum, id=fid)
data = wrap_data_forum(request)
user = data['user']
data.update(csrf(request))
data['forum'] = forum
success = False
if request.method == 'POST':
thread_creation_form = ThreadCreationForm(request.POST)
if thread_creation_form.is_valid():
thread = thread_creation_form.save(author=user, forum=forum)
success = True
else:
for error in thread_creation_form.non_field_errors():
data['errors'].append(error)
else:
thread_creation_form = ThreadCreationForm(None)
if success:
response = redirect(reverse('forum_thread', args=(thread.id,)))
else:
data['forms'].append(thread_creation_form)
data['thread_creation_form'] = thread_creation_form
response = _r(request, 'forum/thread_create.html', data)
return response
@login_required
def message_create(request, tid=None):
thread = get_object_or_404(ForumThread, id=tid)
data = wrap_data_forum(request)
data['thread'] = thread
user = data['user']
data.update(csrf(request))
success = False
if request.method == 'POST':
message_creation_form = MessageCreationForm(request.POST)
if message_creation_form.is_valid():
message = message_creation_form.save(author=user, thread=thread)
success = True
else:
for error in auth_form.non_field_errors():
data['errors'].append(error)
else:
message_creation_form = MessageCreationForm(None)
if success:
response = redirect(reverse('forum_thread', args=(thread.id,)))
else:
data['message_creation_form'] = message_creation_form
response = _r(request, 'forum/message_create.html', data)
return response
| StarcoderdataPython |
205550 | <gh_stars>1-10
from os.path import dirname, join
from setuptools import find_packages, setup
def read_file(file):
    """Return the entire text contents of *file*."""
    with open(file, "rt") as handle:
        contents = handle.read()
    return contents
# Read the package version from the bundled VERSION.txt so the version is
# defined in exactly one place (the file is also shipped via package_data).
with open(join(dirname(__file__), 'transformer_pytorch/VERSION.txt'), 'rb') as f:
    version = f.read().decode('ascii').strip()

setup(
    name='transformer_pytorch',
    version=version,
    description='transformer in pytorch',
    packages=find_packages(exclude=[]),
    author='allen',
    author_email='<EMAIL>',
    license='Apache License v2',
    package_data={'': ['*.*']},  # include all non-Python files in the package
    url='https://github.com/walkacross/transformer-pytorch',
    # NOTE(review): install_requires is given the raw requirements.txt text;
    # the documented form is a list of requirement strings — confirm setuptools
    # splits this as intended.
    install_requires=read_file("requirements.txt").strip(),
    zip_safe=False,
    classifiers=[
        'Programming Language :: Python',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: Unix',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| StarcoderdataPython |
1937217 | """ResNet with Deconvolution layers for CenterNet object detection."""
# pylint: disable=unused-argument
from __future__ import absolute_import
import warnings
import math
import torch
from torch import nn
from torchvision import models
from ..model_zoo import get_model
__all__ = ['DeconvResnet', 'get_deconv_resnet',
'resnet18_v1b_deconv', 'resnet18_v1b_deconv_dcnv2',
'resnet50_v1b_deconv', 'resnet50_v1b_deconv_dcnv2',
'resnet101_v1b_deconv', 'resnet101_v1b_deconv_dcnv2']
class DeconvResnet(nn.Module):
    """Deconvolutional ResNet: a ResNet trunk followed by an upsampling
    (deconvolution) head, as used by CenterNet-style detectors.

    Parameters
    ----------
    base_network : str
        Name of the base feature extraction network (must be a 'v1b' resnet).
    deconv_filters : list of int
        Number of filters for each deconv (upsampling) stage.
    deconv_kernels : list of int
        Kernel sizes for the deconv layers (each must be 2, 3 or 4).
    pretrained_base : bool
        Whether to load pretrained weights for the base network.
    norm_layer : torch.nn.Module
        Type of norm layers: BatchNorm2d, SyncBatchNorm, GroupNorm, etc.
    norm_kwargs : dict
        Additional kwargs for `norm_layer`.
    use_dcnv2 : bool
        If True, use DCNv2 layers in upsampling blocks (not implemented here).
    """

    def __init__(self, base_network='resnet18_v1b', deconv_filters=(256, 128, 64),
                 deconv_kernels=(4, 4, 4), pretrained_base=True, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, use_dcnv2=False, **kwargs):
        super(DeconvResnet, self).__init__(**kwargs)
        assert 'resnet' in base_network
        net = get_model(base_network, pretrained=pretrained_base)
        # net = models.resnet18(pretrained=pretrained_base)
        self._norm_layer = norm_layer
        self._norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        self._use_dconv2 = use_dcnv2
        if 'v1b' in base_network:
            # Reuse the resnet trunk up to and including layer4, dropping the
            # global pooling and classifier head.
            feat = nn.Sequential(
                net.conv1,
                net.bn1,
                net.relu,
                net.maxpool,
                net.layer1,
                net.layer2,
                net.layer3,
                net.layer4
            )
            self.base_network = feat
        else:
            raise NotImplementedError
        self.deconv = self._make_deconv_layer(deconv_filters, deconv_kernels)

    def _get_deconv_cfg(self, deconv_kernel):
        """Map a deconv kernel size to (kernel, padding, output_padding) for a
        stride-2 ConvTranspose2d; raises ValueError for unsupported sizes."""
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError('Unsupported deconvolution kernel: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_filters, num_kernels):
        # pylint: disable=unused-variable
        """Build the upsampling head: one (conv -> norm -> relu -> deconv ->
        norm -> relu) stage per entry in *num_filters*."""
        assert len(num_kernels) == len(num_filters), \
            'Deconv filters and kernels number mismatch: {} vs. {}'.format(
                len(num_filters), len(num_kernels))
        layers = nn.Sequential()
        # Probe the trunk with a dummy input to discover its output channels.
        in_planes = self.base_network(torch.zeros((1, 3, 256, 256))).size(1)
        for i, (planes, k) in enumerate(zip(num_filters, num_kernels)):
            kernel, padding, output_padding = self._get_deconv_cfg(k)
            if self._use_dconv2:
                raise NotImplementedError
            else:
                # NOTE(review): a 1x1 conv with padding=1 grows H/W by 2 at
                # each stage; comparable implementations use a 3x3 conv with
                # padding=1 here — confirm the kernel size is intentional.
                conv = nn.Conv2d(in_planes, planes, 1, stride=1, padding=1)
                deconv = nn.ConvTranspose2d(planes, planes, kernel, stride=2, padding=padding,
                                            output_padding=output_padding, bias=False)
            # TODO BilinearUpSample() for deconv
            upsample = nn.Sequential(
                conv,
                self._norm_layer(planes, momentum=0.9, **self._norm_kwargs),
                nn.ReLU(),
                deconv,
                self._norm_layer(planes, momentum=0.9, **self._norm_kwargs),
                nn.ReLU()
            )
            layers.add_module('upsample_stage{}'.format(i), upsample)
            in_planes = planes
        return layers

    def forward(self, x):
        # pylint: disable=arguments-differ
        """Run the trunk then the upsampling head; returns the feature map."""
        out = self.base_network(x)
        # print(out.size())
        # for layer in self.deconv:
        #     out = layer(out)
        #     print(out.size())
        out = self.deconv(out)
        return out
def get_deconv_resnet(base_network, pretrained=False, device=torch.device('cpu'), use_dcnv2=False, **kwargs):
    """Construct a resnet with deconv (upsampling) layers.

    Parameters
    ----------
    base_network : str
        Name of the base feature extraction network.
    pretrained : bool
        Whether to load pretrained weights for the base network.
    device : torch.device
        Device to place the model on, e.g. torch.device('cpu') or
        torch.device('cuda').
    use_dcnv2 : bool
        If True, use DCNv2 layers in the upsampling blocks.
    **kwargs
        Extra keyword arguments forwarded to DeconvResnet.

    Returns
    -------
    torch.nn.Module
        The constructed DeconvResnet, moved to *device*.
    """
    net = DeconvResnet(base_network=base_network, pretrained_base=pretrained,
                       use_dcnv2=use_dcnv2, **kwargs)
    net.to(device)
    return net
def resnet18_v1b_deconv(**kwargs):
    """Resnet18 v1b model with deconv layers (plain convs, DCNv2 disabled).

    Returns
    -------
    torch.nn.Module
        A Resnet18 v1b model with deconv layers.
    """
    # Force-disable DCNv2 regardless of caller-supplied kwargs.
    kwargs['use_dcnv2'] = False
    return get_deconv_resnet('resnet18_v1b', **kwargs)
4825058 | from minecraft.networking.packets import Packet
from minecraft.networking.types import (
Double, Float, Byte, VarInt, BitFieldEnum, PositionAndLook
)
class PlayerPositionAndLookPacket(Packet, BitFieldEnum):
    """Clientbound "player position and look" packet.

    Carries a position/orientation update; the 'flags' byte is a bitfield
    marking, per field, whether the value is a relative delta (added to the
    current value) or an absolute replacement.
    """

    @staticmethod
    def get_id(context):
        # The numeric packet id changed across protocol versions.
        return 0x32 if context.protocol_version >= 389 else \
            0x31 if context.protocol_version >= 352 else \
            0x30 if context.protocol_version >= 345 else \
            0x2F if context.protocol_version >= 336 else \
            0x2E if context.protocol_version >= 332 else \
            0x2F if context.protocol_version >= 318 else \
            0x2E if context.protocol_version >= 70 else \
            0x08

    packet_name = "player position and look"

    # Field layout; 'teleport_id' only exists from protocol version 107 on.
    get_definition = staticmethod(lambda context: [
        {'x': Double},
        {'y': Double},
        {'z': Double},
        {'yaw': Float},
        {'pitch': Float},
        {'flags': Byte},
        {'teleport_id': VarInt} if context.protocol_version >= 107 else {},
    ])

    # Expose this class's FLAG_* constants as the enum for the 'flags' field.
    field_enum = classmethod(
        lambda cls, field: cls if field == 'flags' else None)

    # Bit set => the corresponding field is relative (a delta).
    FLAG_REL_X = 0x01
    FLAG_REL_Y = 0x02
    FLAG_REL_Z = 0x04
    FLAG_REL_YAW = 0x08
    FLAG_REL_PITCH = 0x10

    # This alias is retained for backward compatibility.
    PositionAndLook = PositionAndLook

    # Update a PositionAndLook instance using this packet.
    def apply(self, target):
        """Apply this packet to *target* (a PositionAndLook), honouring the
        relative/absolute flag for each field and normalising both angles
        into [0, 360)."""
        # pylint: disable=no-member
        if self.flags & self.FLAG_REL_X:
            target.x += self.x
        else:
            target.x = self.x
        if self.flags & self.FLAG_REL_Y:
            target.y += self.y
        else:
            target.y = self.y
        if self.flags & self.FLAG_REL_Z:
            target.z += self.z
        else:
            target.z = self.z
        if self.flags & self.FLAG_REL_YAW:
            target.yaw += self.yaw
        else:
            target.yaw = self.yaw
        if self.flags & self.FLAG_REL_PITCH:
            target.pitch += self.pitch
        else:
            target.pitch = self.pitch
        target.yaw %= 360
        target.pitch %= 360
| StarcoderdataPython |
3546176 | import sys
import numpy as np
from os.path import join as opj
from brainiak.searchlight.searchlight import Searchlight
from nilearn.image import load_img
from scipy.stats import pearsonr
from searchlight_config import config
# voxel function for searchlight
# voxel function for searchlight
def sfn(l, msk, sl_rad, bcast_var):
    """Searchlight kernel: Pearson correlation between the searchlight's
    timepoint-by-timepoint correlations (off-diagonal entries selected by
    the broadcast mask) and the video model's correlations."""
    model_corrs, offdiag_mask = bcast_var
    # (voxels, time) within the mask -> (time, voxels) so corrcoef
    # correlates timepoint activity patterns.
    voxels_by_time = l[0][msk, :]
    timepoint_corrs = np.corrcoef(voxels_by_time.T)
    return pearsonr(timepoint_corrs[offdiag_mask], model_corrs)[0]
def kth_diag_indices(arr, k):
    """Return (row, col) index arrays addressing the k-th superdiagonal of
    the square array *arr*.

    Intended for k >= 1; with k == 0 the slices below produce empty arrays
    rather than the main diagonal.
    """
    diag_rows, diag_cols = np.diag_indices_from(arr)
    shifted = (diag_rows[:-k], diag_cols[k:])
    return shifted
# CLI: subject id and permutation index (perm == -1 means "no permutation").
subid, perm = int(sys.argv[1]), int(sys.argv[2])
input_dir = opj(config['datadir'], 'inputs')
traj_path = opj(input_dir, 'models_t100_v50_r10.npy')
scan_path = opj(input_dir, 'fMRI', f'sherlock_movie_s{subid}.nii.gz')
results_dir = opj(config['datadir'], 'outputs', 'searchlight_video')

# load video model (first element of the saved object array)
video_model = np.load(traj_path, allow_pickle=True)[0]

# load fMRI data, create mask of voxels that are nonzero at every timepoint
scan_data = load_img(scan_path).get_data()
mask = (scan_data != 0).all(axis=3)

try:
    # ensure random shift is consistent across participants
    np.random.seed(perm)
    shift = np.random.randint(1, video_model.shape[0] - 1)
    result_path = opj(results_dir, 'perms', f'sub{subid}_perm{perm}_shift{shift}.npy')
except ValueError:
    # run searchlight on unaltered data (perm == -1): np.random.seed raises
    # ValueError for a negative seed, which routes perm == -1 into this branch.
    shift = 0
    result_path = opj(results_dir, f'sub{subid}.npy')

# shift recall model timeseries (circularly, along time)
shifted = np.roll(video_model, shift=shift, axis=0)

# subject 5 has some missing TRs at the end and was padded to length of other
# subjects. Truncate fMRI data and topic trajectory to exclude filler data
if subid == 5:
    shifted = shifted[:1925, :]
    scan_data = scan_data[:, :, :, :1925]

# compute shifted recall correlation matrix
shifted_corrmat = np.corrcoef(shifted)

# isolate off-diagonal values with video model temporal correlations > 0
# this was precomputed to save permutation runtime with:
# for k in range(1976):
#     d = np.diag(np.corrcoef(video_model), k=k)
#     if ~(d > 0).any():
#         DIAG_LIMIT = k
#         break
DIAG_LIMIT = 238
diag_mask = np.zeros_like(shifted_corrmat, dtype=bool)
for k in range(1, DIAG_LIMIT):
    ix = kth_diag_indices(diag_mask, k)
    diag_mask[ix] = True

video_corrs = shifted_corrmat[diag_mask]
# the searchlight kernel receives (model correlations, off-diagonal mask)
to_broadcast = (video_corrs, diag_mask)

# create Searchlight object
sl = Searchlight(sl_rad=2)

# distribute data to processes
sl.distribute([scan_data], mask)
sl.broadcast(to_broadcast)

# run searchlight, save data
result = sl.run_searchlight(sfn)
np.save(result_path, result)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.