seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18801922589 | from flask import request, json, Response, Blueprint, g, jsonify, make_response
from ..models.UserModel import UserModel, UserSchema
from ..shared.Authentication import Auth
from marshmallow import ValidationError
user_api = Blueprint('users', __name__)
user_schema = UserSchema()
@user_api.after_request  # blueprint can also be app~~
def after_request(response):
    """Attach permissive CORS headers to every response from this blueprint."""
    header = response.headers
    header['Access-Control-Allow-Origin'] = '*'
    header['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS, PUT, PATCH, DELETE'
    header['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept, x-auth'
    return response
@user_api.route('/', methods=['POST'])
def create():
    """
    Create User Function

    Validates the request payload, rejects duplicate email addresses,
    persists the new user and returns a JWT for the created account.
    """
    req_data = request.get_json()
    try:
        data = user_schema.load(req_data)
    except ValidationError as error:
        return custom_response(error.messages, 400)

    # check if user already exists in the db
    user_in_db = UserModel.get_user_by_email(data.get('email'))
    if user_in_db:
        message = {'error': 'User already exist, please supply another email address'}
        return custom_response(message, 400)

    user = UserModel(data)
    user.save()
    ser_data = user_schema.dump(user)
    token = Auth.generate_token(ser_data.get('id'))
    return custom_response({'jwt_token': token}, 201)
# @user_api.route('/', methods=['GET'])
# @Auth.auth_required
# def get_all():
# users = UserModel.get_all_users()
# ser_users = user_schema.dump(users, many=True)
# return custom_response(ser_users, 200)
@user_api.route('/login', methods=['POST'])
def login():
    """Authenticate a user by email and password and return a JWT."""
    req_data = request.get_json()
    try:
        # partial=True: only the fields present in the payload are validated.
        data = user_schema.load(req_data, partial=True)
    except ValidationError as error:
        return custom_response(error.messages, 400)

    if not data.get('email') or not data.get('password'):
        return custom_response({'error': 'you need email and password to sign in'}, 400)

    user = UserModel.get_user_by_email(data.get('email'))
    if not user:
        return custom_response({'error': 'invalid credentials'}, 400)
    if not user.check_hash(data.get('password')):
        return custom_response({'error': 'invalid credentials'}, 400)

    ser_data = user_schema.dump(user)
    token = Auth.generate_token(ser_data.get('id'))
    return custom_response({'jwt_token': token}, 200)
# @user_api.route('/<int:user_id>', methods=['GET'])
# @Auth.auth_required
# def get_a_user(user_id):
# """
# Get a single user
# """
# user = UserModel.get_one_user(user_id)
# if not user:
# return custom_response({'error': 'user not found'}, 404)
# ser_user = user_schema.dump(user)
# return custom_response(ser_user, 200)
@user_api.route('/me', methods=['PUT'])
@Auth.auth_required
def update():
    """
    Update me

    Partially update the authenticated user's record with any valid
    fields found in the request payload.
    """
    req_data = request.get_json()
    try:
        data = user_schema.load(req_data, partial=True)
    except ValidationError as error:
        return custom_response(error.messages, 400)

    # g.user is populated by Auth.auth_required from the verified token.
    user = UserModel.get_one_user(g.user.get('id'))
    user.update(data)
    ser_user = user_schema.dump(user)
    return custom_response(ser_user, 200)
@user_api.route('/me', methods=['GET'])
@Auth.auth_required
def get_me():
    """
    Get me

    Return the serialized record of the authenticated user.
    """
    user = UserModel.get_one_user(g.user.get('id'))
    ser_user = user_schema.dump(user)
    return custom_response(ser_user, 200)
def custom_response(res, status_code):
    """
    Custom Response Function

    Serialize `res` to JSON, set the given status code, and attach
    permissive CORS headers.
    """
    resp = Response(
        mimetype="application/json",
        response=json.dumps(res),
        status=status_code
    )
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
    resp.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
    return resp
| AnthonyClausing/movie-night-api | src/views/UserView.py | UserView.py | py | 3,721 | python | en | code | 0 | github-code | 36 |
29567698176 | import requests
KIND_SELL = "f3b277728b3fee749481eb3e0b3b48980dbbab78658fc419025cb16eee346775"
BALANCE_ERC20 = "5a28e9363bb942b639270062aa6bb295f434bcdfc42c97267bf003f272060dc9"
def api_get_sell_fee(sell_token, buy_token, sell_amount, network="mainnet"):
    """Query the CoW Protocol fee-and-quote endpoint for a sell order.

    Returns a ``(fee_amount, buy_amount_after_fee)`` tuple of ints.
    NOTE(review): asserts are stripped under ``python -O``; raise explicit
    exceptions if these checks must always run.
    """
    fee_url = f"https://api.cow.fi/{network}/api/v1/feeAndQuote/sell"
    get_params = {"sellToken": sell_token, "buyToken": buy_token, "sellAmountBeforeFee": sell_amount}
    r = requests.get(fee_url, params=get_params)
    assert r.ok and r.status_code == 200
    payload = r.json()  # parse the body once instead of re-parsing per field
    fee_amount = int(payload["fee"]["amount"])
    buy_amount_after_fee = int(payload["buyAmountAfterFee"])
    assert fee_amount > 0
    assert buy_amount_after_fee > 0
    return (fee_amount, buy_amount_after_fee)
def api_get_quote(sell_token, buy_token, sell_amount, valid_to, sender, partiallyFillable=False, network="mainnet"):
    """Request a sell-order quote from the CoW Protocol quote endpoint.

    Returns a ``(fee_amount, buy_amount_after_fee)`` tuple of ints.
    NOTE(review): asserts are stripped under ``python -O``.
    """
    quote_url = f"https://api.cow.fi/{network}/api/v1/quote"
    order_payload = {
        "sellToken": sell_token,
        "buyToken": buy_token,
        "sellAmountBeforeFee": int(sell_amount),
        "validTo": valid_to,
        "partiallyFillable": partiallyFillable,
        "from": sender,
        "receiver": "0x0000000000000000000000000000000000000000",
        "appData": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "kind": "sell",
        "sellTokenBalance": "erc20",
        "buyTokenBalance": "erc20",
        "signingScheme": "presign",  # Very important. this tells the api you are going to sign on chain
    }
    r = requests.post(quote_url, json=order_payload)
    assert r.ok and r.status_code == 200
    payload = r.json()  # parse the body once instead of re-parsing per field
    fee_amount = int(payload["fee"]["amount"])
    buy_amount_after_fee = int(payload["buyAmountAfterFee"])
    assert fee_amount > 0
    assert buy_amount_after_fee > 0
    return (fee_amount, buy_amount_after_fee)
def api_get_order_status(orderUid, network="mainnet"):
    """Return the status string of the order identified by ``orderUid``."""
    order_url = f"https://api.cow.fi/{network}/api/v1/orders/{orderUid}"
    r = requests.get(order_url)
    assert r.ok and r.status_code == 200
    status = r.json()["status"]
    return status
def api_create_order(
    sell_token,
    buy_token,
    sell_amount,
    buy_amount,
    fee_amount,
    valid_to,
    sender,
    receiver,
    partiallyFillable=False,
    app_data="0x0000000000000000000000000000000000000000000000000000000000000000",
    network="mainnet",
):
    """Submit a presign sell order to the CoW Protocol orders endpoint.

    Returns the order UID reported by the API.
    Fix: ``partiallyFillable`` was previously overwritten with ``False``
    unconditionally; the caller's value is now honoured (default unchanged).
    """
    order_url = f"https://api.cow.fi/{network}/api/v1/orders"
    order_payload = {
        "sellToken": sell_token,
        "buyToken": buy_token,
        "sellAmount": str(sell_amount),  # sell amount before fee
        "buyAmount": str(buy_amount),  # buy amount after fee
        "validTo": valid_to,
        "appData": app_data,
        "feeAmount": str(fee_amount),
        "kind": "sell",
        "partiallyFillable": partiallyFillable,
        "receiver": receiver,
        "signature": "0x",
        "from": sender,
        "sellTokenBalance": "erc20",
        "buyTokenBalance": "erc20",
        "signingScheme": "presign",  # Very important. this tells the api you are going to sign on chain
    }
    r = requests.post(order_url, json=order_payload)
    assert r.ok and r.status_code == 201
    order_uid = r.json()
    return order_uid
| lidofinance/lido-otc-seller | utils/cow.py | cow.py | py | 3,245 | python | en | code | 1 | github-code | 36 |
import cv2

# Use the cascade bundled with OpenCV instead of a hard-coded, user-specific path.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:  # no frame available from the camera
        break
    # Haar cascade detection operates on a single-channel (grayscale) image.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| rimo10/object-detection | face.py | face.py | py | 520 | python | en | code | 0 | github-code | 36 |
10873690970 | #!/usr/bin/env python
import ctags
import sys
import traceback
def main(tags_file, search_expression):
    """Search a ctags file for partial matches of `search_expression` and
    print one tab-separated line (file, pattern, name, kind) per match."""
    # read tags file
    tags = ctags.CTags(tags_file)
    tags.setSortType(ctags.TAG_SORTED)

    # search
    entry = ctags.TagEntry()
    found = tags.find(entry, search_expression, ctags.TAG_PARTIALMATCH)
    if not found:
        return

    # Copy the fields out of `entry` before each findNext() call: the
    # TagEntry object is mutated in place by the library.
    # https://github.com/jonashaag/python-ctags3
    entries = []
    while True:
        entries.append({
            'file': entry['file'],
            'pattern': entry['pattern'],
            'name': entry['name'],
            'kind': entry['kind'],
        })
        if not tags.findNext(entry):
            break

    for e in entries:
        print("{}\t{}\t{}\t{}".format(e['file'], e['pattern'], e['name'], e['kind']))
if __name__ == "__main__":
    tags_file = sys.argv[1]
    search_expression = sys.argv[2]
    main(tags_file, search_expression)
| llynch/vim-config | autoload/ctrlp/search-tags.py | search-tags.py | py | 1,281 | python | en | code | 1 | github-code | 36 |
7755827639 | import asyncio
import io
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union
from urllib.parse import urlsplit
import aiohttp
import mercantile
from funclib.models import RenderOptions
from funclib.raster import (
Bbox,
ExportFormats,
GDALRaster,
PILRaster,
Raster,
RasterExtent,
)
from funclib.settings import BaseExporterSettings
from mercantile import Tile
from PIL import Image
from pccommon.backoff import BackoffStrategy, with_backoff_async
logger = logging.getLogger(__name__)
T = TypeVar("T", bound=Raster)
U = TypeVar("U", bound="TileSet")
class TilerError(Exception):
    """Raised when a tile request to the tiler service fails.

    The failed aiohttp response is kept on ``resp`` for inspection.
    """

    def __init__(self, msg: str, resp: aiohttp.ClientResponse):
        super().__init__(msg)
        self.resp = resp
@dataclass
class TileSetDimensions:
    """Tile/pixel geometry of a rectangular mosaic of map tiles."""

    tile_cols: int   # number of tile columns in the mosaic
    tile_rows: int   # number of tile rows in the mosaic
    total_cols: int  # mosaic width in pixels
    total_rows: int  # mosaic height in pixels
    tile_size: int   # edge length of a single square tile in pixels
def get_tileset_dimensions(tiles: List[Tile], tile_size: int) -> TileSetDimensions:
    """Compute mosaic dimensions for a set of tiles.

    Assumes ``tiles`` covers a full rectangle, i.e.
    ``len(tiles) == tile_cols * tile_rows``.
    """
    tile_cols = len({tile.x for tile in tiles})
    tile_rows = len(tiles) // tile_cols
    return TileSetDimensions(
        tile_cols=tile_cols,
        tile_rows=tile_rows,
        total_cols=tile_cols * tile_size,
        total_rows=tile_rows * tile_size,
        tile_size=tile_size,
    )
class TileSet(ABC, Generic[T]):
    """Abstract helper for downloading XYZ map tiles and mosaicking them."""

    def __init__(
        self,
        tile_url: str,
        render_options: RenderOptions,
        max_concurrency: int = 10,
        tile_size: int = 512,
    ) -> None:
        self.tile_url = tile_url
        self.render_options = render_options
        self.tile_size = tile_size
        # Bounds the number of tile downloads in flight at once.
        self._async_limit = asyncio.Semaphore(max_concurrency)

    def get_tile_url(self, z: int, x: int, y: int) -> str:
        """Fill the {z}/{x}/{y} template and append the render query string."""
        url = (
            self.tile_url.replace("{x}", str(x))
            .replace("{y}", str(y))
            .replace("{z}", str(z))
        )
        url += f"?{self.render_options.encoded_query_string}"
        return url

    @abstractmethod
    async def get_mosaic(self, tiles: List[Tile]) -> T:
        ...

    @staticmethod
    def get_covering_tiles(
        bbox: Bbox,
        target_cols: int,
        target_rows: int,
        tile_size: int = 512,
        min_zoom: Optional[int] = None,
    ) -> List[Tile]:
        """Gets tiles covering the given geometry at a zoom level
        that can produce a target_cols x target_rows image."""
        candidate_zoom = min_zoom if min_zoom else 3
        # Zoom in until the covered pixel area is at least the target size.
        while True:
            sw_tile = mercantile.tile(bbox.xmin, bbox.ymin, candidate_zoom)
            ne_tile = mercantile.tile(bbox.xmax, bbox.ymax, candidate_zoom)
            x_diff = ne_tile.x - sw_tile.x
            y_diff = sw_tile.y - ne_tile.y
            width = (x_diff - 1) * tile_size
            height = (y_diff - 1) * tile_size
            if width < target_cols or height < target_rows:
                candidate_zoom += 1
            else:
                break
        return list(
            mercantile.tiles(bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax, candidate_zoom)
        )

    @classmethod
    async def create(
        cls: Type[U],
        cql: Dict[str, Any],
        render_options: RenderOptions,
        settings: BaseExporterSettings,
        data_api_url_override: Optional[str] = None,
    ) -> U:
        """Register a CQL search with the data API and build a TileSet from
        the resulting tile URL template (upgraded to @2x tiles)."""
        register_url = settings.get_register_url(data_api_url_override)
        async with aiohttp.ClientSession() as session:
            # Register the search and get the tilejson_url back
            resp = await session.post(register_url, json=cql)
            mosaic_info = await resp.json()
            tilejson_href = [
                link["href"]
                for link in mosaic_info["links"]
                if link["rel"] == "tilejson"
            ][0]
            tile_url = f"{tilejson_href}?{render_options.encoded_query_string}"

            # Get the full tile path template
            resp = await session.get(tile_url)
            tilejson = await resp.json()
            tile_url = tilejson["tiles"][0]
            # Drop the query string and force high-DPI tiles.
            scheme, netloc, path, _, _ = urlsplit(tile_url)
            tile_url = f"{scheme}://{netloc}{path}".replace("@1x", "@2x")

        return cls(tile_url, render_options)
class GDALTileSet(TileSet[GDALRaster]):
    """TileSet variant intended to mosaic with GDAL; not implemented yet."""

    async def get_mosaic(self, tiles: List[Tile]) -> GDALRaster:
        raise NotImplementedError()
class PILTileSet(TileSet[PILRaster]):
    """TileSet that assembles downloaded PNG tiles into a PILRaster."""

    async def _get_tile(self, url: str) -> io.BytesIO:
        """Download one tile with retry/backoff; return a solid gray
        placeholder tile if every attempt fails."""

        async def _f() -> io.BytesIO:
            async with aiohttp.ClientSession() as session:
                async with self._async_limit:
                    async with session.get(url) as resp:
                        if resp.status == 200:
                            return io.BytesIO(await resp.read())
                        else:
                            raise TilerError(
                                f"Error downloading tile: {url}", resp=resp
                            )

        try:
            return await with_backoff_async(
                _f,
                is_throttle=lambda e: isinstance(e, TilerError),
                strategy=BackoffStrategy(waits=[0.2, 0.5, 0.75, 1, 2]),
            )
        except Exception:
            logger.warning(f"Tile request failed with backoff: {url}")
            # Gray placeholder keeps the mosaic assemblable despite failures.
            img_bytes = Image.new("RGB", (self.tile_size, self.tile_size), "gray")
            empty = io.BytesIO()
            img_bytes.save(empty, format="png")
            return empty

    async def get_mosaic(self, tiles: List[Tile]) -> PILRaster:
        """Download all tiles concurrently and paste them into one image.

        Tiles are assumed to arrive column by column (the paste cursor moves
        down each column, then advances one column) — TODO confirm this
        matches the ordering produced by the caller.
        """
        tasks: List[asyncio.Future[io.BytesIO]] = []
        for tile in tiles:
            url = self.get_tile_url(tile.z, tile.x, tile.y)
            print(f"Downloading {url}")
            tasks.append(asyncio.ensure_future(self._get_tile(url)))
        tile_images: List[io.BytesIO] = list(await asyncio.gather(*tasks))

        tileset_dimensions = get_tileset_dimensions(tiles, self.tile_size)
        mosaic = Image.new(
            "RGBA", (tileset_dimensions.total_cols, tileset_dimensions.total_rows)
        )

        x = 0
        y = 0
        for i, img in enumerate(tile_images):
            tile = Image.open(img)
            mosaic.paste(tile, (x * self.tile_size, y * self.tile_size))
            # Increment the row/col position for subsequent tiles
            if (i + 1) % tileset_dimensions.tile_rows == 0:
                y = 0
                x += 1
            else:
                y += 1

        raster_extent = RasterExtent(
            bbox=Bbox.from_tiles(tiles),
            cols=tileset_dimensions.total_cols,
            rows=tileset_dimensions.total_rows,
        )
        return PILRaster(raster_extent, mosaic)
async def get_tile_set(
    cql: Dict[str, Any],
    render_options: RenderOptions,
    settings: BaseExporterSettings,
    format: ExportFormats = ExportFormats.PNG,
    data_api_url_override: Optional[str] = None,
) -> Union[PILTileSet, GDALTileSet]:
    """Gets a tile set for the given CQL query and render options."""
    # PNG exports can be mosaicked with PIL; other formats fall back to GDAL.
    if format == ExportFormats.PNG:
        return await PILTileSet.create(
            cql, render_options, settings, data_api_url_override
        )
    else:
        return await GDALTileSet.create(
            cql, render_options, settings, data_api_url_override
        )
| microsoft/planetary-computer-apis | pcfuncs/funclib/tiles.py | tiles.py | py | 7,469 | python | en | code | 88 | github-code | 36 |
28520992117 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
from variable_functions import my_attribute_label
from opus_core.logger import logger
class number_of_jobs(Variable):
    """Number of jobs for a given gridcell."""

    _return_type = "int32"

    def dependencies(self):
        return [attribute_label("job", "grid_id"),
                my_attribute_label("grid_id")]

    def compute(self, dataset_pool):
        # Count jobs per gridcell by summing a constant 1 over matching grid_ids.
        jobs = dataset_pool.get_dataset('job')
        return self.get_dataset().sum_dataset_over_ids(jobs, constant=1)

    def post_check(self, values, dataset_pool):
        # Each gridcell's count must lie between 0 and the total number of jobs.
        size = dataset_pool.get_dataset('job').size()
        self.do_check("x >= 0 and x <= " + str(size), values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        gridcell_grid_id = array([1, 2, 3])
        # 4 jobs; the 1st job's grid_id = 2 (it's in gridcell 2), etc.
        job_grid_id = array([2, 1, 3, 1])

        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "gridcell": {
                    "grid_id": gridcell_grid_id
                },
                "job": {
                    "job_id": array([1, 2, 3, 4]),
                    "grid_id": job_grid_id
                }
            }
        )

        # Gridcell 1 holds 2 jobs, gridcells 2 and 3 hold 1 each.
        should_be = array([2, 1, 1])
        tester.test_is_close_for_variable_defined_by_this_module(self, should_be)


if __name__ == '__main__':
    opus_unittest.main()
16155659298 | import json
import logging
import os
import re
from pathlib import Path
from cdi_forms.forms import BackgroundForm, BackpageBackgroundForm
from cdi_forms.models import BackgroundInfo, Instrument_Forms, Zipcode
from django.conf import settings
from django.http import Http404
from django.utils import translation
from researcher_UI.models import administration_data, Instrument
from researcher_UI.models import Administration
logger = logging.getLogger("debug")
PROJECT_ROOT = str(
Path(os.path.dirname(__file__)).parent.absolute()
) # Declare root folder for project and files. Varies between Mac and Linux installations.
# This function is not written properly...
def language_map(language):
    """Map a human-readable language name (e.g. "English (American)") to its
    code in settings.LANGUAGES. Parenthesized qualifiers are stripped first."""
    with translation.override("en"):
        available_langs = dict(settings.LANGUAGES)
        trimmed_lang = re.sub(r"(\s+)?\([^)]*\)", "", language).strip()
        lang_code = None
        # Fix: the loop variable previously shadowed the `language` parameter.
        for code, lang_name in available_langs.items():
            if lang_name == trimmed_lang:
                lang_code = code
    assert lang_code, (
        "'%s' not available in language mapping function (language_map, cdi_forms/views.py)"
        % trimmed_lang
    )
    return lang_code
def has_backpage(filename):
    """Return 1 if the JSON page-list file at `filename` contains a page
    named 'back', else 0 (also 0 when the file does not exist)."""
    back_page = 0
    if os.path.isfile(filename):
        # Use a context manager so the file handle is not leaked.
        with open(filename, encoding="utf-8") as f:
            pages = json.load(f)
        for page in pages:
            if page["page"] == "back":
                back_page = 1
    return back_page
# Map name of instrument model to its string title
def model_map(name):
    """Return the Instrument_Forms items for the instrument called `name`,
    ordered by item_order. Asserts the instrument exists and has items."""
    assert Instrument.objects.filter(name=name).exists(), (
        "%s is not registered as a valid instrument" % name
    )
    instrument_obj = Instrument.objects.get(name=name)
    cdi_items = Instrument_Forms.objects.filter(instrument=instrument_obj).order_by(
        "item_order"
    )
    assert cdi_items.count() > 0, (
        "Could not find any CDI items registered with this instrument: %s" % name
    )
    return cdi_items
# Prepare items with prefilled reponses for later rendering. Dependent on cdi_items
def prefilled_cdi_data(administration_instance):
    """Build the template context for rendering a CDI form, pre-filling any
    answers already stored (or copied from the subject's latest completed
    administration when the study's prefill policy allows it)."""
    # Grab a list of prefilled responses
    prefilled_data_list = administration_data.objects.filter(
        administration=administration_instance
    ).values("item_ID", "value")
    instrument_name = administration_instance.study.instrument.name
    # Grab appropriate model given the instrument name associated with test
    instrument_model = model_map(instrument_name)

    # For repeat administrations with no answers yet, seed word answers from
    # the subject's most recent completed administration.
    if (
        not prefilled_data_list
        and administration_instance.repeat_num > 1
        and administration_instance.study.prefilled_data >= 2
    ):
        word_items = instrument_model.filter(item_type="word").values_list(
            "itemID", flat=True
        )
        old_admins = Administration.objects.filter(
            study=administration_instance.study,
            subject_id=administration_instance.subject_id,
            completed=True,
        )
        if old_admins:
            old_admin = old_admins.latest("last_modified")
            old_admin_data = administration_data.objects.filter(
                administration=old_admin, item_ID__in=word_items
            ).values("item_ID", "value")
            new_data_objs = [
                administration_data(
                    administration=administration_instance,
                    item_ID=admin_data_obj["item_ID"],
                    value=admin_data_obj["value"],
                )
                for admin_data_obj in old_admin_data
            ]
            administration_data.objects.bulk_create(new_data_objs)
            prefilled_data_list = administration_data.objects.filter(
                administration=administration_instance
            ).values("item_ID", "value")

    # Store prefilled data in a dictionary keyed by item_ID.
    prefilled_data = {x["item_ID"]: x["value"] for x in prefilled_data_list}

    # Open associated json file with section ordering and nesting
    with open(
        PROJECT_ROOT + "/form_data/" + instrument_name + "_meta.json",
        "r",
        encoding="utf-8",
    ) as content_file:
        data = json.loads(content_file.read())

    # Store additional variables regarding the instrument, study, and administration
    data["object"] = administration_instance
    data["title"] = administration_instance.study.instrument.verbose_name
    instrument_name = data[
        "instrument_name"
    ] = administration_instance.study.instrument.name
    data["completed"] = administration_instance.completed
    data["due_date"] = administration_instance.due_date.strftime(
        "%b %d, %Y, %I:%M %p"
    )
    data["page_number"] = administration_instance.page_number
    data["hash_id"] = administration_instance.url_hash
    data["study_waiver"] = administration_instance.study.waiver
    data["confirm_completion"] = administration_instance.study.confirm_completion
    try:
        data["back_page"] = has_backpage(
            PROJECT_ROOT + administration_instance.study.demographic.path
        )
    except Exception:  # no demographic file configured for this study
        data["back_page"] = 0

    raw_objects = []
    field_values = [
        "itemID",
        "item",
        "item_type",
        "category",
        "definition",
        "choices__choice_set",
    ]
    field_values += [
        "choices__choice_set_"
        + settings.LANGUAGE_DICT[administration_instance.study.instrument.language]
    ]

    # As some items are nested on different levels, carefully parse and store
    # items for rendering.
    for part in data["parts"]:
        for item_type in part["types"]:
            if "sections" in item_type:
                for section in item_type["sections"]:
                    group_objects = instrument_model.filter(
                        category__exact=section["id"]
                    ).values(*field_values)
                    if "type" not in section:
                        section["type"] = item_type["type"]
                    x = cdi_items(
                        group_objects,
                        section["type"],
                        prefilled_data,
                        item_type["id"],
                    )
                    section["objects"] = x
                    if administration_instance.study.show_feedback:
                        raw_objects.extend(x)
                    if any(["*" in x["definition"] for x in section["objects"]]):
                        section["starred"] = "*Or the word used in your family"
            else:
                group_objects = instrument_model.filter(
                    item_type__exact=item_type["id"]
                ).values(*field_values)
                x = cdi_items(
                    group_objects,
                    item_type["type"],
                    prefilled_data,
                    item_type["id"],
                )
                item_type["objects"] = x
                if administration_instance.study.show_feedback:
                    raw_objects.extend(x)

    data["cdi_items"] = json.dumps(raw_objects)  # , cls=DjangoJSONEncoder)

    # If age is stored in the database, add it to the dictionary.
    try:
        age = BackgroundInfo.objects.values_list("age", flat=True).get(
            administration=administration_instance
        )
    except Exception:  # no BackgroundInfo row for this administration yet
        age = ""
    data["age"] = age
    return data
# Stitch section nesting in cdi_forms/form_data/*.json and instrument models together and prepare for CDI form rendering
def cdi_items(object_group, item_type, prefilled_data, item_id):
    """Stitch instrument items together with prefilled responses and
    per-item-type choice lists, ready for CDI form rendering.

    Mutates and returns `object_group` (a sequence of item dicts)."""
    for obj in object_group:
        if "textbox" in obj["item"]:
            obj["text"] = obj["definition"]
            if obj["itemID"] in prefilled_data:
                obj["prefilled_value"] = prefilled_data[obj["itemID"]]
        elif item_type == "checkbox":
            obj["prefilled_value"] = obj["itemID"] in prefilled_data
            obj["definition"] = (
                obj["definition"][0] + obj["definition"][1:]
                if obj["definition"][0].isalpha()
                else obj["definition"][0] + obj["definition"][1] + obj["definition"][2:]
            )
            obj["choices"] = obj["choices__choice_set"]
        elif item_type in ["radiobutton", "modified_checkbox"]:
            raw_split_choices = [
                i.strip() for i in obj["choices__choice_set"].split(";")
            ]
            # The translated choice set lives under a language-suffixed key.
            split_choices_translated = [
                value for key, value in obj.items() if "choice_set_" in key
            ][0].split(";")
            prefilled_values = [
                False
                if obj["itemID"] not in prefilled_data
                else x == prefilled_data[obj["itemID"]]
                for x in raw_split_choices
            ]
            obj["text"] = (
                obj["definition"][0] + obj["definition"][1:]
                if obj["definition"][0].isalpha()
                else obj["definition"][0] + obj["definition"][1] + obj["definition"][2:]
            )
            if (
                obj["definition"] is not None
                and obj["definition"].find("\\") >= 0
                and item_id in ["complexity", "pronoun_usage"]
            ):
                # Definitions like "<b>instruction</b><br />optA\optB": split
                # off the instruction, then split the options on backslash.
                instruction = re.search("<b>(.+?)</b>", obj["definition"])
                if instruction:
                    obj_choices = obj["definition"].split(
                        instruction.group(1) + "</b><br />"
                    )[1]
                else:
                    obj_choices = obj["definition"]
                split_definition = obj_choices.split("\\")
                obj["choices"] = list(
                    zip(split_definition, raw_split_choices, prefilled_values)
                )
            else:
                obj["choices"] = list(
                    zip(split_choices_translated, raw_split_choices, prefilled_values)
                )
            if obj["definition"] is not None:
                obj["text"] = (
                    obj["definition"][0] + obj["definition"][1:]
                    if obj["definition"][0].isalpha()
                    else obj["definition"][0]
                    + obj["definition"][1]
                    + obj["definition"][2:]
                )
        elif item_type == "textbox":
            if obj["itemID"] in prefilled_data:
                obj["prefilled_value"] = prefilled_data[obj["itemID"]]
    return object_group
def safe_harbor_zip_code(obj):
    """Return a de-identified location string: the state for a known
    3-digit zip prefix, the prefix plus '**' for an unknown one, or ''
    when no zip code is recorded."""
    zip_prefix = ""
    raw_zip = obj.zip_code
    if raw_zip and raw_zip != "None":
        zip_prefix = raw_zip[:3]
        # Single query instead of the previous exists() + first() pair.
        match = Zipcode.objects.filter(zip_prefix=zip_prefix).first()
        if match:
            zip_prefix = match.state
        else:
            zip_prefix = zip_prefix + "**"
    return zip_prefix
# Find the administration object for a test-taker based on their unique hash code.
def get_administration_instance(hash_id):
    """Find the Administration for a test-taker by its unique URL hash,
    raising Http404 when it cannot be found."""
    try:
        administration_instance = Administration.objects.get(url_hash=hash_id)
    except Exception:  # DoesNotExist or a malformed hash -> 404
        raise Http404("Administration not found")
    return administration_instance
# If the BackgroundInfo model was filled out before, populate BackgroundForm with responses based on administation object
def prefilled_background_form(administration_instance, front_page=True):
    """Build a BackgroundForm (front page) or BackpageBackgroundForm (back
    page) pre-populated from the administration's stored BackgroundInfo."""
    background_instance = BackgroundInfo.objects.get(
        administration=administration_instance
    )
    study = administration_instance.study
    context = {
        "language": study.instrument.language,
        "instrument": study.instrument.name,
        "min_age": study.min_age,
        "max_age": study.max_age,
        "birthweight_units": study.birth_weight_units,
        "study_obj": study,
        "study": study,
        "source_id": administration_instance.backgroundinfo.source_id,
    }
    if front_page:
        background_form = BackgroundForm(
            instance=background_instance, context=context, page="front"
        )
    else:
        background_form = BackpageBackgroundForm(
            instance=background_instance, context=context, page="back"
        )
    return background_form
| langcog/web-cdi | webcdi/cdi_forms/views/utils.py | utils.py | py | 13,035 | python | en | code | 7 | github-code | 36 |
70954832424 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains an interpreter class that takes raw grayscale data from the picarx grayscale module and maps it to a
direction value between -1 and 1. The direction value is determined with a psuedo PD controller."""
import picarx_improved
import logging
import time
import numpy as np
import logging
logging.basicConfig(format="%(asctime)s:%(message)s", level=logging.INFO, datefmt="%H:%M:%S")
logging.getLogger().setLevel(logging.DEBUG)
class Interpreter(object):
    """Interprets grayscale sensor data into a steering direction in [-1, 1]
    using a pseudo PD controller. Higher sensor readings are lighter,
    lower readings are darker."""

    def __init__(self, proportional_gain=50, derivative_gain=5, line_polarity='darker'):
        polarity_map = {'darker': 1, 'lighter': -1}
        self.line_polarity = polarity_map[line_polarity]
        # argmin locates the sensor most over a dark line; argmax a light one.
        if line_polarity == 'darker':
            self.mostly_under_func = np.argmin
        else:
            self.mostly_under_func = np.argmax
        self.p_gain = proportional_gain / 5000
        self.d_gain = derivative_gain / 500
        self.running_data = [[], [], []]  # per-sensor reading queues
        self.running_aves = [0, 0, 0]     # per-sensor moving averages
        self.deriv_vals = [0, 0, 0]       # derivative-term contributions
        self.prop_vals = [0, 0, 0]        # proportional-term contributions
        self.moving_ave_num = 2           # queue length for the moving average
        self.buffer_full = False
        self.line_centered = True

    def reset(self):
        """Clear all accumulated sensor history."""
        self.running_data = [[], [], []]
        self.running_aves = [0, 0, 0]
        self.deriv_vals = [0, 0, 0]
        self.buffer_full = False

    def get_direction(self, sensor_data):
        """Pseudo PD controller turning sensor data into a direction value
        between -1 and 1, which is returned."""
        # The sensor index the target is mostly under.
        mostly_under = self.mostly_under_func(self.running_aves)
        self.line_centered = True if mostly_under == 1 else False
        if self.buffer_full:  # A buffer is used to fill queues of data for reliable readings.
            for sensor_i in range(len(sensor_data)):
                self.running_data[sensor_i].append(sensor_data[sensor_i])
                del self.running_data[sensor_i][0]
                ave = np.average(self.running_data[sensor_i])
                # Derivative portion: steer in response to the change of values.
                # (1) If the target is mostly under an outside sensor whose values
                # are changing away from the target while the center sensor is not
                # picking it up, the line is about to be lost: steer strongly back
                # toward that side (the OPPOSITE of the usual response).
                # (2) Otherwise the readings are already changing the right way.
                change = ave - self.running_aves[sensor_i]
                self.deriv_vals[sensor_i] = change * self.d_gain * self.line_polarity * -1
                self.running_aves[sensor_i] = ave
            # negative deriv_vals are changing to be more like the target.
            # The /4 value is a hand-picked threshold.
            if (not self.line_centered and self.deriv_vals[mostly_under] > 0
                    and self.deriv_vals[1] > -self.deriv_vals[mostly_under] / 4):
                # Case 1: about to lose the line. Give the opposite response.
                self.deriv_vals.reverse()
        else:  # Buffer isn't full yet. Fill it.
            buffer_size = self._add_to_buffer(sensor_data)
            if buffer_size == self.moving_ave_num:
                self.buffer_full = True
            direction = 0
            return direction  # Return a neutral position until the buffer fills.
        # Proportional portion: readings scaled by polarity and gain ...
        self.prop_vals = [x * self.line_polarity * self.p_gain for x in self.running_aves]
        # ... shifted so the lowest value is zero, which makes the proportional
        # term robust to different lighting conditions.
        self.prop_vals = self.prop_vals - np.min(self.prop_vals)
        self.prop_vals = np.flip(self.prop_vals)  # Flip, because that is the way it works out...
        # Add the proportional and derivative terms to get a reference direction.
        raw_direction = np.add(self.prop_vals, self.deriv_vals)
        return self._transform_direction(raw_direction)

    def _transform_direction(self, direction):
        """Collapse the PD reference (3-number array) into a single number
        clamped to [-1, 1]."""
        direction[0] = direction[0] * -1
        direction[1] = 0
        direction[2] = direction[2] * 1
        direction = np.sum(direction) * self.line_polarity
        if direction < -1:
            direction = -1
        if direction > 1:
            direction = 1
        return direction

    def _add_to_buffer(self, sensor_data):
        """Append one reading per sensor, update averages and derivatives,
        and return the current buffer length."""
        for i in range(len(sensor_data)):
            self.running_data[i].append(sensor_data[i])
            ave = np.average(self.running_data[i])
            change = ave - self.running_aves[i]
            self.deriv_vals[i] = change
            self.running_aves[i] = ave
        buffer_size = len(self.running_data[0])
        return buffer_size
def test():
    """Feed a short recorded sensor trace through the interpreter and print
    the resulting direction values."""
    data = [[191, 223, 210], [181, 230, 214], [185, 224, 207], [184, 225, 211],
            [187, 224, 211], [186, 233, 205], [181, 232, 206], [190, 226, 210],
            [187, 226, 211], [182, 229, 213], [184, 229, 211], [185, 231, 210],
            [190, 227, 207], [187, 230, 210], [185, 227, 210]]
    interpreter = Interpreter()
    for row in data:
        print(interpreter.get_direction(row))
def main():
    """Run the interpreter self-test at INFO verbosity."""
    logging.getLogger().setLevel(logging.INFO)
    test()


if __name__ == '__main__':
    main()
| EverardoG/RobotSystems | lib/line_following_interpreter.py | line_following_interpreter.py | py | 6,327 | python | en | code | 0 | github-code | 36 |
43622302488 | from WebScrapping import WebScrapper
from AppSettings import AppSettings
from DataAnalyzer import DataAnalyzer
from Utils import Utils
from MongoDbContext import DBContext
import _thread
import matplotlib.pyplot as plot
def execute_program(product1: str, product2: str, sample_size: int):
# 0 clear db
DBContext().drop_collection(product1)
DBContext().drop_collection(product2)
# 1 WEB SKRAPUJEMY
drone_scrapper = WebScrapper()
gopro_scrapper = WebScrapper()
drone_values = drone_scrapper.read_product_info(product1, sample_size)
gopro_values = gopro_scrapper.read_product_info(product2, sample_size)
# 2 ZAPISUJEMY DO BAZY + Zczytujemy bo wymagane :=D :=D
cena = 'cena produktu'
ilosc_transakcji = 'ilość zamówień'
drones_jsons = Utils.convert_to_json_pair_values(drone_values, cena, ilosc_transakcji)
gopros_jsons = Utils.convert_to_json_pair_values(gopro_values, cena, ilosc_transakcji)
db_context = DBContext()
db_context.add_rows(drones_jsons, collection_name=product1)
drones_from_db = db_context.get_all(collection_name=product1)
db_context2 = DBContext()
db_context2.add_rows(gopros_jsons, collection_name=product2)
gopros_from_db = db_context2.get_all(collection_name=product2)
# 4 Uzywamy na wykresiku
drones_list = Utils.convert_jsons_to_tuples(drones_from_db, cena, ilosc_transakcji)
gopros_list = Utils.convert_jsons_to_tuples(gopros_from_db, cena, ilosc_transakcji)
DataAnalyzer.analyze_data(drones_list, cena, ilosc_transakcji, product1)
DataAnalyzer.analyze_data(gopros_list, cena, ilosc_transakcji, product2)
DataAnalyzer.run_analyzed_data_charts()
# Run the program :
product1_name = 'Drone'
product2_name = 'GoPro'
data_sample_size = 10
execute_program(product1_name, product2_name, data_sample_size)
| kgalaszewski/Python_Big_Data_webscrapping_mongodb_visualizing | Big_Data_Project_UG/app.py | app.py | py | 1,837 | python | en | code | 0 | github-code | 36 |
25371860228 | from plugins.search_engine_scraping_plugin import SearchEngineScrapingPlugin
import requests
import re
from bs4 import BeautifulSoup
class DuckDuckGoPlugin(SearchEngineScrapingPlugin):
""" This plugin implements a screen scraping for Duck Duck Go search engine"""
_url = None
def get_results(self, query):
"""
Gets the first page result from DuckDuckGo search
:param query: query for search
:type query: string
:return: results
:rtype: list of dictionaries
"""
if type(query) is not str:
raise TypeError(f"Parameter 'query' is of type {type(query)}. Must be of type string.")
query = re.sub(' +', '+', query)
self._url = 'https://duckduckgo.com/html/?q={}'.format(query)
return self._do_scraping()
def do_scraping(self):
page = requests.get(self._url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, 'html.parser')
div_results = soup.select('div .result')
results = []
for div_result in div_results:
div_result.find('div.no-results')
result = {}
result['title'] = div_result.select('h2.result__title a')[0].text
if result['title'] == 'No results.':
break
result['snippet'] = div_result.select('a.result__snippet')[0].text
result['url'] = div_result.find('a', class_='result__snippet').get('href')
results.append(result)
return results
else:
raise RuntimeError(f"Error for {query}: {page.status_code}")
| willsimoes/projeto-final | plugins/duck_duck_go_plugin.py | duck_duck_go_plugin.py | py | 1,675 | python | en | code | 0 | github-code | 36 |
33109766349 | import ctypes
import re
import os
import sys
from codegen import clear
def extract():
extracts = 0
clear()
ctypes.windll.kernel32.SetConsoleTitleW("Loveless.codes | Google Mini Extractor")
if not os.path.exists("extracting.txt"):
open("extracting.txt", mode='x').close()
input('Please paste all your codes as such "link" in extracting.txt\r\n')
clear()
sys.exit(0)
with open("extracting.txt", "r") as extract:
for link in extract:
b = re.search('.*promoCode=(.*?)$',link)
extracted = b.group(1)
print(extracted)
with open("codes.txt", "a") as saveCode:
saveCode.write(extracted + "\n")
extracts += 1
ctypes.windll.kernel32.SetConsoleTitleW("Finished! | Extracted: [" + str(extracts) + "]")
print("\nExtracted: " + str(extracts) + "\r")
print("Done \o/\r")
input("Do you want to restart?\r\n")
| pepsi/LovelessGen | modules/extractor.py | extractor.py | py | 975 | python | en | code | 0 | github-code | 36 |
18069578323 | codon = {
'UUU': 'Phe',
'UUC': 'Phe',
'uua': 'Leu',
'UUG': 'Leu',
'CUU': 'Leu',
'CUC': 'Leu',
'CUA': 'Leu',
'CUG': 'Leu',
'AUU': 'Ile',
'AUC': 'Ile',
'AUA': 'Ile',
'AUG': 'Met',
'GUU': 'Val',
'GUC': 'Val',
'GUA': 'Val',
'GUG': 'Val',
'UCU': 'Ser',
'UCC': 'Ser',
'UCA': 'Ser',
'UCG': 'Ser',
'CCU': 'Pro',
'CCC': 'Pro',
'CCA': 'Pro',
'CCG': 'Pro',
'ACU': 'Thr',
'ACC': 'Thr',
'ACA': 'Thr',
'ACG': 'Thr',
'GCU': 'Ala',
'GCC': 'Ala',
'GCA': 'Ala',
'GCG': 'Ala',
'UAU': 'Tyr',
'UAC': 'Tyr',
'UAA': 'STOP',
'UAG': 'STOP',
'CAU': 'His',
'CAC': 'His',
'CAA': 'Gln',
'CAG': 'Gln',
'AAU': 'Asn',
'AAC': 'Asn',
'AAA': 'Lys',
'AAG': 'Lys',
'GAU': 'Asp',
'GAC': 'Asp',
'GAA': 'Glu',
'GAG': 'Glu',
'UGU': 'Cys',
'UGC': 'Cys',
'UGA': 'STOP',
'UGG': 'Trp',
'CGU': 'Arg',
'CGC': 'Arg',
'CGA': 'Arg',
'CGG': 'Arg',
'AGU': 'Ser',
'AGC': 'Ser',
'AGA': 'Arg',
'AGG': 'Arg',
'GGU': 'Gly',
'GGC': 'Gly',
'GGA': 'Gly',
'GGG': 'Gly',
'UUA':'STOP',
'UAG' :'STOP',
}
#=================================================
list = ['C', 'T', 'G', 'A']
file = open(r'D:\Genatics\P53_seq.txt', 'r')
seq=file.read()
file.close()
seq=seq.replace('\n',"")
#==================================================
def dna_compliment (seq):
seq2=""
for i in seq:
if i=='C':
seq2+='G'
if i=='A':
seq2+='T'
if i=='T':
seq2+='A'
if i=='G':
seq2+='C'
return seq2
# =========================================================
seqrna = dna_compliment(seq).replace('T', 'U')
print(seqrna)
# ========================================================
def listing_rna (seqrna):
triple=[]
for i in range(0,len(seqrna),3):
x=""
x=seqrna[i:i+3]
if x=='UGA'or x=='UAG'or x=='UAA':
break
triple.append(x)
return triple
#==========================================================
triple1 =listing_rna(seqrna)
#==========================================================
def getamino (triple1):
listamino=[]
for i in triple1:
listamino .append([i])
return listamino
getamino (triple1)
#==========================================================
def check_mutation(str1,str2):
if str1==str2:
print("identical")
else :
list1=listing_rna(str1)
list2=listing_rna(str2)
for i in range(0,len(list1)):
if list1[i]!=list2[i]:
print ("diff codon ")
print (i+1)
print("Normal " )
print (list1[i])
print (codon[list1[i]])
print ("abNormal" )
print ( list2[i] )
print (codon[list2[i]])
#============================================================
str1="ATGGTGCACCTGACTCCTGAGGAGAAGTCTGCCGTTACT"
str2="ATGGTGCACCTGACTCCTGTGGAGAAGTCTGCCGTTACT"
str1=str1.replace('T','U')
str2=str2.replace('T','U')
check_mutation(str1,str2)
seq2=dna_compliment(seq)
#print(seq2)
#validation(seq)
| omniakhaled123/Genetics_mutation | main.py | main.py | py | 3,277 | python | en | code | 0 | github-code | 36 |
27520193757 | from nltk.util import ngrams
import nltk
f1=open('wsw.txt', 'r')
f2=open('posonly.txt', 'w')
f3=open('negonly.txt', 'w')
for line in f1:
if line.split(None, 1)[0] == '+':
s=line.split(' ',1)[1]
f2.write(s)
f1.close()
f1=open('wsw.txt', 'r')
for line in f1:
if line.split(None, 1)[0] == '-':
s=line.split(' ',1)[1]
f3.write(s)
f2.close()
f3.close()
f2=open('posonly.txt', 'r')
f3=open('negonly.txt', 'r')
f4=open('posonly1.txt', 'w')
f5=open('negonly1.txt', 'w')
bgs=[]
for line in f2:
sentence = line
n = 2
bigrams = ngrams(sentence.split(), n)
bgs.extend(bigrams)
fdist=nltk.FreqDist(bgs)
for i,j in fdist.items():
if j>=2:
f4.write(i[0])
f4.write(" ")
f4.write(i[1])
f4.write(" ")
f4.write(str(j))
f4.write("\n")
bgs=[]
for line in f3:
sentence = line
n = 2
bigrams = ngrams(sentence.split(), n)
bgs.extend(bigrams)
fdist=nltk.FreqDist(bgs)
for i,j in fdist.items():
if j>=2:
f5.write(i[0])
f5.write(" ")
f5.write(i[1])
f5.write(" ")
f5.write(str(j))
f5.write("\n")
| koder951/NLP_Project | 6/bigram.py | bigram.py | py | 1,026 | python | en | code | 0 | github-code | 36 |
955722902 | pkgname = "python-pyasn1_modules"
pkgver = "0.3.0"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
depends = ["python-pyasn1"]
checkdepends = ["python-pyasn1"]
pkgdesc = "Python ASN.1 protocol modules"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-2-Clause"
url = "https://pyasn1.readthedocs.io/en/latest/contents.html"
source = f"$(PYPI_SITE)/p/pyasn1_modules/pyasn1_modules-{pkgver}.tar.gz"
sha256 = "5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"
def post_install(self):
self.install_license("LICENSE.txt")
| chimera-linux/cports | main/python-pyasn1_modules/template.py | template.py | py | 580 | python | en | code | 119 | github-code | 36 |
44431954825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Try Vasicek Model
"""
from __future__ import print_function, division
import seaborn as sns
from diffusions import Vasicek, VasicekParam
from diffusions.helper_functions import (plot_trajectories, plot_final_distr,
plot_realized, take_time)
def try_simulation():
mean, kappa, eta = .5, .1, .2
theta_true = VasicekParam(mean, kappa, eta)
vasicek = Vasicek(theta_true)
x0, nperiods, nsub, ndiscr, nsim = 1, 500, 2, 10, 3
nobs = nperiods * nsub
paths = vasicek.simulate(x0, nsub=nsub, ndiscr=ndiscr,
nobs=nobs, nsim=nsim)
data = paths[:, 0, 0]
plot_trajectories(data, nsub, 'returns')
def try_marginal():
mean, kappa, eta = .5, .1, .2
theta_true = VasicekParam(mean, kappa, eta)
vasicek = Vasicek(theta_true)
x0, nperiods, nsub, ndiscr, nsim = mean, 500, 2, 10, 20
nobs = nperiods * nsub
paths = vasicek.simulate(x0, nsub=nsub, ndiscr=ndiscr,
nobs=nobs, nsim=nsim)
data = paths[:, :, 0]
plot_final_distr(data, 'returns')
def try_sim_realized():
mean, kappa, eta = .5, .1, .2
theta_true = VasicekParam(mean, kappa, eta)
vasicek = Vasicek(theta_true)
start, nperiods, nsub, ndiscr, nsim = 1, 500, 80, 1, 1
aggh = 10
returns, rvar = vasicek.sim_realized(start, nsub=nsub,
ndiscr=ndiscr, aggh=aggh,
nperiods=nperiods, nsim=nsim, diff=0)
plot_realized(returns, rvar)
if __name__ == '__main__':
sns.set_context('notebook')
with take_time('Marginal density'):
try_marginal()
with take_time('Simulation'):
try_simulation()
with take_time('Simulate realized'):
try_sim_realized()
| khrapovs/diffusions | examples/try_vasicek.py | try_vasicek.py | py | 1,849 | python | en | code | 1 | github-code | 36 |
2871941645 | ## Conditional statements
num=3
if num > 0:
print(num,' is positive')
num=-1
if num > 0 :
print(num,' is positive')
## Program checks if the numbers is positive or negative
# and displays an appropreate message
num=5
if num >= 0 :
print(num,' is positive')
else:
print(num,' is negative')
## check if number is positive, negative or zero
#num=2
#num=0
num=-3
if num > 0:
print(num,' is positive')
elif num==0:
print(num,' is zero')
else:
print(num,' is negative')
# The symbol "%" in python is called the 'Modulo'. It returns the remainder
#2%2=0
#3%2=1
## Nested if else
n=5
if n >= 0:
if n==0:
print('number is zero')
else:
print('number is positive')
else:
print('number is negative')
##########################################################################
#1.check if number is positive, if yes then add 5 to it and print the output
num=3
if num > 0:
print(num,' is positive ')
print(num+5)
#2.check if number is negative number if yes multiply it by 10 print the output
num=-3
if num < 0:
print(num,' is negitive ')
print(num*10)
#3.if number is positive add 10 to that number if it is negative multiply by 10
#and print the output accordingly
#num=3
num=-3
if num > 0:
print(num,' is positive')
print(num+5)
else:
print(num*10)
#4.If number is multiple of 2 print a message
num=8
if num%2 == 0:
print ('no is multiple')
#5.now check if number is multiple of 3
#if yes add 100 to the number
#if not then return its square
num=10
if num%3 == 0:
print ('no is multiple')
print(num+100)
else:
print(num*num)
#6.write a program such that if input is positive number
# output is same positive number, if input is negative number still output
# is positive number Take input from user
var1=input('whats your name:')
var2 = float(input("Enter a number: "))
print(' hi ',var1,'\nwelcome to metaverse','\nyour No is ',var2)
num=var2
if num >= 0:
print(num)
else:
print('output is',-num)
#7.take a sides of quadrilateral from user and check if it is a square or not
var1 = float(input("Enter the side of quad: "))
var2 = float(input("Enter the side of quad: "))
var3 = float(input("Enter the side of quad: "))
var4 = float(input("Enter the side of quad: "))
if var1==var2==var3==var4:
print ('quad is square')
else:
print('quad is not a square')
#8.in the above question find area if it is a square
var1 = float(input("Enter the side of quad: "))
var2 = float(input("Enter the side of quad: "))
var3 = float(input("Enter the side of quad: "))
var4 = float(input("Enter the side of quad: "))
if var1==var2==var3==var4:
print ('quad is square')
area=var1*var1
print('area is ',area, 'mm2')
#9.Take positive Radius of circle and find its area. Message if negative
var1 = float(input("enter radius: "))
if var1>0:
area=3.14*var1*var1
print('area of circleis', area)
else:
print(var1,' is negitive ')
| amolmahajan8055/Python-Flies- | L7_Conditional statements.py | L7_Conditional statements.py | py | 3,145 | python | en | code | 0 | github-code | 36 |
7165456030 | def count_primes(number: int) -> int:
if number < 2:
return 0
primes = [True] * number
primes[0] = primes[1] = False
for i in range(2, int(number ** 0.5) + 1):
if primes[i]:
for j in range(i*i, number, i):
primes[j] = False
return sum(primes)
print(count_primes(10))# # returns 4 - there are 4 prime numbers less than 10, they are 2, 3, 5, 7.
print(count_primes(0))# # returns 0
print(count_primes(1))# # returns 0
| SafonovVladimir/mornings | 03 march/07/1.py | 1.py | py | 484 | python | en | code | 0 | github-code | 36 |
36384158019 | """
Author: Kevin Owens
Date: 11 May 2014
Class: ArgParser
Problem description summary (from TopCoder Tournament Inv 2001 Semi C+D 500): Implement a class ArgParser with a method parse(string) that splits
a single string of arguments into a list of string arguments; e.g., '{a, b, c}' -> ['a', 'b', 'c']. The input string
must conform to the following rules:
begins with '{', ends with '}'
can't have internal curly braces without a preceding escape char \
commas delineate arguments
escaped commas do not delineate
delineating commas must be followed by a space
"""
class ArgParser:
def parse(self, arg):
# must begin with { and end with }
if len(arg) < 2 or arg[0] != '{' or arg[-1] != '}':
return 'INVALID'
# discard outer {}
arg = arg[1:-1]
# can't have internal {} without escape char
if len(arg.split('\{')) != len(arg.split('{')) or len(arg.split('\}')) != len(arg.split('}')):
return 'INVALID'
arg = arg.replace('\{', '{')
arg = arg.replace('\}', '}')
# all non-escaped commas have trailing space
arg = arg.replace('\,', ';') # replace escaped commas with a token
if arg.replace(', ', '#').find(',') != -1:
return 'INVALID'
# break out individual args
args = arg.split(', ')
# restore tokenized commas
return [arg.replace(';',',') for arg in args]
if __name__ == '__main__':
examples = '{a, b, c}', '{a\,b, c}', '{, , a, }', r'{\\, \,\, }', '{\ , \,, }', '{}', \
'{a, b, c', '{a, {b, c}', '{a,b,c}'
parser = ArgParser()
for example in examples:
print(example, ':', parser.parse(example)) | knaught/TopCoder | ArgParser.py | ArgParser.py | py | 1,726 | python | en | code | 0 | github-code | 36 |
30292556243 | from mesa import Agent
from src.agents import Environment, Food
from src.utils import calculate_distance, get_item, random_move
FORAGING = 'PROCURANDO'
HOMING = 'VOLTANDO'
class ForagingAnt(Agent):
def __init__(self, current_id, model, pos, color):
super().__init__(current_id, model)
self.state = FORAGING
self.home = pos
self.pos = pos
self.age = self.model.ant_max_age + self.random.randrange(75, 200)
self.color = color
self.with_food = False
self.go_home = self.random.randrange(100, 200)
def step(self):
if self.age <= 0:
food = Food(
self.model.next_id(),
self.model, self.pos,
self.model.food_group
)
self.model.register(food)
self.model.kill_agents.append(self)
return
food = get_item(self, Food)
# Procurando comida
if self.state == FORAGING:
# Não encontrou comida
if not food:
self.go_home -= 1
# Se o mãximo de exploração foi atingido, volta pra casa
if self.go_home <= 0:
self.home_move()
self.state = HOMING
# Randomiza e calcula a chance de seguir pelo feromônio
elif self.random.random() > self.model.random_change_to_move:
self.food_move()
# Se não, movimento aleatório
else:
random_move(self)
# Achou comida, volta pra casa com ela
else:
food.eat()
self.age *= (1 + (self.model.ant_age_gain / 100))
e = get_item(self, Environment)
e.food_smell = 0
self.with_food = True
self.state = HOMING
# Voltando para casa
elif self.state == HOMING:
# Enquanto não estiver em casa
if self.pos != self.home:
e = get_item(self, Environment)
# Se estiver carregando comida, deposita feromônio
if self.with_food:
e.deposit_pheromone()
self.home_move()
# Se não, tiver comida e achar um caminho ou comida, faz o movimento de comida
elif food or e.pheromone > 0 or e.food_smell:
self.state = FORAGING
self.food_move()
# Se não, só volta pra casa
else:
self.home_move()
# Estando em casa, volta a procurar comida
else:
self.go_home = self.random.randrange(100, 200)
self.with_food = False
self.state = FORAGING
self.age -= 1
# Procura caminhos para voltar pra casa, para não ser ideal usa
# o segundo melhor caminho encontrado. Se o melhor caminho for
# a sua casa, usa ele
def home_move(self):
possible_home = [
(calculate_distance(agent.pos, self.home), agent.pos)
for agent in self.model.grid.get_neighbors(self.pos, True)
if type(agent) is Environment
]
possible_home.sort(key=(lambda i: i[0]))
if possible_home[0][1] == self.home:
self.model.grid.move_agent(self, possible_home[0][1])
else:
self.model.grid.move_agent(self, possible_home[1][1])
# Procura feromônios
def food_move(self):
food_smells = []
food_points = []
possible_food = []
neighbors = self.model.grid.get_neighbors(self.pos, True)
for agent in neighbors:
# Salva se o vizinho for um ponto de comida
if type(agent) is Food:
food_points.append(agent.pos)
else:
# Se não, só salva se tiver feromônio
if type(agent) is Environment and agent.pheromone > 0:
possible_food.append((agent.pheromone, agent.pos))
# Se não, só salva se tiver cheiro de comida
if type(agent) is Environment and agent.food_smell > 0:
food_smells.append((agent.food_smell, agent.pos))
# Se tiver encontrado comida, randomiza e usa um desses pontos
if food_points:
self.model.grid.move_agent(self, food_points[0])
# Se não tiver encontrado nem comida, nem feromônio e nem cheiro movimenta aleatoriamente
elif not possible_food and not food_smells:
random_move(self)
# Se tiver encontrado cheiro de comida, segue pelo cheiro
elif not possible_food:
food_smells = max(
food_smells,
key=(lambda i: i[0])
)
self.model.grid.move_agent(self, food_smells[1])
# Se não, usa o caminho com a menor quantidade de feromônio e que
# se encontra mais distante de casa.
else:
possible_food = min(
possible_food,
key=(lambda i: i[0] - (10 * calculate_distance(i[1], self.home)))
)
if possible_food[0] > self.model.min_pheromone_needed:
self.model.grid.move_agent(self, possible_food[1])
else:
random_move(self)
| UnBParadigmas2022-1/2022.1_G3_SMA_Formigueiro | src/agents/foragingAgent.py | foragingAgent.py | py | 5,369 | python | pt | code | 6 | github-code | 36 |
22517487941 | from datetime import timedelta
from django.utils import timezone
from django.contrib.auth.models import User
import pytest
from typing import List
from .. import models
EVENT_NAME = "football with friends"
MAX_PART = 20
IS_PRIVATE = False
DATETIME = timezone.now() + timedelta(days=2)
POLL_END_TIME = timezone.now()
POLL_MAX_SUGGESTIONS = 3
@pytest.fixture
def category1():
category = models.Category(name="test1")
category.save()
return category
@pytest.fixture
def location1():
location = models.Location(
name="test1", city="test1", street="test1", street_number=1, indoor=False, description="test1"
)
location.save()
return location
@pytest.fixture
def category_location1(category1, location1):
cat_loc = models.CategoryLocation(category=category1, location=location1)
cat_loc.save()
return cat_loc
@pytest.fixture
def user1():
user = User.objects.create_user(username='test', password='test', email='myemail@example.com')
profile = models.Profile(
user=user, date_of_birth=timezone.now(), phone_number="test", image='default.jpg'
)
profile.save()
return profile
@pytest.fixture
def user2():
user = User.objects.create_user(username='test1', password='test1', email='myemail1@example.com')
profile = models.Profile(
user=user, date_of_birth=timezone.now(), phone_number="test1", image='default.jpg'
)
profile.save()
return profile
@pytest.fixture
def poll1():
poll = models.Poll(max_suggestions=POLL_MAX_SUGGESTIONS, end_time=POLL_END_TIME)
poll.save()
return poll
@pytest.fixture
def event1(category_location1, poll1):
new_event = models.Event(
category=category_location1.category,
location=category_location1.location,
poll=poll1,
name=EVENT_NAME,
max_participants=MAX_PART,
start_time=DATETIME,
end_time=DATETIME + timedelta(hours=3),
is_private=IS_PRIVATE,
)
new_event.save()
return new_event
@pytest.fixture
def validate_event1(category_location1, user1):
event_id = models.Event.manager.create_event(
category_id=category_location1.category.id,
location_id=category_location1.location.id,
name=EVENT_NAME,
max_participants=MAX_PART,
start_time=DATETIME,
end_time=DATETIME + timedelta(hours=3),
is_private=IS_PRIVATE,
poll_end_time=POLL_END_TIME,
poll_suggestions=POLL_MAX_SUGGESTIONS,
user_id=user1.id,
)
event = models.Event.manager.get(id=event_id)
return event
@pytest.fixture(scope="session")
def categories1(django_db_blocker) -> List[models.Category]:
with django_db_blocker.unblock():
query_set = models.Category.objects.all()
query_set = list(query_set)[:5]
return query_set
@pytest.fixture(scope="session")
def locations1(django_db_blocker) -> List[models.Location]:
with django_db_blocker.unblock():
names = ['Sportech', 'Sami offer stadium', 'Terner stadium', 'Tedi Stadium', 'Bluemfield Stadium']
cities = ['Tel Aviv', 'Haifa', 'Beer Sheva', 'Jerusalem', 'Yaffo']
locations_lst = []
for location_name, city in zip(names, cities):
new_location = models.Location(
name=location_name, city=city, street='test', street_number=1, indoor=False, description='test'
)
new_location.save()
locations_lst.append(new_location)
return locations_lst
@pytest.fixture(scope="session")
def categories_locations1(django_db_blocker, categories1, locations1) -> List[models.CategoryLocation]:
with django_db_blocker.unblock():
categories_locations = []
for category in categories1:
for location in locations1:
cat_loc = models.CategoryLocation(category=category, location=location)
cat_loc.save()
categories_locations.append(cat_loc)
return categories_locations
@pytest.fixture(scope="session")
def users1(django_db_blocker) -> List[models.Profile]:
with django_db_blocker.unblock():
users = []
for i in range(25):
user = User.objects.create_user(username=f'user{i}', password=f'password{i}', email=f'user{i}@example.com')
profile = models.Profile(
user=user, date_of_birth=timezone.now(), phone_number=f"user{i}", image='default.jpg'
)
profile.save()
users.append(profile)
return users
@pytest.fixture(scope="session")
def time_samples(django_db_blocker):
with django_db_blocker.unblock():
current_time = timezone.now()
lst = [
current_time + timedelta(days=1),
current_time + timedelta(days=3),
current_time + timedelta(weeks=1),
current_time + timedelta(days=3, weeks=1),
current_time + timedelta(weeks=3),
]
return lst
@pytest.fixture(scope="session")
def event_data_set(django_db_blocker, categories_locations1, users1, time_samples):
with django_db_blocker.unblock():
for index, user in enumerate(users1):
cat_loc = categories_locations1.pop()
start_time = time_samples[index % len(time_samples)]
end_time = start_time + timedelta(hours=3)
poll_end_time = start_time + timedelta(days=-1)
models.Event.manager.create_event(
category_id=cat_loc.category.id,
location_id=cat_loc.location.id,
max_participants=2 * index + 2,
name=f'test event {index}',
start_time=start_time,
end_time=end_time,
is_private=index > 15,
poll_end_time=poll_end_time,
poll_suggestions=3,
user_id=user.id,
)
@pytest.fixture
def base_url(db, user1):
return f'/{user1.id}/event/'
@pytest.fixture
def create_url(base_url):
return base_url + 'create/'
@pytest.fixture
def first_event_info_url(base_url):
event = models.Event.manager.first()
return f'{base_url}info/?id={event.id}'
@pytest.fixture
def create_event_form_data1(category_location1):
return {
'name': EVENT_NAME,
'category': category_location1.category.id,
'location': category_location1.location.id,
'max_participants': MAX_PART,
'start_time': DATETIME + timedelta(days=2),
'end_time': DATETIME + timedelta(days=2, hours=3),
'poll_end_time': DATETIME + timedelta(days=1),
'poll_max_suggestions': POLL_MAX_SUGGESTIONS,
'is_private': IS_PRIVATE,
}
| redhat-beyond/FitMeet | event/tests/conftest.py | conftest.py | py | 6,663 | python | en | code | 0 | github-code | 36 |
42544034746 | #!/usr/bin/env python3
import argparse
import csv
import os
import pickle
import shutil
import time
from concurrent import futures
import grpc
from pysrbup.backup_system_pb2 import (Block, DeleteBackupResponse,
GetBackupResponse, GetBlocksResponse,
GetMissingCodesResponse,
ListBackupsResponse, PushBlocksResponse,
Row, UpdateDictResponse,
UploadBackupResponse)
from pysrbup.backup_system_pb2_grpc import add_BackupServicer_to_server
class BackupServicer():
def __init__(self, backups_dir, dictionary_file):
self.backups_dir = backups_dir
self.meta_file = os.path.join(self.backups_dir, 'meta.csv')
self.dictionary_file = dictionary_file
with open(dictionary_file, 'rb') as f:
self.dictionary = pickle.load(f)
def UploadBackup(self, request, context):
# pylint: disable=invalid-name,unused-argument
curr_backup_dir = os.path.join(self.backups_dir, request.id)
os.mkdir(curr_backup_dir)
backup_file = os.path.join(curr_backup_dir, 'data.bin')
with open(backup_file, 'wb') as f:
f.write(request.data)
with open(self.meta_file, 'a') as f:
writer = csv.writer(f)
writer.writerow([request.id, time.asctime(time.gmtime())])
return UploadBackupResponse()
def GetMissingCodes(self, request, context):
# pylint: disable=invalid-name,unused-argument
missing_codes = []
for code in request.codes:
if code not in self.dictionary:
missing_codes.append(code)
else:
self.dictionary[code][1] += 1
return GetMissingCodesResponse(codes=missing_codes)
def PushBlocks(self, request, context):
# pylint: disable=invalid-name,unused-argument
for block in request.blocks:
self.dictionary[block.code] = [block.data, 1]
with open(self.dictionary_file, 'wb') as f:
pickle.dump(self.dictionary, f)
return PushBlocksResponse()
def GetBackup(self, request, context):
# pylint: disable=invalid-name,unused-argument
if not request.id in os.listdir(self.backups_dir):
return GetBackupResponse()
file_to_restore = os.path.join(self.backups_dir, request.id, 'data.bin')
with open(file_to_restore, 'rb') as f:
data = f.read()
return GetBackupResponse(data=data)
# pylint: disable=invalid-name,unused-argument
def GetBlocks(self, request, context):
blocks = []
for code in request.codes:
block = Block(code=code, data=self.dictionary[code][0])
blocks.append(block)
return GetBlocksResponse(blocks=blocks)
def DeleteBackup(self, request, context):
# pylint: disable=invalid-name,unused-argument
if request.id not in os.listdir(self.backups_dir):
return GetBackupResponse()
backup_dir_to_delete = os.path.join(self.backups_dir, request.id)
backup_file = os.path.join(backup_dir_to_delete, 'data.bin')
with open(backup_file, 'rb') as f:
data = f.read()
with open(self.meta_file, 'r') as infile:
rows = []
for row in csv.reader(infile):
if row and row[0] != request.id:
rows.append(row)
with open(self.meta_file, 'w') as outfile:
writer = csv.writer(outfile)
for row in rows:
writer.writerow(row)
shutil.rmtree(backup_dir_to_delete)
return DeleteBackupResponse(data=data)
def UpdateDict(self, request, context):
# pylint: disable=invalid-name,unused-argument
for code in request.codes:
if self.dictionary[code][1] == 1:
del self.dictionary[code]
else:
self.dictionary[code][1] -= 1
with open(self.dictionary_file, 'wb') as d:
pickle.dump(self.dictionary, d)
return UpdateDictResponse()
def ListBackups(self, request, context):
# pylint: disable=invalid-name,unused-argument
with open(self.meta_file, 'r') as mf:
rows = []
count = 0
for row in csv.reader(mf):
if row and count > 0:
rows.append(Row(col=row))
count += 1
return ListBackupsResponse(rows=rows)
def create_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--server-address', default='localhost:50000')
parser.add_argument('--num-threads', default=3)
parser.add_argument('backups_dir')
return parser
def create_dictionary(root_path):
dictionary_file = os.path.join(root_path, 'dictionary')
with open(dictionary_file, 'wb') as f:
pickle.dump({}, f)
return dictionary_file
def create_meta_file(root_path):
meta_file = os.path.join(root_path, 'meta.csv')
with open(meta_file, 'a') as f:
writer = csv.writer(f)
writer.writerow(['id', 'creation_time'])
def main():
args = create_args_parser().parse_args()
if 'dictionary' not in os.listdir(args.backups_dir):
dictionary_file = create_dictionary(args.backups_dir)
else:
dictionary_file = os.path.join(args.backups_dir, 'dictionary')
if 'meta.csv' not in os.listdir(args.backups_dir):
create_meta_file(args.backups_dir)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=args.num_threads))
add_BackupServicer_to_server(
BackupServicer(args.backups_dir, dictionary_file), server)
server.add_insecure_port(args.server_address)
server.start()
server.wait_for_termination()
if __name__ == '__main__':
main()
| dorchul/pysrbup | pysrbup/server.py | server.py | py | 5,900 | python | en | code | 0 | github-code | 36 |
26910125965 | #!/usr/bin/python3
import sys
import re
import string
import xml.etree.ElementTree as ET
import getopt
import math
import codecs
import time
import png
# global variables
# --- command-line configuration (filled in by processParameters) ---
inputFile = ""        # path to the input XML log (-i, mandatory)
outputFile = ""       # path to the output XML file (-o, defaults to "out.xml")
parseAll = True       # set to False when a -f/-t time restriction is given
timeFrom = ""         # start of the restrictive time interval, "hh:mm:ss:000" after parsing
timeTo = ""           # end of the restrictive time interval, "hh:mm:ss:000" after parsing
objectNames = ""      # comma-separated object-name filter (-n)
eventTypes = ""       # comma-separated event-type filter (-e)
actionNames = ""      # comma-separated action filter (-a), mouse/wheel events only
objectClasses = ""    # comma-separated object-class filter (-c)
pathSubstr = ""       # substring searched in object paths (-p)
textSubstr = ""       # substring searched in object texts (-s)
timeFromMs = 0        # timeFrom converted to milliseconds (filled in elsewhere)
timeToMs = 0          # timeTo converted to milliseconds (filled in elsewhere)
# --- XML document state (presumably set when the input file is parsed) ---
tree = 0              # xml.etree ElementTree of the parsed log — confirm where assigned
root = 0              # root element of the tree — confirm where assigned
windowWidth = 0;      # NOTE(review): presumably observed window width in pixels — confirm
windowHeight = 0;     # NOTE(review): presumably observed window height in pixels — confirm
# --- event statistics accumulated while parsing events ---
clickCnt = 0          # total single clicks
dblClickCnt = 0       # total double clicks
leftDblClickCnt = 0
rightDblClickCnt = 0
middleDblClickCnt = 0
leftClickCnt = 0
rightClickCnt = 0
middleClickCnt = 0
dragLength = 0        # total drag distance (per-button variants below)
leftDragLength = 0
rightDragLength = 0
middleDragLength = 0
mat = 0               # NOTE(review): presumably the pixel matrix for the png output — confirm
# Print help
# NOTE(review): the name shadows the builtin help(); kept unchanged because
# existing callers (processParameters) rely on it.
def help():
    """Print command-line usage information for all supported options."""
    # The synopsis line now also lists -c/-p/-s, which were documented
    # below but missing from the usage line.
    print("\nlogProcessing.py -i <inputFilePath/inputFileName.xml> [-o <outputFilePath/outputFileName.xml>] [-f <hh:mm:ss>] [-t <hh:mm:ss>] [-e <event1,event2,...>] [-a <action1,action2,...>] [-n <object1,object2,...>] [-c <class1,class2,...>] [-p <pathSubstring>] [-s <textSubstring>]\n\n\
-i\tFull path to input xml file.\n\
-o\tFull path to output xml file, that will be created.\n\
-f\tBeginning of the restrictive time interval.\n\
-t\tEnd of the restrictive time interval.\n\
-e\tNames of events, which should be processed, separated by ','. If not set, all events will be processed.\n\
\tPossible values: mouseevent, wheelevent, keyboardevent, specialevent, customevent\n\
-a\tActions, which should be processed, separated by ','. Works only for mouseevent and wheelevent. If not set, all actions will be processed.\n\
\tPossible values: (click, doubleclick, drag, menuclick) - for mouseevent; (rollup, rolldown) - for wheelevent \n\
-n\tNames of the objects, which should be processed, separated by ','.\n\
-c\tNames of object classes, which should be processed, separated by ','.\n\
-p\tString, which will be searched in object path.\n\
-s\tString, which will be searched in object text.\n\
")
# Process console parametrs
def processParameters(argv):
    """Parse command-line arguments into the module-level option globals.

    Exits with status 2 on a getopt error or when the mandatory input
    file (-i) is missing; exits with 0 after printing help for -h.
    """
    global inputFile
    global outputFile
    global parseAll
    global timeFrom
    global timeTo
    global timeFromMs
    global timeToMs
    global objectNames
    global eventTypes
    global actionNames
    global objectClasses
    global pathSubstr
    global textSubstr
    try:
        opts, args = getopt.getopt(argv,"hi:o:f:t:e:a:n:c:p:s:",["ifile=","ofile=","from=","to=","events=","actions=","objects=","classes=","path=","text="])
    except getopt.GetoptError:
        help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            help()
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputFile = arg
        elif opt in ("-o", "--ofile"):
            outputFile = arg
        elif opt in ("-f", "--from"):
            timeFrom = arg
        elif opt in ("-t", "--to"):
            timeTo = arg
        elif opt in ("-e", "--events"):
            eventTypes = arg
        elif opt in ("-a", "--actions"):
            actionNames = arg
        elif opt in ("-n", "--objects"):
            objectNames = arg
        elif opt in ("-c", "--classes"):
            objectClasses = arg
        elif opt in ("-p", "--path"):
            pathSubstr = arg
        elif opt in ("-s", "--text"):
            textSubstr = arg
    # mandatory input file missing -> usage + error exit
    if (inputFile == ""):
        help()
        sys.exit(2)
    if (outputFile == ""):
        outputFile = "out.xml"
    # a time bound was given, so not every event will be processed
    if ((timeFrom != "") | (timeTo != "")):
        parseAll = False
    # normalize hh:mm:ss to the file's hh:mm:ss:ms format
    if (timeFrom != ""):
        timeFrom += ":000"
    if (timeTo != ""):
        timeTo += ":000"
# Parse one event (xml element)
def parseEvent(event):
    """Apply the active filters to one <event> element and collect statistics.

    The event is removed from the global XML tree (`root`) as soon as it
    fails any enabled filter (type, action, object class/name, path/text
    substring). For surviving mouse events, the click/doubleclick counters,
    the drag-length totals and the heat-map matrix `mat` are updated.
    """
    global objectNames
    global eventTypes
    global actionNames
    global tree
    global root
    global clickCnt
    global leftClickCnt
    global rightClickCnt
    global middleClickCnt
    global dblClickCnt
    global leftDblClickCnt
    global rightDblClickCnt
    global middleDblClickCnt
    global dragLength
    global leftDragLength
    global rightDragLength
    global middleDragLength
    global objectClasses
    global windowWidth
    global windowHeight
    global mat
    global pathSubstr
    global textSubstr
    # remove event with wrong type
    if ((eventTypes != "") & (eventTypes.find(event.get("type")) == -1)):
        root.remove(event)
        return
    # remove event with wrong action (only mouse/wheel events carry an action)
    if ((actionNames != "") & ((event.get("type") == "mouseevent") | (event.get("type") == "wheelevent"))):
        try:
            actionNames.split(",").index(event.find("action").text)
        except ValueError:
            root.remove(event)
            return
    # remove event with wrong objectClass
    if (objectClasses != ""):
        if (event.find("objectClass").text == None):
            root.remove(event)
            return
        if (objectClasses.find(event.find("objectClass").text) == -1):
            root.remove(event)
            return
    # remove event with wrong objectName
    if (objectNames != ""):
        if (event.find("objectName").text == None):
            root.remove(event)
            return
        if (objectNames.find(event.find("objectName").text) == -1):
            root.remove(event)
            return
    # remove event with wrong objectPath
    if (pathSubstr != ""):
        if (event.find("objectPath").text == None):
            root.remove(event)
            return
        if ((event.find("objectPath").text).find(pathSubstr) == -1):
            root.remove(event)
            return
    # remove event with wrong objectText
    if (textSubstr != ""):
        if (event.find("objectText").text == None):
            root.remove(event)
            return
        if ((event.find("objectText").text).find(textSubstr) == -1):
            root.remove(event)
            return
    # count mouse clicks, double click and drag length
    if (event.get("type") == "mouseevent"):
        if (event.find("action").text == "click"):
            clickCnt += 1
            # position text is "x:y"; x index is doubled because `mat`
            # stores (grey, alpha) column pairs per pixel
            x, y = event.find("position").text.split(":")
            mat[int(y)][int(x)*2] += 1;
            if (event.find("mouseButton").text == "left"):
                leftClickCnt += 1
            elif (event.find("mouseButton").text == "right"):
                rightClickCnt += 1
            elif (event.find("mouseButton").text == "middle"):
                middleClickCnt += 1
        elif (event.find("action").text == "doubleclick"):
            dblClickCnt += 1
            x, y = event.find("position").text.split(":")
            mat[int(y)][int(x)*2] += 1;
            if (event.find("mouseButton").text == "left"):
                leftDblClickCnt += 1
            elif (event.find("mouseButton").text == "right"):
                rightDblClickCnt += 1
            elif (event.find("mouseButton").text == "middle"):
                middleDblClickCnt += 1
        elif (event.find("action").text == "drag"):
            # drag length = Euclidean distance between start and end points
            startX, startY = event.find("startPosition").text.split(":")
            endX, endY = event.find("endPosition").text.split(":")
            distance = math.sqrt(math.pow((int(endX) - int(startX)), 2) + math.pow((int(endY) - int(startY)), 2))
            dragLength += distance
            if (event.find("mouseButton").text == "left"):
                leftDragLength += distance
            elif (event.find("mouseButton").text == "right"):
                rightDragLength += distance
            elif (event.find("mouseButton").text == "middle"):
                middleDragLength += distance
# Load input xml file and parse it
def parseFile():
global inputFile
global outputFile
global parseAll
global timeFrom
global timeTo
global timeFromMS
global timeToMs
global tree
global root
global windowWidth
global windowHeight
global mat
inFile = codecs.open(inputFile, mode='r')
# parse file
tree = ET.parse(inFile)
# get root element
root = tree.getroot()
# get window size
width, height = root.get("windowSize").split(":")
windowWidth = int(width)
windowHeight = int(height)
# create matrix which is 2*wider than window (alpha channel)
mat = [[0 for x in range(windowWidth*2)] for y in range(windowHeight)]
# find all events
events = root.findall("event")
# get start and end time
start = (timeFrom if (timeFrom != "") else events[0].find("time").text)
end = (timeTo if (timeTo != "") else events[len(events)-1].find("time").text)
# convert it to miliseconds
hFrom, mFrom, sFrom, msFrom = start.split(":")
timeFromMs = int(hFrom) * 3600000 + int(mFrom) * 60000 + int(sFrom) * 1000 + int(msFrom)
hTo, mTo, sTo, msTo = end.split(":")
timeToMs = int(hTo) * 3600000 + int(mTo) * 60000 + int(sTo) * 1000 + int(msTo)
# count duration and convert it to readeable string
durationMs = timeToMs - timeFromMs
duration = str(int(durationMs/3600000)) if ((durationMs/3600000) > 9) else ("0" + str(int(durationMs/3600000)))
durationMs -= int(durationMs/3600000) * 3600000
duration += str(":" + str(int(durationMs/60000))) if ((durationMs/60000) > 9) else (":0" + str(int(durationMs/60000)))
durationMs -= int(durationMs/60000) * 60000
duration += str(":" + str(int(durationMs/1000))) if ((durationMs/1000) > 9) else (":0" + str(int(durationMs/1000)))
durationMs -= int(durationMs/1000) * 1000
duration += str(":" + str(durationMs))
# print all arguments - it is here, because i need to get start and end time from file (if it is not set)
print("\nParameters:")
print(" Input file: " + inputFile)
print(" Output file: " + outputFile)
print(" Start time: " + start)
print(" End time: " + end)
print(" Duration: " + str(duration))
print(" Event types: " + eventTypes)
print(" Action names: " + actionNames)
print(" Object names: " + objectNames)
print(" Object classes: " + objectClasses)
print(" Text substring: " + textSubstr)
print(" Path substring: " + pathSubstr)
print("\n")
# go thru all events
for event in events:
# no time interval given, process all events
if (parseAll):
parseEvent(event)
else:
# get event time and convert it to miliseconds
h, m, s, ms = event[0].text.split(":")
time = int(h) * 3600000 + int(m) * 60000 + int(s) * 1000 + int(ms)
# remove events that are not in given time interval, process the others
if (timeTo == ""):
if ((timeFrom != "") & (time < timeFromMs)):
root.remove(event)
elif ((timeFrom != "") & (time >= timeFromMs)):
parseEvent(event)
elif (timeFrom == ""):
if ((timeTo != "") & (time > timeToMs)):
root.remove(event)
elif ((timeTo != "") & (time <= timeToMs)):
parseEvent(event)
elif ((time < timeFromMs) | (time > timeToMs)):
root.remove(event)
elif ((time >= timeFromMs) & (time <= timeToMs)):
parseEvent(event)
# Create png image with heat map
def createHeatMap():
global mat
maxVal = 0;
newMat = [[0 for x in range(windowWidth*2)] for y in range(windowHeight)]
# copy value in matrix (which is bigger than 0) to 3x3 neighborhood (for better visibility in final image)
for x in range(1, windowHeight-1):
for y in range(2, windowWidth*2, 2):
val = mat[x][y]
if (val > 0):
for k in range(-1, 2):
for j in range(-2, 3, 2):
if (mat[x+k][y+j] <= val):
newMat[x+k][y+j] = val
# get max click count
for x in range(windowHeight):
for y in range(0, windowWidth*2, 2):
if (newMat[x][y] > maxVal):
maxVal = newMat[x][y]
# convert click counts to intensity value
if (maxVal != 0):
for x in range(windowHeight):
for y in range(0, windowWidth*2, 2):
newMat[x][y] *= int(255/maxVal)
if (newMat[x][y] > 0):
newMat[x][y+1] = 255;
# save image
png.from_array(newMat, 'LA').save("heat_map.png")
# Main
def main(argv):
    """Run the pipeline: parse args, filter the log, write outputs and stats."""
    processParameters(argv)
    parseFile()
    createHeatMap()
    # write modified (filtered) xml to output file
    tree.write(outputFile)
    # write statistics collected by parseEvent
    print("Clicks count: " + str(clickCnt))
    print("    Left mouse button clicks count: " + str(leftClickCnt))
    print("    Right mouse button clicks count: " + str(rightClickCnt))
    print("    Middle mouse button clicks count: " + str(middleClickCnt))
    print("\n")
    print("Doubleclicks count: " + str(dblClickCnt))
    print("    Left mouse button doubleclicks count: " + str(leftDblClickCnt))
    print("    Right mouse button doubleclicks count: " + str(rightDblClickCnt))
    print("    Middle mouse button doubleclicks count: " + str(middleDblClickCnt))
    print("\n")
    print("Drag length: " + str(dragLength))
    print("    Left mouse button drag length: " + str(leftDragLength))
    print("    Right mouse button drag length: " + str(rightDragLength))
    print("    Middle mouse button drag length: " + str(middleDragLength))
if __name__ == "__main__":
    main(sys.argv[1:])
| SindenDev/3dimviewer | applications/3DimViewer/tools/activity_logging/logProcessing.py | logProcessing.py | py | 11,807 | python | en | code | 9 | github-code | 36 |
13247548723 | #1. Cree una lista idéntica a partir de la primera
# lista utilizando la comprensión de listas.
lst1=[1,2,3,4,5]
#Escriba su respuesta aquí.
lst2=[n for n in lst1]
print(lst2)
#2. Crear una lista a partir de los elementos de un
# rango de 1200 a 2000 con pasos de 130, utilizando
# la comprensión de listas.
rng = range(1200,2001,130)
lst = [x for x in rng]
#lst = [i for i in range(1200,2001,130)]
print(lst)
#3. Use la comprensión de listas para construir una
# nueva lista, pero agregue 6 a cada elemento.
lst1=[44,54,64,74,104]
lst2=[(num+6) for num in lst1]
print(lst2)
#4. Utilizando la comprensión de listas, construya
# una lista a partir de los cuadrados de cada
# elemento de la lista.
lst1=[2, 4, 6, 8, 10, 12, 14]
lst2=[(num**2) for num in lst1]
print(lst2)
# 5. Utilizando la comprensión de listas, construya
# una lista a partir de los cuadrados de cada
# elemento de la lista, si el cuadrado es mayor
# que 50.
lst1=[2, 4, 6, 8, 10, 12, 14]
lst2=[num**2 for num in lst1 if num**2>50]
print(lst2)
#6. El diccionario dado consta de vehículos y sus
# pesos en kilogramos. Construya una lista de nombres
# de vehículos con peso inferior a 1300 kilogramos.
# En la misma lista de comprensión, haga que los
# nombres de las claves estén en mayúsculas.
dict={"Susuke Ignis": 985, "Chevrolet park Activ": 1100, "Volkswagen CrossUP": 1245, "Masda CX-3": 1254, "Susuki Vitara": 1245, "Nissan Kicks": 1310, "Mazda CX-5": 1672, "Ford Escape": 1625}
lst=[coche.upper() for coche in dict if dict[coche]<1300]
print(lst)
#7. Cree un diccionario de la lista con los mismos
# pares clave:valor, como: {"clave": "clave"}.
lst=["NY", "FL", "CA", "VT"]
'''
dict={}
for ciudad in lst:
dict[ciudad]=ciudad
'''
dict={ciudad:ciudad for ciudad in lst}
print(dict)
#8. Cree un rango de 100 a 160 con paso 10.
# Utilizando la comprensión de listas, cree un
# diccionario en el que cada número del rango sea
# la clave y cada elemento dividido por 100 sea el
# valor.
dict={i:(i/100) for i in range(100,161,10)}
print(dict)
#9. Usando la comprensión de listas y un argumento
# condicional, cree un diccionario a partir del
# diccionario actual donde solo los pares clave:valor
# con un valor superior a 2000 se toman en el
# nuevo diccionario.
dict1={"NFLX":4950,"TREX":2400,"FIZZ":1800, "XPO":1700}
dict2={x:dict1[x] for x in dict1 if dict1[x]>2000}
#version con get
dict2={x:dict1.get(x) for x in dict1 if dict1.get(x) > 2000}
print(dict2)
| amaiasanchis/EOI-IntroProgramacionPython | listcomprehensionejerc.py | listcomprehensionejerc.py | py | 2,505 | python | es | code | 0 | github-code | 36 |
13097952438 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: lishuang
@description: 绘制 sigmoid 函数
"""
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^(-x)).

    :param x: scalar or numpy array
    :return: value(s) in the open interval (0, 1)
    """
    return 1 / (1 + np.exp(-x))
def derivative_sigmoid(x):
    """First derivative of the sigmoid: s(x) * (1 - s(x)).

    :param x: scalar or numpy array
    :return: derivative value(s); maximum 0.25 at x == 0
    """
    s = sigmoid(x)
    return s * (1 - s)
def plot_sigmoid(is_derivative):
    """Plot the sigmoid, or its derivative, over [-8, 8).

    :param is_derivative: when True, plot the derivative instead
    """
    # Sample points: start, stop (exclusive), step.
    xs = np.arange(-8, 8, 0.2)
    ys = derivative_sigmoid(xs) if is_derivative else sigmoid(xs)
    plt.plot(xs, ys)
    plt.show()
if __name__ == '__main__':
    # Demo: plot the sigmoid itself (False = not the derivative).
    plot_sigmoid(False)
| TatenLee/machine-learning | bi/core/l7/activation_function/sigmoid.py | sigmoid.py | py | 733 | python | en | code | 1 | github-code | 36 |
40357282801 | #!/usr/bin/python3
import socket
import subprocess
def client_program():
    """Connect to a local socket server and run a command loop until 'bye'.

    SECURITY WARNING: every string received from the server is executed as
    a shell command via subprocess.getoutput and its output is sent back —
    this is effectively a remote shell. Never point it at an untrusted
    server.
    """
    host = socket.gethostname() # as both code is running on same pc
    port = 5000  # socket server port number
    client_socket = socket.socket()  # instantiate
    client_socket.connect((host, port))  # connect to the server
    message = "Connection Establish " # initial message announcing the client
    while message.lower().strip() != 'bye':
        client_socket.send(message.encode())  # send message
        data = client_socket.recv(1024).decode()  # receive response
        # executes the received text as a shell command (see warning above)
        output = subprocess.getoutput(f"{data}")
        message = output # next iteration sends the command output back
    client_socket.close()  # close the connection
if __name__ == '__main__':
    client_program()
| Sachin880699/Ethical-Hacking-Course | 5 ) python for hacking/server_client/client.py.py | client.py.py | py | 735 | python | en | code | 0 | github-code | 36 |
35447618383 | import pandas as pd
import numpy as np
import re
from openpyxl import *
import copy
import ast
# Tokens stripped from product names after the volume is extracted.
# NOTE(review): 'ALSCN' appears twice in this set literal (harmless).
unwanted = {'FLXBN', 'ALSCN', 'FXBPN', 'PLBTN', 'GLBTN', 'ALCNN', 'ALSCN'}
# Path to Excel workbook and the sheet processed by this script.
FILEPATH = '/Users/jiehan/Desktop/HiMart/Data Cleaning Code/Masterlist160520 - Joel.xlsx'
SHEET = 'vegetables'
# Persisted lookup dictionaries (read/written by inputBrand / inputQuantity).
BRANDPATH = 'brand_list.txt'
QUANTITYPATH = 'quantity_lst.txt'
# Read Excel sheet into a DataFrame (runs at import time).
df = pd.read_excel(FILEPATH,sheet_name=SHEET)
array = np.array(df)
# Number of columns and rows in the sheet data.
columns = len(array[0,:])
rows = len(array[:,0])
# Find Volume from Product Name
# Find Volume from Product Name
def changeVolume(ws,i):
    """Extract a volume like '500ml' from row i's processing cell (col 8)
    into the volume cell (col 6), then strip it and any `unwanted` tokens
    from the remaining product string. Row i maps to sheet row i+2
    (header offset).
    """
    vol_cell = ws.cell(i+2,6) # get specific volume cells
    proc_cell = ws.cell(i+2,8) # get specific processing cells
    product_string = proc_cell.value
    if (proc_cell.value != None and vol_cell.value == None):
        # number (possibly with a dot) followed by a unit, case-insensitive
        match = re.search(r'((\d\.?)+(ml|oz|l|g|kg|cl|mm|cm|ft| kg))', product_string, re.I)
        if match:
            # Find volume in string
            volume = match.group(1)
            # Fill in Volumn column
            vol_cell.value = volume.lower()
            # Remove volume from original string
            proc_cell.value = product_string.replace(volume,'')
            # Remove the first whitespace-delimited token containing an
            # `unwanted` substring
            for element in unwanted:
                if element in proc_cell.value:
                    substrings = proc_cell.value.split()
                    new_string = copy.deepcopy(substrings)
                    for substring in substrings:
                        if element in substring:
                            new_string.remove(substring)
                            break
                    proc_cell.value = ' '.join(new_string)
                    break
            print('Volume added')
# Find Brand from Product Name
# Find Brand from Product Name
def changeBrand(ws,i):
    """Fill row i's brand cell (col 4) using inputBrand on the processing
    string (col 8), prompting the user when no known brand matches.

    NOTE(review): uses raw_input, so this script appears to target
    Python 2 — confirm before running under Python 3 (where it would
    raise NameError).
    """
    brand_cell = ws.cell(i+2,4) # get specific brand cells
    proc_cell = ws.cell(i+2,8) # get specific processing cells
    product_cell = ws.cell(i+2,5) # get specific product name cells
    product_string = proc_cell.value
    if (proc_cell.value != None):
        # only rows where neither brand nor product name is filled yet
        if (brand_cell.value == None and product_cell.value == None):
            brand_name, new_string = inputBrand(product_string)
            brand_cell.value = brand_name
            proc_cell.value = new_string
            if (brand_name == ""): # No Brand
                add_brand = raw_input("Do you want to add brand name?(Y/N) ").lower()
                if (add_brand == 'y'):
                    brand_name = raw_input("Write new brand name: ")
                    brand_cell.value = brand_name
            print('Brand added')
def changeQuantity(ws, i):
    """Fill row i's quantity cell (col 7) via inputQuantity when the
    processing string (col 8) still contains a digit.

    NOTE(review): brand_cell and product_cell are fetched but unused here.
    """
    brand_cell = ws.cell(i+2,4) # get specific brand cells
    proc_cell = ws.cell(i+2,8) # get specific processing cells
    product_cell = ws.cell(i+2,5) # get specific product name cells
    quantity_cell = ws.cell(i+2,7) # get specific quantity cells
    product_string = proc_cell.value
    if (proc_cell.value != None or quantity_cell.value != None):
        # CLEAN UP REMAINING NAME: Remove Quantity from Product Name
        contains_digit = any(map(str.isdigit, str(product_string)))
        if (contains_digit):
            print("This is the new string: {0}".format(product_string))
            quantity, product_string = inputQuantity(product_string)
            quantity_cell.value = quantity
            proc_cell.value = product_string
            print('Quantity added')
# Find Product Name
def changeProduct(ws, i):
brand_cell = ws.cell(i+2,4) # get specific brand cells
proc_cell = ws.cell(i+2,8) # get specific processing cells
product_cell = ws.cell(i+2,5) # get specific product name cells
quantity_cell = ws.cell(i+2,7) # get specific quantity cells
product_string = proc_cell.value
if (proc_cell.value != None):
# No product name
if (product_cell.value == None):
product_string = product_string.title()
print("This is the new string: {0}".format(product_string))
add_product = raw_input("Do you want to add product name?(Y/N) ").lower()
if (add_product == 'y'):
change_product = raw_input("Do you want to use this product name (Y) or change it (N)?").lower()
if (change_product == 'y'):
product_cell.value = product_string
proc_cell.value = None
elif (change_product == 'n'):
product_cell.value = raw_input("Write new product name: ")
proc_cell.value = None
print('Product added')
def inputQuantity(string):
    """Resolve the quantity abbreviation found in `string`.

    Loads the persisted abbreviation->quantity mapping from QUANTITYPATH,
    matches the longest known abbreviation, and otherwise interactively
    asks the user to add a new one (persisting it back to disk).

    Returns (quantity_value, string_without_abbreviation); the quantity
    is "" when the user declines to add one.
    """
    f = open(QUANTITYPATH, "r")
    quantity_dict = ast.literal_eval(f.readline())
    f.close()
    string = string.lower()
    while(True):
        # empty mapping: go straight to the interactive prompt
        if len(quantity_dict) == 0:
            add_quantity = raw_input("Do you want to add quantity?(Y/N) ").lower()
            if add_quantity == 'y':
                quantity_value = raw_input("What is the quantity? ")
                quantity_key = raw_input("Add quantity abbreviation (based on product name): ").lower()
                quantity_dict[quantity_key] = quantity_value
                f = open(QUANTITYPATH, "w")
                f.write(str(quantity_dict))
                f.close()
                new_string = string.replace(quantity_key, '')
                return quantity_value, new_string
            elif add_quantity == 'n':
                return "", string
        if len(quantity_dict) > 0:
            # longest keys first, so a more specific abbreviation wins
            for key in sorted(quantity_dict, key=len, reverse=True):
                value = quantity_dict[key]
                if key in string:
                    quantity = value
                    new_string = string.replace(key, '')
                    return quantity, new_string
            add_quantity = raw_input("Do you want to add quantity?(Y/N) ").lower()
            if add_quantity == 'y':
                quantity_value = raw_input("What is the quantity? ")
                quantity_key = raw_input("Add quantity abbreviation (based on product name): ").lower()
                quantity_dict[quantity_key] = quantity_value
                f = open(QUANTITYPATH, "w")
                f.write(str(quantity_dict))
                f.close()
                new_string = string.replace(quantity_key, '')
                return quantity_value, new_string
            elif add_quantity == 'n':
                return "", string
def inputBrand(string):
    """Resolve the brand name contained in `string`.

    Loads the persisted key->brand mapping from BRANDPATH and tries, in
    order: an exact match on the growing 1..3-word prefix, then multi-word
    keys as substrings, then single-word keys as whole words, then any key
    as a substring. If nothing matches, asks the user; a newly added key
    is retried on the next loop iteration.

    Returns (brand_name, string_without_brand); brand_name is "" when the
    user says there is no brand.
    """
    f = open(BRANDPATH, "r")
    brand_dict = ast.literal_eval(f.readline())
    f.close()
    string = string.lower()
    while (True):
        if len(brand_dict) > 0:
            test_string = ''
            # only the first three words are considered for the prefix match
            length = len(string.split())
            if length > 3:
                length = 3
            for i in range(length):
                if i == 0:
                    test_string = string.split()[i]
                else:
                    test_string = test_string + ' ' + string.split()[i]
                for key in brand_dict:
                    if test_string.strip().lower() == key.strip().lower():
                        new_string = string.replace(key.lower(),'')
                        brand_name = brand_dict[key]
                        f = open(BRANDPATH, "w")
                        f.write(str(brand_dict))
                        f.close()
                        return (brand_name, new_string)
            # longest keys first, so a more specific brand wins
            for key in sorted(brand_dict, key=len, reverse=True):
                value = brand_dict[key]
                # Additional Spacings
                if (len(key.split()) > 1):
                    if key.lower() in string:
                        new_string = string.replace(key.lower(),'')
                        brand_name = value
                        f = open(BRANDPATH, "w")
                        f.write(str(brand_dict))
                        f.close()
                        return (brand_name, new_string)
                # Proper Brand (brand in set) and Capitalized, e.g. ZICO => Zico
                elif key.lower() in string.split():
                    new_string = string.replace(key.lower(),'')
                    brand_name = value
                    # Write into brand_list.txt
                    f = open(BRANDPATH, "w")
                    f.write(str(brand_dict))
                    f.close()
                    return (brand_name, new_string)
                elif key.lower() in string:
                    new_string = string.replace(key.lower(),'')
                    brand_name = value
                    f = open(BRANDPATH, "w")
                    f.write(str(brand_dict))
                    f.close()
                    return (brand_name, new_string)
        is_brand = raw_input("String is: {0}\nIs there a brand? (Y/N)".format(string)).lower()
        # No Brand, e.g. PEACH TEA
        if (is_brand == "n"):
            print ("No Brand")
            return ("", string)
        # Abbreviation, e.g. MM => Minute Maid; Additional Spacings, e.g. A & W => A&W; Incomplete, e.g. Authentic Tea vs Authentic Tea House
        elif (is_brand == "y"):
            brand_value = raw_input("What is the brand name? ")
            brand_key = raw_input("Add brand name/abbreviation (based on product name): ").capitalize()
            brand_dict[brand_key] = brand_value
# Load Excel workbook and run the cleaning pass row by row.
wb = load_workbook(FILEPATH)
ws = wb[SHEET]
for i in range(rows):
    # Other passes are disabled; only the brand pass runs here.
    # changeVolume(ws, i)
    changeBrand(ws, i)
    # changeQuantity(ws, i)
    # changeProduct(ws, i)
    # Title-case whatever remains in the processing column.
    proc_cell = ws.cell(i+2, 8)
    if (proc_cell.value != None):
        proc_cell.value = proc_cell.value.title()
    print("Done with line {}".format(i+2))
    # final_name = ''
    # for j in range(4,8):
    #     cell = ws.cell(i+2,j)
    #     if cell.value != None:
    #         final_name = final_name + ' ' + cell.value
    # print("Final product name for line{0}: {1}".format(i+2,final_name))
    # if (ws.cell(i+2,8).value != None):
    #     print ("Processing is left with: {}".format(ws.cell(i+2,8).value))
    # Save the workbook every 50 rows so interactive progress isn't lost.
    if (i % 50 == 0):
        wb.save(FILEPATH)
        print('Saving')
wb.save(FILEPATH)
print('Saving')
27888263770 | from django.db import models
from django.core.exceptions import ValidationError
def validate_thumbnail_size(field_file_obj):
    """Django validator: reject thumbnail files of 256 KiB or more.

    Raises ValidationError with the file's size in KiB when the limit is
    reached; returns None (accepts) otherwise.
    """
    file_size = field_file_obj.file.size
    limit_kb = 256
    if file_size < limit_kb * 1024:
        return
    raise ValidationError(
        f"This image is {file_size / 1024}kb. Please make sure it is less than 256kb."
    )
class CreationModificationDateBase(models.Model):
    """
    Abstract base class with a creation and modification date and time
    """
    # Set once when the row is first saved (auto_now_add).
    created = models.DateTimeField(
        "Creation Date and Time",
        auto_now_add=True,
    )
    # Refreshed on every save (auto_now).
    modified = models.DateTimeField(
        "Modification Date and Time",
        auto_now=True,
    )
    class Meta:
        # Abstract model: contributes fields to subclasses, no table of its own.
        abstract = True
| theorangeyakco/sarpanch | content/utils.py | utils.py | py | 700 | python | en | code | 2 | github-code | 36 |
5314308477 | import plotly.express as px
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from numpy.core.fromnumeric import shape
def bar_chart(df):
    """Bar chart of the nationwide 'Overall Homeless' count per year."""
    nationwide = (df['homelessness'] == 'Overall Homeless') & (df['state'] == 'Total')
    fig = px.bar(
        df[nationwide],
        x='year',
        y='number',
        title='Year wise count of Overall Homeless people in USA',
        color='year',
    )
    fig.update_layout({'title_x': 0.5})
    return fig
def line_chart1(df):
    """Line chart of yearly homeless totals per state ('Total' rollup excluded)."""
    per_state = (
        df[df['state'] != 'Total']
        .groupby(['state', 'year'])['number']
        .sum()
        .reset_index()
    )
    fig = px.line(per_state, x='year', y='number', color='state',
                  title='Year on Year Homeless Trend statewise')
    fig.update_layout({'title_x': 0.5})
    return fig
def corrplot(df):
    """Heatmap of the pairwise correlation matrix of df's numeric columns."""
    fig = px.imshow(
        df.corr(),
        zmin=-1,
        zmax=1,
        color_continuous_scale='rdbu',
        title='Master Correlation Plot',
    )
    fig.update_layout({'title_x': 0.5})
    return fig
def pie1(df):
    """Donut chart: region shares of 'Chronically Homeless Individuals'
    (state rows only, 'Total' excluded), summed over all years/states.
    """
    df_1 = df[(df['homelessness'] == 'Chronically Homeless Individuals' ) & (df['state'] != 'Total')]
    df_1 = df_1.groupby(['year','state','region'])['number'].sum().reset_index()
    pie_1= px.pie(
        data_frame=df_1,
        names='region',
        values='number',
        color='region',
        title='Region wise Chronically Homeless Individuals',
        template=None,
        width=None,
        height=None,
        opacity=None,
        hole=0.8
    )
    pie_1.update_layout({'title_x': 0.5})
    return pie_1
def pie2(df):
    """Pie chart: per-state shares of 'Chronically Homeless Individuals',
    colored by region, rendered at a fixed 1000x1000 size.
    """
    df_1 = df[(df['homelessness'] == 'Chronically Homeless Individuals' ) & (df['state'] != 'Total')]
    df_1 = df_1.groupby(['year','state','region'])['number'].sum().reset_index()
    pie_2= px.pie(
        data_frame=df_1,
        names='state',
        values='number',
        color='region',
        title='State wise Chronically Homeless Individuals',
        color_discrete_sequence=px.colors.sequential.RdBu,
        template=None,
        width=1000,
        height=1000,
        opacity=None,
        hole=0.3
    )
    pie_2.update_layout({'title_x': 0.5})
    return pie_2
def scat1(df):
    """Scatter of the four chronic-homelessness measures, sized/positioned
    by count and colored by year.

    NOTE(review): `&` binds tighter than `|`, so the
    `df['region'] != 'Total'` filter applies only to the last condition
    ('Chronically Homeless Individuals'), not to all four — confirm
    whether that is intended.
    """
    df_2 = df[(df['homelessness'] == 'Chronically Homeless People in Families')|(df['homelessness'] =='Unsheltered Chronically Homeless' )|(df['homelessness'] =='Sheltered Total Chronically Homeless') | (df['homelessness'] == 'Chronically Homeless Individuals') & (df['region'] != 'Total')]
    scat_1 = px.scatter(df_2, x="homelessness", y="number", title="Scatterplot of Chronically homeless population",color="year",
                        size='number' )
    scat_1.update_layout({'title_x': 0.5})
    return scat_1
def scat2(df):
    """Scatter of homeless counts over years, colored by homelessness type."""
    fig = px.scatter(
        df,
        x="year",
        y="number",
        title="Scatterplot of homeless population through years",
        color="homelessness",
        size='number',
    )
    fig.update_layout({'title_x': 0.5})
    return fig
def scat3(df):
    """Faceted scatter (one column per region) of 'Chronically Homeless
    Individuals' counts over the years.
    """
    df_1 = df[(df['homelessness'] == 'Chronically Homeless Individuals' ) & (df['state'] != 'Total')]
    df_1 = df_1.groupby(['year','state','region'])['number'].sum().reset_index()
    scat_3 = px.scatter(df_1, x="year", y="number", color="region", facet_row=None, facet_col="region", title='Scatterplot of Homeless Population through Years Across Regions')
    scat_3.update_layout({'title_x': 0.5})
    return scat_3
def chlor(df):
    """Animated USA choropleth of 'Sheltered ES Homeless' counts per state.

    Excludes the 'Total' rollup plus CA, NY, MA and PA — presumably to keep
    the color scale readable for the remaining states (confirm intent).
    """
    overall=df[(df.homelessness=='Sheltered ES Homeless') & ((df.state!='Total') & (df.state != 'CA') & (df.state != 'NY') & (df.state != 'MA') & (df.state != 'PA'))]
    # sort so the year-by-year animation frames appear in order
    overall=overall.sort_values(by = 'year', ascending = True)
    choro = px.choropleth(overall, locations='state',
                        locationmode="USA-states", color='number', animation_frame="year", scope="usa", color_continuous_scale="oranges", title='Homeless count on Region level for all States')
    choro.update_layout({'title_x': 0.5})
    return choro
def get_ratio(df):
    """Build a per-(year, state, region) table with the sheltered/total ratio.

    Joins the 'Sheltered Total Homeless' and 'Overall Homeless' rows on
    year/state/state_new/region and appends a 'pct_sheltered' column with
    the row-wise sheltered-to-overall ratio.
    """
    sheltered_label = 'Sheltered Total Homeless'
    overall_label = 'Overall Homeless'

    def _counts(frame, label):
        # Rows of one homelessness type, with 'number' renamed to the label.
        subset = frame.loc[frame.homelessness == label]
        subset = subset.drop(labels='homelessness', axis=1)
        return subset.rename({'number': label}, axis=1)

    merged = pd.merge(
        _counts(df, sheltered_label),
        _counts(df, overall_label),
        on=['year', 'state', 'state_new', 'region'],
    )
    # Append the ratio as the last column.
    merged.insert(
        len(merged.columns),
        'pct_sheltered',
        merged.apply(lambda row: row[sheltered_label] / row[overall_label], axis=1),
    )
    return merged
def implot(df,rdf):
    """Heatmap (state x year) of the sheltered ratio from get_ratio's output.

    Returns a 3-tuple: (plotly figure, the raw state-by-year numpy matrix,
    the array of state codes).

    NOTE(review): the y axis is np.linspace(2007, 2018, 12), so this
    assumes every state contributes exactly 12 yearly rows in `rdf` —
    confirm against the data.
    """
    # Turn to np for better vis of timelines use in px.imshow
    statecodes = df.loc[df.state_new == 'State'].state.unique()
    # one row per state, sorted chronologically
    matrix = np.array([rdf.loc[rdf.state == ST].sort_values(by='year').pct_sheltered.to_numpy() for ST in statecodes])
    imf = px.imshow(
        np.transpose(matrix),
        y=np.linspace(2007,2018,12),
        x=statecodes,
        range_color=[0,1],
        origin='lower',
        labels={
            'x': 'State',
            'y': 'Year'
        }
    )
    imf.update_layout(title={'text': 'Sheltered Ratio', 'x': 0.5})
    imf.update_xaxes(dtick=1)
    imf.update_yaxes(dtick=1)
    return (imf, matrix, statecodes)
def sidbox(df):
    """Box plot of 'Chronically Homeless Individuals' counts per region,
    with custom font/color styling.
    """
    df_1 = df[(df['homelessness'] == 'Chronically Homeless Individuals' ) & (df['state'] != 'Total')]
    df_1 = df_1.groupby(['year','state','region'])['number'].sum().reset_index()
    sid_box = px.box(df_1, x="region", y="number", title = "Boxplot analyis in each region with the count")
    sid_box.update_layout(
        font_family="Courier New",
        font_color="blue",
        title_font_family="Times New Roman",
        title_font_color="red",
        legend_title_font_color="green",
        title_x = 0.5
    )
    return sid_box
def stackbar(df):
    """Stacked bar of yearly totals by coarse homeless-type category.

    Buckets the fine-grained 'homelessness' labels into broad categories,
    then plots per-year sums of each category.

    NOTE(review): this MUTATES the caller's df in place by adding a
    'homeless_type' column. Some labels appear in more than one bucket
    list, so the order of the .loc assignments below determines the final
    category for those rows (later assignments win).
    """
    #stacked - bar
    chronically_homeless = ['Chronically Homeless','Chronically Homeless Individuals','Chronically Homeless People in Families']
    Overall_homeless = ['Overall Homeless']
    Homeless_individuals = ['Homeless Children of Parenting Youth',
    'Homeless Family Households',
    'Homeless Individuals',
    'Homeless Parenting Youth (Under 25)',
    'Homeless Parenting Youth Age 18-24',
    'Homeless Parenting Youth Under 18',
    'Homeless People in Families',
    'Homeless Unaccompanied Youth (Under 25)',
    'Homeless Unaccompanied Youth Age 18-24',
    'Homeless Unaccompanied Youth Under 18',
    'Homeless Veterans']
    Sheltered_Chronically_homeless = ['Sheltered ES Chronically Homeless',
    'Sheltered ES Chronically Homeless Individuals',
    'Sheltered ES Chronically Homeless People in Families']
    Sheltered_homeless = ['Sheltered ES Homeless',
    'Sheltered ES Homeless Children of Parenting Youth',
    'Sheltered ES Homeless Family Households',
    'Sheltered ES Homeless Individuals',
    'Sheltered ES Homeless Parenting Youth (Under 25)',
    'Sheltered ES Homeless Parenting Youth Age 18-24',
    'Sheltered ES Homeless Parenting Youth Under 18',
    'Sheltered ES Homeless People in Families',
    'Sheltered ES Homeless Unaccompanied Youth (Under 25)',
    'Sheltered ES Homeless Unaccompanied Youth Age 18-24',
    'Sheltered ES Homeless Unaccompanied Youth Under 18',
    'Sheltered ES Homeless Veterans',
    'Sheltered SH Chronically Homeless',
    'Sheltered SH Chronically Homeless Individuals',
    'Sheltered SH Homeless',
    'Sheltered SH Homeless Individuals',
    'Sheltered SH Homeless Unaccompanied Youth (Under 25)',
    'Sheltered SH Homeless Unaccompanied Youth Age 18-24',
    'Sheltered SH Homeless Unaccompanied Youth Under 18',
    'Sheltered SH Homeless Veterans',
    'Sheltered TH Homeless',
    'Sheltered TH Homeless Children of Parenting Youth',
    'Sheltered TH Homeless Family Households',
    'Sheltered TH Homeless Individuals',
    'Sheltered TH Homeless Parenting Youth (Under 25)',
    'Sheltered TH Homeless Parenting Youth Age 18-24',
    'Sheltered TH Homeless Parenting Youth Under 18',
    'Sheltered TH Homeless People in Families',
    'Sheltered TH Homeless Unaccompanied Youth (Under 25)',
    'Sheltered TH Homeless Unaccompanied Youth Age 18-24',
    'Sheltered TH Homeless Unaccompanied Youth Under 18',
    'Sheltered TH Homeless Veterans',
    'Sheltered Total Chronically Homeless',
    'Sheltered Total Chronically Homeless Individuals',
    'Sheltered Total Chronically Homeless People in Families',
    'Sheltered Total Homeless',
    'Sheltered Total Homeless Children of Parenting Youth',
    'Sheltered Total Homeless Family Households',
    'Sheltered Total Homeless Individuals',
    'Sheltered Total Homeless Parenting Youth (Under 25)',
    'Sheltered Total Homeless Parenting Youth Age 18-24',
    'Sheltered Total Homeless Parenting Youth Under 18',
    'Sheltered Total Homeless People in Families',
    'Sheltered Total Homeless Unaccompanied Youth (Under 25)',
    'Sheltered Total Homeless Unaccompanied Youth Age 18-24',
    'Sheltered Total Homeless Unaccompanied Youth Under 18',
    'Sheltered Total Homeless Veterans']
    Unsheltered_homeless = ['Unsheltered Homeless',
    'Unsheltered Homeless Children of Parenting Youth',
    'Unsheltered Homeless Family Households',
    'Unsheltered Homeless Individuals',
    'Unsheltered Homeless Parenting Youth (Under 25)',
    'Unsheltered Homeless Parenting Youth Age 18-24',
    'Unsheltered Homeless Parenting Youth Under 18',
    'Unsheltered Homeless People in Families',
    'Unsheltered Homeless Unaccompanied Youth (Under 25)',
    'Unsheltered Homeless Unaccompanied Youth Age 18-24',
    'Unsheltered Homeless Unaccompanied Youth Under 18',
    'Unsheltered Homeless Veterans'
    ]
    unsheltered_chronically_homeless = ['Unsheltered Chronically Homeless',
    'Unsheltered Chronically Homeless Individuals',
    'Unsheltered Chronically Homeless People in Families']
    # label each row with its bucket (order matters, see docstring)
    df.loc[df['homelessness'].isin(chronically_homeless) , 'homeless_type'] = 'chronically_homeless'
    df.loc[df['homelessness'].isin(Homeless_individuals) , 'homeless_type'] = 'Homeless_individuals'
    df.loc[df['homelessness'].isin(Unsheltered_homeless) , 'homeless_type'] = 'Unsheltered_homeless'
    df.loc[df['homelessness'].isin(unsheltered_chronically_homeless) , 'homeless_type'] = 'Unsheltered_chronically_homeless'
    df.loc[df['homelessness'].isin(Sheltered_Chronically_homeless) , 'homeless_type'] = 'Sheltered_Chronically_homeless'
    df.loc[df['homelessness'].isin(Sheltered_homeless) , 'homeless_type'] = 'Sheltered_homeless'
    df.loc[df['homelessness'].isin(Overall_homeless) , 'homeless_type'] = 'Overall_homeless'
    # df.head(2)
    df8 = df.groupby(['year','homeless_type'])['number'].sum().reset_index()
    # df8.head(10)
    # stacked = df8[(df8['state'] == 'Total')]
    stacked = px.bar(
        df8,
        x = 'year' ,
        y = 'number' ,
        title = 'Year on Year Proportions of Homeless Type' ,
        color='homeless_type',
        pattern_shape_sequence=[".", "x", "+"],
        pattern_shape='homeless_type'
    )
    stacked.update_layout(title_text='year on Year Proportions of Homeless Type', title_x=0.5, title_font_color="magenta")
    return stacked
def line_2(df):
    """Line chart of chronic-homelessness counts per state in the 'west' region, by year."""
    # Same four categories as before, expressed as a single isin() filter.
    chronic_categories = [
        'Chronically Homeless People in Families',
        'Unsheltered Chronically Homeless',
        'Sheltered Total Chronically Homeless',
        'Chronically Homeless Individuals',
    ]
    west_mask = (df['region'] == 'west') & df['homelessness'].isin(chronic_categories)
    yearly_totals = df[west_mask].groupby(['year', 'state'])['number'].sum().reset_index()
    fig = px.line(
        yearly_totals,
        x="year",
        y="number",
        color='state',
        title='Chronical Homelessness trend spawning over years in west region of USA',
    )
    fig.update_layout(title_x=0.5)
    return fig
def area1(df):
    """Stacked area chart of chronic-homelessness counts per state in the 'southwest' region, by year."""
    df_4 = df[(df['region'] == 'southwest') & ((df['homelessness'] == 'Chronically Homeless People in Families')|(df['homelessness'] =='Unsheltered Chronically Homeless' ) |
    (df['homelessness'] =='Sheltered Total Chronically Homeless') | (df['homelessness'] == 'Chronically Homeless Individuals')) ]
    # Sum counts per (year, state) so each state contributes one band per year.
    df_4 = df_4.groupby(['year','state'])['number'].sum().reset_index()
    area_1=px.area(df_4, x="year", y="number", color="state", line_group="state")
    # FIX: the original assigned the title string to an unused local variable;
    # update_layout() below is what actually sets the title, so the dead
    # assignment has been removed.
    area_1.update_layout(title_text='Chronical Homelessness trend spawning over years in southwest region of USA', title_x=0.5, title_font_color="blue")
    return area_1
def vio_plot(df):
    """Violin plot of per-state chronic-homelessness totals in the 'southwest' region."""
    chronic_categories = [
        'Chronically Homeless People in Families',
        'Unsheltered Chronically Homeless',
        'Sheltered Total Chronically Homeless',
        'Chronically Homeless Individuals',
    ]
    southwest = df[(df['region'] == 'southwest') & df['homelessness'].isin(chronic_categories)]
    per_state = southwest.groupby(['year', 'state'])['number'].sum().reset_index()
    fig = px.violin(per_state, x="state", y="number", title='Statistical attributes of Southwest region states ', color='state')
    fig.update_layout(title_x=0.5)
    return fig
def sun_plot(df):
    """Sunburst of homeless counts, region -> state, excluding the 'Total' aggregate rows."""
    without_totals = df[df['state'] != 'Total']
    fig = px.sunburst(
        without_totals,
        path=['region', 'state'],
        values='number',
        height=600,
        title='State wise homeless population distribution',
    )
    fig.update_layout(title_x=0.5)
    return fig
def sun_plot_1(df):
    """Sunburst of chronic-homelessness counts, region -> category, excluding the 'Total' region."""
    # The OR-chain over four category strings reduces to a single isin() filter.
    chronic_categories = [
        'Chronically Homeless People in Families',
        'Unsheltered Chronically Homeless',
        'Sheltered Total Chronically Homeless',
        'Chronically Homeless Individuals',
    ]
    df_2 = df[df['homelessness'].isin(chronic_categories) & (df['region'] != 'Total')]
    # The original passed every optional px.sunburst argument explicitly as
    # None; those are the defaults, so only the meaningful arguments remain.
    sun_1 = px.sunburst(
        df_2,
        values='number',
        path=['region', 'homelessness'],
        title='Chronical data distribution data in various regions',
        height=750,
    )
    sun_1.update_layout(title_x=0.5)
    return sun_1
def mjbox(df):
    """Box plot of 'Overall Homeless' counts per year for the 'west' region."""
    is_overall_west = (df['homelessness'] == 'Overall Homeless') & (df['region'] == 'west')
    fig = px.box(
        df[is_overall_west],
        x='year',
        y='number',
        color='year',
        title='Descriptive Statistics of Overall Homeless in West',
    )
    return fig
| MSidda/Plotly-Dashboard | utilities/vis.py | vis.py | py | 13,487 | python | en | code | 0 | github-code | 36 |
7659517632 | #
# Test of keyword Extraction based on Graph-of-Words
#
# Test examples come from the following paper:
# A Graph Degeneracy-based Approach to Keyword Extraction. Tixier, Antoine, Malliaros, Fragkiskos, and
# Vazirgiannis, Michalis. Proceedings of the 2016 Conference on Empirical Methods in Natural Language
# Processing. (EMNLP 2016)
#
# It exemplifies the results of these alternative methods:
# Batch keyword extraction based on k-core
# main core
# k-core + dense selection method
# k-core + inflexion selection method
# Word-level keyword extraction
# CoreRank + elbow method
# CoreRank + top 33%
#
import pytest
from gowpy.summarization.unsupervised import KcoreKeywordExtractor
from gowpy.summarization.unsupervised import CoreRankKeywordExtractor
# """
# Mathematical aspects of computer-aided share trading. We consider
# problems of statistical analysis of share prices and propose
# probabilistic characteristics to describe the price series.
# We discuss three methods of mathematical modelling of price
# series with given probabilistic characteristics.
# """
# Stemmed, lowercased form of the abstract quoted above — the shared input
# document for every keyword-extraction test in this module.
_preprocessed_text = """
Mathemat aspect computer-aid share trade problem
statist analysi share price probabilist characterist price
seri method mathemat model price seri probabilist
characterist
""".strip().lower()
def test_keyword_extraction_kcore():
    """Batch extraction via the main k-core: every retained word scores 11."""
    keywords = ['mathemat', 'method', 'model', 'probabilist', 'price', 'characterist', 'seri']
    expected_result = [(word, 11) for word in keywords]

    extractor_kw = KcoreKeywordExtractor(directed=False, weighted=True, window_size=8)
    gowpy_result = extractor_kw.extract(_preprocessed_text)

    assert expected_result == gowpy_result
def test_keyword_extraction_density():
    """Batch extraction with the k-core 'density' selection method."""
    core_words = ['mathemat', 'price', 'probabilist', 'characterist', 'seri', 'method', 'model']
    expected_result = [(word, 11) for word in core_words] + [('share', 10)]

    extractor_kw = KcoreKeywordExtractor(
        directed=False,
        weighted=True,
        window_size=8,
        selection_method='density',
    )
    gowpy_result = extractor_kw.extract(_preprocessed_text)

    assert expected_result == gowpy_result
def test_keyword_extraction_inflexion():
    """Batch extraction with the k-core 'inflexion' selection method."""
    expected_result = (
        [(word, 11) for word in ['mathemat', 'price', 'probabilist', 'characterist', 'seri', 'method', 'model']]
        + [('share', 10)]
        + [(word, 9) for word in ['trade', 'problem', 'statist', 'analysi']]
    )

    extractor_kw = KcoreKeywordExtractor(
        directed=False,
        weighted=True,
        window_size=8,
        selection_method='inflexion',
    )
    gowpy_result = extractor_kw.extract(_preprocessed_text)

    assert expected_result == gowpy_result
def test_keyword_extraction_corerank_elbow():
    """Word-level extraction with CoreRank scores, cut off by the elbow method."""
    expected_result = [
        ('mathemat', 128), ('price', 120), ('analysi', 119), ('share', 118),
        ('probabilist', 112), ('characterist', 112), ('statist', 108),
        ('trade', 97), ('problem', 97), ('seri', 94),
    ]

    extractor = CoreRankKeywordExtractor(directed=False, weighted=True, window_size=8)
    result = extractor.extract(_preprocessed_text)

    assert expected_result == result
def test_keyword_extraction_corerank_firstier():
    """Word-level extraction with CoreRank scores, keeping the top 33% of words."""
    expected_result = [
        ('mathemat', 128), ('price', 120), ('analysi', 119),
        ('share', 118), ('probabilist', 112),
    ]

    extractor = CoreRankKeywordExtractor(directed=False, weighted=True, window_size=8, n=0.33)
    result = extractor.extract(_preprocessed_text)

    assert expected_result == result
| GuillaumeDD/gowpy | tests/test_keyword_extraction.py | test_keyword_extraction.py | py | 4,387 | python | en | code | 11 | github-code | 36 |
74486753063 | def DecimalToBinary(num):
    """Print the binary representation of a non-negative integer, most significant bit first."""
    if num > 1:
        # Recurse on the higher-order bits first so digits print in order.
        DecimalToBinary(num // 2)
    print(num % 2,end='')
    # print() normally appends a newline; end='' keeps all the binary digits
    # of one number on the same line.
# Read the decimal number to convert (expects a non-negative integer).
number = int(input("Enter any positive number: "))
# Print its binary representation.
DecimalToBinary(number)
| Charut24/Programming-Paradigms- | Lab Assignment 3_2.py | Lab Assignment 3_2.py | py | 354 | python | en | code | 0 | github-code | 36 |
7813076216 | from datetime import datetime
from typing import Dict, List
import pytest
import sqlalchemy as sa
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
from aspen.api.views.tests.data.auth0_mock_responses import DEFAULT_AUTH0_USER
from aspen.auth.auth0_management import Auth0Client
from aspen.database.models import User
from aspen.test_infra.models.usergroup import group_factory, userrole_factory
# All test coroutines will be treated as marked.
# (pytest-asyncio: every async test in this module runs on the event loop.)
pytestmark = pytest.mark.asyncio
async def test_users_me(http_client: AsyncClient, async_session: AsyncSession) -> None:
    """GET /v2/users/me returns the authenticated user's profile fields."""
    grp = group_factory()
    member = await userrole_factory(async_session, grp)
    async_session.add(grp)
    await async_session.commit()

    response = await http_client.get(
        "/v2/users/me", headers={"user_id": member.auth0_user_id}
    )
    assert response.status_code == 200

    expected = {
        "id": 1,
        "name": "test",
        "acknowledged_policy_version": None,
        "agreed_to_tos": True,
        "groups": [
            {"id": grp.id, "name": grp.name, "roles": ["member"]},
        ],
        "gisaid_submitter_id": None,
    }
    resp_data = response.json()
    # Compare field by field so the randomly generated ids can be checked
    # separately below.
    for key, value in expected.items():
        assert resp_data[key] == value
    # split_id / analytics_id are generated 20-character strings.
    assert len(resp_data["split_id"]) == 20
    assert len(resp_data["analytics_id"]) == 20
async def test_users_view_put_pass(
    auth0_apiclient: Auth0Client,
    http_client: AsyncClient,
    async_session: AsyncSession,
):
    """PUT /v2/users/me: each updatable field is accepted and persisted."""
    group = group_factory()
    user = await userrole_factory(async_session, group, agreed_to_tos=False)
    async_session.add(group)
    await async_session.commit()
    new_name = "Alice Alison"
    # Stub out the Auth0 management API call made when the name changes.
    # NOTE(review): dict.update() returns None, so return_value is None here —
    # confirm nothing under test reads the mocked return value.
    auth0_apiclient.update_user.return_value = DEFAULT_AUTH0_USER.copy().update(  # type: ignore
        name=new_name
    )
    headers = {"user_id": user.auth0_user_id}
    # One request per updatable field (plus a combined first request).
    requests: List[Dict] = [
        {"agreed_to_tos": True, "acknowledged_policy_version": "2022-06-22"},
        {"agreed_to_tos": False},
        {"acknowledged_policy_version": "2020-07-22"},
        {"name": new_name},
        {"gisaid_submitter_id": "alice_phd"},
    ]
    for req in requests:
        res = await http_client.put("/v2/users/me", headers=headers, json=req)
        assert res.status_code == 200
        # start a new transaction so we read committed state, not the cache
        await async_session.close()
        async_session.begin()
        updated_user = (
            (
                await async_session.execute(
                    sa.select(User).filter(User.auth0_user_id == user.auth0_user_id)  # type: ignore
                )
            )
            .scalars()
            .one()
        )
        if "agreed_to_tos" in req:
            assert updated_user.agreed_to_tos == req["agreed_to_tos"]
        # BUG FIX: the key was misspelled "acknowledged_policy_verison", so
        # this assertion was dead code and never ran.
        if "acknowledged_policy_version" in req:
            assert (
                updated_user.acknowledged_policy_version
                == datetime.strptime(
                    req["acknowledged_policy_version"], "%Y-%m-%d"
                ).date()
            )
        if "name" in req:
            assert updated_user.name == req["name"]
        if "gisaid_submitter_id" in req:
            assert updated_user.gisaid_submitter_id == req["gisaid_submitter_id"]
async def test_usergroup_view_put_fail(
    http_client: AsyncClient, async_session: AsyncSession
):
    """PUT /v2/users/me rejects type-invalid payloads with HTTP 422."""
    grp = group_factory()
    member = await userrole_factory(async_session, grp, agreed_to_tos=False)
    async_session.add(grp)
    await async_session.commit()

    auth_headers = {"user_id": member.auth0_user_id}
    # A non-boolean agreed_to_tos and an unparseable date must both fail validation.
    for bad_payload in (
        {"agreed_to_tos": 11, "acknowledged_policy_version": "2022-06-22"},
        {"agreed_to_tos": True, "acknowledged_policy_version": "hello"},
    ):
        res = await http_client.put("/v2/users/me", headers=auth_headers, json=bad_payload)
        assert res.status_code == 422
| chanzuckerberg/czgenepi | src/backend/aspen/api/views/tests/test_users.py | test_users.py | py | 3,879 | python | en | code | 11 | github-code | 36 |
1500291881 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class BundleDataProp(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, key=None, value=None):  # noqa: E501
        """BundleDataProp - a model defined in OpenAPI
        :param key: The key of this BundleDataProp.  # noqa: E501
        :type key: str
        :param value: The value of this BundleDataProp.  # noqa: E501
        :type value: str
        """
        # Attribute name -> Python type, consumed by the Model base class for
        # (de)serialization.
        self.openapi_types = {
            'key': str,
            'value': str
        }
        # Attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'key': 'key',
            'value': 'value'
        }
        self._key = key
        self._value = value
    @classmethod
    def from_dict(cls, dikt) -> 'BundleDataProp':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The BundleDataProp of this BundleDataProp.  # noqa: E501
        :rtype: BundleDataProp
        """
        return util.deserialize_model(dikt, cls)
    @property
    def key(self):
        """Gets the key of this BundleDataProp.
        Bundle data key  # noqa: E501
        :return: The key of this BundleDataProp.
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """Sets the key of this BundleDataProp.
        Bundle data key  # noqa: E501
        :param key: The key of this BundleDataProp.
        :type key: str
        """
        self._key = key
    @property
    def value(self):
        """Gets the value of this BundleDataProp.
        Bundle data value  # noqa: E501
        :return: The value of this BundleDataProp.
        :rtype: str
        """
        return self._value
    @value.setter
    def value(self, value):
        """Sets the value of this BundleDataProp.
        Bundle data value  # noqa: E501
        :param value: The value of this BundleDataProp.
        :type value: str
        """
        self._value = value
| shinesolutions/swagger-aem | clients/python-flask/generated/openapi_server/models/bundle_data_prop.py | bundle_data_prop.py | py | 2,253 | python | en | code | 35 | github-code | 36 |
2022880805 | import logging as log
import math
import torch
import torch.nn as nn
from pytorch_lightning.core.lightning import LightningModule
import nltocode.decoder as custom_decoder
import nltocode.encoder as custom_encoder
class Transformer(LightningModule):
    """Sequence-to-sequence Transformer (PyTorch Lightning) for NL-to-code.

    Combines custom encoder/decoder stacks with source/target token
    embeddings, optional character-level embeddings, sequence- or
    tree-structured positional encodings for the target, and an optional copy
    mechanism that mixes the vocabulary distribution with an attention-based
    copy distribution over source tokens.
    """
    def __init__(self,
                 d_model,
                 nhead,
                 num_encoder_layers,
                 num_decoder_layers,
                 dim_feedforward,
                 dropout,
                 multihead_attention_dropout,
                 normalize_before,
                 activation,
                 learning_rate,
                 batch_size,
                 num_dataloader_workers,
                 train_valid_data_path,
                 train_split,
                 val_split,
                 vocabsrc_size,
                 vocabtgt_size,
                 vocab_pad_id,
                 max_src_sentence_length,
                 max_tgt_sentence_length,
                 max_path_depth,
                 path_multiple=0,
                 tgt_pos_enc_type=None,
                 label_smoothing=None,
                 logits_forbidden_token_modifier=None,
                 logits_forbidden_token_modifier_schedule=None,
                 logits_forbidden_token_op='sum',
                 logits_forbidden_token_modifier_learning_rate=None,
                 enable_copy=True,
                 copy_att_layer=-1,
                 withcharemb=False,
                 vocabchar_size=None,
                 max_charseq_len=None,
                 # Test-only parameters
                 test_data_path=None,
                 grammar_graph_file=None,
                 target_language='python',
                 num_beams=None,
                 disable_decoder_constraint_mask=False,
                 max_beam_length=None,
                 max_num_predicted_results=None,
                 beam_search_mode=None,
                 keep_invalid_beamsearch_results=False,
                 target_output_file=None,
                 is_test_only_run=False
                 ):
        """Store hyperparameters and build embeddings, encoder, decoder and output heads."""
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers
        self.dim_feedforward = dim_feedforward
        self.dropout = dropout
        self.multihead_attention_dropout = multihead_attention_dropout
        self.normalize_before = normalize_before
        self.activation = activation
        self.learning_rate = learning_rate
        self.label_smoothing = label_smoothing
        self.batch_size = batch_size
        self.withcharemb = withcharemb
        self.num_dataloader_workers = num_dataloader_workers
        self.vocabsrc_size = vocabsrc_size
        self.vocabtgt_size = vocabtgt_size
        self.vocabchar_size = vocabchar_size
        self.max_charseq_len = max_charseq_len
        self.tgt_pos_enc_type = tgt_pos_enc_type
        self.max_path_depth = max_path_depth
        self.path_multiple = path_multiple
        self.enable_copy = enable_copy
        # Token embeddings for the source and target vocabularies.
        self.encoder_src = nn.Embedding(self.vocabsrc_size, self.d_model)
        self.encoder_tgt = nn.Embedding(self.vocabtgt_size, self.d_model)
        if self.withcharemb:
            # Character path: embed each character, then project the
            # concatenated per-token character embeddings back to d_model.
            self.encoder_char = nn.Embedding(self.vocabchar_size, self.d_model)
            self.linear_char = nn.Linear(self.d_model * self.max_charseq_len, self.d_model)
            self.norm_char = nn.LayerNorm(self.d_model)
        self.dropout_reg_src = nn.Dropout(p=self.dropout)
        self.dropout_reg_tgt = nn.Dropout(p=self.dropout)
        decoder_layer = custom_decoder.TransformerDecoderLayer(d_model=self.d_model,
                                                               nhead=self.nhead,
                                                               dim_feedforward=self.dim_feedforward,
                                                               dropout=self.dropout,
                                                               multihead_attention_dropout=self.multihead_attention_dropout,
                                                               normalize_before=self.normalize_before)
        # att_layer selects which decoder layer's attention is returned for
        # the copy mechanism (-1 = last layer by default).
        self.decoder = custom_decoder.TransformerDecoder(decoder_layer,
                                                         num_layers=self.num_decoder_layers,
                                                         att_layer=copy_att_layer,
                                                         norm=nn.LayerNorm(self.d_model))
        encoder_layer = custom_encoder.TransformerEncoderLayer(d_model=self.d_model,
                                                               nhead=self.nhead,
                                                               dim_feedforward=self.dim_feedforward,
                                                               dropout=self.dropout,
                                                               activation=self.activation,
                                                               withcharemb=self.withcharemb)
        encoder_norm = nn.LayerNorm(self.d_model)
        self.encoder = custom_encoder.TransformerEncoder(encoder_layer,
                                                         self.num_encoder_layers,
                                                         encoder_norm)
        # Projects decoder hidden states to target-vocabulary logits.
        self.lin_vocab = nn.Linear(self.d_model, vocabtgt_size)
        if self.enable_copy:
            # Generation gate: sigmoid over [context; decoder state; target
            # embedding] -> probability of generating vs. copying.
            self.p_gen = nn.Sequential(nn.Linear(self.d_model * 3, 1), nn.Sigmoid())
    def forward(self, src, char, tgt, edge_path_seqs, logits_token_mask=None):
        """Encode src (optionally with char embeddings), decode tgt, and return log-probabilities.

        src/tgt are (seq_len, batch) token-id tensors; id 0 is treated as
        padding by the masks below. edge_path_seqs carries per-token tree
        paths, used only when tgt_pos_enc_type == 'tree'.
        NOTE(review): logits_token_mask is accepted but unused here — confirm
        whether masking was meant to be applied to the logits.
        """
        try:
            # Causal (upper-triangular) mask: position i cannot attend to
            # positions > i; blocked entries are -inf before softmax.
            mask = torch.triu(torch.ones(len(tgt), len(tgt)), 1)
            mask = mask.type_as(tgt).type(torch.float)
            tgt_mask = mask.masked_fill(mask == 1, float('-inf'))
            src_pad_mask = (src == 0).transpose(0, 1)
            tgt_pad_mask = (tgt == 0).transpose(0, 1)
            src_embeddings = self.encoder_src(src)
            tgt_embeddings = self.encoder_tgt(tgt)
            # Source always gets sequential positions; target may use tree positions.
            src_embeddings_with_pos = self.pos_encode_seq(src_embeddings, self.d_model)
            tgt_embeddings_with_pos = self.pos_encode(tgt_embeddings, self.tgt_pos_enc_type, edge_path_seqs)
            src_embeddings_with_pos = self.dropout_reg_src(src_embeddings_with_pos)
            tgt_embeddings_with_pos = self.dropout_reg_tgt(tgt_embeddings_with_pos)
            if self.withcharemb:
                char_embeddings = self.encoder_char(char)
                char_embeddings = char_embeddings.view(-1, char.size(1), self.d_model * self.max_charseq_len)
                char_embeddings = self.linear_char(char_embeddings)
                char_embeddings = self.norm_char(char_embeddings)
            else:
                char_embeddings = None
            enc_hs = self.encoder(src_embeddings_with_pos,
                                  char_embeddings,
                                  mask=None,
                                  src_key_padding_mask=src_pad_mask)
            dec_hs, attention = self.decoder(tgt_embeddings_with_pos,
                                             enc_hs,
                                             tgt_mask=tgt_mask,
                                             memory_mask=None,
                                             tgt_key_padding_mask=tgt_pad_mask,
                                             memory_key_padding_mask=None)
            logits = self.lin_vocab(dec_hs)
            if self.enable_copy:
                return self.apply_copy(attention, dec_hs, enc_hs, logits, src, tgt_embeddings_with_pos)
            else:
                return torch.log_softmax(logits, dim=-1)
        except:
            # Log the offending shapes before re-raising for upstream handling.
            log.error("ERROR SRC/TGT SHAPE: %s / %s", src.shape, tgt.shape)
            raise
    def apply_copy(self, attention, dec_hs, enc_hs, logits, src, tgt_embeddings_with_pos):
        """Mix vocabulary and copy distributions (pointer-generator style) into log-probs.

        p_gen gates between generating from the target vocabulary and copying
        a source token via the decoder attention weights; copied source ids
        are mapped onto the last vocabsrc_size slots of the target vocabulary.
        """
        p_vocab = logits.softmax(dim=-1)
        hidden_states = enc_hs.transpose(0, 1)
        # Attention-weighted sum of encoder states = per-position context vectors.
        context_vectors = torch.matmul(attention, hidden_states).transpose(0, 1)
        total_states = torch.cat((context_vectors, dec_hs, tgt_embeddings_with_pos), dim=-1)
        p_gen = self.p_gen(total_states)
        p_copy = 1 - p_gen
        src_t = src.transpose(0, 1)
        # One-hot source ids -> attention mass per source-vocabulary entry.
        one_hot = torch.zeros(src_t.size(0),
                              src_t.size(1),
                              self.vocabsrc_size,
                              device=src_t.device)
        one_hot = one_hot.scatter_(dim=-1,
                                   index=src_t.unsqueeze(-1),
                                   value=1)
        p_copy_src_vocab = torch.matmul(attention, one_hot)
        # Map source-vocab slots onto the tail of the target vocabulary.
        input_vocab = torch.arange(self.vocabtgt_size - self.vocabsrc_size,
                                   self.vocabtgt_size,
                                   device=src_t.device)
        src_to_tgt_conversion_matrix = torch.zeros(self.vocabsrc_size,
                                                   self.vocabtgt_size,
                                                   device=src_t.device)
        src_to_tgt_conversion_matrix_scatter = src_to_tgt_conversion_matrix.scatter_(dim=-1,
                                                                                     index=input_vocab.unsqueeze(
                                                                                         -1), value=1)
        p_copy_tgt_vocab = torch.matmul(p_copy_src_vocab, src_to_tgt_conversion_matrix_scatter).transpose(0, 1)
        p = torch.add(p_vocab * p_gen, p_copy_tgt_vocab * p_copy)
        log_probs = torch.log(p)
        return log_probs
    def pos_encode(self, embeddings, pos_enc_type, edge_path_seqs):
        """Dispatch to tree- or sequence-positional encoding based on pos_enc_type."""
        if pos_enc_type == 'tree':
            return self.pos_encode_tree(embeddings, edge_path_seqs, self.d_model)
        elif pos_enc_type == 'seq':
            return self.pos_encode_seq(embeddings, self.d_model)
        elif pos_enc_type is None:
            raise ValueError("Positional encoding type not specified")
        else:
            raise ValueError('Unknown positional encoding type %s' % pos_enc_type)
    def pos_encode_seq(self, embeddings, dmodel):
        """Add the standard sinusoidal positional encoding (sin on even dims, cos on odd)."""
        length = embeddings.size(0)
        pos_encoder = torch.zeros(length, dmodel).type_as(embeddings)
        pos = torch.arange(0, length, dtype=torch.float).type_as(embeddings).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, dmodel, 2).type_as(embeddings) * (-math.log(10000.0) / dmodel))
        phase = pos * div_term
        pos_encoder[:, 0::2] = torch.sin(phase)
        pos_encoder[:, 1::2] = torch.cos(phase)
        pos_encoder = pos_encoder.unsqueeze(0).transpose(0, 1)
        embeddings_with_pos = embeddings + pos_encoder
        return embeddings_with_pos
    def pos_encode_tree(self, embeddings, edge_path_seqs, dmodel):
        """Add a sinusoidal encoding of each token's tree path.

        The d_model dimensions are split into max_depth groups of d_pos dims,
        one group per step of the edge path; path entries equal to 0 are
        treated as padding and receive a zero encoding.
        """
        max_sentence_length = embeddings.size(0)
        batch_size = embeddings.size(1)
        max_depth = edge_path_seqs.size(2)
        d_pos = dmodel // max_depth
        pos = edge_path_seqs.unsqueeze(-1)
        freq_index = torch.arange(0, d_pos, 2, device=embeddings.device, dtype=torch.float)
        frequency = torch.exp(freq_index * (-math.log(10000.0) / d_pos))
        phase = pos * frequency
        phase = phase.view(max_sentence_length, batch_size, max_depth * d_pos // 2)
        # phase == 0 only where the path entry was 0 (padding).
        padding = (phase == 0)
        pos_encoder = torch.zeros(max_sentence_length, batch_size, max_depth * d_pos, device=embeddings.device)
        pos_encoder[:, :, 0::2] = torch.sin(phase)
        pos_encoder[:, :, 1::2] = torch.cos(phase)
        padding_replacement = 0.0
        pos_encoder[:, :, 0::2].masked_fill_(padding, padding_replacement)
        pos_encoder[:, :, 1::2].masked_fill_(padding, padding_replacement)
        embeddings_with_pos = embeddings + pos_encoder
        return embeddings_with_pos
| SmartDataAnalytics/codeCAI | nl2codemodel/src/nltocode/transformerinf.py | transformerinf.py | py | 11,778 | python | en | code | 4 | github-code | 36 |
70010914665 | import urllib.request
import ssl
import json
# WARNING: globally disables HTTPS certificate verification for urllib in this
# process — convenient for a demo, but insecure for real use.
ssl._create_default_https_context = ssl._create_unverified_context
def search(keyword):
    """Query the Naver blog-search Open API and return the list of result items.

    Returns None (after printing the HTTP status code) on a non-200 response.
    """
    client_id = "BoqP7ttLY0wDhxvLzawS"
    client_secret = "ztLCPvpLyO"
    # Percent-encode the keyword so non-ASCII (e.g. Korean) queries form a valid URL.
    encText = urllib.parse.quote(keyword)
    url = "https://openapi.naver.com/v1/search/blog?query=" + encText
    request = urllib.request.Request(url)
    request.add_header("X-Naver-Client-Id", client_id)
    request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(request)
    rescode = response.getcode()
    if (rescode == 200):
        response_body = response.read()
        result = response_body.decode('utf-8')
        result = json.loads(result)
        return result['items']
    else:
        # BUG FIX: rescode is an int; "str" + int raised TypeError instead of
        # printing the error code.
        print("Error Code:" + str(rescode))
        return None
# Demo: run one blog search and dump the raw result items below.
result = search("라디오 스타")
print(result) | Kyeongrok/python_yla | com/week7/am/naver_openapi8.py | naver_openapi8.py | py | 862 | python | en | code | 1 | github-code | 36 |
31527895703 | import sys
# Redirect stdin to the local sample-input file so input() reads test data.
sys.stdin = open('6.txt', 'r')
def btk(n, s):
    """Backtracking over months 0..11 choosing day/month/3-month passes.

    n is the current month index and s the cost accumulated so far; the best
    total is kept in the module-level `result`, which the caller initialises
    to the yearly-pass price y1.
    """
    global result
    # Prune: this branch already costs at least as much as the best known plan.
    if s >= result:
        return
    if n >= 12:
        result = min(result, s)
        return
    # Daily passes: plan[n] usage days at d1 each.
    btk(n+1, s+(plan[n]*d1))
    if plan[n]:
        # Monthly pass.
        btk(n+1, s+m1)
        # Three-month pass (skips ahead three months).
        btk(n+3, s+m3)
# Number of test cases on the first input line.
Test_case = int(input())
for t in range(Test_case):
    # Prices: d1 = day pass, m1 = month pass, m3 = 3-month pass, y1 = year pass.
    d1, m1, m3, y1 = map(int, input().split())
    # Planned usage days for each of the 12 months.
    plan = list(map(int, input().split()))
    # The yearly pass is always an option, so it bounds the search.
    result = y1
    btk(0, 0)
    print(f'#{t+1} {result}')
| Ikthegreat/TIL | Algorithm/0331/Backtrack/6.py | 6.py | py | 543 | python | ko | code | 0 | github-code | 36 |
25285589404 | '''
Find out the relationship between Liquor Sales and Vehicle Accidents
'''
import sys
import math
import uuid
from schema import liquor_schema
from matplotlib import pyplot as plt
import pandas as pd
from pyspark.sql import SparkSession, types
from pyspark.sql import functions as F
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
# Cassandra cluster seed nodes used by the spark-cassandra connector.
cluster_seeds = ['199.60.17.32', '199.60.17.65']
spark = SparkSession.builder.appName('liquor vehicle').config('spark.cassandra.connection.host', ','.join(cluster_seeds)).getOrCreate()
# Suppress INFO-level Spark chatter.
spark.sparkContext.setLogLevel('WARN')
sc = spark.sparkContext
assert spark.version >= '2.4' # make sure we have Spark 2.4+
def main(liquor_sales_inputs, vehicle_accidents_inputs):
    """Join monthly liquor sales (2013-2018) with alcohol-related crash counts
    and print the r^2 of the correlation between bottles sold and accidents."""
    # process vehicle accidents dataset
    va_df = spark.read.csv(vehicle_accidents_inputs, header=True)
    # Keep only the date portion of 'Crash Date & Time'.
    va_df = va_df.select(F.to_date(F.split(va_df['Crash Date & Time'], ' ')[0], "MM/dd/yyyy").alias('Date'),\
                         'County', 'City', 'Drug/Alcohol Related')
    va_df = va_df.withColumn('Year', F.year(va_df['Date']))
    va_df = va_df.withColumn('Month', F.month(va_df['Date']))
    va_df = va_df.where("Year <= 2018 and Year >= 2013")
    # Keep only crashes flagged as alcohol-related.
    va_df = va_df.where(va_df['Drug/Alcohol Related'].like('%Alcohol%'))
    va_df = va_df.groupBy(['Year', 'Month']).agg(F.count("*").alias('total_number_of_accidents'))
    # process liquor sales dataset
    liquor_df = spark.read.csv(liquor_sales_inputs, schema=liquor_schema)
    liquor_df = liquor_df.select(F.to_date(liquor_df['Date'], 'MM/dd/yyyy').alias('Date'), 'Category', 'Bottles Sold')
    liquor_df = liquor_df.withColumn('Year', F.year(liquor_df['Date']))
    liquor_df = liquor_df.withColumn('Month', F.month(liquor_df['Date']))
    liquor_df = liquor_df.where("Year <= 2018 and Year >= 2013")
    liquor_df = liquor_df.groupBy(['Year', "Month"]).sum('Bottles Sold')
    # join two datasets on (Year, Month)
    liquor_vehicle_df = liquor_df.join(va_df, ['Year', 'Month'])
    # NOTE(review): F.lit(str(uuid.uuid1())) is evaluated once on the driver,
    # so every row receives the same 'id' — confirm this is intended before
    # re-enabling the Cassandra write below.
    liquor_vehicle_df = liquor_vehicle_df.withColumn('id', F.lit(str(uuid.uuid1())))
    liquor_vehicle_df = liquor_vehicle_df.withColumnRenamed('Year', 'year').withColumnRenamed('Month', 'month')\
        .withColumnRenamed('sum(Bottles Sold)', 'bottles_sold')
    liquor_vehicle_df = liquor_vehicle_df.orderBy(['year', 'month'], ascending=True)
    liquor_vehicle_df.show()
    # liquor_vehicle_df.write.format("org.apache.spark.sql.cassandra") \
    #     .options(table="liquor_accidents", keyspace="qya23").mode('append').save()
    # calculate correlation coefficient: build the six running sums of the
    # textbook Pearson-r formula (n, x, y, x^2, y^2, x*y) in one pass.
    six_values = liquor_vehicle_df.select(F.lit(1).alias('sum'), liquor_vehicle_df['bottles_sold'].alias('x'), liquor_vehicle_df['total_number_of_accidents'].alias('y'), \
                                          (liquor_vehicle_df['bottles_sold']**2).alias('x^2'), (liquor_vehicle_df['total_number_of_accidents']**2).alias('y^2'), \
                                          (liquor_vehicle_df['bottles_sold'] * liquor_vehicle_df['total_number_of_accidents']).alias('x*y'))
    six_sums = six_values.select(F.sum('sum'), F.sum('x'), F.sum('y'), F.sum('x^2'), F.sum('y^2'), F.sum('x*y'))
    params = six_sums.collect()[0]
    r = (params['sum(sum)'] * params['sum(x*y)'] - params['sum(x)'] * params['sum(y)']) / ((math.sqrt(params['sum(sum)'] * params['sum(x^2)'] - params['sum(x)']**2)) * (math.sqrt(params['sum(sum)'] * params['sum(y^2)'] - params['sum(y)']**2)))
    print('r^2 = {0:.6f}'.format(r**2))
# Entry point: argv[1] = liquor sales CSV path, argv[2] = vehicle accidents CSV path.
if __name__ == '__main__':
    liquor_sales_inputs = sys.argv[1]
    vehicle_accidents_inputs = sys.argv[2]
main(liquor_sales_inputs, vehicle_accidents_inputs) | libou/Liquor-Sale-Analysis | liquor_vehicle.py | liquor_vehicle.py | py | 3,441 | python | en | code | 0 | github-code | 36 |
2496560289 | #!/usr/bin/env python
"""
photomongo - read and process all tweets possible
Usage:
photomongo [options] CONFIGFILE
Options:
-h --help Show this screen.
--pickle-to=<picklefile> file to save tweets.
--pickle-from=<picklefile> file to read tweets.
--max=<number> maximum number to process, primarily for debug
--since=<days> maximum number of days to get in the past
--progress-bar display the progress bar
"""
import sys
import logging
# Configure root logging before other project imports so their module loggers
# inherit this setup; everything is written to photomongo.log at DEBUG level.
logging.basicConfig(
    format = '%(asctime)s %(levelname)s %(module)s [%(funcName)s] %(message)s',
    datefmt='%Y-%m-%d,%H:%M:%S',
    level=logging.DEBUG,
    handlers = [logging.FileHandler('photomongo.log')] )
log = logging.getLogger(__name__)
import os
from docopt import docopt
import configparser
import json
import searcher
from twitter import Twitter
from gmail import Gmail
import progress_bar
# to save/reload tweets use pickle
import pickle
# Quiet chatty third-party libraries: only WARNING and above from these.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("PIL").setLevel(logging.WARNING)
logging.getLogger("oauthlib").setLevel(logging.WARNING)
logging.getLogger("tweepy").setLevel(logging.WARNING)
logging.getLogger("requests_oauthlib").setLevel(logging.WARNING)
logging.getLogger("googleapiclient").setLevel(logging.WARNING)
if __name__=='__main__':
    args = docopt(__doc__)
    # Optional CLI arguments. docopt returns None for omitted options, so the
    # int() conversions below also catch TypeError for the missing case.
    try:
        pickleFromFile = args['--pickle-from']
    except KeyError:
        pickleFromFile = None
    try:
        pickleToFile = args['--pickle-to']
    except KeyError:
        pickleToFile = None
    try:
        maxCount = int(args['--max'])
    except ( KeyError, TypeError ):
        maxCount = None
    try:
        sinceDays = int(args['--since'])
    except ( KeyError, TypeError):
        sinceDays = None
    try:
        showProgressBar = args['--progress-bar']
    except KeyError:
        showProgressBar = False
    log.debug('pickleToFile = ' + str(pickleToFile))
    log.debug('pickleFromFile = ' + str(pickleFromFile))
    # get the configuration file (required positional argument)
    conf_file = args['CONFIGFILE']
    # read the config file and determine what to do
    config = configparser.ConfigParser()
    config.read(conf_file)
    # check if gmail is configured
    # if the gmail section is not present in the config file, then a Null
    # Gmail handler will be created that does nothing.
    try:
        gmailconf = config['gmail']
    except KeyError:
        # gmail not configured
        log.info('gmail not configured, emails will not be sent')
        gmailconf = None
    try:
        gm = Gmail(gmailconf)
        log.info('gmail configured')
    # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit; it
    # re-raises, so only the extra error log line is affected.
    except:
        # gmail configuration error
        log.error('gmail configuration error')
        raise
    # require a 'search' section in the config file to know what to search for
    try:
        searchconf = config['search']
    except KeyError:
        log.exception('Search configuration parameters not configured')
        raise
    # check if configured to write out save file
    # NOTE(review): results_save_file is set here but not used in the rest of
    # this script — confirm whether saving was meant to happen below.
    try:
        results_save_file = searchconf['save results file']
        log.info('Will save search results to: ' + results_save_file)
    except KeyError:
        results_save_file = None
        log.info('No configured file for search results')
    # require a twitter configuration unless reading from an external file
    if pickleFromFile:
        # read previously saved tweets from the pickle file instead of the twitter api
        with open(pickleFromFile,'rb') as f:
            alltweets = pickle.load(f)
    else:
        # read the tweets from twitter api directly
        try:
            twitconfig = config['twitter']
        except KeyError:
            log.exception('Twitter not configured')
            raise
        twit = Twitter(twitconfig)
        # get all the tweets
        # alltweets will be a dict of lists
        # each dict key is a followed twitter stream
        alltweets = twit.getAllTweets(sinceDays = sinceDays)
        #alltweets = twit.getAllTweets()
    # save the tweets if needed (enables future --pickle-from runs)
    if pickleToFile:
        # write the tweets to a picklefile
        with open(pickleToFile,'wb') as f:
            pickle.dump(alltweets,f)
    # set up to search the tweets using the [search] config section
    tweetsearcher = searcher.TweetSearcher(searchconf)
# search all the tweets
# https://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
flatten = lambda l: [item for sublist in l for item in sublist]
# convert all tweets to a list if needed
try:
# assume a dictionary
alltweets = flatten( alltweets.values() )
except AttributeError:
# assume it's a list
pass
if not maxCount:
# if maxCount not set, use all tweets at max
totlen = len(alltweets)
elif len(alltweets) < maxCount:
# if fewer tweets found than max count, use that number
totlen = len(alltweets)
else:
# otherwise, process maxcCount at most
totlen = maxCount
searchresults = []
if alltweets:
for i,tweet in enumerate( alltweets ):
# this searches on tweet at a time
searchresults.extend( tweetsearcher.searchTweet(tweet) )
# count in progress_bar is i+1 because we start at zero and
# this should not be zero-based counter
if showProgressBar:
progress_bar.print_progress(i+1,totlen)
if i == totlen:
break
    # send email if search results come back
    if searchresults:
        # format message to send: one tweet URL plus its match location per result
        msg = ''
        for sr in searchresults:
            url = 'https://twitter.com/' +\
                  sr.reference.user.screen_name +\
                  '/status/' +\
                  sr.reference.id_str
            msg += url + ' at ' + str(sr.match_loc) + '\n\n'
        log.info(msg)
        gm.create_and_send_message('photomongo results to review',\
                                   msg)
    else:
        # no matches: send a no-results notification instead
        msg = 'Photomongo found no results in ' + str(totlen) + ' tweets.'
        log.info(msg)
        gm.create_and_send_message('photomongo no results',\
                                   msg)
| anielsen001/photomongo | photomongo.py | photomongo.py | py | 6,387 | python | en | code | 1 | github-code | 36 |
4617186912 | # File: ExpressionTree.py
# Description: demonstrates knowledge on binary trees
# Student's Name: Jerry Che
# Student's UT EID: jc78222
# Partner's Name:
# Partner's UT EID:
# Course Name: CS 313E
# Unique Number: 86235
# Date Created: 08/02/18
# Date Last Modified: 08/06/18
# Binary operators recognised in the expression.
operators=['+', '-', '*', '/']
# Global accumulators filled by Tree.preOrder / Tree.postOrder traversals.
pre = []
post = []
class Stack (object):
    """A simple LIFO stack backed by a Python list."""

    def __init__(self):
        self.stack = []

    def push(self, item):
        """Place item on top of the stack."""
        self.stack.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.stack.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.stack[-1]

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.stack

    def size(self):
        """Return the number of items on the stack."""
        return len(self.stack)
# Create Binary Node class with data and children
class Node (object):
    """A binary-tree node holding one token and two child links."""

    def __init__(self, data):
        self.data = data
        # Children start out empty; createTree fills them in.
        self.lchild = None
        self.rchild = None
# Create tree class with a root
class Tree (object):
  """Expression tree built from a fully parenthesized, space-separated infix expression."""
  def __init__ (self):
    self.root = None
  # Follows algorithm given to make a tree from the given string expression
  def createTree (self, expr):
    """Build the tree: '(' descends left, operators descend right, ')' and operands climb back up."""
    s1 = Stack()
    expr = expr.split()
    current = Node(None)
    self.root = current
    for val in expr:
      if(val == '('):
        # '(' starts a left operand: descend into a new left child.
        current.lchild = Node(None)
        s1.push(current)
        current = current.lchild
      elif(val in operators):
        # Operator goes in the current node; descend into the right child.
        current.data = val
        s1.push(current)
        current.rchild = Node(None)
        current = current.rchild
      elif(val==')'):
        # ')' closes a subexpression: climb back to the parent.
        if(s1.isEmpty() == False):
          current = s1.pop()
      else:
        # Operand: store it and climb back to the pending operator node.
        current.data = val
        current = s1.pop()
  # Helper function to evaluate, performs rudimentary mathematics
  def operate (self, oper1, oper2, val):
    """Apply the binary operator `val` to the two numeric operands."""
    if (val == "+"):
      return oper1 + oper2
    elif (val == "-"):
      return oper1 - oper2
    elif (val == "*"):
      return oper1 * oper2
    elif (val == "/"):
      return oper1 / oper2
  # Runs evaluation of the binary tree
  def evaluate (self, aNode):
    """Evaluate the tree via a postfix traversal and an operand stack.

    NOTE(review): the aNode parameter is ignored — the traversal always starts
    from self.root, and postOrder() appends into the module-level `post` list,
    which accumulates across calls.
    """
    s2 = Stack()
    expr = self.postOrder(self.root)
    for val in expr:
      if (val in operators):
        oper2 = s2.pop()
        oper1 = s2.pop()
        s2.push (self.operate (oper1, oper2, val))
      else:
        s2.push (float(val))
    return s2.pop()
  # pre order traversal - center, left, right - returns array
  def preOrder (self, aNode):
    """Append node values to the global `pre` list in root-left-right order."""
    if (aNode != None):
      pre.append(aNode.data)
      self.preOrder (aNode.lchild)
      self.preOrder (aNode.rchild)
    return pre
  # post order traversal - left, right, center - returns array
  def postOrder (self, aNode):
    """Append node values to the global `post` list in left-right-root order."""
    if (aNode != None):
      self.postOrder (aNode.lchild)
      self.postOrder (aNode.rchild)
      post.append(aNode.data)
    return post
def main():
    """Read one infix expression from expression.txt, evaluate it, and print
    the result plus its prefix and postfix forms."""
    expressiontree = Tree()
    # Fixed: use a context manager so the file handle is closed
    # (the original opened the file and never closed it).
    with open("expression.txt", "r") as expr_file:
        expr = expr_file.readline()
    expressiontree.createTree(str(expr))
    result = expressiontree.evaluate(expressiontree.root)
    print(str(expr), " = ", str(result))
    # preOrder/postOrder populate the module-level `pre` and `post` lists.
    expressiontree.preOrder(expressiontree.root)
    print("Prefix Expression:", end=' ')
    for value in pre:
        print(value, end=" ")
    print("\n" + "Postfix Expression:", end=' ')
    for value in post:
        print(value, end=" ")


main()
28691721043 | from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, QThreadPool
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
#from .ui.upload import UploadDialog
from .module.workspace.login import LoginDialog
from .module.workspace.upload import UploadDialog
from .module.workspace.layer_umum import LayerUmum
import os.path
class Palapa:
    """QGIS Plugin Implementation.

    Wires the Palapa login/upload/layer dialogs into the QGIS toolbar and
    plugin menu, and tracks a simple login state machine:
    False (logged out) -> 'upload' or 'umum'.
    """

    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale
        # NOTE(review): QSettings().value('locale/userLocale') can be None on
        # a fresh profile, which would make the [0:2] slice raise — confirm.
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'Palapa_{}.qm'.format(locale))

        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            QCoreApplication.installTranslator(self.translator)

        # Declare instance attributes
        self.actions = []
        self.menu = self.tr(u'&Palapa')
        self.threadpool = QThreadPool()
        self.check_worker = None
        # Dialogs owned by the plugin; their signals route navigation between them.
        self.login = LoginDialog()
        self.upload = UploadDialog()
        self.umum = LayerUmum()
        self.login.UserSignal.connect(self.openUpload)
        self.login.UmumMasuk.connect(self.openLayer)
        self.upload.UserLogout.connect(self.logout)
        self.umum.UserLogout.connect(self.logout)
        # Login state: False (logged out), 'upload', or 'umum'.
        self.LoggedIn = False

        # Check if plugin was started the first time in current QGIS session
        # Must be set in initGui() to survive plugin reloads
        self.first_start = None

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString

        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('Palapa', message)

    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Create a QAction wired to *callback* and register it.

        The action is optionally added to the Plugins toolbar and plugin menu,
        appended to self.actions (so unload() can remove it), and returned.
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)

        if status_tip is not None:
            action.setStatusTip(status_tip)

        if whats_this is not None:
            action.setWhatsThis(whats_this)

        if add_to_toolbar:
            # Adds plugin icon to Plugins toolbar
            self.iface.addToolBarIcon(action)

        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)

        self.actions.append(action)
        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = ':/plugins/Plugin-upload-Palapa/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'Palapa'),
            callback=self.run,
            parent=self.iface.mainWindow())

        # will be set False in run()
        self.first_start = True

    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&Palapa'),
                action)
            self.iface.removeToolBarIcon(action)

    def run(self):
        """Run method that performs all the real work"""
        # Create the dialog with elements (after translation) and keep reference
        # Only create GUI ONCE in callback, so that it will only load when the plugin is started
        if self.first_start == True:
            self.first_start = False
        # Show whichever dialog matches the current login state.
        if self.LoggedIn == False:
            self.login.show()
        elif self.LoggedIn == "upload":
            self.upload.show()
        elif self.LoggedIn == "umum":
            self.umum.show()
        # # show the dialog
        # # Run the dialog event loop
        # result = self.login.exec_()
        # # See if OK was pressed
        # if result:
        #     # Do something useful here - delete the line containing pass and
        #     # substitute with your code.
        #     pass

    def openLayer(self):
        """Switch to the 'umum' layer browser after a public login."""
        self.LoggedIn = "umum"
        self.login.close()
        self.umum.show()
        self.umum.refresh_grid()

    def openUpload(self, payload):
        """Switch to the upload dialog after an authenticated login."""
        self.LoggedIn = "upload"
        self.login.close()
        self.upload.checkUser()
        self.upload.show()
        self.upload.UserParam(payload)

    def logout(self):
        """Return to the logged-out state and show the login dialog again."""
        self.LoggedIn = False
        self.upload.close()
        self.umum.close()
        self.login.show()
43751035063 | from __future__ import unicode_literals
import os
from collections import OrderedDict, namedtuple
from operator import itemgetter
from pathlib import Path
import six
if six.PY2:
from collections import MutableSequence, Sized
elif six.PY3:
from collections.abc import MutableSequence, Sized
class Table(MutableSequence):
    """Mutable sequence of typed rows with an ordered field-name -> field-type map.

    Rows are stored internally as lists of deserialized values; indexing by
    int yields a namedtuple, by slice a new Table, and by field name (text)
    the whole column.
    """

    def __init__(self, fields, meta=None):
        from rows.fields import slug

        # TODO: should we really use OrderedDict here?
        # TODO: should use slug on each field name automatically or inside each
        # plugin?
        self.fields = OrderedDict(
            [
                (slug(field_name), field_type)
                for field_name, field_type in OrderedDict(fields).items()
            ]
        )

        # TODO: should be able to customize row return type (namedtuple, dict
        # etc.)
        self.Row = namedtuple("Row", self.field_names)
        self._rows = []
        self.meta = dict(meta) if meta is not None else {}

    @classmethod
    def copy(cls, table, data):
        """Return a new Table sharing *table*'s fields/meta, filled with *data* rows."""
        table = cls(fields=table.fields, meta=table.meta)
        table._rows = list(data)  # TODO: verify data?
        return table

    def head(self, n=10):
        """Return a new Table with the first *n* rows."""
        return Table.copy(self, self._rows[:n])

    def tail(self, n=10):
        """Return a new Table with the last *n* rows."""
        return Table.copy(self, self._rows[-n:])

    def _repr_html_(self):
        """HTML representation for Jupyter; truncates to head+tail beyond 20 rows."""
        import rows.plugins

        # NOTE(review): the truncated branch references rows.fields.TextField,
        # which relies on `rows.fields` being importable as an attribute of
        # the `rows` package — confirm that holds for the pinned version.
        convert_to_html = rows.plugins.html.export_to_html
        total = len(self)
        if total <= 20:
            result = convert_to_html(self, caption=True)
        else:  # Show only head and tail
            # Build an all-text shadow table of head rows, an ellipsis row,
            # and tail rows, then patch the caption with the real total.
            representation = Table(
                fields=OrderedDict(
                    [
                        (field_name, rows.fields.TextField)
                        for field_name in self.field_names
                    ]
                ),
                meta={"name": self.name},
            )
            for row in self.head():
                representation.append(
                    {
                        field_name: field_type.serialize(getattr(row, field_name))
                        for field_name, field_type in self.fields.items()
                    }
                )
            representation.append(
                {field_name: "..." for field_name in self.field_names}
            )
            for row in self.tail():
                representation.append(
                    {
                        field_name: field_type.serialize(getattr(row, field_name))
                        for field_name, field_type in self.fields.items()
                    }
                )
            result = convert_to_html(representation, caption=True).replace(
                b"</caption>",
                b" (showing 20 rows, out of "
                + str(total).encode("ascii")
                + b")</caption>",
            )
        return result.decode("utf-8")

    @property
    def field_names(self):
        """Field names, in declaration order."""
        return list(self.fields.keys())

    @property
    def field_types(self):
        """Field types, in declaration order."""
        return list(self.fields.values())

    @property
    def name(self):
        """Define table name based on its metadata (filename used on import)

        If `filename` is not available, return `table1`.
        """
        from rows.fields import slug

        name = self.meta.get("name", None)
        if name is not None:
            return slug(name)

        source = self.meta.get("source", None)
        if source and source.uri:
            return slug(os.path.splitext(Path(source.uri).name)[0])

        return "table1"

    def __repr__(self):
        # _rows may be a lazy iterable without a length in some code paths.
        length = len(self._rows) if isinstance(self._rows, Sized) else "?"
        imported = ""
        if "imported_from" in self.meta:
            imported = " (from {})".format(self.meta["imported_from"])
        return "<rows.Table{} {} fields, {} rows>".format(
            imported, len(self.fields), length
        )

    def _make_row(self, row):
        """Deserialize a dict into the internal list-of-values row format."""
        # TODO: should be able to customize row type (namedtuple, dict etc.)
        return [
            field_type.deserialize(row.get(field_name, None))
            for field_name, field_type in self.fields.items()
        ]

    def append(self, row):
        """Add a row to the table. Should be a dict"""
        self._rows.append(self._make_row(row))

    def __len__(self):
        return len(self._rows)

    def __getitem__(self, key):
        """int -> Row namedtuple; slice -> new Table; field name -> column list."""
        key_type = type(key)
        if key_type == int:
            return self.Row(*self._rows[key])
        elif key_type == slice:
            return Table.copy(self, self._rows[key])
        elif key_type is six.text_type:
            try:
                field_index = self.field_names.index(key)
            except ValueError:
                raise KeyError(key)

            # TODO: should change the line below to return a generator exp?
            return [row[field_index] for row in self._rows]
        else:
            raise ValueError("Unsupported key type: {}".format(type(key).__name__))

    def __setitem__(self, key, value):
        """int -> replace one row; field name -> create/replace a whole column."""
        key_type = type(key)
        if key_type == int:
            self._rows[key] = self._make_row(value)
        elif key_type is six.text_type:
            from rows import fields

            values = list(value)  # I'm not lazy, sorry
            if len(values) != len(self):
                raise ValueError(
                    "Values length ({}) should be the same as "
                    "Table length ({})".format(len(values), len(self))
                )

            # Detect the column's type from the supplied values, then either
            # append a new column or overwrite the existing one in place.
            field_name = fields.slug(key)
            is_new_field = field_name not in self.field_names
            field_type = fields.detect_types(
                [field_name], [[value] for value in values]
            )[field_name]
            self.fields[field_name] = field_type
            self.Row = namedtuple("Row", self.field_names)

            if is_new_field:
                for row, value in zip(self._rows, values):
                    row.append(field_type.deserialize(value))
            else:
                field_index = self.field_names.index(field_name)
                for row, value in zip(self._rows, values):
                    row[field_index] = field_type.deserialize(value)
        else:
            raise ValueError("Unsupported key type: {}".format(type(key).__name__))

    def __delitem__(self, key):
        """int -> delete one row; field name -> drop a whole column."""
        key_type = type(key)
        if key_type == int:
            del self._rows[key]
        elif key_type is six.text_type:
            try:
                field_index = self.field_names.index(key)
            except ValueError:
                raise KeyError(key)

            del self.fields[key]
            self.Row = namedtuple("Row", self.field_names)
            for row in self._rows:
                row.pop(field_index)
        else:
            raise ValueError("Unsupported key type: {}".format(type(key).__name__))

    def insert(self, index, row):
        """Insert a dict row at position *index*."""
        self._rows.insert(index, self._make_row(row))

    def __radd__(self, other):
        # Allows sum([table1, table2, ...]) which starts from 0.
        if other == 0:
            return self
        raise ValueError()

    def __iadd__(self, other):
        return self + other

    def __add__(self, other):
        """Concatenate two tables with identical fields into a new Table."""
        if other == 0:
            return self

        if not isinstance(self, type(other)) or self.fields != other.fields:
            raise ValueError("Tables have incompatible fields")
        else:
            table = Table(fields=self.fields)
            table._rows = self._rows + other._rows
            return table

    def order_by(self, key):
        """Sort rows in place by one field; a leading '-' means descending."""
        # TODO: implement locale
        # TODO: implement for more than one key
        reverse = False
        if key.startswith("-"):
            key = key[1:]
            reverse = True

        field_names = self.field_names
        if key not in field_names:
            raise ValueError('Field "{}" does not exist'.format(key))

        key_index = field_names.index(key)
        self._rows.sort(key=itemgetter(key_index), reverse=reverse)
class FlexibleTable(Table):
    """Table variant that grows new fields on the fly when appended rows
    carry keys the schema has not seen yet. Rows are stored as dicts."""

    def __init__(self, fields=None, meta=None):
        super(FlexibleTable, self).__init__({} if fields is None else fields, meta)

    def __getitem__(self, key):
        """int -> Row namedtuple; slice -> list of Row namedtuples."""
        if isinstance(key, int):
            return self.Row(**self._rows[key])
        if isinstance(key, slice):
            return [self.Row(**entry) for entry in self._rows[key]]
        raise ValueError("Unsupported key type: {}".format(type(key).__name__))

    def _add_field(self, field_name, field_type):
        """Register a new column and rebuild the Row namedtuple."""
        self.fields[field_name] = field_type
        self.Row = namedtuple("Row", self.field_names)

    def _make_row(self, row):
        from rows import fields

        # Grow the schema first so every known field is deserialized below.
        for key in row.keys():
            if key not in self.field_names:
                self._add_field(key, fields.identify_type(row[key]))
        return {
            field_name: field_type.deserialize(row.get(field_name, None))
            for field_name, field_type in self.fields.items()
        }

    def insert(self, index, row):
        """Insert a dict row at position *index*, extending the schema if needed."""
        self._rows.insert(index, self._make_row(row))

    def __setitem__(self, key, value):
        self._rows[key] = self._make_row(value)

    def append(self, row):
        """Add a row to the table. Should be a dict"""
        self._rows.append(self._make_row(row))
| turicas/rows | rows/table.py | table.py | py | 9,338 | python | en | code | 851 | github-code | 36 |
3723310513 | import random
class Solution:
    """LeetCode 710: uniform random pick from [0, N) excluding a blacklist.

    Virtually compacts the whitelist into [0, length): blacklisted values
    below `length` are remapped onto whitelisted values at or above it.
    """

    def __init__(self, N, blacklist):
        banned = set(blacklist)
        self.length = N - len(banned)
        self.remap = {}
        # Blacklisted values that fall inside the sampling window [0, length)...
        needs_remap = [b for b in banned if b < self.length]
        # ...are paired, in order, with whitelisted values from [length, N).
        spare = (v for v in range(self.length, N) if v not in banned)
        for bad, good in zip(needs_remap, spare):
            self.remap[bad] = good

    def pick(self):
        """Return a uniformly random non-blacklisted value in [0, N)."""
        idx = random.randint(0, self.length - 1)
        return self.remap.get(idx, idx)


# Your Solution object will be instantiated and called as such:
# obj = Solution(N, blacklist)
# param_1 = obj.pick()
# Your Solution object will be instantiated and called as such:
# obj = Solution(N, blacklist)
# param_1 = obj.pick()
| sokazaki/leetcode_solutions | problems/0710_random_pick_with_blacklist.py | 0710_random_pick_with_blacklist.py | py | 718 | python | en | code | 1 | github-code | 36 |
# wrapping into a class.
from isolation import Board
from plotter import plotgrid
class movestore():
    """Precomputes knight-move offset sets for each search depth up to `level`.

    NOTE(review): dist() returns the *squared* euclidean distance (no square
    root is taken), matching how movemark() uses it.
    """

    def __init__(self, moveloc, level=6):
        self.depthcheck = level
        self.vectorlist = self.seeker(moveloc)

    def dist(self, x, y):
        """Return the squared euclidean distance between points x and y (int)."""
        return (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2

    def movemark(self, moveslist):
        """Offset every position in *moveslist* by each knight-move vector.

        :param moveslist: iterable of (int, int) positions
        :return: {squared_distance: {(int, int), ...}} keeping only offsets
            whose squared distance from the source position is in (0, 98].
        """
        results = {}
        vectors = [(-1, 2), (-1, -2), (1, 2), (1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
        for origin in moveslist:
            for ray in vectors:
                sq_dist = self.dist(origin, ray)
                landing = (ray[0] + origin[0], ray[1] + origin[1])
                if 0 < sq_dist <= 98:
                    results.setdefault(sq_dist, set()).add(landing)
        return results

    def flattendict(self, dicto):
        """Flatten a dict of iterables into a single list of their items."""
        flat = []
        for bucket in dicto.values():
            flat.extend(bucket)
        return flat

    def seeker(self, moveloc):
        """Map each depth 1..depthcheck to the tuple of offsets reachable there.

        Each depth's frontier feeds the next: depth d+1 expands every offset
        found at depth d by all knight vectors again.
        """
        frontier = moveloc
        vectordict = {}
        for depth in range(1, self.depthcheck + 1):
            reachable = self.movemark(frontier)
            frontier = tuple(self.flattendict(reachable))
            vectordict[depth] = frontier
        return vectordict

    def display(self):
        """Print how many offsets are stored at each depth."""
        for depth in self.vectorlist:
            print(depth, str(len(self.vectorlist[depth])) + " positions", end='\n')

    def repr_val(self, depth):
        """Return the stored offsets for the given depth."""
        return self.vectorlist[depth]

    def regrid(self, x, y):
        """Component-wise sum of two coordinate pairs."""
        return (x[0] + y[0], x[1] + y[1])

    def eval(self, target, checklist, depth):
        """Return the subset of *checklist* reachable from *target* at *depth*."""
        openset = set()
        for offset in self.repr_val(depth):
            candidate = self.regrid(target, offset)
            # a move is valid only if it lands on a legal (blank) square
            if candidate in checklist:
                openset.add(candidate)
        return openset
if __name__ == "__main__":
    # Manual smoke test: build the offset store, set up an isolation board
    # with two dummy moves, then plot reachability at each depth.
    # testing the class
    vector00 = [(0, 0)]
    MOVX = movestore(vector00)
    gametry = Board('0', '1')
    locinit = gametry.get_blank_spaces()
    # dummy moves
    gametry.apply_move((3, 3))
    gametry.apply_move((4, 4))
    playerloc = gametry.get_player_location('0')
    legals = gametry.get_blank_spaces()
    maxdepth = 6
    # printing figures
    for xstep in range(1, maxdepth + 1):
        # Print Accessibility
        listthing = MOVX.eval(playerloc, legals, xstep)
        pplot = plotgrid(playerloc, locinit, legals, listthing, xstep)
        pplot.start(maxdepth, xstep )
15576814335 | from collections import deque
# BOJ 1966 "printer queue": print the position at which document M leaves
# the printer, given that higher-priority jobs are always printed first.
T = int(input())
for _ in range(T):
    # The document count is implied by the priority list; only index M matters.
    _, target_idx = map(int, input().split())
    priorities = list(map(int, input().split()))
    # Each job is (priority, original position).
    jobs = deque((prio, pos) for pos, prio in enumerate(priorities))
    printed = 0
    while True:
        highest = max(prio for prio, _ in jobs)
        prio, pos = jobs[0]
        if prio == highest:
            # front job prints now
            printed += 1
            if pos == target_idx:
                print(printed)
                break
            jobs.popleft()
        else:
            # a higher-priority job exists: requeue the front job
            jobs.append(jobs.popleft())
5063097451 | from intervaltree import Interval as interval, IntervalTree
class OutputExporter(object):
    """Flattens MassTodon 'big graph' results (per-cluster subgraphs with
    M/I/G nodes) into plain dicts suitable for plotting and export.

    NOTE(review): uses the networkx 1.x attribute API (SG.node / SG.edge) —
    confirm the pinned networkx version.
    """

    def __init__(self, mz_prec):
        # m/z tolerance used to pad isotopologue peaks into intervals
        self.mz_prec = mz_prec
        # list of per-cluster results; assigned in add_mz_ranges_to_results
        self.BG = None

    def iBG(self, node_type):
        '''Iterate over all nodes of a given type in the whole big graph **BG** of solved subproblems.

        node_type - one of 'G' (experimental grouping), 'I' (isotopologue)
        or 'M' (molecule).
        '''
        assert node_type in ('G','I','M'), "specified wrong type of node. Was %s. Should be G, I, M" % node_type
        for r in self.BG:
            SG = r['SG']
            for N in SG:
                N_D = SG.node[N]
                if N_D['type'] == node_type:
                    yield N, N_D

    def add_mz_ranges_to_results(self, masstodon_res):
        '''Add information about the m/z ranges for the G nodes.'''
        self.BG = masstodon_res
        prec = self.mz_prec
        # One interval per isotopologue peak, padded by the precision; split
        # so overlapping peaks yield disjoint atomic intervals.
        I_tree = IntervalTree(interval(I_D['mz']-prec,I_D['mz']+prec) for _,I_D in self.iBG('I'))
        I_tree.split_overlaps()
        for G, G_D in self.iBG('G'):
            min_mz, max_mz = G_D['min_mz'], G_D['max_mz']
            if min_mz == max_mz:
                ## This copes with stupid border issues.
                ## iso_intervals = I_tree[II(min_mz-prec/10., max_mz+prec/10.)]
                # set digits to mz_prec/10
                iso_intervals = I_tree[min_mz]
            else:
                iso_intervals = I_tree[interval(min_mz, max_mz)]
            # Assign a range only when the grouping hits exactly one atomic
            # interval; ambiguous groupings get no range (None, None).
            if len(iso_intervals) == 1:
                mz = iso_intervals.pop()
                G_D['mz_L'] = mz.begin
                G_D['mz_R'] = mz.end
            else:
                G_D['mz_L'] = G_D['mz_R'] = None

    def iter_G(self):
        '''Iterate over information on experimental groupings G.'''
        for cluster_id, r in enumerate(self.BG):
            SG = r['SG']
            for G in SG:
                if SG.node[G]['type'] == 'G':
                    G_D = SG.node[G]
                    # Skip groupings without an unambiguous m/z range.
                    # NOTE(review): truthiness also skips an mz of exactly 0 —
                    # confirm m/z values are always positive here.
                    if G_D['mz_L'] and G_D['mz_R']:
                        yield { 'mz_L': G_D['mz_L'],
                                'mz_R': G_D['mz_R'],
                                'tot_estimate': G_D['estimate'],
                                'tot_intensity':G_D['intensity'],
                                'cluster_id': cluster_id,
                                'G_tag': G }

    def iter_MIG(self):
        '''Iterate over information on pathways MIG.'''
        for cluster_id, r in enumerate(self.BG):
            SG = r['SG']
            for M in SG:
                if SG.node[M]['type'] == 'M':
                    M_D = SG.node[M]
                    # Walk molecule -> isotopologue -> grouping paths.
                    for I in SG[M]:
                        I_D = SG.node[I]
                        for G in SG[I]:
                            if SG.node[G]['type'] == 'G':
                                G_D = SG.node[G]
                                yield { 'formula': M_D['formula'],
                                        'molType': M_D['molType'],
                                        'q': M_D['q'],
                                        'g': M_D['g'],
                                        'mz_L': G_D['mz_L'],
                                        'mz_R': G_D['mz_R'],
                                        'estimate': SG.edge[G][I]['estimate'],
                                        'tot_estimate_tmp': G_D['estimate'], # these are repeating
                                        'tot_intensity_tmp':G_D['intensity'], # these are repeating
                                        'cluster_id': cluster_id,
                                        'G_tag': G,
                                        'I_tag': I,
                                        'M_tag': M }

    def make_data_for_spectrum_plot(self):
        """Materialize both node and path iterators for the spectrum plot."""
        return {'G_nodes_data': list(self.iter_G()),
                'MIG_paths_data': list(self.iter_MIG()) }
def soma(n):
    """Return the n-th harmonic number: 1 + 1/2 + ... + 1/n.

    Terms are accumulated in ascending order of d, so float results match
    the original explicit loop exactly. Returns 0 when n < 1.
    """
    return sum(1 / d for d in range(1, n + 1))
def main():
    """Read n from stdin and print the n-th harmonic sum."""
    num = int(input())
    print(soma(num))


if __name__ == "__main__":
    main()
14644417459 | from fastapi import FastAPI, Response
from starlette.middleware.cors import CORSMiddleware
from routes import jobs
from logger.log_info import logger
app = FastAPI()


@app.middleware('http')
async def log_request(request, call_next):
    """Log each request and its response status, re-materializing the
    streamed response body so it can be returned intact."""
    logger.info(f'{request.method} {request.url}')
    response = await call_next(request)
    logger.info(f'Status code: {response.status_code}')
    # Drain the streaming body, then rebuild a plain Response from the bytes.
    chunks = [chunk async for chunk in response.body_iterator]
    return Response(
        content=b"".join(chunks),
        status_code=response.status_code,
        headers=dict(response.headers),
        media_type=response.media_type
    )
# CORS middleware left disabled; uncomment to allow cross-origin requests.
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )
# Mount the jobs API routes on the application.
app.include_router(jobs.router)
| Baktybek0312/Log_BloG | main.py | main.py | py | 831 | python | en | code | 0 | github-code | 36 |
def turnOver(number):
    '''
    Returns a list of all digits of this number in reverse order
    :return: list of digits
    NOTE: output 0 if the number is not a natural number
    '''
    # Reject non-naturals: anything below 1 or not exactly an int
    # (type() check also rejects bools, since type(True) is bool).
    if number < 1 or type(number) is not int:
        return 0
    digits = []
    rest = number
    while rest:
        rest, digit = divmod(rest, 10)
        digits.append(digit)
    return digits
if __name__ == '__main__':
    # Sanity checks: one normal number plus each rejected input kind.
    for sample, expected in ((123, [3, 2, 1]), (12.3, 0), (-123, 0), (0, 0)):
        assert turnOver(sample) == expected
| Shvoeva/Exercise9 | 1.ListFromAmong/main.py | main.py | py | 555 | python | en | code | 0 | github-code | 36 |
3585569128 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load (token, frequency) pairs from the pre-sorted unigram file.
master_list = []
with open("../Data_sets/corpus_2_unigram_sorted.txt") as fileobj:
    for raw in fileobj:
        # skip blank / whitespace-only separator lines
        if raw in ("", " ", "\n"):
            continue
        parts = raw.strip(' ').strip('\n').split(' ')
        master_list.append((parts[0], int(parts[1])))

# Plot the frequency of ranks 10000..10999 to illustrate Zipf's law.
ranks = list(range(10000, 11000))
freqs = [master_list[r][1] for r in ranks]

df = pd.DataFrame({'10000-11000': freqs}, index=ranks)
lines = df.plot.line()
lines.set_title('Corpus 2 Zipfs Law')
lines.set_xlabel('Rank')
lines.set_ylabel('Frequency')
plt.show()
71050589225 | import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
st.title("💬 Prompt Critic")
# SECURITY(review): hard-coded OpenAI API key committed to source — this key
# is leaked and must be revoked; load it from an environment variable or
# st.secrets instead of embedding it here.
openai_api_key = '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'
def prompt_critic(prompts):
    """Ask GPT-4 to critique and improve the given prompt, rendering the
    answer in the Streamlit app."""
    # Instantiate LLM model
    llm = OpenAI(model_name="gpt-4", openai_api_key=openai_api_key)
    # Build and fill the critique template in one step
    template = "As an expert prompt enigneer, critique this '{prompts}'. Provide feedback and areas of improvement. At the end, please improve it"
    prompt_query = PromptTemplate(input_variables=["prompts"], template=template).format(prompts=prompts)
    # Run LLM model and display its critique
    return st.info(llm(prompt_query))
# Single-field form: run the critique when the user submits a prompt.
with st.form("myform"):
    topic_text = st.text_input("Enter prompt:", "")
    submitted = st.form_submit_button("Submit")
    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
    elif submitted:
        prompt_critic(topic_text)
| shihwesley/CMPE297 | Prompt Engineering assignment/pages/1_Prompt Critic.py | 1_Prompt Critic.py | py | 990 | python | en | code | 0 | github-code | 36 |
16127616120 | import os
import json
from keras.models import load_model
import pandas as pd
import pickle
import numpy as np
import shutil
from keras.preprocessing import image
from tqdm.notebook import tqdm
from PIL import ImageFile
# Directory layout: trained models, pickled label maps, and JSON outputs,
# all resolved relative to the current working directory.
BASE_MODEL_PATH = os.path.join(os.getcwd(),"model")
PICKLE_DIR = os.path.join(os.getcwd(),"pickle_files")
JSON_DIR = os.path.join(os.getcwd(),"json_files")
if not os.path.exists(JSON_DIR):
    os.makedirs(JSON_DIR)
# Best checkpoint by validation accuracy (epoch 23, acc 1.00).
BEST_MODEL = os.path.join(BASE_MODEL_PATH,"self_trained","distracted-23-1.00.hdf5")
# NOTE: the model and the label->index map are loaded at import time.
model = load_model(BEST_MODEL)
with open(os.path.join(PICKLE_DIR,"labels_list.pkl"),"rb") as handle:
    labels_id = pickle.load(handle)
def path_to_tensor(img_path):
    """Load one image file as a (1, 128, 128, 3) float array (batch of one)."""
    # loads RGB image as PIL.Image.Image type, resized to 128x128
    img = image.load_img(img_path, target_size=(128, 128))
    # (128, 128, 3) array, then prepend the batch axis
    arr = image.img_to_array(img)
    return arr[np.newaxis, ...]
def paths_to_tensor(img_paths):
    """Stack per-image tensors for every path into one (N, 128, 128, 3) array."""
    tensors = [path_to_tensor(p) for p in tqdm(img_paths)]
    return np.vstack(tensors)
# Let PIL load images whose file data is truncated instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# test_tensors = paths_to_tensor(data_test.iloc[:,0]).astype('float32')/255 - 0.5
# image_tensor = paths_to_tensor(image_path).astype('float32')/255 - 0.5
def predict_result(image_tensor):
    """Run the distraction classifier on a batch tensor and return a readable label.

    Args:
        image_tensor: preprocessed batch of shape (N, 128, 128, 3); the label is
            taken from the argmax of the first prediction, so a batch of 1 is
            assumed (int() below would fail on a larger batch).

    Returns:
        str: human-readable class name, e.g. "SAFE_DRIVING".
    """
    ypred_test = model.predict(image_tensor, verbose=1)
    ypred_class = np.argmax(ypred_test, axis=1)
    print(ypred_class)
    # Invert the label -> index map loaded from the training pickle.
    id_labels = dict()
    for label_name, idx in labels_id.items():
        id_labels[idx] = label_name
    ypred_class = int(ypred_class)
    print(id_labels[ypred_class])
    # Human-readable names for the Kaggle c0..c9 distraction classes.
    class_name = dict()
    class_name["c0"] = "SAFE_DRIVING"
    class_name["c1"] = "TEXTING_RIGHT"
    class_name["c2"] = "TALKING_PHONE_RIGHT"
    class_name["c3"] = "TEXTING_LEFT"
    class_name["c4"] = "TALKING_PHONE_LEFT"
    class_name["c5"] = "OPERATING_RADIO"
    class_name["c6"] = "DRINKING"
    class_name["c7"] = "REACHING_BEHIND"
    class_name["c8"] = "HAIR_AND_MAKEUP"
    class_name["c9"] = "TALKING_TO_PASSENGER"
    # Persist the mapping for other tools, but use the in-memory dict directly
    # instead of re-reading the file we just wrote (the original round-tripped
    # through disk for no reason).
    with open(os.path.join(JSON_DIR, 'class_name_map.json'), 'w') as secret_input:
        json.dump(class_name, secret_input, indent=4, sort_keys=True)
    label = class_name[id_labels[ypred_class]]
    print(label)
    return label
| Abhinav1004/Distracted-Driver-Detection | demo_on_video/driver_prediction.py | driver_prediction.py | py | 2,607 | python | en | code | 37 | github-code | 36 |
2847508443 |
# Qus:https://leetcode.com/problems/combination-sum/
def solve(candidates, target, result, start=0, out=None):
    """Depth-first search appending every combination summing to target into result.

    Args:
        candidates: ascending-sorted list of positive ints (may be reused).
        target: remaining sum to reach; negative means the path overshot.
        result: output list of combinations, mutated in place.
        start: index to resume from, so combinations stay non-decreasing
            and no permutations are duplicated.
        out: combination built along the current path.
    """
    # Fixed: avoid a mutable default argument (out=[] was shared across calls).
    if out is None:
        out = []
    # overshot the target: no combination can be completed on this path
    if target < 0:
        return
    # exact hit: record the current combination
    if target == 0:
        result.append(out)
        return
    for i in range(start, len(candidates)):
        # candidates are sorted, so once one overshoots, all later ones do too
        if target - candidates[i] < 0:
            break
        # reuse index i: the same candidate may appear multiple times
        solve(candidates, target - candidates[i], result, i, out + [candidates[i]])
class Solution(object):
    """LeetCode 39 — Combination Sum."""

    def combinationSum(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        # Sorting (in place, as before) lets the DFS prune as soon as a
        # candidate overshoots the remaining target.
        candidates.sort()
        # Unique combinations that sum to target, filled in by solve().
        result = []
        solve(candidates, target, result)
        return result
25376871762 | from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
import numpy as np
def get_cv_metrics(text_clf, train_data, train_class, k_split):
    """Return (mean CV accuracy, classification report) for a classifier pipeline.

    The pipeline is scored with k-fold cross-validation, and the per-class
    report is built from the out-of-fold predictions.
    """
    # Accuracy over k random folds; n_jobs=-1 uses all cores.
    fold_scores = cross_val_score(
        text_clf,
        train_data,
        train_class,
        cv=k_split,
        scoring='accuracy',
        n_jobs=-1,
    )
    # Out-of-fold predictions feed the per-class precision/recall report.
    oof_predictions = cross_val_predict(text_clf,
                                        train_data,
                                        train_class,
                                        cv=k_split)
    return np.mean(fold_scores), classification_report(train_class, oof_predictions)
| abelsaug/EmpathyPrediction | CV_metrics.py | CV_metrics.py | py | 1,078 | python | en | code | 0 | github-code | 36 |
14719000244 | from multiprocessing.sharedctypes import Value
from xmlrpc.client import Boolean
from source.dataloaders.data_loaders_factory import DataLoaderFactory
import torch
import torchaudio
import os
import numpy as np
import csv
from source.dataloaders.project_2_dataset import Project_2_Dataset
from source.utils.config_manager import ConfigManager
UNKNOWN_DIRS = ["bed", "bird", "cat", "dog", "eight", "five", "four", "happy", "house", "marvin", "nine", "one", "seven", "sheila", "six", "three", "tree", "two", "wow", "zero"]
class OneVsAllDataLoadersFactory(DataLoaderFactory):
    """DataLoader factory for binary one-vs-all speech-command tasks.

    `one` picks the positive class: 'silence' (silence vs. everything else)
    or 'unknown' (unknown words vs. known commands). The labels dict maps
    class names to 0 (the "one" class) or 1 (the rest).
    """

    def __init__(self, dataset_path, transform_train: torch.nn.Sequential = None, transform_test: torch.nn.Sequential = None, one='silence',
                 from_file_path=None, labels=None, no_workers=4, no_workers_eden=16):
        super().__init__(dataset_path)
        self.train_transformer = transform_train
        self.test_transfomer = transform_test
        self.one = one
        self.from_file_path = from_file_path
        # NOTE(review): this assignment is always overwritten by the branches
        # below, so the `labels` argument is effectively ignored — confirm intent.
        self.labels = labels
        if one == 'silence':
            self.with_silence = 'extra'
            self.labels = {'silence': 0, 'silence_extra': 0, 'unknown': 1}
        elif one == 'unknown':
            self.with_silence = False
            self.labels = {'unknown': 0, 'yes': 1, 'no': 1, 'up': 1, 'down': 1, 'left': 1, 'right': 1,
                           'on': 1, 'off': 1, 'stop': 1, 'go': 1, 'silence': 1, 'silence_extra': 1}
        else:
            print("one should be one of ['silence', 'unknown']")
            raise ValueError()
        # Eden (cluster) runs afford more loader workers than local runs.
        if ConfigManager.is_eden():
            self.no_workers = no_workers_eden
        else:
            self.no_workers = no_workers

    def __str__(self):
        # NOTE(review): the f-string already interpolates; the trailing
        # .format(self=self) is a leftover no-op from a str.format version.
        return f'Silence vs. speech: Train_transformer:{self.train_transformer.__str__()}; Silence vs. speech: Test_transformer:{self.test_transfomer.__str__()}'.format(self=self)

    def get_train_loader(self, batch_size: int):
        """Training loader; a weighted random sampler rebalances the two classes."""
        self.__load_train_data(
            self.dataset_path, self.train_transformer)
        sampler = self.__get_sampler_to_balance_classes(self.train_ds._walker)
        return torch.utils.data.DataLoader(self.train_ds, batch_size, shuffle=False, drop_last=True, sampler=sampler, num_workers=self.no_workers)

    def get_train_valid_loader(self, batch_size: int):
        # Fixed: the original raised `NotImplemented()` — NotImplemented is a
        # non-callable singleton, so that line raised a confusing TypeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError()

    def get_valid_loader(self, batch_size: int):
        """Validation loader over the 'validation' subset (no resampling)."""
        self.__load_valid_data(
            self.dataset_path, self.test_transfomer)
        return torch.utils.data.DataLoader(self.valid_ds, batch_size, shuffle=False, drop_last=True, num_workers=self.no_workers)

    def get_test_loader(self, batch_size: int):
        """Test loader; keeps partial final batches (drop_last=False)."""
        self.__load_test_data(
            self.dataset_path, self.test_transfomer)
        return torch.utils.data.DataLoader(self.test_ds, batch_size, shuffle=False, drop_last=False, num_workers=self.no_workers)

    def __load_train_data(self, data_dir: str, transform_train: torch.nn.Sequential):
        self.train_ds = Project_2_Dataset(
            with_silence=self.with_silence, with_unknown=True, root=data_dir, subset='training', transform=transform_train, labels=self.labels)

    def __load_valid_data(self, data_dir: str, transform_valid: torch.nn.Sequential):
        self.valid_ds = Project_2_Dataset(
            with_silence=self.with_silence, with_unknown=True, root=data_dir, subset='validation', transform=transform_valid, labels=self.labels)

    def __load_test_data(self, data_dir: str, transform_test: torch.nn.Sequential):
        # NOTE(review): unlike train/valid, this passes positional flags and no
        # labels mapping — confirm that is intended for the test subset.
        self.test_ds = Project_2_Dataset(
            True, True, data_dir, 'testing', transform=transform_test, from_file_path=self.from_file_path)

    def __get_sampler_to_balance_classes(self, samples_filenames):
        """Weighted sampler giving each of the two classes equal total weight."""
        # 0 marks a sample of the "one" class, 1 marks the rest.
        if self.one == 'silence':
            zeros_for_one_class = [0 if 'silence' in x else 1 for x in samples_filenames]
        else:
            zeros_for_one_class = [0 if any(unknown in x for unknown in UNKNOWN_DIRS) else 1 for x in samples_filenames]
        samples_count = len(samples_filenames)
        rest_count = np.count_nonzero(zeros_for_one_class)
        one_class_count = samples_count - rest_count
        class_sample_count = [one_class_count, rest_count]
        # Inverse-frequency weight per class, then gathered per sample.
        weights = 1 / torch.Tensor(class_sample_count)
        weights = weights.double()
        sample_weights = weights[zeros_for_one_class]
        sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weights, len(sample_weights))
        return sampler
| Shaveek23/cifar10-computer_vision | source/dataloaders/onevsall_dataloadersfactory.py | onevsall_dataloadersfactory.py | py | 4,491 | python | en | code | 0 | github-code | 36 |
30490693575 | # encoding: utf-8
from paddlelite.lite import *
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from time import time
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageEnhance
class PPYOLO_Detector(object):
    """PaddleLite wrapper around a compiled PP-YOLO ``.nb`` model.

    Handles image preprocessing (resize + normalize), ARM-CPU inference,
    score thresholding, and optional drawing of predicted boxes.
    """

    def __init__(self, nb_path=None,  # path to the compiled .nb model file
                 label_list=None,  # list of class-name strings
                 input_size=[320, 320],  # network input size
                 img_means=[0., 0., 0.],  # per-channel normalization means
                 img_stds=[0., 0., 0.],  # per-channel normalization stds
                 threshold=0.1,  # score threshold for keeping detections
                 num_thread=1,  # number of ARM CPU worker threads
                 ):
        # Validate the required parameters.
        assert nb_path is not None, \
            "Please make sure the model_nb_path has inputed!(now, nb_path is None.)"
        assert len(input_size) == 2, \
            "Please make sure the input_shape length is 2, but now its length is {0}".format(len(input_size))
        assert len(img_means) == 3, \
            "Please make sure the image_means shape is [3], but now get image_means' shape is [{0}]".format(
                len(img_means))
        assert len(img_stds) == 3, \
            "Please make sure the image_stds shape is [3], but now get image_stds' shape is [{0}]".format(len(img_stds))
        assert len([i for i in img_stds if i <= 0]) < 1, \
            "Please make sure the image_stds data is more than 0., but now get image_stds' data exists less than or equal 0."
        assert threshold > 0. and threshold < 1., \
            "Please make sure the threshold value > 0. and < 1., but now get its value is {0}".format(threshold)
        assert num_thread > 0 and num_thread <= 4, \
            "Please make sure the num_thread value > 1 and <= 4., but now get its value is {0}".format(num_thread)
        # Path of the compiled .nb model file
        self.model_path = nb_path
        # Number of ARM CPU worker threads
        self.num_thread = num_thread
        # Score threshold used when filtering predictions
        self.threshold = threshold
        # Input image size expected by the network
        self.input_size = input_size
        # Image normalization parameters:
        # means
        self.img_means = img_means
        # standard deviations
        self.img_stds = img_stds
        # List of class names
        self.label_list = label_list
        # Number of classes (1 if no valid label list is given)
        self.num_class = len(label_list) if (label_list is not None) and isinstance(label_list, list) else 1
        # One box color per class
        self.box_color_map = self.random_colormap()
        # Start of the model-loading timer
        self.prepare_time = self.runtime()
        # Prediction configuration
        self.config = MobileConfig()
        # Set the model path
        self.config.set_model_from_file(nb_path)
        # Set the number of threads
        self.config.set_threads(num_thread)
        # Build the predictor
        self.predictor = create_paddle_predictor(self.config)
        # Total time spent loading the model
        self.prepare_time = self.runtime() - self.prepare_time
        print("The Prepare Model Has Cost: {0:.4f} s".format(self.prepare_time))

    def get_input_img(self, input_img):
        '''Load the image to run prediction on.
        input_img: image path or np.ndarray image data - [h, w, c]
        '''
        assert isinstance(input_img, str) or isinstance(input_img, np.ndarray), \
            "Please enter input is Image Path or numpy.ndarray, but get ({0}) ".format(input_img)
        # Start of the image-loading timer
        self.load_img_time = self.runtime()
        if isinstance(input_img, str):
            # Read the image from the given path
            self.input_img = Image.open(input_img)
        elif isinstance(input_img, np.ndarray):
            # Wrap the ndarray image data
            self.input_img = Image.fromarray(input_img)
        # Original image size: h, w
        self.input_shape = np.asarray(self.input_img).shape[:-1]
        # Resize to the network's expected input size
        input_data = self.input_img.resize(self.input_size, Image.BILINEAR)
        # Transpose to the [1, 3, h, w] layout expected by the predictor
        input_data = np.array(input_data).transpose(2, 0, 1).reshape([1, 3] + self.input_size).astype('float32')
        # Normalize the image data
        input_data = self.normlize(input_data)
        self.scale_factor = [1., 1.]  # [1., 1.]
        # Configure the input tensors:
        # tensor 0 — the [[w, h]] input size
        self.input_tensor0 = self.predictor.get_input(0)
        self.input_tensor0.from_numpy(np.asarray([self.input_size], dtype=np.int32))
        # tensor 1 — the normalized [1, 3, h, w] image data
        self.input_tensor1 = self.predictor.get_input(1)
        self.input_tensor1.from_numpy(input_data)
        # tensor 2 — ratio between network input size and original image size
        # NOTE(review): the float scale factor is cast to int32 here — verify
        # the model does not expect float32 scale factors.
        self.input_tensor2 = self.predictor.get_input(2)
        self.input_tensor2.from_numpy(np.asarray(self.scale_factor, dtype=np.int32))
        # Total time spent loading the image into the predictor
        self.load_img_time = self.runtime() - self.load_img_time
        # print("The Load Image Has Cost: {0:.4f} s".format(self.load_img_time))

    def get_output_img(self):
        '''Run inference and return the boxes that pass the score threshold.
        (Drawing via load_bbox is currently commented out.)
        '''
        # Start of the inference timer
        self.predict_time = self.runtime()
        # Run prediction on the image prepared by get_input_img
        self.predictor.run()
        # Fetch the output bbox tensor
        self.output_tensor = self.predictor.get_output(0)
        # Convert to numpy
        output_bboxes = self.output_tensor.numpy()
        # Keep only boxes whose score (column 1) is >= the threshold
        output_bboxes = output_bboxes[output_bboxes[:, 1] >= self.threshold]
        # Box drawing is disabled; see load_bbox for the annotated-image path
        # self.output_img = self.load_bbox(output_bboxes, num_bbox)
        # Total inference time
        self.predict_time = self.runtime() - self.predict_time
        print("The Predict Image Has Cost: {0:.4f} s".format(self.predict_time))
        # return self.output_img
        return output_bboxes

    def normlize(self, input_img):
        '''Normalize the image data (method name keeps the original typo).
        input_img: image data -- numpy.ndarray of shape [1, 3, h, w]
        '''
        # Per-RGB-channel mean/std normalization after scaling to [0, 1]
        input_img[0, 0] = (input_img[0, 0] / 255. - self.img_means[0]) / self.img_stds[0]
        input_img[0, 1] = (input_img[0, 1] / 255. - self.img_means[1]) / self.img_stds[1]
        input_img[0, 2] = (input_img[0, 2] / 255. - self.img_means[2]) / self.img_stds[2]
        return input_img

    def load_bbox(self, input_bboxs, num_bbox):
        '''Draw the predicted boxes on the original image and label them.
        input_bboxs: predicted boxes, rows of [cls_id, score, x1, y1, x2, y2]
        num_bbox: maximum number of boxes to draw
        '''
        # Scratch buffer for one box: [cls_id, score, x1, y1, x2, y2]
        self.draw_bboxs = [0] * 6
        # Drawer bound to the image loaded by get_input_img
        draw = ImageDraw.Draw(self.input_img)
        # Clamp the number of boxes to draw to the number available
        # input_bboxs.shape[0]: number of valid predicted boxes
        if len(input_bboxs) != 0:  # there are valid boxes
            num_bbox = input_bboxs.shape[0] if num_bbox > input_bboxs.shape[0] else num_bbox
        else:
            num_bbox = 0  # nothing valid: draw nothing
        # Draw and label each box
        for i in range(num_bbox):
            # class id
            self.draw_bboxs[0] = input_bboxs[i][0]
            # class score
            self.draw_bboxs[1] = input_bboxs[i][1]
            print(self.label_list[int(self.draw_bboxs[0])], '- score{', self.draw_bboxs[1], "} : ", input_bboxs[i][2],
                  input_bboxs[i][3], input_bboxs[i][4], input_bboxs[i][5])
            # Top-left corner of the box:
            # max(min(x / input_size, 1.), 0.) keeps the relative coordinate inside [0, 1];
            # multiplying by input_shape scales back to original-image pixels;
            # the outer min(...) clamps to the image bounds (h, w).
            self.draw_bboxs[2] = min(max(min(input_bboxs[i][2] / self.input_size[0], 1.), 0.) * self.input_shape[1],
                                     self.input_shape[1])
            self.draw_bboxs[3] = min(max(min(input_bboxs[i][3] / self.input_size[1], 1.), 0.) * self.input_shape[0],
                                     self.input_shape[0])
            # Bottom-right corner of the box
            self.draw_bboxs[4] = min(max(min(input_bboxs[i][4] / self.input_size[0], 1.), 0.) * self.input_shape[1],
                                     self.input_shape[1])
            self.draw_bboxs[5] = min(max(min(input_bboxs[i][5] / self.input_size[1], 1.), 0.) * self.input_shape[0],
                                     self.input_shape[0])
            # print(self.draw_bboxs[2], self.draw_bboxs[3], self.draw_bboxs[4], self.draw_bboxs[5])
            # Draw the rectangle;
            # self.box_color_map[int(self.draw_bboxs[0])] is the color for this class
            draw.rectangle(((self.draw_bboxs[2], self.draw_bboxs[3]),
                            (self.draw_bboxs[4], self.draw_bboxs[5])),
                           outline=tuple(self.box_color_map[int(self.draw_bboxs[0])]),
                           width=2)
            # Write the class name and score next to the box
            draw.text((self.draw_bboxs[2], self.draw_bboxs[3] + 1),
                      "{0}:{1:.4f}".format(self.label_list[int(self.draw_bboxs[0])], self.draw_bboxs[1]),
                      tuple(self.box_color_map[int(self.draw_bboxs[0])]))
        # Return the annotated image data
        return np.asarray(self.input_img)

    def random_colormap(self):
        '''Build a color map with one (deterministic) random color per class.
        '''
        np.random.seed(2021)
        color_map = [[np.random.randint(20, 255),
                      np.random.randint(64, 200),
                      np.random.randint(128, 255)]
                     for i in range(self.num_class)]
        return color_map

    def runtime(self):
        '''Return the current wall-clock timestamp in seconds.
        '''
        return time()
| Feng1909/PPYOLO-Tiny | Tiny.py | Tiny.py | py | 10,384 | python | en | code | 2 | github-code | 36 |
29986560679 | #!/usr/bin/env python
import glob
import os
def main(mags_dir="./data/mags", out_path="./data/mags_concat.fa"):
    """Concatenate every MAG FASTA in *mags_dir* into one multi-FASTA file.

    Each input ``.fa`` file becomes a single record: the header is the file
    name without its extension, and the sequence is the concatenation of
    every sequence line in the file (internal contig headers are dropped).

    Args:
        mags_dir: directory containing the per-MAG ``.fa`` files
                  (default keeps the original hard-coded location).
        out_path: path of the combined FASTA file to write.
    """
    entries = []
    for path in glob.glob(os.path.join(mags_dir, "*.fa")):
        filename = os.path.basename(path)
        header = ">" + os.path.splitext(filename)[0]
        print(filename, header)
        with open(path, "r") as handle:
            # join() avoids the quadratic cost of repeated `+=` on strings
            sequence = "".join(line.strip() for line in handle if line[0] != ">")
        entries.append(header + "\n" + sequence + "\n")
    with open(out_path, "w") as out:
        out.write("".join(entries))
if __name__ == '__main__':
main()
| LuiggiTenorioK/minhash-benchmark | src/concat_mags.py | concat_mags.py | py | 715 | python | en | code | 0 | github-code | 36 |
14267295600 | from typing import Any
from data_structures.stacks.stack import Stack, PopEmpty
class StackWithBottom(Stack):
    """Stack that additionally tracks its bottom node, so the oldest
    element can be removed as well as the newest."""

    def __init__(self):
        # Oldest (bottom) node; None while the stack is empty.
        self._bottom = None
        super(StackWithBottom, self).__init__()

    def push(self, value: Any):
        super(StackWithBottom, self).push(value)
        # The very first push becomes the bottom.
        if not self._bottom:
            self._bottom = self._top

    def pop(self):
        popped = super(StackWithBottom, self).pop()
        # Popping the last element clears the bottom marker too.
        if self.is_empty():
            self._bottom = None
        return popped

    def pop_bottom(self):
        """Remove and return the bottom element's value."""
        if not self._bottom:
            raise PopEmpty()
        bottom_value = self._bottom.value
        self._bottom = self._bottom.before
        return bottom_value

    @property
    def bottom(self):
        """The current bottom node (or None when empty)."""
        return self._bottom
class StackOfStacks(object):
    """A stack that spills into a new sub-stack once one reaches `limit`
    elements (CtCI "stack of plates")."""

    def __init__(self, limit):
        # Maximum number of elements per sub-stack.
        self._limit = limit
        self.stacks = list()
        self.stacks.append(StackWithBottom())

    def push(self, value):
        if not self.stacks:
            self.stacks.append(StackWithBottom())
        if len(self.stacks[-1]) >= self._limit:
            # Current sub-stack is full: start a new one.
            self.stacks.append(StackWithBottom())
        self.stacks[-1].push(value)

    def pop(self) -> Any:
        """Pop from the newest sub-stack, discarding it when emptied."""
        if not self.stacks:
            raise PopEmpty
        value = self.stacks[-1].pop()
        if self.stacks[-1].is_empty():
            # Remove empty stack
            self.stacks.pop()
        return value

    def pop_at(self, index):
        """Pop from the sub-stack at `index`, refilling it from the right."""
        # BUG FIX: the original guard was `len(self.stacks) < index`, which
        # let index == len(self.stacks) through and crashed with IndexError
        # below instead of raising the intended error.
        if index >= len(self.stacks):
            raise Exception("No such stack")
        stack = self.stacks[index]
        value = stack.pop()
        if not stack.bottom:
            del self.stacks[index]
        else:
            self.left_shift(index)
        return value

    def left_shift(self, index):
        # Move the bottom of the next sub-stack onto this one, cascading
        # rightwards so every sub-stack (except the last) stays full.
        if len(self.stacks) > index + 1:
            value = self.stacks[index + 1].pop_bottom()
            self.stacks[index].push(value)

            index += 1
            if not self.stacks[index].bottom:
                del self.stacks[index]
            else:
                self.left_shift(index)
| goncalossantos/CtCI | chapter_3/stack_of_stacks.py | stack_of_stacks.py | py | 2,062 | python | en | code | 0 | github-code | 36 |
20456498612 | import discum
import json
import os.path
import random
import time
from rich import print
def load_from_data_else_ask(field, message):
global data
if data is not None and field in data.keys():
return data[field]
return input(message)
data = None
if os.path.exists("data.json"):
data = json.loads(open("data.json").read())
if data is not None and "token" in data.keys():
token = data["token"]
else:
token = input('ur token: ')
bot = discum.Client(token=token, log=False)
memberz = []
guildz = load_from_data_else_ask("guildid", "Please input guild ID: ")
if data is not None and "channelids" in data.keys():
channelz = data["channelids"]
else:
channelz = [load_from_data_else_ask("channelid", "Please input a channel ID in that guild: ")]
if data is not None and "messages" in data.keys():
messagz = data["messages"]
else:
messagz = [load_from_data_else_ask("message", "Please input your message: ")]
timez = load_from_data_else_ask("time", "How long between DMs: ")
if data is not None and "ignoreRoles" in data.keys():
ignores = data["ignoreRoles"]
else:
ignores = []
@bot.gateway.command
def memberTest(resp):
if resp.event.ready_supplemental:
for channel in channelz:
bot.gateway.fetchMembers(guildz, channel)
if bot.gateway.finishedMemberFetching(guildz):
bot.gateway.removeCommand(memberTest)
bot.gateway.close()
bot.gateway.run()
badMemberz = set()
print("Getting members not to message")
for role in ignores:
for mem in bot.getRoleMemberIDs(guildz, role).json():
badMemberz.add(mem)
print(badMemberz)
print("Starting add members.")
for memberID in bot.gateway.session.guild(guildz).members:
if memberID in badMemberz:
continue
memberz.append(memberID)
print("Starting to DM.")
for x in memberz:
try:
rand = random.randint(0, 20)
if rand == 20:
print(f'Sleeping for 45 seconds to prevent rate-limiting.')
time.sleep(45)
print(f'Done sleeping!')
print(f"Preparing to DM {x}.")
time.sleep(int(timez))
newDM = bot.createDM([f"{x}"]).json()["id"]
bot.sendMessage(newDM,
f"{random.choice(messagz)} DM bot by https://github.com/Apophis52/Python-Mass-DM-Selfbot/")
print(f'DMed {x}.')
except Exception as E:
print(E)
print(f'Couldn\'t DM {x}.')
| Apophis52/Python-Mass-DM-Selfbot | index.py | index.py | py | 2,499 | python | en | code | 21 | github-code | 36 |
# Read the toy-language source; the with-block closes the file automatically.
# (The original had a stray `file.close` — missing parentheses made it a
# no-op attribute access, and it was redundant inside `with` anyway.)
with open("prog.txt", "r") as file:
    lines = file.readlines()
def Syntaxe():
    """Report an invalid-syntax statement (returns None, like print)."""
    print("SYNTAXE INVALIDE")
def variable(name, value):
    """Handle a `variable <name> = <value>` statement; returns the value.

    `name` is currently unused — no symbol table exists yet.
    """
    return value
def direPrint(value):
    """Execute a `print` statement: echo the value to stdout."""
    print(value)
def PrintWithVar(value):
    """Print a value that may come from a variable (same as direPrint for now)."""
    print(value)
# Minimal line-by-line interpreter for the toy "BeyProg" language.
# NOTE(review): a blank line yields an empty `args`, so args[0] raises
# IndexError — confirm input files never contain blank lines.
for line in lines:
    line = line.strip()
    args = line.split()
    if args[0] == "variable":
        # Syntax: variable <name> = <value>
        if len(args) == 1:
            Syntaxe()
        elif len(args) == 2:
            Syntaxe()
        else:
            if args[2] == "=":
                variable(args[1], args[3])
    elif args[0] == "print":
        # Syntax: print <value>
        # NOTE(review): only args[1] is printed, so multi-word arguments are
        # truncated to the first word — confirm whether that is intended.
        if len(args) == 1:
            Syntaxe()
        else:
            direPrint(args[1]) #print(args[1])
    elif args[0] == "BeyProg":
print("Commande de Test du langage") | Program132/Python | BeyProg/main.py | main.py | py | 956 | python | en | code | 1 | github-code | 36 |
24997436439 | """empty message
Revision ID: 60ec815b1cea
Revises: e20eb33302a2
Create Date: 2023-01-28 01:32:26.058698
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '60ec815b1cea'
down_revision = 'e20eb33302a2'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the server-side identity default on
    ``user.id`` (client-side autoincrement remains) and allow NULL in
    ``user.created_on``."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.alter_column('id',
               existing_type=sa.INTEGER(),
               server_default=None,
               existing_nullable=False,
               autoincrement=True)
        batch_op.alter_column('created_on',
               existing_type=sa.DATE(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore NOT NULL on ``user.created_on`` and
    re-attach the server-side identity default to ``user.id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.alter_column('created_on',
               existing_type=sa.DATE(),
               nullable=False)
        batch_op.alter_column('id',
               existing_type=sa.INTEGER(),
               server_default=sa.Identity(always=True, start=1, increment=1, minvalue=1, maxvalue=2147483647, cycle=False, cache=1),
               existing_nullable=False,
               autoincrement=True)
    # ### end Alembic commands ###
| gaelzarco/aihub | flask/migrations/versions/60ec815b1cea_.py | 60ec815b1cea_.py | py | 1,371 | python | en | code | 1 | github-code | 36 |
10837308716 | #!/bin/python3
# https://www.hackerrank.com/challenges/30-scope/problem?isFullScreen=true
class Difference:
    """Hold a sequence of numbers and compute the largest pairwise difference."""

    def __init__(self, a):
        self.__elements = a

    def computeDifference(self):
        """Store max(elements) - min(elements) in self.maximumDifference."""
        values = [int(element) for element in self.__elements]
        self.maximumDifference = abs(max(values) - min(values))
# End of Difference class
# Read (and discard) the element count, then the space-separated elements.
_ = input()
a = [int(token) for token in input().split(' ')]
d = Difference(a)
d.computeDifference()
print(d.maximumDifference)
| sidorkinandrew/python-coding | hackerrank/30-days-of-code/day-14-scope.py | day-14-scope.py | py | 449 | python | en | code | 0 | github-code | 36 |
5862498200 | #!/usr/bin/env python3
"""
DropOut Regularization
"""
import tensorflow as tf
def dropout_create_layer(prev, n, activation, keep_prob):
    """
    Creates a tensorflow layer that includes dropout regularization.

    Args:
        prev: output tensor of the previous layer.
        n: number of nodes in the new layer.
        activation: activation function for the layer.
        keep_prob: probability that a node is kept.

    Returns:
        the output tensor of the new layer applied to `prev`.
    """
    # NOTE(review): tf.layers.Dropout takes a *drop* rate, yet keep_prob is
    # passed straight through — verify whether (1 - keep_prob) was intended.
    dropout = tf.layers.Dropout(keep_prob)
    # He/variance-scaling initialization averaged over fan-in and fan-out.
    init = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG")
    # NOTE(review): kernel_regularizer normally expects a regularizer function
    # returning a loss tensor; passing a Dropout layer here is unconventional —
    # confirm dropout is actually applied as intended.
    layer = tf.layers.Dense(units=n,
                            activation=activation,
                            kernel_initializer=init,
                            kernel_regularizer=dropout)
    return layer(prev)
| ZakariaALLA/mundiapolis-ml | 0x05-regularization/6-dropout_create_layer.py | 6-dropout_create_layer.py | py | 561 | python | en | code | 0 | github-code | 36 |
70295411304 | import torch.utils.data
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as T
def load_cifar10():
    """Load CIFAR-10 and return (train, val, test) datasets.

    The 50k training images are randomly split into 40k train / 10k val.
    All splits share the same preprocessing: ToTensor, per-channel
    normalization to [-1, 1], and a resize to 128x128.
    """
    # Single transform pipeline shared by every split (was duplicated verbatim).
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        T.Resize(size=(128, 128), antialias=True)
    ])

    full_train = datasets.CIFAR10(
        root='../data',
        train=True,
        download=True,
        transform=transform,
        target_transform=None
    )
    # NOTE: the split is unseeded, so val membership varies between runs.
    train_data, val_data = torch.utils.data.random_split(full_train, [40000, 10000])

    test_data = datasets.CIFAR10(
        root='../data',
        train=False,
        download=True,
        transform=transform,
    )
    return train_data, val_data, test_data
def create_dataloaders(train_data, val_data, test_data, batch_size):
    """Wrap the three datasets in DataLoaders; only the test loader keeps order."""
    return (
        DataLoader(train_data, batch_size=batch_size, shuffle=True),
        DataLoader(val_data, batch_size=batch_size, shuffle=True),
        DataLoader(test_data, batch_size=batch_size, shuffle=False),
    )
| simogiovannini/DLA-lab1 | utils/data_loader.py | data_loader.py | py | 1,311 | python | en | code | 0 | github-code | 36 |
31641438007 | import requests
from bs4 import BeautifulSoup
import category
site_url = 'https://books.toscrape.com/index.html' # global var
def save_all_categories(site_url):
    """
    Scrape every category linked from the site's navigation menu and save
    the book data of each one (category.save_one_category handles the
    per-category pagination).
    """
    # (The original leaked the anchor list through a `global links`; nothing
    # outside this function used it, so the global was removed.)
    response = requests.get(site_url)
    print(response)  # show the HTTP status for quick diagnostics
    if not response.ok:
        return
    soup = BeautifulSoup(response.text, features='html.parser')
    # The side navigation is the single 'nav nav-list' <ul>; its nested <ul>
    # holds one <a> per category.
    for nav in soup.find_all('ul', {'class': 'nav nav-list'}):
        for anchor in nav.find('ul').find_all('a'):
            # Turn the relative 'catalogue/...' href into an absolute URL.
            link = anchor['href'].replace('catalogue', 'http://books.toscrape.com/catalogue')
            category.save_one_category(link)
# Runs the full scrape at import time (and again under __main__ below).
save_all_categories(site_url)
if __name__ == "__main__":
    # NOTE(review): get_book_data, EXAMPLE_URL, save_book_csv, save_one_category
    # and index_url are not defined in this module, so running the file directly
    # raises NameError after the scrape above — confirm the intended entry point.
    book_data = get_book_data(EXAMPLE_URL)
    save_book_csv(book_data)
    save_one_category(index_url)
    save_all_categories(site_url)
save_image_file(url_image) | o0nekov0o/OpenClassrooms_P2 | main.py | main.py | py | 1,351 | python | en | code | 1 | github-code | 36 |
40517176305 | import sys
# Printing with topological location
def topo_loc(line, lineno, colno, msg=False):
    """Render a source line with a caret under column `colno`.

    Output: the numbered line, a caret marker, an optional message, and a
    trailing "colno X, lineno Y" summary — each on its own line.
    """
    caret_pad = len(str(lineno)) + colno + 2  # width of "N| " prefix + column
    parts = ["{}| {}\n".format(lineno, line), "^\n".rjust(caret_pad)]
    if msg:
        parts.append(msg + "\n")
    parts.append("colno {}, lineno {}\n".format(colno, lineno))
    return "".join(parts)
def lexing_error(msg):
    """Print `msg` and abort the process with a "Lexing Error" exit status."""
    print(msg)
    sys.exit("Lexing Error")
def parse_error(msg):
    """Print `msg` and abort the process with a "Parsing Error" exit status."""
    print(msg)
sys.exit("Parsing Error") | archanpatkar/Bhaskara | src/core/error.py | error.py | py | 460 | python | it | code | 1 | github-code | 36 |
75310096425 | import random
import re
import socket
import uuid
from scapy.all import IP, TCP, wrpcap, Raw, Ether
from sys import version_info
if version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
else:
from http.server import BaseHTTPRequestHandler
from io import StringIO
class HTTPRequest(BaseHTTPRequestHandler):
    """Parse a raw HTTP request string without any socket plumbing.

    After construction, `command`, `path` and `headers` hold the parsed
    request; on a malformed request, `error_code`/`error_message` are set
    instead of an error response being written.
    """

    def __init__(self, request_text):
        # Python 3's BaseHTTPRequestHandler.parse_request works on *bytes*
        # (it decodes the request line with iso-8859-1 itself); feeding it a
        # StringIO of str lines raised TypeError. Python 2 keeps the str path.
        if version_info.major == 2:
            self.rfile = StringIO(request_text)
        else:
            from io import BytesIO
            self.rfile = BytesIO(request_text.encode("iso-8859-1"))
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()

    def send_error(self, code, message):
        # Record the failure instead of writing an HTTP error response.
        self.error_code = code
        self.error_message = message
# Matches an (optionally schemed, optionally user@-prefixed) domain name such
# as "example.com" or "http://user@www.example.com/path", capturing the bare
# host in group 1; used to detect Host headers that are names rather than IPs.
DOMAIN_REGEX = re.compile(
    r"^(?:https?:\/\/)?(?:[^@\n]+@)?(?:www\.)?([a-zA-Z0-9\-]+\.[a-zA-Z]{2,})(?:[\/\w\.-]*)*\/?$"
)
def get_ip():
    """Best-effort discovery of the primary local IPv4 address.

    "Connects" a UDP socket to an arbitrary address — no packet is sent,
    but the OS selects the outgoing interface, whose address is then read
    back. Falls back to loopback on any socket failure.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        probe.connect(("10.255.255.255", 1))
        local_ip = probe.getsockname()[0]
    except socket.error:
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        local_ip = "127.0.0.1"
    finally:
        probe.close()
    return local_ip
def gen_pkt(
    dst,
    dst_port,
    random_port=43454,
    http_request_text="",
    http_response_text="",
    src_mac="18:26:3a:30:3c:e8",
    dst_mac="02:42:AC:11:00:03",
):
    """Build a matching scapy HTTP request/response packet pair.

    Returns (http_request, http_response), both Ether/IP/TCP/Raw packets
    with consistent TCP seq/ack numbers.
    """
    def _to_bytes(text):
        # On Python 3, bytes(str) raises TypeError ("string argument without
        # an encoding"), and bytes.endswith() needs a bytes suffix; encode
        # str payloads explicitly and pass bytes-likes straight through.
        return text.encode("iso-8859-1") if isinstance(text, str) else bytes(text)

    http_request_bytes = _to_bytes(http_request_text)
    if not http_request_bytes.endswith(b"\n"):
        http_request_bytes = http_request_bytes + b"\n"
    http_response_bytes = _to_bytes(http_response_text)
    if not http_response_bytes.endswith(b"\n"):
        http_response_bytes = http_response_bytes + b"\n"
    http_request = (Ether(src=src_mac, dst=dst_mac) / IP(dst=dst) / TCP(
        dport=dst_port,
        sport=random_port,
        flags="A",
    ) / Raw(http_request_bytes))
    # http_request.show()
    # NOTE(review): the one-element unpacking relies on scapy Packet
    # iteration yielding a single packet here; kept as in the original.
    (http_response, ) = (Ether(dst=src_mac, src=dst_mac) / IP(
        src=dst,
        dst=get_ip(),
    ) / TCP(
        dport=random_port,
        sport=dst_port,
        seq=http_request[TCP].ack,
        ack=http_request[TCP].seq + len(http_request[Raw]),
        flags="PA",
    ) / Raw(http_response_bytes))
    # http_response.show()
    return http_request, http_response
def get_mac_address():
    """Return this host's MAC address formatted as aa:bb:cc:dd:ee:ff."""
    node = uuid.getnode()
    # Extract the six octets most-significant first.
    octets = [(node >> shift) & 0xFF for shift in range(40, -8, -8)]
    return ":".join("{:02x}".format(octet) for octet in octets)
def get_host_and_port(request=""):
    """Extract (host, port) from a raw HTTP request's Host header.

    Defaults to port 80; a domain-name host is replaced by the local IP so
    generated packets stay local.
    """
    host = ""
    port = 80
    req = HTTPRequest(request)
    host_str = req.headers.get("host", "")
    if ":" in host_str:
        # Host header carries an explicit port; strip any scheme prefix first.
        tmp = host_str.replace("http://", "").replace("https://","").split(":")
        if len(tmp) >= 2:
            host = tmp[0]
            port = int(tmp[1])  # NOTE(review): raises ValueError on a non-numeric port
    else:
        host = host_str
    if re.search(DOMAIN_REGEX, host):
        # Domain names are swapped for the local address.
        host = get_ip()
    return host, port
def gen_all_packet(multi_http_packet):
    """Convert (request_text, response_text) pairs into scapy packet pairs."""
    packets = []
    for request_text, response_text in multi_http_packet:
        host, port = get_host_and_port(request_text)
        # Each conversation gets a fresh ephemeral source port.
        packets.append(gen_pkt(
            host,
            port,
            random.randint(23456, 65500),
            request_text,
            response_text,
            src_mac=get_mac_address(),
        ))
    return packets
def test_from_files():
    """Read request/response text from local files and write an http.pcap.

    NOTE: despite the name this is a demo entry point, not a unit test; it
    expects http-req.txt and http-resp.txt next to the script.
    """
    with open("http-req.txt", "r") as f:
        http_request_text = f.read()
    # Read the HTTP response from a file
    with open("http-resp.txt", "r") as f:
        http_response_text = f.read()
    pkts = gen_all_packet([(http_request_text, http_response_text)])
    # Write the request and response packets to a PCAP file
    wrpcap(
        "http.pcap",
        pkts,
    )
| b40yd/security | http_text_to_pcap.py | http_text_to_pcap.py | py | 3,848 | python | en | code | 96 | github-code | 36 |
27053053809 | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
prev = None
while head:
current = head
head = head.next
current.next = prev
prev = current
return prev
# def reverseList(self, head: ListNode) -> ListNode:
# self._reverseList(head, None)
# def _reverseList(self, node, prev):
# if not node:
# return prev
# next_ = node.next
# node.next = prev
# self._reverseList(next_, node)
if __name__ == "__main__":
nums = [1, 2, 3, 4, 5]
head = ListNode(nums[0])
current = head
for x in nums[1:]:
current.next = ListNode(x)
current = current.next
nums.reverse()
expected = ListNode(nums[0])
current = expected
for x in nums[1:]:
current.next = ListNode(x)
current = current.next
actual = Solution().reverseList(head)
while actual or expected:
print("a:", actual.val, "e:", expected.val)
actual = actual.next
expected = expected.next
| ikedaosushi/leetcode | problems/python/reverseList.py | reverseList.py | py | 1,172 | python | en | code | 1 | github-code | 36 |
5184467463 | def reverseStringIterative(wod):
# return word[::-1]
word = list(wod)
for i in range(len(word) // 2):
temp = word[i]
word[i] = word[len(word) - 1 - i]
word[len(word) - 1 - i] = temp
return "".join(word)
def reverseRecursive(strr):
size = len(strr)
if size == 0:
return
last_char = strr[size - 1]
print(last_char, end="")
reverseRecursive(strr[0 : size - 1])
reverseRecursive("young")
| fortyung/Data-Structures-Algorithms | Algorithms/Recursion/reverse.py | reverse.py | py | 461 | python | en | code | 0 | github-code | 36 |
12567632552 | import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torch import nn, optim
from ae import AE
import visdom
from vae import VAE
from torchvision import models
def main():
vis = visdom.Visdom()
tf = transforms.Compose([
transforms.ToTensor(), # 转换成tensor 格式,并且压缩到[0 ,1]
])
# 构建训练集 测试集
mnist_train = datasets.MNIST('mnist', train=True, transform=tf, download=True)
mnist_train = DataLoader(mnist_train, batch_size=32, shuffle=True)
mnist_test = datasets.MNIST('mnist', train=False, transform=tf, download=True)
mnist_test = DataLoader(mnist_test, batch_size=32, shuffle=True)
x, _ = next(iter(mnist_train))
print("x:", x.shape)
# ************************** train **************************
device = torch.device('cuda:0')
model_vae = VAE().to(device)
criteon = nn.MSELoss().to(device)
optimizer = optim.Adam(model_vae.parameters(), lr=1e-3)
print(model_vae)
loss = None
kld=None
for epoch in range(20 ):
for idx, (x, _) in enumerate(mnist_train):
x = x.to(device)
x_hat,kld = model_vae(x)
# print(x.shape , x_hat.shape)
loss = criteon(x, x_hat)+1.0*kld
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('epoch {} loss: {} include kld loss: {}'.format(epoch, loss.item(),kld.item()))
if epoch % 1 == 0: # 每1次epoch做一次可视化
vis.line([loss.item()], [epoch], win='train loss', update='append', opts=dict(
title='train loss', xlabel='epoch', ylabel='loss'
))
#******************** test ************************
x,_=next(iter(mnist_test))
x=x.to(device)
with torch.no_grad():
x_hat,_ = model_vae(x) # x : [32,1,28,28] 32 张图片
vis.images(x,nrow=8,win='x source',opts=dict(
title = 'x source'
))
vis.images(x_hat,win='x hat',nrow=8,opts=dict(title = 'x hat'))
if __name__ == '__main__':
main()
| wy171205/DeepLearning | AutoEncoder/main.py | main.py | py | 2,114 | python | en | code | 0 | github-code | 36 |
71899611623 | # 2018.10.10 Parse the 9G file to get all of the Movie ID
Text = b'product/productId'
movieID = set()
with open('movies.txt', 'rb') as file:
for line in file:
if Text == line[0:17]:
movieID.add(line[19:29])
with open('movieID.txt', 'w') as f:
for movie in movieID:
f.write(str(movie, encoding='utf-8') + '\n') | haoranpb/DWAssignments | Assignment1/file_parser.py | file_parser.py | py | 349 | python | en | code | 4 | github-code | 36 |
40264765469 | flowers = input()
count = int(input())
budget = int(input())
price = 0
if flowers == "Roses":
price = 5
if count > 80:
price = price * 0.9
elif flowers == "Dahlias":
price = 3.8
if count > 90:
price = price * 0.85
elif flowers == "Tulips":
price = 2.8
if count > 80:
price = price * 0.85
elif flowers == "Narcissus":
price = 3
if count < 120:
price = price * 1.15
elif flowers == "Gladiolus":
price = 2.5
if count < 80:
price = price * 1.2
total = price * count
rest = abs(budget - total)
if budget >= total:
print(f"Hey, you have a great garden with {count} {flowers} and {rest:.2f} leva left.")
else:
print(f"Not enough money, you need {rest:.2f} leva more.")
| ivoivanov0830006/1.1.Python_BASIC | 3.Nested_conditional_statements/*03.New_house.py | *03.New_house.py | py | 761 | python | en | code | 1 | github-code | 36 |
16128708255 | __goal__ = "Note for 前端组件"
"""
span中可以通过添加class展示各种图标,当需要增加文本时,务必在图标和文本之间添加一个空格
<span class="glyphicon"> Star 就像这里一样,前面需要加空格
弹窗里面可以加图标
【下拉菜单】
下拉菜单触发器和下拉菜单都包裹早dropdown里面
按钮那一块使用class="dropdown",而具体的下拉内容我们应该使用class="dropdown-menu"属性
整个模块都可以扔到<div>里面去,整个下拉就是一个div模块
菜单的内容默认是左对齐的,通过<ul class="dropdown-menu dropdown-menu-right">可以让菜单那右对齐
这里的右对齐就是整个框都到右边去了的意思,不是字体在右边
可以在这个div的首行加入标题来表达这个菜单栏的意思,首行就是在ul的意思,不是在整个div
<li class="dropdown-header">DropDown</li>
还可以添加分割线:<li role="separator" class="divider"></li>这就是一条分割线
【按钮组】
是一个div模块,这个模块为class="btn-group",当有唐初框是,必须指定container:'body'属性
还需要确保设置正确的role属性并提供一个label标签,按钮组就是role="group", 对于工具栏就是class="toolbar"
<div class="btn-group" role="group"></div>
<div class="btn-toolbar" role="toolbar"><div class="btn-group" role="group"></div></div>在里面组合进group,同样也是可以的,这样就是分开了而已,感觉没和在一起的好看
class="btn-group btn-group-lg"可以设置按钮组的大小
可以组合按钮组和下拉菜单组,就是把group里面且套一个group,然后把下拉菜单的属性扔进去
通过btn-group得到的按钮组大小是随着字体的多少变化的,想要获得一个固定的不变的样式,我们可以使用justified
<div class="btn-group btn-group-justified" role="group"><div class="btn-group" role="group"> 通过这种外面是justified,里面嵌套btn-group来实现同一按钮组的大小
所谓的分裂是按钮,就是一个普通的下拉式按钮组,在前面再加一个无关紧要的按钮咯
【输入框组】
只支持input,不支持select,也不支持textarea,使用的class="input-group"
不要将表单或column直接和输入框混合使用,而是将输入框组嵌套入表单组或者栅格相关的元素
<div class="input-group"><span class="input-group-addon">@</span><input class="form-control" type="text" placeholder="Username"></div>
可以直接通过对<div class="input-group input-group-lg">调整整体的大小而不是单独的去调整每一个
作为额外元素的按钮,即使用input-group-btn而不是input-group-addon
这种其实就是在一个div为row的模板里面,对col记性响应的设计,里面嵌套input-group
按钮的大小可以使用class="form-control"来进行控制,默认的大小就那样啊
<div class="row"><div class="col-lg-6"><div class="input-group"><input><span class="input-group-btn"><button>
我去,怎么感觉都有点大同小异了
【导航】
,如果在导航组件实现导航条功能,务必在ul的最外围的逻辑上加上role="navigation"
普通标签页,使用<ul class"nav nav-tabs"><li role="presentation"></li>
胶囊式标签,使用<ul class="nav nav-pills">,区分不同的状态,可以使用class="avtive",这个估计得使用js了
垂直式标签,使用<ul class="nav nav-pills nav-stacked">
当你需要等宽的时候,可以使用nav-justified
ul里面接li,而li里面则接a,所以当你需要使用下拉菜单的时候记得用a而不是button,type=button
【导航条】
一般导航条都使用nav标签,如果使用的div的化,我们应该使用role="navigation"
对于导航条,使用<nav class="navbar navbar-default">
里面就是一些乱七八糟的东西,感觉和以前额都不太一样了
你想要创建一个header,需要先创建一个container-fluid,在这里面进行一些配置
如<div class="container-fluid"><div class="navbar-header"><a class="navbar-brand"><img src="#"></a></div></div>这个可是一路嵌套到最里层啊
navbar-header是服务于收个标签或者图片的,这个模块在container-fluid里面
<div class="collapse navbar-collapse"> 这个模块是用来手机超链接,表单还有一些下拉菜单等属性的,是一个与navbar-header同级的意思把
这里的导航都变了<ul class="nav navbar-nav">都变成这个样子了,醉了,以前的是<div class="nav nav-pills">
表单也变了<form class="navbar-form navbar-left">,里面还是使用form-group,基本都还好
"""
| czasg/ScrapyLearning | czaSpider/dump/bootstrap_test/组件_note.py | 组件_note.py | py | 4,680 | python | zh | code | 1 | github-code | 36 |
34338203362 | from p111 import Solution
from common.tree import buildTreeFromList
test_cases = [
([3,9,20,None,None,15,7], 2),
([2,None,3,None,4,None,5,None,6], 5),
([], 0),
]
def test_minDepth():
s = Solution()
for case in test_cases:
assert s.minDepth(buildTreeFromList(case[0])) == case[1], case
| 0x0400/LeetCode | p111_test.py | p111_test.py | py | 315 | python | en | code | 0 | github-code | 36 |
21477829683 | # 사용횟수가 많으면 많을 수록 멀티탭에 그대로 둔다.
# 플로그 빼는 횟수
# 빈도수 별
import sys
n, k = map(int, sys.stdin.readline().split())
a = list(map(int, sys.stdin.readline().split()))
cnt = 0
multi = []
for i in range(k):
# 물품이 이미 꽂혀있으면 멀티탭이 비는지 확인할 필요가 없다.
# but, 멀티탭이 비는지 확인해도 물품이 이미 포함되어있는지는 확인해야하기 때문에 따로 앞으로 빼는게 유리
if a[i] in multi: continue
if len(multi) < n:
multi.append(a[i])
continue
del_list = []
has_plug = True
# 멀티탭에 있는 물품을 나머지 사용순서에 있는 리스트를 참조해 사용예정이라면
for j in range(len(multi)):
if multi[j] in a[i:]: # 이미 multi에 없기때문에 a[i]는 미포함
multi_index = a[i:].index(multi[j])
else:
multi_index = 101
has_plug = False
del_list.append(multi_index)
if not has_plug:
break
# multi에서 뽑아할 idx를 저장
plug_out = del_list.index(max(del_list))
del multi[plug_out]
multi.append(a[i])
cnt += 1
print(cnt)
# 반례 생각하기
# 2 5
# 1 (2) (3) 2 (2) 4 1 1 1
# (1) 2 (3) 2 2 (4) 1 1 1
| Minsoo-Shin/jungle | week04/1700_멀티탭스케쥴링.py | 1700_멀티탭스케쥴링.py | py | 1,337 | python | ko | code | 0 | github-code | 36 |
12171161356 | # 하키
import sys
w, h, x, y, p = map(int, sys.stdin.readline().split()) # p는 좌표 수
hockey = []
for i in range(p): # 하키 선수들의 좌표 이중List
a, b = map(int, sys.stdin.readline().split())
hockey.append([a, b])
cnt = 0
r = int(h / 2) # 원의 반지름
for i in range(p):
if x <= hockey[i][0] <= x+w and y <= hockey[i][1] <= y+h: # 좌표가 직사각형 안에 있을 때
cnt += 1
elif (hockey[i][0] - x) ** 2 + (hockey[i][1] - y - r) ** 2 <= r ** 2: # 좌표가 왼쪽 원 안에 있을 때
cnt += 1
elif (hockey[i][0] - x - w) ** 2 + (hockey[i][1] - y - r) ** 2 <= r ** 2: # 좌표가 오른쪽 원 안에 있을 때
cnt += 1
print(cnt)
| hi-rev/TIL | Baekjoon/기하1/hockey.py | hockey.py | py | 714 | python | ko | code | 0 | github-code | 36 |
4134355956 | import pygame
import sys
import time
from RobotLib.Math import *
import math
import argparse
from RobotLib.FrontEnd import *
from RobotLib.IO import *
import numpy as np
class MyFrontEnd(FrontEnd):
    """Custom sub-class of FrontEnd.

    Keyboard-teleoperates the Sparki robot, dead-reckons its pose from the
    commanded velocities, and draws the robot body plus the current sonar ray.
    """
    # Pose/command state shared between the UI callbacks; kept as module-level
    # globals (as in the original template) so every handler sees one copy.
    global velocity
    global omega
    global theta
    global sparkiCenter
    global sonarReadingS
    velocity = 0                      # forward speed, cm/s (negative = reverse)
    omega = 0                         # angular velocity, rad/s
    theta = 0                         # heading in the map frame, rad
    sparkiCenter = vec(128., 128.)    # robot center in the map frame, cm
    sonarReadingS = vec(15., 0.)      # last sonar hit, sonar frame, cm

    # Drive constants: wheel circumference 15.71 cm at 4096 steps/rev gives
    # 0.0038 cm/step; max 1000 steps/s = 3.8 cm/s, so 90% duty = 3.42 cm/s.
    FORWARD_SPEED = 3.42              # cm/s at 90% duty
    WHEEL_BASE = 8.51                 # wheel separation, cm (was 8.51/8.52 inconsistently)

    def __init__(self, width, height, sparki):
        FrontEnd.__init__(self, width, height)
        self.sparki = sparki

    def mouseup(self, x, y, button):
        """Log the map coordinates of a mouse click (debugging aid)."""
        print('mouse clicked at %d, %d' % (x, y))

    def keydown(self, key):
        """Start linear/angular motion when an arrow key goes down."""
        global velocity
        global omega
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_UP] != 0:
            print('up pressed')
            velocity = self.FORWARD_SPEED
        if pressed[pygame.K_DOWN] != 0:
            print('down pressed')
            velocity = -self.FORWARD_SPEED
        if pressed[pygame.K_LEFT] != 0:
            print('left pressed')
            omega += .2
        if pressed[pygame.K_RIGHT] != 0:
            print('right pressed')
            omega += -.2

    def keyup(self, key):
        """Stop the matching motion when an arrow key is released."""
        global velocity
        global omega
        print('key released')
        # Named pygame constants instead of the raw keycodes 273-276, which
        # are not stable across pygame/SDL versions.
        if key in (pygame.K_UP, pygame.K_DOWN):
            velocity = 0
        if key in (pygame.K_LEFT, pygame.K_RIGHT):
            omega = 0

    def draw(self, surface):
        """Draw the robot outline and a red ray from the sonar to its echo."""
        global sparkiCenter
        global sonarReadingS
        # Robot->map transform from the dead-reckoned pose; the sonar sits
        # 2.5 cm ahead of the robot center.
        transRtoM = transform(sparkiCenter[0], sparkiCenter[1], theta)
        transStoR = transform(2.5, 0., 0.)
        # Body corners in the robot frame (10 cm x 9 cm footprint).
        frontRightR = vec(5., -4.5)
        frontLeftR = vec(5., 4.5)
        backRightR = vec(-5., -4.5)
        backLeftR = vec(-5., 4.5)
        sonarR = vec(2.5, 0.)
        # Map-frame endpoints of the outline and the sonar ray.
        frontRightM = mul(transRtoM, frontRightR)
        frontLeftM = mul(transRtoM, frontLeftR)
        backRightM = mul(transRtoM, backRightR)
        backLeftM = mul(transRtoM, backLeftR)
        sonarM = mul(transRtoM, sonarR)
        sonarReadingM = mul(transRtoM, mul(transStoR, sonarReadingS))
        # Red marks the front edge and the sonar ray; green the other edges.
        pygame.draw.line(surface, (255, 0, 0), frontRightM, frontLeftM)
        pygame.draw.line(surface, (0, 255, 0), frontRightM, backRightM)
        pygame.draw.line(surface, (0, 255, 0), backRightM, backLeftM)
        pygame.draw.line(surface, (0, 255, 0), frontLeftM, backLeftM)
        pygame.draw.line(surface, (255, 0, 0), sonarM, sonarReadingM)

    def update(self, time_delta):
        """Integrate the pose and send one motor command to Sparki.

        Called roughly every 50 ms; `time_delta` is seconds since the last
        call. Only one send_command() per update(), since messages sent more
        than once per millisecond are dropped.
        """
        global theta
        global omega
        global velocity
        global sparkiCenter
        global sonarReadingS
        # Unicycle forward kinematics (dead reckoning).
        theta += omega * time_delta
        sparkiCenter[0] += velocity * math.cos(theta) * time_delta
        sparkiCenter[1] += velocity * math.sin(theta) * time_delta
        # Differential-drive wheel speeds; the same wheel base is now used on
        # both sides (the original mixed 8.51 and 8.52).
        velocityRight = velocity + (omega * (self.WHEEL_BASE / 2))
        velocityLeft = velocity - (omega * (self.WHEEL_BASE / 2))
        # Sparki expects unsigned speeds with a per-wheel direction flag.
        rightReverse = 0
        leftReverse = 0
        if velocityRight < 0:
            rightReverse = 1
            velocityRight = abs(velocityRight)
        if velocityLeft < 0:
            leftReverse = 1
            velocityLeft = abs(velocityLeft)
        # Latest sonar distance, visualized in draw().
        sonarReadingS[0] = self.sparki.dist
        # BUG FIX: the direction flag paired with velocityLeft was previously
        # rightReverse (passed twice); it must be leftReverse.
        self.sparki.send_command(int(velocityRight), rightReverse,
                                 int(velocityLeft), leftReverse, 0, 0)
def main():
    """Parse CLI options, open the Sparki serial link, and run the UI loop."""
    arg_parser = argparse.ArgumentParser(description='Template')
    arg_parser.add_argument('--width', type=int, default=256, help='map width')
    arg_parser.add_argument('--height', type=int, default=256, help='map height')
    arg_parser.add_argument('--port', type=str, default='', help='port for serial communication')
    options = arg_parser.parse_args()

    # The serial connection is managed as a context so it closes on exit.
    with SparkiSerial(port=options.port) as sparki:
        ui = MyFrontEnd(options.width, options.height, sparki)
        ui.run()
if __name__ == '__main__':
main()
| jeffherbst/sparki | template.py | template.py | py | 6,840 | python | en | code | 0 | github-code | 36 |
24843377512 | ' Script to for association analysis of clusters. Created by S Brueningk 2023 '
import pandas as pd
import numpy as np
from IPython.core.display import display, Markdown
import os.path
from pathlib import Path
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy.stats as st
###########################################################################################################
# Functions
def logistic_regression(X, X_additional, Y, n_proteins):
    """Fit one logistic model per protein (covariates + that protein) and
    return the p-value of the protein term for each of the n_proteins fits."""
    pvalues = []
    for idx in range(n_proteins):
        # Design matrix: fixed covariate columns plus the idx-th protein.
        design = np.c_[X_additional, X[:, idx]]
        fitted = sm.Logit(Y, design).fit(disp=0)
        # The protein column is last, so its p-value is the last entry.
        pvalues.append(fitted.pvalues[-1])
    return pvalues
def linear_regression(X, X_additional, Y, n_proteins):
    """Fit one OLS model per protein (covariates + that protein) and return
    the p-value of the protein term for each of the n_proteins fits."""
    pvalues = []
    for idx in range(n_proteins):
        # Design matrix: fixed covariate columns plus the idx-th protein.
        design = np.c_[X_additional, X[:, idx]]
        fitted = sm.OLS(Y, design).fit()
        # The protein column is last, so its p-value is the last entry.
        pvalues.append(fitted.pvalues[-1])
    return pvalues
###########################################################################################################
##########################################################################################################
# INPUTS (fixed)
useScaler = True
stratfield = 'Patient_Care' # Account for COVID19 severity
stratify_for = ['age_group', stratfield]
inputData = '6M' # Options: '6M', '1M','1Mand6M'
endpoint = 'PACS_6M_woDys' # Options: 'PACS_6M_woDys', 'PACS_12M'
usehealthy = True # Options: True = include healthy controls, False: no healthy controls
# Use reduced feature set in case of external validation
reduceFeaturestoexternal = True
external_features_keep = 'Data/external_usedClustersAndProteins.csv'
# Paths
data_file_1M = 'Data/Proteomics_Clinical_Data_220902_Acute_plus_healthy_v5.xlsx'
data_file_6M = 'Data/Proteomics_Clinical_Data_220902_6M_timepoint_v4.xlsx'
label_file = 'Data/Proteomics_Clinical_Data_220902_Labels_v2.xlsx'
output_folder = 'Association_output'
protein_clusters = 'Data/Table S2 Biological protein cluster compositions.xlsx'
do_clusterAssociation = True
do_singleProteinAssociation = False
###########################################################################################################
# Run
# Prepare output
# NOTE(review): df_threshs and useScaler are never used below in this chunk.
df_threshs = pd.DataFrame(columns = ['sig thresh'])
output = os.path.join(output_folder)
Path(output).mkdir(parents=True, exist_ok=True)
# Create name
if usehealthy:
    healthy = 'withHealthy'
    name = endpoint+'_withHealthy'
else:
    healthy = 'noHealthy'
    name = endpoint+'_noHealthy'
print('Working on '+ name)
# Get label data
endpoint_label = pd.read_excel(label_file, index_col=0)
endpoint_label.set_index('SubjectID', inplace=True)
label = endpoint_label[endpoint]
# Get all features
data_1M = pd.read_excel(data_file_1M)
data_1M.set_index('SubjectID', inplace=True)
data_6M = pd.read_excel(data_file_6M)
data_6M.set_index('SubjectID', inplace=True)
# Healthy controls are shipped inside the 1M file and split off here.
data_healthy = data_1M[data_1M['COVID'] == 'Healthy']
data_1M = data_1M.drop(data_healthy.index)
# Get clinical data related to the COVID19 infection
cols_clinical6M = ['Age', 'Sex','Post_Vaccine']
cols_clinical_nonBin = ['Age', 'BMI','Acute_Nr_Symptoms']
cols_clinical = ['Age', 'Sex','Post_Vaccine','Asthma',
                 'Lung','Diabetes','BMI','Cerebro','Heart',
                 'Hypertonia','Autoimmune_diseases','Malignancy','Kidney','Fatigue',
                 'Oxygen','Cough','Steroids','GI_symptoms','Remdesivir','Immuno',
                 'ICU','Tocilizumab','Hydroxychloroquin','Dyspnoe','Allergic_disease',
                 'Acute_Nr_Symptoms','Immunosuppressives','ACE_inhibitor','Fever']
cols_drop_fromFeatures = ['SampleId','Sampling_month','COVID',
                          'Days', 'Patient_Care','COVID19_Severity',
                          'COVID19_Severity_Grade','Index']
cols_clinical_keep = cols_clinical
# Separate features used for association analysis
severity_1M = data_1M[ ['Patient_Care','COVID19_Severity','COVID19_Severity_Grade']]
severity_6M = data_6M[ ['Patient_Care','COVID19_Severity','COVID19_Severity_Grade']]
severity_healthy = data_healthy[ ['Patient_Care','COVID19_Severity','COVID19_Severity_Grade']]
# Clinical data (here only age and sex are used)
# NOTE(review): these are views of data_1M/data_healthy; the .loc writes below
# may trigger pandas SettingWithCopyWarning — consider .copy() here.
data_clin_pats = data_1M[cols_clinical]
data_clin_healthy = data_healthy[cols_clinical]
# Binary clinical fields are recoded YES/NO and male/female to 1/0; the
# non-binary ones (age, BMI, symptom count) are copied through unchanged.
for c in cols_clinical:
    if c in cols_clinical_nonBin:
        data_clin_pats.loc[:, c] = data_1M.loc[:, c]
        data_clin_healthy.loc[:, c] = data_healthy.loc[:, c]
    else:
        data_clin_pats.loc[:,c] = data_1M.loc[:,c].map({'YES':1,'NO':0,'male':1,'female':0})
        data_clin_healthy.loc[:,c] = data_healthy.loc[:,c].map({'YES':1,'NO':0,'male':1,'female':0})
# Columns missing for healthy controls are filled with 0 (no disease/treatment).
cols_not_not_in_healthy = list(set(data_clin_pats.columns)-set(data_clin_healthy.columns))
data_clin_healthy[cols_not_not_in_healthy] = 0
# Protein data
data_1M = data_1M.drop(cols_clinical+cols_drop_fromFeatures, axis=1)
data_healthy = data_healthy.drop(cols_clinical+cols_drop_fromFeatures, axis=1)
data_6M = data_6M.drop(cols_clinical6M + cols_drop_fromFeatures, axis=1)
# Get ratios of some proteins - note: Data are already log10 transformed!!!
# log10(A/B) computed by undoing the log10, dividing, and re-applying it.
ratio1 = ['seq.2602.2','seq.3050.7','seq.2381.52','seq.4482.66']
ratio2 = ['seq.2811.27','seq.3175.51','seq.2888.49','seq.2888.49']
ratio_name = []
for i in range(0,len(ratio1)):
    ratio_name = 'ratio_'+ratio1[i]+ '_'+ratio2[i]
    data_1M[ratio_name] = np.log10(10**(data_1M[ratio1[i]])/10**(data_1M[ratio2[i]]))
    data_6M[ratio_name] = np.log10(10**(data_6M[ratio1[i]]) / 10**(data_6M[ratio2[i]]))
    data_healthy[ratio_name] = np.log10(10**(data_healthy[ratio1[i]]) / 10**(data_healthy[ratio2[i]]))
# Now get the input data used in this run
severity = severity_1M
if inputData == '1M':
    data = data_1M
elif inputData == '6M':
    data = data_6M
elif inputData == '1Mand6M':
    # Combined mode: 1M features + suffixed 6M features + 1M-6M differences.
    cols_6M = [c+'_6M' for c in data_6M.columns]
    data_6M_app = data_6M.copy()
    data_6M_app.columns = cols_6M
    data_delta1M6M = data_1M-data_6M
    cols_1M6M= [c+'_1M-6M' for c in data_6M.columns]
    data_delta1M6M.columns = cols_1M6M
    # Concatenation of 1M and 6M data
    data = pd.concat([data_1M,data_6M_app,data_delta1M6M], axis=1)
else:
    # NOTE(review): raise('...') with a plain string is invalid in Python 3;
    # it still aborts, but with "exceptions must derive from BaseException"
    # (TypeError) instead of the intended message. Should be raise ValueError(...).
    raise('Invalid choice of model inputData!')
# Include healthy controls if wanted
if usehealthy:
    if inputData == '1Mand6M':
        # Healthy controls get a zero delta block (no change between visits).
        data_healthy_app = data_healthy.copy()
        data_healthy_app.columns = cols_6M
        data_healthy_delta1M6M = data_healthy-data_healthy
        data_healthy_delta1M6M.columns = cols_1M6M
        data_healthy = pd.concat([data_healthy, data_healthy_app, data_healthy_delta1M6M], axis=1)
    # NOTE(review): DataFrame.append was removed in pandas 2.x; use pd.concat.
    data = data.append(data_healthy)
    data_clin = data_clin_pats.append(data_clin_healthy)
    severity = severity.append(severity_healthy)
# Check data and exclude patients with missing proteomics
data = data.dropna() # Should not make any differene for our data
data_clin = data_clin.loc[data.index,cols_clinical_keep]
label = label.loc[data.index]
label = label.dropna()
data = data.loc[label.index]
# Scale each protein (as used later)
sc = StandardScaler()
npx_reform_train = pd.DataFrame(sc.fit_transform(
    data.loc[:, :].values),
    index=data.loc[:, :].index,
    columns=data.columns)
# scale age
# Covariates for the regressions: age, sex, and the hospitalization flag.
COVs = data_clin[['Age', 'Sex']]
COVs[stratfield] = severity.loc[data_clin.index, stratfield].map(dict(Outpatient=0,
                                                                      Hospitalized=1,
                                                                      Healthy=0))
scaler = StandardScaler()
COVs_sc = COVs.copy()
COVs_sc['Age'] = scaler.fit_transform(COVs_sc['Age'].values.reshape(-1, 1))
# Prepare
# Design: intercept column + scaled covariates; X holds the scaled proteins.
n, n_proteins = npx_reform_train.shape
X_additional = np.c_[np.ones(n), COVs_sc.values]
X = npx_reform_train.values
phenotype = label.values.astype(np.float64)
if do_singleProteinAssociation:
    # Single protein association
    # Binary endpoint -> logistic regression, otherwise linear regression.
    if len(np.unique(phenotype)) == 2:
        pvals = logistic_regression(X, X_additional, phenotype, n_proteins)
    else:
        pvals = linear_regression(X, X_additional, phenotype, n_proteins)
    pvals = pd.Series(pvals, index=npx_reform_train.columns)
else:
    pvals = pd.Series()
# Clusters
if do_clusterAssociation:
    df_prots = pd.read_excel(protein_clusters)
    if reduceFeaturestoexternal:
        # Restrict to aptamers that also exist in the external validation set.
        df_prots_external = pd.read_csv(external_features_keep, index_col=0)
        missing = []
        features_keep = []
        keep_index = []
        for p in df_prots_external.index:
            if p not in df_prots['AptamerName'].values:
                missing.append(p)
            else:
                features_keep.append(p)
                keep_index += list(df_prots[df_prots['AptamerName'] == p].index)
        df_prots = df_prots.loc[np.unique(keep_index)]
    # Association covariates only
    model_Cov = sm.Logit(phenotype, X_additional).fit(disp=0, method='bfgs')
    pvals_Cov = model_Cov.llr_pvalue
    pvals.loc['COVs'] = pvals_Cov
    # Association cluster with COVs
    clusters = df_prots['Group'].unique()
    for this_cl in clusters:
        # get protein group
        ids_use = list(df_prots[df_prots['Group']==this_cl]['AptamerName'].values)
        prots_use = [p for p in ids_use]
        # get the relevant data
        X_i = npx_reform_train.loc[:, prots_use].values
        X_tot = np.c_[X_additional, X_i]
        # Association
        try:
            model = sm.Logit(phenotype, X_tot).fit(disp=0, method='bfgs')
            # Use p-value for whole model here
            pvals.loc[this_cl] = model.llr_pvalue
            print(this_cl + ': ' + str(model.llr_pvalue))
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to Exception would be safer while keeping the best-effort intent.
        except:
            print('Failed Single Group ' + this_cl)
            pvals.loc[this_cl] = np.nan
# Organize and save results
pvals = pvals.sort_values()
pvals.sort_values().to_csv(os.path.join(output_folder, name + '_singleSomamer_pvals.csv'))
| BorgwardtLab/LongCOVID | associationClusters.py | associationClusters.py | py | 9,990 | python | en | code | 0 | github-code | 36 |
37974620586 | from vpython import *
import numpy as np
def check_wall_collision(ball):
    """Reflect a ball off the box walls and tint it with the hit wall's color.

    Each axis is tested independently. The original used one elif chain over
    all six walls, so a ball striking a corner reversed only one velocity
    component that frame and could drift out through the other wall.
    """
    # x walls (right: green, left: red)
    if ball.pos.x > side - thick/2 - ball.radius:
        ball.vel.x *= -1
        ball.color = color.green
    elif ball.pos.x < -side + thick/2 + ball.radius:
        ball.vel.x *= -1
        ball.color = color.red
    # y walls (top: orange, bottom: blue)
    if ball.pos.y > side - thick/2 - ball.radius:
        ball.vel.y *= -1
        ball.color = color.orange
    elif ball.pos.y < -side + thick/2 + ball.radius:
        ball.vel.y *= -1
        ball.color = color.blue
    # z walls (front: white, back: cyan)
    if ball.pos.z > side - thick/2 - ball.radius:
        ball.vel.z *= -1
        ball.color = color.white
    elif ball.pos.z < -side + thick/2 + ball.radius:
        ball.vel.z *= -1
        ball.color = color.cyan
# Simulation constants: ball radius, half-width of the box, wall thickness.
radius = 0.5
side = 8.0
thick = 0.4
side_v1 = 2*side - thick
side_v2 = 2*side + thick
N = 50
balls = []
scene = canvas(width=600, height=600)
# Start filling balls from the bottom-left-front inner corner of the box.
x = - side + thick/2 + radius
y = - side + thick/2 + radius
z = - side + thick/2 + radius
countx = 0
county = 0  # NOTE(review): never used below — can't tell if it was intended
for i in range(N):
    # Place balls on a grid (7 per row), each with a random positive velocity.
    balls.append(sphere(pos = vec(x, y, z), radius = 0.5, color=color.white))
    balls[i].vel = vec(np.random.rand(), np.random.rand(), np.random.rand())
    balls[i].out = False
    if 0 <= countx < 6:
        x += 3 * radius
    else:
        # Row full: wrap back to the left edge and move up one row.
        x = - side + thick/2 + radius
        y += 3 *radius
        countx = 0
    countx +=1
# Five visible walls (the sixth, facing the camera, is left open).
wallR = box (pos=vector( side, 0, 0), size=vector(thick, side_v1, side_v2), color = color.green)
wallL = box (pos=vector(-side, 0, 0), size=vector(thick, side_v1, side_v2), color = color.red)
wallB = box (pos=vector(0, -side, 0), size=vector(side_v2, thick, side_v2), color = color.blue)
wallT = box (pos=vector(0, side, 0), size=vector(side_v2, thick, side_v2), color = color.orange)
wallBK = box(pos=vector(0, 0, -side), size=vector(side_v1, side_v1, thick), color = color.cyan)
t = 0
dt = 0.005
flag = False
sleep(1)
# Main loop: move every active ball, bounce off walls, and when two balls
# touch, park both outside the box and mark them out of play.
while t < 10000:
    for i in range(N):
        if balls[i].out:
            continue
        balls[i].pos += balls[i].vel*dt
        check_wall_collision(balls[i])
        for j in range(N):
            if i == j:
                continue
            v = balls[j].pos - balls[i].pos
            if v.mag < 2*radius:
                # Collision: stash both balls at distinct spots off to the side.
                balls[j].pos = vec(10, j - 10, i - 10)
                balls[j].vel = vec(0, 0, 0)
                balls[j].out = True
                balls[i].pos = vec(10, i - 9, j - 9)
                balls[i].vel = vec(0, 0, 0)
                balls[i].out = True
    t += dt
30981112855 | #!/usr/bin/python
# <bitbar.title>Stock Ticker</bitbar.title>
# <bitbar.version>1.0</bitbar.version>
# <bitbar.author>Robert Kanter</bitbar.author>
# <bitbar.author.github>rkanter</bitbar.author.github>
# <bitbar.desc>Provides a rotating stock ticker in your menu bar, with color and percentage changes</bitbar.desc>
# <bitbar.dependencies>python</bitbar.dependencies>
# <bitbar.image>https://i.imgur.com/Nf4jiRd.png</bitbar.image>
# <bitbar.abouturl>https://github.com/rkanter</bitbar.abouturl>
import urllib2
import json
#-----------------------------------------------------------------------------
# IMPORTANT: You will need an API Token. Follow these steps
# 1. Create a free account at https://iexcloud.io/cloud-login#/register/
# 2. Select the free "START" tier
# 3. Verify your email address
# 4. Click "API Tokens" in the left menu
# 5. Enter the "Publishable" Token in the quotes below (it should start with "pk_")
api_token = ""
# Enter your stock symbols here in the format: ["symbol1", "symbol2", ...]
stock_symbols = ["MSFT", "AAPL", "AMZN"]
#-----------------------------------------------------------------------------
# NOTE(review): urllib2 means this plugin targets Python 2; it would need
# urllib.request to run under Python 3.
# One batched IEX Cloud request for all symbols; quotes come back keyed by symbol.
response = urllib2.urlopen("https://cloud.iexapis.com/stable/stock/market/batch?symbols=" + ','.join(stock_symbols) + "&types=quote&filter=symbol,latestPrice,change,changePercent&displayPercent=true&token=" + api_token)
json_data = json.loads(response.read())
for stock_symbol in stock_symbols:
    stock_quote = json_data[stock_symbol]["quote"]
    price_current = stock_quote["latestPrice"]
    price_changed = stock_quote["change"]  # may be None (API omits it at times — can't tell why from here)
    price_percent_changed = stock_quote["changePercent"]
    if price_changed is not None:
        # Red for losses, green otherwise; BitBar reads the "| color=" suffix.
        color = "red" if float(price_changed) < 0 else "green"
        print("{} {:.2f} {:.2f} ({:.2f}%) | color={}".format(stock_symbol, price_current, price_changed, price_percent_changed, color))
    else:
        # No change value available: print price only, in black.
        color = "black"
        print("{} {:.2f} | color={}".format(stock_symbol, price_current, color))
17849581957 | from .config import np, Vector, Keyword, ParameterName, ZOrderConfig, TextConfig, HorizontalAlignment, \
VerticalAlignment, ColorConfig
from .config import BentChevronArrow, ChevronArrow, CompositeFigure, Rectangle, TextBox, RoundRectangle
from .config import MIDDiagram, NetworkDiagram, CulturedCell, Mice, Human, CarbonBackbone, CommonElementConfig
class NoisyDataDiagramConfig(object):
    """Layout constants and style dictionaries for the noisy-data diagram.

    All widths/heights are fractions of the figure width; z-orders and fonts
    are inherited from the shared CommonElementConfig/TextConfig settings.
    """
    # Font sizes derived from the shared document size.
    normal_document_size = CommonElementConfig.normal_document_size
    smaller_document_size = normal_document_size - 1
    smallest_document_size = normal_document_size - 4
    text_z_order = CommonElementConfig.text_z_order
    background_z_order = ZOrderConfig.default_patch_z_order
    child_diagram_base_z_order = CommonElementConfig.child_diagram_base_z_order
    child_diagram_z_order_increment = CommonElementConfig.child_diagram_z_order_increment
    # Text box dimensions (figure-width fractions).
    document_text_width = 0.18
    document_text_width2 = 0.12
    document_text_height = 0.06
    document_text_height2 = 0.04
    smaller_document_text_height = 0.04
    # Default style for labels around the diagram.
    document_text_config = {
        ParameterName.font: TextConfig.main_text_font,
        ParameterName.font_size: normal_document_size,
        ParameterName.width: document_text_width,
        ParameterName.height: document_text_height,
        ParameterName.horizontal_alignment: HorizontalAlignment.center,
        ParameterName.vertical_alignment: VerticalAlignment.center_baseline,
        ParameterName.z_order: text_z_order,
        # ParameterName.text_box: True,
    }
    # Titles above each small MID bar diagram.
    mid_title_text_common_config_dict = {
        **document_text_config,
        ParameterName.font_size: 10,
    }
    normal_chevron_width = CommonElementConfig.normal_chevron_width
    # Straight chevron arrows between diagram stages.
    chevron_config = {
        **CommonElementConfig.chevron_config,
        ParameterName.width: normal_chevron_width - 0.015
    }
    bend_chevron_to_main_distance = normal_chevron_width / 2 + 0.015
    # Bent chevron variant (slightly narrower, with a corner radius).
    bend_chevron_config = {
        **chevron_config,
        ParameterName.radius: 0.03,
        ParameterName.width: normal_chevron_width - 0.02
    }
    predicted_mid_text_config_dict = {
        **document_text_config,
        ParameterName.font_size: smallest_document_size,
        ParameterName.width: document_text_width2,
        ParameterName.height: smaller_document_text_height,
    }
    final_experimental_mid_text_config = {
        **document_text_config,
        ParameterName.font_size: smaller_document_size,
        ParameterName.width: document_text_width,
        ParameterName.height: document_text_height2,
        ParameterName.vertical_alignment: VerticalAlignment.top,
    }
    # Rounded light-blue backdrop behind the experimental MID labels.
    final_experimental_mid_background_config = {
        ParameterName.radius: 0.05,
        ParameterName.width: document_text_width,
        ParameterName.edge_width: None,
        ParameterName.face_color: ColorConfig.super_light_blue,
        ParameterName.z_order: background_z_order
    }
    background_rectangle_config_dict = {
        ParameterName.face_color: ColorConfig.light_gray,
        ParameterName.edge_width: None,
        ParameterName.z_order: 0
    }
class NoisyDataDiagram(CompositeFigure):
    """Composite figure assembling texts, chevron arrows and constructed MID
    diagrams produced by noisy_data_diagram_generator()."""
    total_width = 1.2
    total_height = 0.5
    height_to_width_ratio = total_height / total_width

    def __init__(self, **kwargs):
        texts, chevrons, constructed = noisy_data_diagram_generator()
        # Each element category is keyed by the element's own name.
        content_dict = {
            ParameterName.text: {obj.name: obj for obj in texts},
            ParameterName.chevron_arrow: {obj.name: obj for obj in chevrons},
            ParameterName.constructed_obj: {obj.name: obj for obj in constructed},
        }
        size = Vector(self.total_width, self.total_height)
        super().__init__(content_dict, Vector(0, 0), size, **kwargs)
def generate_evenly_distributed_mid_diagram(
        element_config_list, text_config_list, x_mid_location, data_y_vector, primary_data_array, noise_data_array,
        color_name, mid_diagram_scale, title_text_common_config_dict):
    """Append one MID bar diagram (plus its title text) per data row.

    Appends (MIDDiagram, config) tuples to element_config_list and title
    text configs to text_config_list, in place. Diagrams share the x
    position x_mid_location; row i is centered at data_y_vector[i]. When
    noise_data_array is given, each diagram gets a stacked (primary, noise)
    pair colored by the two entries of color_name.
    """
    mid_carbon_num = len(primary_data_array[0])
    # NOTE(review): calculate_center is invoked unbound with the class passed
    # explicitly as `self` — works, but a @classmethod would be cleaner.
    primary_data_previous_center_loc = MIDDiagram.calculate_center(
        MIDDiagram, mid_diagram_scale, mid_carbon_num)
    for mid_data_index, primary_mid_data_vector in enumerate(primary_data_array):
        if noise_data_array is not None:
            # Stack the primary values with their noise row for a 2-series bar.
            noise_data_vector = noise_data_array[mid_data_index]
            mid_data_vector = np.array([primary_mid_data_vector, noise_data_vector])
        else:
            mid_data_vector = primary_mid_data_vector
        target_center_vector = Vector(x_mid_location, data_y_vector[mid_data_index])
        # Offset needed to move the diagram's default center onto the target.
        predicted_mid_diagram_bottom_left_offset = target_center_vector - primary_data_previous_center_loc
        final_experimental_mid_diagram_dict = {
            ParameterName.data_vector: mid_data_vector,
            ParameterName.scale: mid_diagram_scale,
            ParameterName.color_name: color_name,
            ParameterName.bottom_left_offset: predicted_mid_diagram_bottom_left_offset,
            ParameterName.base_z_order: NoisyDataDiagramConfig.child_diagram_base_z_order,
            ParameterName.z_order_increment: NoisyDataDiagramConfig.child_diagram_z_order_increment
        }
        element_config_list.append((MIDDiagram, final_experimental_mid_diagram_dict))
        # Title ("Metabolite i") slightly above the diagram center.
        text_config_list.append({
            **title_text_common_config_dict,
            ParameterName.string: f'Metabolite {mid_data_index + 1}',
            ParameterName.center: target_center_vector + Vector(0, 0.056),
        })
def noisy_data_diagram_generator():
    """Build all elements of the noisy-data workflow diagram.

    Lays out three columns of MID diagrams (precise -> noise-added ->
    normalized noisy data) joined by chevron arrows, plus column titles.
    Returns (text_obj_list, chevron_obj_list, other_element_obj_list).
    """
    main_horiz_axis = 0.22
    # width = 1, height = height_to_width_ratio, all absolute number are relative to width
    upper_horiz_axis = main_horiz_axis + 0.12
    bottom_horiz_axis = main_horiz_axis - 0.12
    text_horiz_axis = main_horiz_axis + 0.23
    vert_axis_list = [0.11, 0.43, 0.75, 1.08]
    chevron_start_end_x_value_list = [
        Vector(0.22, 0.32),
        Vector(0.54, 0.64),
        Vector(0.86, 0.96),
    ]
    # Hard-coded example MID vectors: one row per metabolite, 4 isotopologues.
    primary_data_array = np.array([
        [0.60, 0.049, 0.051, 0.3],
        [0.37, 0.052, 0.048, 0.53],
        [0.22, 0.043, 0.057, 0.68],
    ])
    noise_data_array = np.array([
        [-0.12, -0.009, 0.006, 0.1],
        [-0.08, 0.005, -0.003, 0.09],
        [0.11, -0.002, -0.008, -0.1]
    ])
    # Middle column shows the primary value reduced by negative noise with the
    # |noise| stacked on top; right column shows the fully noise-added data.
    absolute_noise_array = np.abs(noise_data_array)
    primary_data_exclude_noise_array = primary_data_array + np.clip(noise_data_array, None, 0)
    data_include_noise_array = primary_data_array + noise_data_array
    mid_diagram_scale = 0.08
    top_text_distance = 0.015
    mid_data_y_vector = [upper_horiz_axis, main_horiz_axis, bottom_horiz_axis]
    mid_data_height = mid_diagram_scale * MIDDiagram.total_height
    # NOTE(review): top_text_y_value is computed but not used below.
    top_text_y_value = upper_horiz_axis + mid_data_height / 2 + top_text_distance
    other_element_config_list = []
    # Column titles plus the trailing "MFA and following analysis" label.
    text_config_list = [
        {
            ParameterName.string: 'Precise simulated data',
            ParameterName.center: Vector(vert_axis_list[0], text_horiz_axis),
            **NoisyDataDiagramConfig.document_text_config,
        },
        {
            ParameterName.string: 'Introducing random noise\nand normalization',
            ParameterName.center: Vector(vert_axis_list[1], text_horiz_axis),
            **NoisyDataDiagramConfig.document_text_config,
        },
        {
            ParameterName.string: 'Noisy simulated data',
            ParameterName.center: Vector(vert_axis_list[2], text_horiz_axis),
            **NoisyDataDiagramConfig.document_text_config,
        },
        {
            ParameterName.string: 'MFA and\nfollowing analysis',
            ParameterName.center: Vector(vert_axis_list[3], main_horiz_axis),
            **NoisyDataDiagramConfig.document_text_config,
        },
    ]
    # Column 1: precise data (blue).
    generate_evenly_distributed_mid_diagram(
        other_element_config_list, text_config_list, vert_axis_list[0], mid_data_y_vector, primary_data_array, None,
        Keyword.blue, mid_diagram_scale, NoisyDataDiagramConfig.mid_title_text_common_config_dict)
    chevron_1_config = {
        ParameterName.tail_end_center: Vector(chevron_start_end_x_value_list[0][0], main_horiz_axis),
        ParameterName.head: Vector(chevron_start_end_x_value_list[0][1], main_horiz_axis),
        **NoisyDataDiagramConfig.chevron_config,
    }
    # Column 2: primary minus negative noise (blue) with |noise| stacked (gray).
    generate_evenly_distributed_mid_diagram(
        other_element_config_list, text_config_list, vert_axis_list[1], mid_data_y_vector,
        primary_data_exclude_noise_array, absolute_noise_array, [Keyword.blue, Keyword.gray],
        mid_diagram_scale, NoisyDataDiagramConfig.mid_title_text_common_config_dict)
    chevron_2_config = {
        ParameterName.tail_end_center: Vector(chevron_start_end_x_value_list[1][0], main_horiz_axis),
        ParameterName.head: Vector(chevron_start_end_x_value_list[1][1], main_horiz_axis),
        **NoisyDataDiagramConfig.chevron_config,
    }
    # Column 3: final noisy data (orange).
    generate_evenly_distributed_mid_diagram(
        other_element_config_list, text_config_list, vert_axis_list[2], mid_data_y_vector, data_include_noise_array,
        None, Keyword.orange, mid_diagram_scale, NoisyDataDiagramConfig.mid_title_text_common_config_dict)
    chevron_3_config = {
        ParameterName.tail_end_center: Vector(chevron_start_end_x_value_list[2][0], main_horiz_axis),
        ParameterName.head: Vector(chevron_start_end_x_value_list[2][1], main_horiz_axis),
        **NoisyDataDiagramConfig.chevron_config,
    }
    chevron_arrow_config_list = [chevron_1_config, chevron_2_config, chevron_3_config]
    # Instantiate the element objects from their config dictionaries.
    text_obj_list = []
    for text_config_dict in text_config_list:
        text_obj = TextBox(**text_config_dict)
        text_obj_list.append(text_obj)
    chevron_obj_list = []
    for chevron_arrow_config_dict in chevron_arrow_config_list:
        # A radius entry marks a bent chevron; none of the three above has one.
        if ParameterName.radius in chevron_arrow_config_dict:
            chevron_class = BentChevronArrow
        else:
            chevron_class = ChevronArrow
        chevron_arrow_obj = chevron_class(**chevron_arrow_config_dict)
        chevron_obj_list.append(chevron_arrow_obj)
    other_element_obj_list = []
    for other_element_class, other_element_config in other_element_config_list:
        other_element_obj = other_element_class(**other_element_config)
        other_element_obj_list.append(other_element_obj)
    return text_obj_list, chevron_obj_list, other_element_obj_list
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/diagrams/diagrams/noisy_data_diagram.py | noisy_data_diagram.py | py | 10,532 | python | en | code | 0 | github-code | 36 |
33718174339 | # 기다리는 시간을 줄이려면, 작업 시간이 짧은 작업부터 하는 것이 이득
# 그렇지만, 먼저 들어온 것을 빠르게 처리하는 것도 중요
# 하드디스크가 작업을 수행하고 있지 않을 때에는 먼저 요청이 들어온 작업부터 처리! -> 처음에 들어온 작업은 무조건 시작된다는 뜻!
# 힙에 값을 넣어주면, 자동으로 값이 작은 순서대로 정렬됨
# 테케 19번 안되는 문제 해결!
from heapq import heappush, heappop
def solution(jobs):
    """Shortest-Job-First disk scheduling (Programmers 42627).

    jobs: list of [request_time, duration] pairs. While the disk is busy,
    arriving jobs wait in a min-heap keyed by duration; when it is idle, the
    earliest-requested job starts immediately. Returns the floor of the mean
    turnaround time (completion time minus request time).
    """
    total_turnaround = 0
    clock = 0
    ready = []                            # min-heap of (duration, request_time)
    pending = sorted(jobs, reverse=True)  # stack: earliest request on top

    while pending or ready:
        # Move every job already requested by `clock` into the ready heap.
        while pending and pending[-1][0] <= clock:
            requested, duration = pending.pop()
            heappush(ready, (duration, requested))

        if ready:
            # Run the shortest job that is waiting.
            duration, requested = heappop(ready)
            clock += duration
            total_turnaround += clock - requested
        else:
            # Disk idle: jump to the next request and run it immediately.
            requested, duration = pending.pop()
            clock = requested + duration
            total_turnaround += duration

    return total_turnaround // len(jobs)
1938568135 | import os
import glob
import pathlib
from io import StringIO
from typing import List, Dict, Optional
from dataclasses import dataclass
import kclvm.compiler.parser.parser as parser
import kclvm.compiler.vfs as vfs
import kclvm.compiler.extension.plugin.plugin_model as plugin
import kclvm.compiler.extension.builtin.builtin as builtin
import kclvm.kcl.ast.ast as ast
import kclvm.kcl.info as kcl_info
import kclvm.tools.printer.printer as printer
@dataclass
class Config:
    """Column widths (in characters) for the schema attribute table output."""
    name_len: int = 30      # attribute name column
    type_len: int = 30      # attribute type column
    default_len: int = 30   # default value column
    final_len: int = 10     # "final" flag column
    optional_len: int = 10  # "optional" flag column
def get_import_module(
    module: ast.Module, result: Dict[str, ast.Module] = None
) -> Optional[Dict[str, ast.Module]]:
    """Recursively collect all modules (transitively) imported by `module`.

    Returns a dict keyed by absolute .k file path, or None when `module`
    is falsy. Plugin imports and standard system modules are skipped.
    `result` is the shared accumulator used by the recursive calls.
    """
    if not module:
        return None
    assert isinstance(module, ast.Module)
    if not result:
        result = {}
    import_file_list = []
    import_stmt_list = module.GetImportList()
    work_dir = os.path.dirname(module.filename)
    # Fall back to the module's own directory when no package root is found.
    root: str = vfs.GetPkgRoot(work_dir)
    if not root:
        root = work_dir
    for stmt in import_stmt_list or []:
        if (
            stmt.path.startswith(plugin.PLUGIN_MODULE_NAME)
            or stmt.name in builtin.STANDARD_SYSTEM_MODULES
        ):
            continue
        # import_path to abs_path (dotted import path -> filesystem path)
        fix_path = vfs.FixImportPath(root, module.filename, stmt.path).replace(
            ".", os.sep
        )
        abs_path = os.path.join(root, fix_path)
        # Get all .k file if path is a folder
        if os.path.isdir(abs_path):
            file_glob = os.path.join(abs_path, "**", kcl_info.KCL_FILE_PATTERN)
            import_file_list += glob.glob(file_glob, recursive=True)
        else:
            abs_path += kcl_info.KCL_FILE_SUFFIX
            import_file_list.append(abs_path)
    for file in import_file_list:
        # Skip `_*.k` and `*_test.k` kcl files
        if os.path.basename(file).startswith("_"):
            continue
        if file.endswith("_test.k"):
            continue
        # Parse each new file once; recurse into its imports in turn.
        if file not in result:
            import_module = parser.ParseFile(file)
            result[file] = import_module
            if import_module.GetImportList():
                get_import_module(import_module, result)
    return result
def get_import_schema(
    module: ast.Module,
) -> Optional[Dict[ast.Module, List[ast.SchemaStmt]]]:
    """Map every module (transitively) imported by `module` to the list of
    schemas that module defines. Returns None when `module` is falsy."""
    if not module:
        return None
    assert isinstance(module, ast.Module)
    return {
        imported: imported.GetSchemaList()
        for imported in get_import_module(module).values()
    }
class FullSchema(ast.SchemaStmt):
    """A schema bundled with the attributes inherited from its base schemas.

    TODO: mixin attributes are not collected yet.
    """
    def __init__(self, schema: ast.SchemaStmt, module: ast.Module) -> None:
        super().__init__(schema.line, schema.column)
        self.self_schema = schema
        # Parent schema name -> that parent's own attribute list.
        self.parent_attr: Dict[str, List[ast.SchemaAttr]] = get_parent_attr_map(
            schema, module, {}
        )
    def __str__(self):
        # Format: "Name, attr:[a, b], parent:Base, attr:[c, d],"
        s = self.self_schema.name + ", attr:["
        for name in self.self_schema.GetAttrNameList():
            s += f"{name}, "
        # Drop the trailing ", " before closing the bracket.
        s = s[:-2] + "],"
        for p in self.parent_attr:
            s += f" parent:{p}, attr:["
            for attr in self.parent_attr[p]:
                s += f"{attr.name}, "
            s = s[:-2] + "],"
        return s
def get_parent_attr_map(
    ori_schema: ast.SchemaStmt,
    module: ast.Module,
    result: Dict[str, List[ast.SchemaAttr]] = None,
) -> Optional[Dict[str, List[ast.SchemaAttr]]]:
    """Walk the inheritance chain of `ori_schema`, collecting each ancestor's attrs.

    `result` maps ancestor schema name -> its attribute list; it is the shared
    accumulator used by the recursive calls.  Returns None on falsy input, an
    empty dict when the schema has no parent.
    """
    if not ori_schema or not module:
        return None
    assert isinstance(ori_schema, ast.SchemaStmt)
    assert isinstance(module, ast.Module)
    if not result:
        result = {}
    if not ori_schema.parent_name:
        return result
    else:
        # Current module and schema.
        full_schema_map: Dict[ast.Module, List[ast.SchemaStmt]] = {
            module: module.GetSchemaList()
        }
        # Import module and schemas.
        full_schema_map.update(get_import_schema(module))
        # key : module , value: List[ast.SchemaStmt]
        # Scan every known module for the schema named by parent_name; the
        # for/else pairs below implement a "break out of both loops" once a
        # match is found.
        for key, value in full_schema_map.items():
            for schema in value:
                if schema.name == ori_schema.parent_name.get_name():
                    result[schema.name] = schema.GetAttrList()
                    # Keep climbing if the parent itself has a parent,
                    # resolving it relative to the module it was found in.
                    if schema.parent_name:
                        get_parent_attr_map(schema, key, result)
                    break
                else:
                    continue
                # NOTE(review): unreachable — both branches above transfer
                # control before this statement.
                break
            else:
                continue
            break
    return result
def get_full_schema_list(module: ast.Module) -> List[FullSchema]:
    """Wrap every schema defined directly in `module` in a FullSchema."""
    return [FullSchema(schema, module) for schema in module.GetSchemaList()]
class ListAttributePrinter:
    """Pretty-printer for the schemas (and their attributes) found in a .k file."""
    def __init__(self, file: str = None, config: Config = Config()) -> None:
        # NOTE(review): `Config()` as a default is evaluated once at def time
        # and shared between calls; harmless while it is only read from.
        self.file = file
        # Fixed column widths for the tabular output, taken from the config.
        self.name_len = config.name_len
        self.type_len = config.type_len
        self.default_len = config.default_len
        self.final_len = config.final_len
        self.optional_len = config.optional_len
        # Populated by build_full_schema_list().
        self.module = None
        self.schema_list = None
        self.import_schema_list = None
        self.full_schema_list = None
    def build_full_schema_list(self):
        """Parse self.file and cache its schemas, imported schemas and FullSchemas."""
        self.module = parser.ParseFile(self.file)
        self.schema_list = self.module.GetSchemaList()
        self.import_schema_list = get_import_schema(self.module)
        self.full_schema_list = get_full_schema_list(self.module)
    def print(self):
        """Parse the file, then print the schema list and each schema's structure."""
        self.build_full_schema_list()
        if self.module:
            self.print_schema_list()
            self.print_schema_structures()
    def print_schema_list(self):
        """Print the names of schemas defined in, and imported into, the file."""
        print("------------ schema list ------------")
        file_path = self.module.filename
        file_name = pathlib.Path(file_path).name
        print("Here are schemas defined in {}:".format(file_name))
        for schema in self.schema_list:
            print("- " + schema.name)
        print("Here are schemas imported to {}:".format(file_name))
        for key, value in self.import_schema_list.items():
            import_file_path = key.filename
            import_file_name = pathlib.Path(import_file_path).name
            # Only mention imported files that actually define schemas.
            if len(value) > 0:
                print("imported from {}".format(import_file_name))
                for schema in value:
                    print("- " + schema.name)
    def print_schema_structures(self):
        """Print one attribute table per schema, own attrs first, then inherited."""
        print("------------ schema structures ------------")
        for full_schema in self.full_schema_list:
            print("schema {}:".format(full_schema.self_schema.name))
            self.print_header()
            for attr in full_schema.self_schema.GetAttrList():
                self.print_schema_attr(attr)
            for key, value in full_schema.parent_attr.items():
                print("attrs inherited from {}".format(key))
                for attr in value:
                    self.print_schema_attr(attr)
            print()
    def _print_schema_attr(self, attr: ast.SchemaAttr, default: str):
        """Print one attribute row; over-wide name/type values are elided with '...'."""
        print(
            "{:<{}}{:<{}}{:<{}}{:<{}}{:<{}}".format(
                # name
                attr.name
                if len(attr.name) <= self.name_len
                else attr.name[: self.name_len - 3] + "...",
                self.name_len,
                # type
                attr.type_str
                if len(attr.type_str) <= self.type_len
                else attr.type_str[: self.type_len - 3] + "...",
                self.type_len,
                # default
                default,
                self.default_len,
                "",
                self.final_len,
                # is_optional
                "" if attr.is_optional else "Required",
                self.optional_len,
            )
        )
    def print_schema_attr(self, attr: ast.SchemaAttr):
        """Print one attribute, rendering its default value compactly."""
        if not attr:
            return
        assert isinstance(attr, ast.SchemaAttr)
        if attr.value and isinstance(attr.value, ast.SchemaExpr):
            """
            Because ast node SchemaExpr is too long to print,
            when the default value of attr.value is a SchemaExpr,just print schema name,e.g.:
            schema Name:
                firstName : str
                lastName : str
            schema Person:
                name: Name = Name {
                    firstName = "hello"
                    lastName = "world"
                }
            -------------------------------------
            schema Person:
            name type default ...
            name Name -> Name{...}
            """
            default = (
                attr.type_str
                if len(attr.type_str) <= (self.default_len - 5)
                else attr.type_str[: self.default_len - 5]
            ) + "{...}"
            self._print_schema_attr(attr, default)
            return
        # Any other default expression is rendered via the AST printer and
        # collapsed to "..." when it is too wide or spans multiple lines.
        with StringIO() as expr:
            printer.PrintAST(attr.value, expr)
            default_str = expr.getvalue()
            if len(default_str) > self.default_len or ("\n" in default_str):
                default_str = "..."
            self._print_schema_attr(attr, default_str)
    def print_header(self):
        """Print the column header row using the configured widths."""
        print(
            "{:<{}}{:<{}}{:<{}}{:<{}}{:<{}}".format(
                # name
                "name",
                self.name_len,
                # type
                "type",
                self.type_len,
                # default
                "default",
                self.default_len,
                # is_final
                "is_final",
                self.final_len,
                # is_optional
                "is_optional",
                self.optional_len,
            )
        )
| kcl-lang/kcl-py | kclvm/tools/list_attribute/utils.py | utils.py | py | 9,943 | python | en | code | 8 | github-code | 36 |
import requests
import json
import sys
from .ui import print_success
from requests.auth import HTTPBasicAuth
def find_github_emails(organization,organization_domain,github_api,github_username,github_token):
    """Collect public e-mail addresses of an organization's GitHub members.

    Only addresses whose domain equals `organization_domain` are kept; each
    hit is formatted as ",,<email>," (CSV column padding used by the caller).
    NOTE: `page_number < 2` deliberately limits the scan to the first page
    (100 members) of the org member list.
    """
    github_emails = []
    print_success("[+] Searching GitHub")
    page_number = 1
    while page_number < 2:
        orgquery = requests.get((github_api + "/orgs/{0}/members?per_page=100&page={1}".format(
            organization, page_number)), auth=HTTPBasicAuth(github_username, github_token))
        results = json.loads(orgquery.text)
        for result in results:
            try:
                username = result["login"]
                userquery = requests.get((github_api + "/users/{0}".format(username)),
                                         auth=HTTPBasicAuth(github_username, github_token))
                userdata = json.loads(userquery.text)
                email = userdata["email"]
                if email:
                    check_domain = email.split("@")
                    if check_domain[1] == organization_domain:
                        github_emails.append(",,{0},".format(email))
            except Exception:
                # A malformed or rate-limited response makes the remaining
                # entries unusable too, so stop processing this page.  (The
                # previous bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit.)
                break
        page_number += 1
    return github_emails
| highmeh/lure | resources/github.py | github.py | py | 1,007 | python | en | code | 148 | github-code | 36 |
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
import math
# C-level `time_t`, probed from <time.h> for the build platform.
time_t = rffi_platform.getsimpletype('time_t', '#include <time.h>', rffi.SIGNED)
eci = ExternalCompilationInfo(includes=['time.h'])
# Low-level binding to the C library's time() (seconds since the epoch).
# Note: this shadows the name `time` at module scope.
time = rffi.llexternal('time', [lltype.Signed], time_t,
                       compilation_info=eci)
def get(space, name):
    """Fetch attribute `name` from the applevel `_demo` module."""
    w_module = space.getbuiltinmodule('_demo')
    return space.getattr(w_module, space.newtext(name))
@unwrap_spec(repetitions=int)
def measuretime(space, repetitions, w_callable):
    """Applevel _demo.measuretime(repetitions, callable).

    Calls `callable` `repetitions` times and returns the elapsed wall-clock
    time in whole seconds (via the C time() binding above).  Raises applevel
    DemoError when `repetitions` is not positive.
    """
    if repetitions <= 0:
        w_DemoError = get(space, 'DemoError')
        raise oefmt(w_DemoError, "repetition count must be > 0")
    starttime = time(0)
    for i in range(repetitions):
        space.call_function(w_callable)
    endtime = time(0)
    return space.newint(endtime - starttime)
@unwrap_spec(n=int)
def sieve(space, n):
    """Applevel _demo.sieve(n): primes in [2, n] via repeated filtering.

    Each pass keeps everything <= the current head (values already confirmed
    prime) and drops multiples of the head; once head > sqrt(n) + 1 the list
    holds only primes and is wrapped for applevel use.
    NOTE: RPython/Python-2 era code — range() returns an indexable list here.
    """
    lst = range(2, n + 1)
    head = 0
    while 1:
        first = lst[head]
        if first > math.sqrt(n) + 1:
            lst_w = [space.newint(i) for i in lst]
            return space.newlist(lst_w)
        newlst = []
        for element in lst:
            # keep values already confirmed prime (<= the current head)
            if element <= first:
                newlst.append(element)
            # ...and every candidate not divisible by the current head
            elif element % first != 0:
                newlst.append(element)
        lst = newlst
        head += 1
class W_MyType(W_Root):
    """Interp-level implementation of the applevel demo type `_demo.MyType`."""
    def __init__(self, space, x=1):
        self.space = space
        self.x = x
    def multiply(self, w_y):
        """Applevel MyType.multiply(y): return self.x * y as a wrapped int."""
        space = self.space
        y = space.int_w(w_y)
        return space.newint(self.x * y)
    def fget_x(self, space):
        # Property getter for the applevel attribute `x`.
        return space.newint(self.x)
    def fset_x(self, space, w_value):
        # Property setter for the applevel attribute `x`.
        self.x = space.int_w(w_value)
@unwrap_spec(x=int)
def mytype_new(space, w_subtype, x):
    """Applevel MyType.__new__: demo of returning a subclass for a magic value."""
    if x == 3:
        # x == 3 deliberately constructs the subclass (demo behaviour).
        return MySubType(space, x)
    return W_MyType(space, x)
# Applevel property object backing MyType.x (uses the fget/fset above).
getset_x = GetSetProperty(W_MyType.fget_x, W_MyType.fset_x, cls=W_MyType)
# Interp-level subclass returned by mytype_new() when x == 3.
class MySubType(W_MyType):
    pass
# Applevel type definition: exposes __new__, the `x` property and multiply().
W_MyType.typedef = TypeDef('MyType',
    __new__ = interp2app(mytype_new),
    x = getset_x,
    multiply = interp2app(W_MyType.multiply),
)
| mozillazg/pypy | pypy/module/_demo/demo.py | demo.py | py | 2,385 | python | en | code | 430 | github-code | 36 |
fib = [0 for x in range(111)]
# UVa 763 "Fibinary Numbers": read two numbers written in Fibonacci base
# (Zeckendorf representation), add them, and print the normalized sum.
# fib[1..110] holds the digit values 1, 2, 3, 5, 8, ...
fib[1] = 1
fib[2] = 2
for i in range(3, 111):
    fib[i] = fib[i-1] + fib[i-2];
case = 1
while True:
    # Read one pair of Fibonacci-base numbers; EOF ends the run.
    # (bare except intentionally treats EOFError as end-of-input)
    try:
        A = input()
        B = input()
        # blank line between consecutive test cases
        if case != 1:
            print("")
        case += 1
    except:
        break
    la = len(A)
    lb = len(B)
    # Convert both inputs to an ordinary integer total
    # (digit i from the left is worth fib[length - i]).
    total = 0
    for i in range(la):
        if A[i] == '1':
            total += fib[la-i]
    for i in range(lb):
        if B[i] == '1':
            total += fib[lb-i]
    if total == 0:
        print('0')
    else:
        # Greedy Zeckendorf re-encoding: take the largest Fibonacci number
        # that still fits, working down from fib[110].
        ans = ['0' for x in range(111)]
        for i in range(110, 0, -1):
            if fib[i] <= total:
                total -= fib[i]
                ans[i] = '1'
        # Print from the most significant non-zero digit down.
        k = 110
        while ans[k] == '0': k -= 1
        while k > 0:
            print(ans[k],end='')
            k -= 1
        print("")
    # Consume the blank separator line between cases; EOF ends the run.
    try:
        input()
    except:
        break
| tuananhcnt55vmu/Online-Judge | uva_763.py | uva_763.py | py | 692 | python | en | code | 0 | github-code | 36 |
import numpy as np
import matplotlib.pyplot as plt
# LQG reaching model: 2-D point mass with muscle-like low-pass filtered input,
# first simulated with full state feedback, then with a Kalman estimator.
N = 50 # Nb of steps
dt = 0.01
kv = 0.1
tau = 0.05
lamb = 0
ns = 6
###############################
## Complete the code below ####
###############################
# Discrete-time dynamics x' = A x + B u.  From the structure of A/B the state
# is (pos_x, pos_y, vel_x, vel_y, force_x, force_y): positions integrate
# velocity, velocities decay by kv and integrate force, forces are first-order
# low-pass filters (time constant tau) of the command.
A = np.array([[1,0,dt,0,0,0],[0,1,0,dt,0,0],[0,0,1-kv*dt,0,dt,0],[0,0,0,1-kv*dt,0,dt],[0,0,0,0,1-dt/tau,0],[0,0,0,0,0,1-dt/tau]])
B = np.zeros((6,2))
B[4,:]=np.array([dt/tau,0])
B[5,:]=np.array([0,dt/tau])
# Terminal cost weights: position errors (w1, w2) are weighted 100x more than
# velocity errors (w3, w4); forces are not penalized at the final step.
w1 = 10
w2 = 10
w3 = 0.1
w4 = 0.1
QN = np.zeros((6,6))
QN[0,0]=w1
QN[1,1]=w2
QN[2,2]=w3
QN[3,3]=w4
# We set the R matrix as follows, later on you can change it to see its effect on the controller
R = np.array([(10 ** -4, 0), (0, 10 ** -4)])
L = np.zeros((N, 2, ns))
S = np.zeros((N, ns, ns))
Q = np.zeros((N, ns, ns))
###############################
## Complete the code below ####
###############################
# (hint : fill in L and S matrices in the backward loop)
# Backward Riccati recursion: L[i] is the feedback gain applied at step i.
Q[N - 1, :, :] = QN
S[N - 1, :, :] = QN
for i in range(N - 1, 0, -1):
    L[i,:,:]=np.linalg.solve(R+B.T@S[i,:,:]@B,B.T@S[i,:,:]@A)
    S[i-1,:,:]=A.T@S[i,:,:]@(A-B@L[i,:,:])
X = np.zeros((N, ns, 1))
#Change the first entries of the vector below to investigate different starting position
print(L[45,:,:])
X[0, :, :] = [[0.2], [0.3], [0], [0], [0], [0]]
#Computation of the motor noise
Xi = np.random.normal(loc=0, scale=10 ** -4, size=(N, 6, 1))
###############################
## Complete the code below ####
###############################
# Closed-loop rollout with full state feedback (no estimator yet).
for j in range(0, N - 1):
    X[j+1,:,:]=(A-B@L[j,:,:])@X[j,:,:]+Xi[j,:,:]
###############################
## Complete the code below ####
###############################
#Create a representation of positions and speeds with respect to time and characterise their evolution
fig, ax=plt.subplots()
ax.plot(X[:,0,:],X[:,1,:],'r')
fig, ax=plt.subplots()
ax.plot(range(N),X[:,0,:],'b')
ax.plot(range(N),X[:,1,:],'r')
fig, ax=plt.subplots()
ax.plot(range(N),X[:,2,:],'r')
ax.plot(range(N),X[:,3,:],'b')
#Initialize the state estimation... What is the size of the matrix? How would you complete the information corresponding to the first time step?
Xhat = np.zeros_like(X)
Xhat[0, :, :] = X[0,:,:] + np.random.normal(loc=0, scale=10 ** -6, size=(6, 1))
#Initialization of the command and observable
Y = np.zeros((N, ns, 1))
# NOTE(review): U is allocated but never written or read below.
U = np.zeros((N,2,1))
#Initialization of the covariance matrix of the state, how would you initialize the first covariance matrix?
Sigma = np.zeros((N, ns, ns))
# NOTE(review): a (1, ns, 1) draw broadcasts one value per row across the
# columns of Sigma[0]; a full (ns, ns) draw may have been intended — confirm.
Sigma[0,:,:] = np.random.normal(loc=0, scale=10 ** -2, size=(1, ns, 1))
#Some more initialization (nothing to do for you here)
K = np.zeros((N, ns, ns))
H = np.eye(ns)
Xi = np.random.normal(loc=0, scale=10 ** -4, size=(N, ns, 1))
Omega = np.random.normal(loc=0, scale=10 ** -2, size=(N, ns, 1))
oXi = 0.1 * (B @ B.T)
oOmega = 0.1 * np.max(np.max(oXi)) * np.eye(ns)
#Fill in the following loop to complete
#
# state evolution
# observation evolution
# computation of K and Sigma
# computation of the command
# evolution of the state estimation
for j in range(0, N - 1):
    X[j+1,:,:]=A@X[j,:,:]-B@L[j,:,:]@Xhat[j,:,:]+Xi[j,:,:]
    # NOTE(review): the observation stores H x_j into Y[j+1] while the
    # correction term below reads Y[j] — check this indexing against the
    # intended Kalman filter formulation.
    Y[j+1,:,:] = H@X[j,:,:]+Omega[j+1,:,:]
    K[j,:,:] = A@Sigma[j,:,:]@H.T@np.linalg.inv(H@Sigma[j,:,:]@H.T+oOmega)
    Sigma[j+1,:,:] = oXi + (A-K[j,:,:]@H)@Sigma[j,:,:]@A.T
    Xhat[j+1,:,:] = (A-B@L[j,:,:])@Xhat[j,:,:] + K[j,:,:]@(Y[j,:,:]-H@Xhat[j,:,:])
#Plot the time evolution of the state, its observation and its estimation.. What do you observe?
fig, ax=plt.subplots()
ax.plot(X[:,0,:],X[:,1,:],'r')
fig, ax=plt.subplots()
ax.plot(range(N),X[:,0,:],'b')
ax.plot(range(N),X[:,1,:],'r')
ax.plot(range(N),Xhat[:,0,:],'b:')
ax.plot(range(N),Xhat[:,1,:],'r:')
fig, ax=plt.subplots()
ax.plot(range(N),X[:,2,:],'r')
ax.plot(range(N),X[:,3,:],'b')
plt.show()
try:
import json
except ImportError:
import simplejson as json
import requests
import pandas
import datetime
import time
# Please enter the file's path of the URLs to be checked
file_path = str(input('Please Enter The File Path: '))
input_CSV = pandas.read_csv(file_path)
# The "Domain" column of the input CSV holds the URLs/domains to scan.
Urls = input_CSV['Domain'].tolist()
# NOTE(review): hardcoded VirusTotal API key committed to source — should be
# moved to an environment variable or secrets store, and rotated.
API_key = '7459dcceaae97bf8fbed29997d9b05003db3e42c92e4de20ddde4e9bf2cb053f'
url = 'https://www.virustotal.com/vtapi/v2/url/report'
# Classify a VirusTotal report as "Safe" or "Risk".
def check_if_url_safe(j_response):
    """Return "Risk" if any scanner flagged the URL as malicious/phishing/malware, else "Safe"."""
    flagged = {'malicious site', 'phishing site', 'malware site'}
    verdicts = [scan.get("result") for scan in j_response['scans'].values()]
    return "Risk" if any(v in flagged for v in verdicts) else "Safe"
# Decide whether `site` still needs a fresh VirusTotal lookup.
def if_checked_in_30_last_minutes(site):
    """Return True when `site` must be queried (i.e. it has never been recorded).

    Known sites are never re-queried by the caller: when the cached sample is
    at least 30 minutes old, the stored row is refreshed in place via
    update_site_info() before reporting the site as known.
    """
    now_time = datetime.datetime.now()
    output_CSV = pandas.read_csv("URLs_Status.csv")
    known_sites = output_CSV['URL'].tolist()
    if site not in known_sites:
        return True
    index = known_sites.index(site)
    last_sample_time = datetime.datetime.strptime(
        output_CSV['Sample_Time'][index], '%Y-%m-%d %H:%M:%S.%f')
    minutes_since_sample = (now_time - last_sample_time).total_seconds() / 60
    if minutes_since_sample >= 30:
        # Stale entry: refresh the stored verdict before reporting "known".
        update_site_info(site, index)
    return False
# Refresh the cached verdict row for a site whose sample is >30 minutes old.
def update_site_info(site, index):
    """Re-query VirusTotal for `site` and rewrite row `index` of URLs_Status.csv."""
    params = {'apikey': API_key, 'resource': site}
    report = json.loads(requests.get(url=url, params=params).text)
    risk = check_if_url_safe(report)
    votes = report['total']
    sampled_at = datetime.datetime.now()
    output_CSV = pandas.read_csv("URLs_Status.csv")
    output_CSV.at[index, 'Sample_Time'] = sampled_at
    output_CSV.at[index, 'Sites_Risk'] = risk
    output_CSV.at[index, 'Total_Voting'] = votes
    output_CSV.to_csv("URLs_Status.csv", index=False)
# Check the list of the sites obtained from the URLs file
for i in Urls:
    check_site = if_checked_in_30_last_minutes(i)
    # A new site that has not been queried yet
    if check_site:
        parameters = {'apikey': API_key, 'resource': i}
        response = requests.get(url=url, params=parameters)
        json_response = json.loads(response.text)
        sites_risk = check_if_url_safe(json_response)
        total_voting = json_response['total']
        sample_time = datetime.datetime.now()
        # Append the new verdict as one row of URLs_Status.csv.
        # NOTE(review): header=False means the file gets no header row if it
        # does not exist yet, which if_checked_in_30_last_minutes relies on —
        # confirm the file is pre-created with headers.
        row_in = pandas.DataFrame([[i, sample_time, sites_risk, total_voting]],
                                  columns=['URL', 'Sample_Time', 'Sites_Risk', 'Total_Voting'])
        row_in.to_csv('URLs_Status.csv', mode='a', header=False, index=False)
        # we can check up to 4 sites per minute
        time.sleep(15)
| dorintu/VirusTotal | VirusTotal_Assignment.py | VirusTotal_Assignment.py | py | 3,484 | python | en | code | 0 | github-code | 36 |
from os import scandir, rename
from os.path import splitext, exists
from shutil import move
from time import sleep
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
src_path = ""  # local path to folder (prefix for all watched/managed dirs)
source_dir = src_path + "Downloads"
dest_dir_docs = src_path + "Documents"
dest_dir_pics = src_path + "Pictures"
dest_dir_vid = src_path + "Movies"
dest_dir_mus = src_path + "Music"

# The extension groups are tuples (not lists) because they are passed to
# str.endswith(), which accepts a str or a tuple of str but raises TypeError
# for a list.

# ? supported image types
image_extensions = (".jpg", ".jpeg", ".jpe", ".jif", ".jfif", ".jfi", ".png", ".gif", ".webp", ".tiff", ".tif", ".psd", ".raw", ".arw", ".cr2", ".nrw",
                    ".k25", ".bmp", ".dib", ".heif", ".heic", ".ind", ".indd", ".indt", ".jp2", ".j2k", ".jpf", ".jpx", ".jpm", ".mj2", ".svg", ".svgz", ".ai", ".eps", ".ico")
# ? supported Video types
video_extensions = (".webm", ".mpg", ".mp2", ".mpeg", ".mpe", ".mpv", ".ogg",
                    ".mp4", ".mp4v", ".m4v", ".avi", ".wmv", ".mov", ".qt", ".flv", ".swf", ".avchd")
# ? supported Audio types (was "mp3" — missing the leading dot)
audio_extensions = (".m4a", ".flac", ".mp3", ".wav", ".wma", ".aac")
# ? supported Document types
document_extensions = (".doc", ".docx", ".odt",
                       ".pdf", ".xls", ".xlsx", ".ppt", ".pptx")
def makeUnique(dest, name):
    """Return a filename that does not collide with anything in directory `dest`.

    While `dest/name` exists, a counter is appended to the original stem:
    "report.pdf" -> "report(1).pdf" -> "report(2).pdf" ...
    (Previously the stem was replaced by the literal "(unknown)", so every
    colliding file mapped onto the same "(unknown)(n)" name sequence.)
    """
    filename, extension = splitext(name)
    counter = 1
    # Bump the counter until the candidate name is free in `dest`.
    while exists(f"{dest}/{name}"):
        name = f"{filename}({counter}){extension}"
        counter += 1
    return name
def move(dest, entry, name):
    """Move file `entry` into directory `dest`, de-duplicating the target name.

    Fixes three crashes in the original: `path.exists` referenced an
    unimported `path` module, `makeUnique` was called with one argument (it
    takes two), and the final move recursed into this function (which shadows
    the `shutil` import of the same name) with the wrong arity.
    """
    import shutil
    if exists(f"{dest}/{name}"):
        # Destination already has a file with this name: pick a unique one.
        name = makeUnique(dest, name)
    shutil.move(entry, f"{dest}/{name}")
class MoveHandler(FileSystemEventHandler):
    """Watchdog handler: on any change in Downloads, sort files by type."""

    def on_modified(self, event):
        # Rescan the whole source folder; `event` itself is ignored because a
        # single event may coincide with several new files.
        with scandir(source_dir) as entries:
            for entry in entries:
                name = entry.name
                dest = source_dir
                # tuple(...) guards against the extension groups being lists:
                # str.endswith only accepts a str or tuple of str.
                if name.endswith(tuple(audio_extensions)):
                    dest = dest_dir_mus
                elif name.endswith(tuple(document_extensions)):
                    dest = dest_dir_docs
                elif name.endswith(tuple(video_extensions)):
                    dest = dest_dir_vid
                elif name.endswith(tuple(image_extensions)):
                    dest = dest_dir_pics
                # The original computed `dest` but never used it; actually
                # relocate the file when a destination was matched.
                if dest != source_dir:
                    move(dest, entry.path, name)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = source_dir
    event_handler = MoveHandler()
    observer = Observer()
    # Watch the Downloads folder; each filesystem event triggers on_modified.
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        # Idle forever; the observer does its work on a background thread.
        while True:
            sleep(1)
    finally:
        # No `except`: Ctrl-C still propagates, but the observer is stopped first.
        observer.stop()
        observer.join()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import gallery.models
class Migration(migrations.Migration):
    """Auto-generated migration: make Exhibit.image optional (null/blank) and
    route uploads through gallery.models.upload_image_to."""
    dependencies = [
        ('gallery', '0004_exhibit'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exhibit',
            name='image',
            field=models.ImageField(null=True, upload_to=gallery.models.upload_image_to, blank=True),
            preserve_default=True,
        ),
    ]
| andrewhead/Gallery-Paths | server/gallery/migrations/0005_auto_20141206_2124.py | 0005_auto_20141206_2124.py | py | 499 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 07:39:26 2017
@author: mnshr
"""
from statsmodels.stats.outliers_influence import variance_inflation_factor
"""
VIF = 1 (Not correlated) 1 < VIF < 5 (Moderately correlated) VIF > 5 to 10 (Highly correlated)
VIF is one way to understand whether any two independent variable are highly correlated.
In that case both the variable explain same variance in the model.
So it is generally good to drop any one of them.
Feature selection vs Multicollinearity Checks Vs Mean Centering (Standardization)
Answer by Robert Kubrick in the following link give some interesting information
on the front
https://stats.stackexchange.com/questions/25611/how-to-deal-with-multicollinearity-when-performing-variable-selection
VIF calculations are straightforward - the higher the value, the higher the collinearity.
"""
def calculate_vif_(X):
    """Return {column name: variance inflation factor} for every column of DataFrame X."""
    return {
        col: variance_inflation_factor(exog=X.values, exog_idx=ix)
        for ix, col in enumerate(X.columns)
    }
from scipy.stats import kendalltau
#https://www.kaggle.com/ffisegydd/sklearn-multicollinearity-class/comments/notebook
from statsmodels.stats.outliers_influence import variance_inflation_factor
class ReduceVIF(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that drops columns until every VIF <= thresh.

    NOTE(review): relies on `Imputer`, which only exists in old scikit-learn
    releases (modern versions provide sklearn.impute.SimpleImputer) — confirm
    the pinned sklearn version.
    """
    def __init__(self, thresh=5.0, impute=True, impute_strategy='median'):
        # From looking at documentation, values between 5 and 10 are "okay".
        # Above 10 is too high and so should be removed.
        self.thresh = thresh
        # The statsmodel function will fail with NaN values, as such we have to impute them.
        # By default we impute using the median value.
        # This imputation could be taken out and added as part of an sklearn Pipeline.
        if impute:
            self.imputer = Imputer(strategy=impute_strategy)
    def fit(self, X, y=None):
        """Fit the (optional) imputer on X; `y` is ignored."""
        print('ReduceVIF fit')
        if hasattr(self, 'imputer'):
            self.imputer.fit(X)
        return self
    def transform(self, X, y=None):
        """Impute X (if configured), then drop high-VIF columns."""
        print('ReduceVIF transform')
        columns = X.columns.tolist()
        if hasattr(self, 'imputer'):
            X = pd.DataFrame(self.imputer.transform(X), columns=columns)
        return ReduceVIF.calculate_vif(X, self.thresh)
    @staticmethod
    def calculate_vif(X, thresh=5.0):
        """Repeatedly drop the column with the largest VIF until all VIFs <= thresh."""
        # Taken from https://stats.stackexchange.com/a/253620/53565 and modified
        dropped=True
        while dropped:
            variables = X.columns
            dropped = False
            # One VIF per remaining column; recomputed after every drop.
            vif = [variance_inflation_factor(X[variables].values, X.columns.get_loc(var)) for var in X.columns]
            max_vif = max(vif)
            if max_vif > thresh:
                maxloc = vif.index(max_vif)
                print(f'Dropping {X.columns[maxloc]} with vif={max_vif}')
                X = X.drop([X.columns.tolist()[maxloc]], axis=1)
                dropped=True
        return X
import random
import varibles
import math
# Shared search state: the current solution and (for SA) the best one found.
now_state = {'blist': [], 'weight': 0, 'value': 0}   # current solution
best_state = {'blist': [], 'weight': 0, 'value': 0}  # global best (SA only)


# --------- refresh the "now" state ---------
def UpdateNowState(blist, w, v):
    """Overwrite the current solution with pick list `blist`, weight `w`, value `v`."""
    now_state.update(blist=blist, weight=w, value=v)
# --------- random *feasible* initial state (solution) ----------
def initialState(mode):
    """Draw random pick lists until one fits the capacity; store it as now/best.

    mode 's' tightens capacity to 60% (used by the SA variant).
    Returns (binary string, weight, value) of the feasible pick.
    """
    global best_state
    pickBound = math.pow(2, int(varibles.objNums)) #upperbound: 2^15
    inti_w = 0; init_v = 0
    cap = varibles.capcity
    if mode == 's': cap = cap * 0.6
    while(1):
        initNum = format(random.randrange(int(pickBound/2) , pickBound), 'b') #range: (2^15)/2 .. 2^15
        blist = binToList(initNum) #expand into a 0/1 list
        (w, v) = calTotalWandV(blist) #compute weight & value
        if w <= cap: #feasible?
            inti_w = w
            init_v = v
            break
    # "初始解" = "initial solution" (user-facing output, left untranslated)
    print("初始解",blist,w, v)
    UpdateNowState(blist, inti_w, init_v) #store as the "now" state
    best_state = now_state.copy() #(SA) start with best = now
    return (initNum, inti_w, init_v) #return the feasible binary string
# --------- expand a binary number into a per-object 0/1 list ---------
def binToList(bin):
    """Turn binary string `bin` into a pick list, right-padded with 0 to objNums entries."""
    digits = [int(ch) for ch in list(format(bin))]
    return digits + [0] * (varibles.objNums - len(digits))
# ---------- total weight and value of a pick list ----------
def calTotalWandV(blist):
    """Sum the weight and value of every object whose bit in `blist` is 1."""
    tempWeight = 0
    tempValue = 0
    for idx, picked in enumerate(blist):
        if picked == 1:
            tempValue += varibles.values[idx]
            tempWeight += varibles.weights[idx]
    return (tempWeight, tempValue)
# ---------- neighbour generator for hill climbing ----------
def delOrAdd_HeavyOrLight(list, mode):
    """Flip one randomly chosen bit equal to `mode` in `list` (in place).

    mode=0 adds an item (0 -> 1); mode=1 removes one (1 -> 0).
    Returns {'flag': whether a bit was flipped, 'new_oper_list': the list}.
    """
    candidates = [i for i, x in enumerate(list) if x == mode]
    flipped = bool(candidates)
    if flipped:
        list[random.choice(candidates)] = int(not mode)
    return {'flag': flipped, 'new_oper_list': list}
#>>>>>>>>>>> HILL CLIMBL <<<<<<<<<<
def HillClimbing():
# for index, pick in enumerate(now_state['blist']): #遍歷每個位元
# for i in range(0,varibles.objNums-1 ,1): #遍歷每兩位元
new_list = now_state['blist'].copy()
now_v = now_state['value']
front = int(varibles.objNums / 2);
front_list = new_list[:front] #前半
end_list = new_list[front:] #後半
flag1 = False; flag2 = False #加/減有無找到鄰居
# 找鄰居:
# 將binary數分成前半、後半(權重輕、權重重)(已由小 → 大排序))
# 〔加一個,減一個〕操作都要從【前半、後半】中選一個(每次由隨機機率決定)
# 若剛好前半、後半都無法再加減
# 則隨機取一位元做翻轉
doAdd = random.random() #前半、後半誰要做加
doDel = random.random() #前半、後半誰要做減
#加------
if doAdd > 0.5: #重的+1
res = delOrAdd_HeavyOrLight(end_list, 0) #add Heavy
end_list = res['new_oper_list']
flag1 = res['flag']
else: #輕的+1
res = delOrAdd_HeavyOrLight(front_list, 0) #add Light
front_list = res['new_oper_list']
flag1 = res['flag']
#減-------
if doDel > 0.5: #重的-1
res = delOrAdd_HeavyOrLight(end_list, 1) #del Heavy
end_list = res['new_oper_list']
flag2 = res['flag']
else: #輕的-1
res = delOrAdd_HeavyOrLight(front_list, 1) #del Light
front_list = res['new_oper_list']
flag2 = res['flag']
new_list = front_list + end_list
#都失敗-----
if (flag1 or flag2) == 0: #隨機翻轉一位元
idx = random.randrange(0, varibles.objNums)
new_list[new_list] = (not new_list[idx])
(w, v) = calTotalWandV(new_list)
if w <= varibles.capcity: #合法
if v > now_v: #新better than 舊 => 交換
UpdateNowState(new_list, w, v)
return (now_state)
#>>>>>>>>>>> Simulation Annealing <<<<<<<<<<
def SimulationAnnealing():
global best_state
T0 = 200 #初始溫度 (影響解的搜索範圍)
TF = 10 #臨界溫度
RATIO = 0.95 #收斂速度 (過快較可能找不到最佳解)
t = T0
while t >= TF:
exe_time = random.randrange(int(varibles.objNums/2), int(varibles.objNums)) #該溫度要做幾次 (1-n)
for index in range(exe_time):
(now_w, now_v) = calTotalWandV(now_state['blist'])
#生成 neighbors(test)
test_list = now_state['blist'].copy()
test_list[index] = int(not test_list[index])
(test_w, test_v) = calTotalWandV(test_list)
#best更新
if test_w > varibles.capcity: continue #非法,跳過
if test_w <= varibles.capcity: #合法
if test_v > best_state['value']: #新better than 舊
best_state['blist'] = test_list
best_state['weight'] = test_w
best_state['value'] = test_v
#now更新
if test_v > now_v : #優於當前解 -> 更新
UpdateNowState(test_list, test_w, test_v)
else: #由機率判斷
proba = float(test_v - now_v) / t
if(random.random() < math.exp(proba)):
UpdateNowState(test_list, test_w, test_v)
t *= RATIO
return (best_state)
| lanac0911/deepLearning | Knapsack/compoents.py | compoents.py | py | 5,624 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
from flask import Blueprint, Response, g
# Flask blueprint exposing template helpers for this Tarbell project.
blueprint = Blueprint('property-tax-assessments-broken-model-scatterplots', __name__)
# Registered as the `get_length` Jinja template filter.
@blueprint.app_template_filter('get_length')
def get_length(thing):
    """Return len(thing) for any sized object (template filter `get_length`)."""
    return len(thing)
"""
Tarbell project configuration
"""
# Google spreadsheet key
SPREADSHEET_KEY = "1fRWsDwi4-lmdS6r61JB44Zrlb6GR9-21DTpQkgTPBdw"
# Exclude these files from publication
EXCLUDES = ['*.md', '*.ai', 'requirements.txt', 'node_modules', 'sass', 'js/src', 'package.json', 'Gruntfile.js']
# Spreadsheet cache lifetime in seconds. (Default: 4)
# SPREADSHEET_CACHE_TTL = 4
# Create JSON data at ./data.json, disabled by default
# CREATE_JSON = True
# Get context from a local file or URL. This file can be a CSV or Excel
# spreadsheet file. Relative, absolute, and remote (http/https) paths can be
# used.
# CONTEXT_SOURCE_FILE = ""
# EXPERIMENTAL: Path to a credentials file to authenticate with Google Drive.
# This is useful for for automated deployment. This option may be replaced by
# command line flag or environment variable. Take care not to commit or publish
# your credentials file.
# CREDENTIALS_PATH = ""
# S3 bucket configuration
S3_BUCKETS = {
# Provide target -> s3 url pairs, such as:
# "mytarget": "mys3url.bucket.url/some/path"
# then use tarbell publish mytarget to publish to it
"production": "apps.chicagotribune.com/property-tax-assessments-broken-model-scatterplots",
"staging": "apps.beta.tribapps.com/property-tax-assessments-broken-model-scatterplots",
}
# Default template variables
DEFAULT_CONTEXT = {
'name': 'property-tax-assessments-broken-model-scatterplots',
'title': 'Property tax assessments - How the model broke',
'OMNITURE': {
'domain': 'chicagotribune.com',
'section': 'news',
'sitename': 'Chicago Tribune',
'subsection': 'watchdog',
'subsubsection': '',
'type': 'dataproject'
}
}
from PIL import Image
# Load the input image as 8-bit greyscale ('L' mode).
img = Image.open("PDI/folha.png").convert('L')
adj = 8 # 4 -> 4-adjacency, 8 -> m-adjacency (original comment said "adj-8", presumably a typo for adj-4)
# visited[x][y]: per-pixel flag, indexed (column, row) like img.getpixel.
visited = [[False for col in range(img.height)] for row in range(img.width)]
# Neighbour offsets: first 4 entries are the 4-neighbourhood, all 8 the 8-neighbourhood.
dx = [0, 0, 1, -1, 1, 1, -1, -1]
dy = [1, -1, 0, 0, 1, -1, 1, -1]
def isEdge(x, y):
    """Return True when pixel (x, y) touches at least one black (0) neighbour.

    NOTE(review): neighbours are probed without a bounds check, so calling
    this on a border pixel can fail — callers must keep offsets in range.
    """
    return any(img.getpixel((x + dx[k], y + dy[k])) == 0 for k in range(adj))
def dfs(i, j):
    """Iterative flood fill from white pixel (i, j).

    Follows white (255) pixels that lie on the border of the black region,
    marking them visited and painting them grey (128) in `img`.
    NOTE(review): the inner `for i in range(adj)` loop shadows the parameter
    `i`; harmless here because the seed coordinates are only read via `st`.
    """
    st = [(i, j)]
    while(len(st) > 0):
        pixel = st.pop()
        x = pixel[0]
        y = pixel[1]
        if(visited[x][y]):
            continue
        visited[x][y] = True
        for i in range(adj):
            # skip neighbours that fall outside the image
            if(x + dx[i] < 0 or x + dx[i] >= img.width or y + dy[i] < 0 or y + dy[i] >= img.height):
                continue
            # if the neighbour is white and lies on the border, keep tracing
            if(img.getpixel((x + dx[i], y + dy[i])) == 255 and isEdge(x + dx[i], y + dy[i])):
                st.append((x + dx[i], y + dy[i]))
                # paint the traced pixel grey
                img.putpixel((x + dx[i], y + dy[i]), 128)
# Trace the border of every white region.
for i in range(img.width):
    for j in range(img.height):
        # Skip pixels already handled by an earlier flood fill.
        # (was `if(visited == True)`, which compared the whole matrix to True
        # and therefore never skipped anything)
        if(visited[i][j]):
            continue
        if(img.getpixel((i, j)) == 255):
            dfs(i, j)
            visited[i][j] = True

# Recolour: remaining white becomes black, traced (grey) border becomes white.
for i in range(img.width):
    for j in range(img.height):
        if(img.getpixel((i, j)) == 255):
            img.putpixel((i, j), 0)
        elif(img.getpixel((i, j)) == 128):
            img.putpixel((i, j), 255)

img.save("adjacenciam.png")
| Pedroffda/Digital-Image-Processing | Pratica-01/code/adj_border.py | adj_border.py | py | 1,417 | python | en | code | 0 | github-code | 36 |
import pandas as pd
import pickle
import numpy as np
from tqdm import tqdm
import os, sys, inspect
from collections import defaultdict
from processing_utils import make_aggregate_df, make_yearly_df, make_class_df, make_df_by_decade, make_parent_df
# Minimum number of occurrences a word needs to be kept in the output tables.
MIN_FREQ = 20

# folders for reading and writing data
raw_data_folder = '../../data/raw_data/'
output_folder = '../../data/bootstrapped_data/'

# number of bootstrapped instances to create
n_iter = 10000


def filter_min_freq(df):
    """Keep only rows whose `freq` column reaches MIN_FREQ, with a fresh 0..n index."""
    frequent = df["freq"] >= MIN_FREQ
    return df.loc[frequent].reset_index(drop=True)
if __name__ == "__main__":
    # random seed for reproducibility
    np.random.seed(3937)
    # AGGREGATE AND YEARLY ANALYSIS
    # Load the per-age word lists (keys "1".."5") for each speech register:
    # CDS (child-directed speech) and CS (child speech), split by gender.
    with open(raw_data_folder + 'm_words_cds.p', 'rb') as fp:
        m_words_cds = pickle.load(fp)
    with open(raw_data_folder + 'f_words_cds.p', 'rb') as fp:
        f_words_cds = pickle.load(fp)
    with open(raw_data_folder + 'm_words_cs.p', 'rb') as fp:
        m_words_cs = pickle.load(fp)
    with open(raw_data_folder + 'f_words_cs.p', 'rb') as fp:
        f_words_cs = pickle.load(fp)
    # get total word count
    # NOTE(review): `all_words` is accumulated but not used again below.
    all_words = []
    downsample_n_cds = 0
    downsample_n_cs = 0
    for age in range(1,6):
        all_words += m_words_cds[str(age)]
        all_words += f_words_cds[str(age)]
        all_words += m_words_cs[str(age)]
        all_words += f_words_cs[str(age)]
        downsample_n_cds += len(f_words_cds[str(age)]) + len(m_words_cds[str(age)])
        downsample_n_cs += len(f_words_cs[str(age)]) + len(m_words_cs[str(age)])
    # compute the number of words to include per age-gender pair
    # (total count / 10 = 5 ages x 2 genders)
    downsample_n_cds = int(downsample_n_cds / 10)
    downsample_n_cs = int(downsample_n_cs / 10)
    print(f"CDS downsample number: {downsample_n_cds}")
    print(f"CS downsample number: {downsample_n_cs}")
    for iteration in tqdm(range(n_iter)):
        m_cds_ds = {}
        f_cds_ds = {}
        m_cs_ds = {}
        f_cs_ds = {}
        # downsample equally for each age-gender pair
        # (replace=True -> bootstrap resampling with replacement)
        for age in range(1,6):
            m_cds_ds[str(age)] = np.random.choice(m_words_cds[str(age)], size=downsample_n_cds, replace=True)
            f_cds_ds[str(age)] = np.random.choice(f_words_cds[str(age)], size=downsample_n_cds, replace=True)
            m_cs_ds[str(age)] = np.random.choice(m_words_cs[str(age)], size=downsample_n_cs, replace=True)
            f_cs_ds[str(age)] = np.random.choice(f_words_cs[str(age)], size=downsample_n_cs, replace=True)
        # compute yearly word statistics
        df_cds_yearly = make_yearly_df(m_cds_ds, f_cds_ds)
        df_cs_yearly = make_yearly_df(m_cs_ds, f_cs_ds)
        # compute aggregate word statistics
        df_cds_agg = make_aggregate_df(m_cds_ds, f_cds_ds)
        df_cs_agg = make_aggregate_df(m_cs_ds, f_cs_ds)
        # filter out the words that have fewer instances than the minimum frequency
        df_cds_yearly = filter_min_freq(df_cds_yearly)
        df_cs_yearly = filter_min_freq(df_cs_yearly)
        df_cds_agg = filter_min_freq(df_cds_agg)
        df_cs_agg = filter_min_freq(df_cs_agg)
        # save data to csv (one file set per bootstrap iteration)
        df_cds_yearly.to_csv(f'{output_folder}/df_cds_yearly_bs{iteration}.csv')
        df_cs_yearly.to_csv(f'{output_folder}/df_cs_yearly_bs{iteration}.csv')
        df_cds_agg.to_csv(f'{output_folder}/df_cds_agg_bs{iteration}.csv')
        df_cs_agg.to_csv(f'{output_folder}/df_cs_agg_bs{iteration}.csv')
| benpry/gender-associations-child-language | code/bootstrap/bootstrap_aggregate_yearly.py | bootstrap_aggregate_yearly.py | py | 3,396 | python | en | code | 0 | github-code | 36 |
# osandvold
# 5 Jul 2022
# Adapted script from checkData_UPENN.m script from Heiner (Philips)
# for validating and reading log data from DMS of CT Benchtop
import numpy as np
import pandas as pd
import glob
import os.path
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib.widgets import Button, RadioButtons, CheckButtons
# Script adapted from Heiner to read DMS data (.dat) which is a binary file
# Each projection contains a header 64 entries (unit16) and detector data
# of size (nrows * (ncols+8)) = 16 * (672+8)
# Example: acquiring 4000 views will mean you have 4000 headers and
# 4000x the detector data
# Function to read .dat file and return dcm (dms data)
def read_dms_dat_nrows(dat_fname):
    """Read a DMS .dat binary file and return its contents as a dict.

    Layout: the file is a stream of uint16 values.  Each view (projection)
    consists of a 64-entry header followed by nRows * (nCols + 8) detector
    values; the very first byte of the file encodes the number of rows.

    Returns a dict with the decoded projection data ('FloatProjectionData',
    'FloatProjectionDataHz') and per-view header fields (kV, mA, grid
    voltages, temperatures, rotation angle, IP counter, integration period).
    """
    dcm = {}
    nCols = 672
    tailer = 8   # each detector row is followed by 8 trailer values
    header = 64  # uint16 entries in the per-view header
    # Read the row count from the first byte; close the handle immediately
    # (the original left the file open for the process lifetime).
    with open(dat_fname, 'rb') as file_id:
        nRows = file_id.read(1)[0]
    print(f'Number of detected rows: {nRows}')
    # read the whole binary file as unsigned 16-bit values
    cirs_data = np.fromfile(dat_fname, dtype='uint16')
    bytes_per_view = (nCols + tailer) * nRows + header  # uint16 entries per view
    nViews = int(np.floor(len(cirs_data) / bytes_per_view))
    print(f'Number of detected views: {nViews}')
    headers = np.zeros((nViews, 64), dtype=float)
    # collect the header information for every view
    for counter in range(nViews):
        headers[counter, :] = cirs_data[counter*bytes_per_view + np.arange(64)]
    projData = np.zeros((nCols, nRows, nViews))
    projDataHz = np.zeros((nCols, nRows, nViews))
    # collect the projection data, one detector row at a time
    # (the unused projDataRaw buffer from the original was removed)
    for v in range(nViews):
        for c in range(nRows):
            ind = header + v*bytes_per_view + (nCols+tailer)*c + np.arange(nCols)
            # raw values are log-encoded with 2048 counts per factor of 2
            projData[:, c, v] = np.exp(-1 * cirs_data[ind] / 2048 * np.log(2))
            projDataHz[:, c, v] = 8e6 * (2 ** (-cirs_data[ind] / 2048))
    # create output structure/dictionary
    dcm['FloatProjectionData'] = projData
    dcm['FloatProjectionDataHz'] = projDataHz
    dcm['IntegrationPeriod'] = headers[:, 3] * 0.125  # microseconds
    dcm['kV'] = headers[:, 50]
    dcm['mA'] = headers[:, 49]
    dcm['gridV1'] = headers[:, 42]
    dcm['gridV2'] = headers[:, 43]
    dcm['LeftDMSTemperature'] = headers[:, 5] * 1/16
    dcm['RightDMSTemperature'] = headers[:, 6] * 1/16
    dcm['RotationAngle'] = headers[:, 18] * 360/13920
    dcm['IPcounter'] = headers[:, 17]
    return dcm
# Read the ; delimited csv log file and save as a table
# Header:
# Timestamp[us]; Grid_1[V]; Grid_2[V]; Anode_Voltage[V]; Cathode_Voltage[V];
# High_Voltage[V]; Analog_In_2[mV]; Analog_In_3[mV]; HighVoltage[0,1];
# HighGrid[0,1]; Resend[0,1]; X_Act_S[0,1]; TH_Rising[0,1]; TH_Falling[0,1];
# Phase[3Bit]; IP_Counter[16Bit]; Phantom_Index[0,1]; Phantom_AP0[0,1];
# Phantom_AP90[0,1]; DMS_AP0[0,1]; DMS_AP90[0,1]; DMS_Even_Rea[0,1]
def read_log(log_fname):
    """Load a semicolon-delimited benchtop log file into a DataFrame."""
    return pd.read_csv(log_fname, sep=';')
# Send in a directory path with the .dat and .csv file
# Output:
# - Will display the DMS data
# - Will display the wave form of the data
# - Will print a PNG to the same data directory
def check_data(data_path):
    """Validate and visualize one DMS acquisition.

    Expects *data_path* to contain exactly one .dat (DMS projections) and
    one .csv (benchtop log) file.  Opens matplotlib figures for the mean
    projection image, an overview of IP histogram / kVp / photodiode
    signals, per-pixel profiles and histograms.  Always returns 0.
    """
    paths = check_contents(data_path)
    # Exit if the csv and dat file are not in the same directory
    if len(paths) == 0:
        print('Must have only one dat and one log file in the same directory')
        return 0
    logdata = read_log(paths[0])
    dcm = read_dms_dat_nrows(paths[1])
    row = 40  # detector row inspected in the per-row plots (set in the original script)
    # where to grab data: skip the first 0.4 s of 1-us log samples
    indStart = round(0.4/(1e-6))  # 400000
    indEnd = len(logdata.loc[:,'Timestamp[us]'])-100
    # compute correct time axis
    timeLog = np.arange(1, len(logdata.loc[:,'Timestamp[us]'])) * 1.0e-6
    KV = np.transpose(round(logdata.loc[:,'High_Voltage[V]'][indStart:indEnd]/1000))
    PD = np.transpose(-logdata.loc[:,'Analog_In_2[mV]'][indStart:indEnd])
    time = timeLog[indStart:indEnd] - timeLog[indStart]
    IPsignal = logdata.loc[:,'DMS_Even_Rea[0,1]'][indStart:indEnd]
    nx, ny, nz = dcm['FloatProjectionData'].shape
    print(f'Shape of projection data: ({nx}, {ny}, {nz})')
    # plot the data from the dms
    plt.figure()
    plt.imshow(np.transpose(np.mean(dcm['FloatProjectionData'], 2)), vmax=0.05)
    # plot the signals
    plt.figure(figsize=(11.5, 6))
    plt.subplot2grid((2, 4), (0, 0))
    # odd views carry the low-energy frames, even views the high-energy frames
    low_ip = dcm['IntegrationPeriod'][1:-1:2]
    high_ip = dcm['IntegrationPeriod'][2:-1:2]
    bins = np.linspace(200,600,100)
    plt.hist(low_ip, bins, alpha=0.5, label='low')
    plt.hist(high_ip, bins, alpha=0.5, label='high')
    plt.xlabel('IP [us]')
    plt.ylabel('frequency')
    plt.legend()
    plt.title('IP')
    # TODO: print the mode (IP) of each peak on the graph
    # plot the projection for a single detector at row 40, col 341
    plt.subplot2grid((2, 4), (0, 1))
    ys = dcm['FloatProjectionData'][341, row, :].squeeze()
    plt.plot(ys)
    plt.xlim([150, 200])  # views ranges from 1-nViews
    plt.title('Profile of projection at (40,341)')
    plt.xlabel('view')
    plt.ylabel('DMS signal for single pixel')
    plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
    plt.subplot2grid((2, 4), (0, 2), colspan=2)
    plt.imshow(np.transpose(dcm['FloatProjectionData'][:, row, 1:-1:2].squeeze()),
               vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
    plt.title('dms low')
    plt.xlabel('columns')
    plt.ylabel('views')
    plt.xlim([1, 672])
    plt.ylim([nz/2-99, nz/2])
    # Will see either switching or no switching
    plt.subplot2grid((2, 4), (1, 0))
    plt.plot(time, KV)
    plt.plot(time, np.transpose(IPsignal*max(KV)))
    plt.title('Generator kVp')  # fixed typo: was 'Genrator kVp'
    plt.xlabel('time')
    plt.ylabel('voltage [kVp]')
    plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
    plt.ylim([60, 150])
    plt.grid()
    # reference diode detector
    plt.subplot2grid((2, 4), (1, 1))
    plt.plot(time, PD)
    plt.title('photodiode signal')
    plt.xlabel('time')
    plt.ylabel('voltage [mV]')
    plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
    plt.grid()
    plt.subplot2grid((2, 4), (1, 2), colspan=2)
    plt.imshow(np.transpose(dcm['FloatProjectionData'][:,row, 2:-1:2].squeeze()),
               vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
    plt.title('dms high')
    plt.xlabel('columns')
    plt.ylabel('views')
    plt.xlim([1, 672])
    plt.ylim([nz/2-99, nz/2])
    plt.tight_layout()
    # Save the png figure to the same data_path
    # plt.savefig()
    # display profiles
    plt.figure(figsize=(11.5, 6))
    plt.subplot(2,2,1)
    plt.plot(dcm['FloatProjectionDataHz'][342,row,1:-1:2].squeeze())
    plt.xlim([90, 672])
    plt.title('profiles low (40, 342)')
    plt.subplot(2,2,2)
    plt.plot(dcm['FloatProjectionDataHz'][342,row,2:-1:2].squeeze())
    plt.xlim([90, 672])
    plt.title('profiles high (40, 342)')
    plt.subplot(2,2,3)
    plt.plot(dcm['FloatProjectionDataHz'][600,row,1:-1:2].squeeze())
    plt.xlim([90, 672])
    plt.title('profiles low (40,600)')
    plt.subplot(2,2,4)
    plt.plot(dcm['FloatProjectionDataHz'][600,row,2:-1:2].squeeze())
    plt.xlim([90, 672])
    plt.title('profiles high (40,600)')
    # plt.savefig()
    # show the histograms
    plt.figure()
    plt.subplot(1,2,1)
    data = dcm['FloatProjectionData'][342,row,101:-1:2]
    plt.hist(data,1000)
    plt.title('histogram low data')
    plt.subplot(1,2,2)
    data = dcm['FloatProjectionData'][342,row,102:-1:2]
    plt.hist(data,1000)
    plt.title('histogram high data')
    return 0
# TODO: add foldername as an input path, or use a defult ./results path
def display_main_figure(paths, foldername):
    """Build the interactive overview figure for one acquisition.

    paths is [log_csv_path, dat_path] (as returned by check_contents);
    foldername is the intended save location for the figure (saving is
    currently disabled).  Each subplot gets an "Enlarge" button that
    re-opens the same plot in its own full-size window.
    """
    # read the data
    logdata = read_log(paths[0])
    dcm = read_dms_dat_nrows(paths[1])
    row = 40  # detector row inspected in the per-row plots (set in the original script)
    # where to grab data: skip the first 0.4 s of 1-us log samples
    indStart = round(0.4/(1e-6))  # 400000
    indEnd = len(logdata.loc[:,'Timestamp[us]'])-100
    # compute correct time axis
    timeLog = np.arange(1, len(logdata.loc[:,'Timestamp[us]'])) * 1.0e-6
    KV = np.transpose(round(logdata.loc[:,'High_Voltage[V]'][indStart:indEnd]/1000))
    PD = np.transpose(-logdata.loc[:,'Analog_In_2[mV]'][indStart:indEnd])
    time = timeLog[indStart:indEnd] - timeLog[indStart]
    IPsignal = logdata.loc[:,'DMS_Even_Rea[0,1]'][indStart:indEnd]
    nx, ny, nz = dcm['FloatProjectionData'].shape
    # plot the signals
    plt.figure(figsize=(11.5, 6))
    plt.subplot2grid((2, 4), (0, 0))
    # odd views carry the low-energy frames, even views the high-energy frames
    low_ip = dcm['IntegrationPeriod'][1:-1:2]
    high_ip = dcm['IntegrationPeriod'][2:-1:2]
    bins = np.linspace(200,600,100)
    plt.hist(low_ip, bins, alpha=0.5, label='low')
    plt.hist(high_ip, bins, alpha=0.5, label='high')
    plt.xlabel('IP [us]')
    plt.ylabel('frequency')
    plt.legend()
    plt.title('IP')
    # plot the projection for a single detector at row 40, col 341
    plt.subplot2grid((2, 4), (0, 1))
    ys = dcm['FloatProjectionData'][341, row, :].squeeze()
    plt.plot(ys)
    plt.xlim([150, 200])  # views ranges from 1-nViews
    plt.title('Profile of projection at (40,341)')
    plt.xlabel('view')
    plt.ylabel('DMS signal for single pixel')
    plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
    plt.subplot2grid((2, 4), (0, 2), colspan=2)
    plt.imshow(np.transpose(dcm['FloatProjectionData'][:, row, 1:-1:2].squeeze()),
               vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
    plt.title('dms low')
    plt.xlabel('columns')
    plt.ylabel('views')
    plt.xlim([1, 672])
    plt.ylim([nz/2-99, nz/2])
    # Will see either switching or no switching
    plt.subplot2grid((2, 4), (1, 0))
    plt.plot(time, KV)
    plt.plot(time, np.transpose(IPsignal*max(KV)))
    plt.title('Generator kVp')  # fixed typo: was 'Genrator kVp'
    plt.xlabel('time')
    plt.ylabel('voltage [kVp]')
    plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
    plt.ylim([60, 150])
    plt.grid()
    # reference diode detector
    plt.subplot2grid((2, 4), (1, 1))
    plt.plot(time, PD)
    plt.title('photodiode signal')
    plt.xlabel('time')
    plt.ylabel('voltage [mV]')
    plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
    plt.grid()
    plt.subplot2grid((2, 4), (1, 2), colspan=2)
    plt.imshow(np.transpose(dcm['FloatProjectionData'][:,row, 2:-1:2].squeeze()),
               vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
    plt.title('dms high')
    plt.xlabel('columns')
    plt.ylabel('views')
    plt.xlim([1, 672])
    plt.ylim([nz/2-99, nz/2])
    plt.tight_layout()
    # TODO:
    # plt.savefig(foldername) UNCOMMENT THIS AT THE END
    # button for IP
    ip_button = plt.axes([0.17, 0.535, 0.04, 0.02])  # xposition,yposition,width,height
    enlarge_button = Button(ip_button, "Enlarge", color="white", hovercolor="green")
    def enlarge(val):
        """Re-plot the IP histogram in its own full-size window."""
        plt.figure(figsize=(11.5, 6))
        plt.subplot2grid((1, 1), (0, 0))
        low_ip = dcm['IntegrationPeriod'][1:-1:2]
        high_ip = dcm['IntegrationPeriod'][2:-1:2]
        bins = np.linspace(200,600,100)
        plt.hist(low_ip, bins, alpha=0.5, label='low')
        plt.hist(high_ip, bins, alpha=0.5, label='high')
        plt.xlabel('IP [us]')
        plt.ylabel('frequency')
        plt.legend()
        plt.title("IP")
        plt.show()
    enlarge_button.on_clicked(enlarge)
    # button for Generator kVp
    kvp_button = plt.axes([0.17, 0.045, 0.04, 0.02])  # xposition,yposition,width,height
    enlarge_button2 = Button(kvp_button, "Enlarge", color="white", hovercolor="green")
    def enlarge2(val):
        """Re-plot the generator kVp trace in its own full-size window."""
        plt.figure(figsize=(11.5, 6))
        plt.subplot2grid((1, 1), (0, 0))
        plt.plot(time, KV)
        plt.plot(time, np.transpose(IPsignal*max(KV)))
        plt.xlabel('time')
        plt.ylabel('voltage [kVp]')
        plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
        plt.ylim([60, 150])
        plt.grid()
        plt.title("Generator kVp")
        plt.show()
    enlarge_button2.on_clicked(enlarge2)
    # button for profile of projection
    proj_button = plt.axes([0.425, 0.535, 0.04, 0.02])  # xposition,yposition,width,height
    enlarge_button3 = Button(proj_button, "Enlarge", color="white", hovercolor="green")
    def enlarge3(val):
        """Re-plot the single-pixel projection profile in its own window."""
        plt.figure(figsize=(11.5, 6))
        # BUGFIX: was subplot2grid((0, 0), (0, 0)), which raises because
        # the grid shape must be at least 1x1.
        plt.subplot2grid((1, 1), (0, 0))
        ys = dcm['FloatProjectionData'][341, row, :].squeeze()
        plt.plot(ys)
        plt.xlim([150, 200])  # views ranges from 1-nViews
        plt.xlabel('view')
        plt.ylabel('DMS signal for single pixel')
        plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
        plt.title("Profile of projection")
        plt.show()
    enlarge_button3.on_clicked(enlarge3)
    # button for photodiode signal
    photodi_button = plt.axes([0.425, 0.045, 0.04, 0.02])  # xposition,yposition,width,height
    enlarge_button4 = Button(photodi_button, "Enlarge", color="white", hovercolor="green")
    def enlarge4(val):
        """Re-plot the photodiode trace in its own full-size window."""
        plt.figure(figsize=(11.5, 6))
        plt.subplot2grid((1, 1), (0, 0))
        plt.plot(time, PD)
        plt.xlabel('time')
        plt.ylabel('voltage [mV]')
        plt.xlim([0.1, 0.102])  # time ranges from time[0] to time[-1]
        plt.grid()
        plt.title("photodiode signal")
        plt.show()
    enlarge_button4.on_clicked(enlarge4)
    plt.show()
# Checks the directory path has one .dat and one .csv file
# Returns the path to those files in an array if true
# or an empty array if false
def check_contents(data_path):
    """Return [log_csv_path, dat_path] when data_path holds exactly one
    .csv and exactly one .dat file; otherwise report the contents and
    return an empty list."""
    log_files = glob.glob(os.path.join(data_path, '*.csv'))
    dat_files = glob.glob(os.path.join(data_path, '*.dat'))
    if len(log_files) == 1 == len(dat_files):
        return log_files + dat_files
    # Diagnostics: show what was actually found in the directory.
    print(f'Directory {data_path} contains: ')
    print(f'- Log files: {log_files}')
    print(f'- Dat files: {dat_files}')
    return []
# check_data('E:/CT_BENCH/2022-06-24_17_34_15-Edgar-140kv_100mAs')
# check_data('E:/CT_BENCH/data/2022_07_15/smaller_col/')
# plt.show()
# data = read_log('E:/CT_BENCH/2022-07-13/2022_07_13_UPENN_140kVp_80kVp_1150V_705V_330mA_285ms_thresh_94kV_tkeep_40-test_photodiode_13 Jul 2022_14_39_17_converted_.csv')
# print(data.columns)
# print(data.loc[102900:300000, 'Analog_In_3[mV]'])
| chan-andrew/LACTI | check_data.py | check_data.py | py | 14,721 | python | en | code | 0 | github-code | 36 |
39268497138 | # coding:utf-8
'''
@Copyright:LintCode
@Author: cong liu
@Problem: http://www.lintcode.com/problem/add-two-numbers
@Language: Python
@Datetime: 16-12-16 23:40
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param l1: the first list
    # @param l2: the second list
    # @return: the sum list of l1 and l2
    def addLists(self, l1, l2):
        """Add two non-negative numbers stored as linked lists with the
        least-significant digit first, returning the sum as a new list.

        Fixes the Python-3 bug in the original: `/` produced a float
        carry; `divmod` keeps the carry an int.  The three near-identical
        loops are merged into one that drains both lists and the carry.
        """
        dummy = ListNode(0)  # sentinel head; result starts at dummy.next
        prev = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            prev.next = ListNode(digit)
            prev = prev.next
        return dummy.next
25107350000 | from OpenGL.GL import *
from Masks import *
from ObjectLoader import ObjectLoader
from Model import Model
from Shaders.ColoredObjectShader import ColoredObjectShader
import pyrr
class MazeCore:
    """Loads the maze mesh and renders it in two face-culling passes.

    Depth writes are disabled during rendering and back faces are drawn
    before front faces -- presumably so the semi-transparent maze
    (``alpha_value``) blends in a consistent order; TODO confirm against
    the shader's blend setup.
    """
    def __init__(self, shader : ColoredObjectShader):
        # self.transparentShader = TransparentShader()
        # self.shader = ColoredObjectShader()
        self.loadModels(shader)
        # Maze sits at the world origin (translation by the zero vector).
        self.modelMatrix = self.__getModelMatrix([0, 0, 0])
        # Alpha passed to the shader on every render call.
        self.alpha_value = 0.5
    def loadModels(self, shader : ColoredObjectShader):
        """Load the "fmaze" model (coordinates, colors, indices) from the
        maze/ directory and upload it into ``self.mazeModel``."""
        mazeLoader = ObjectLoader(VERTEX_COORDINATES | VERTEX_COLORS, INDICES)
        mazeLoader.loadModel("maze/", "fmaze")
        self.mazeModel = Model()
        self.mazeModel.pushData(
            shader,
            VERTEX_COORDINATES = mazeLoader.vertex_coords_array,
            VERTEX_COLORS = mazeLoader.vertex_colors_array,
            INDICES = mazeLoader.indices
        )
    # Assume shader is binded
    def render(self, camera, shader : ColoredObjectShader):
        """Draw the maze: upload matrices/alpha, then draw back faces
        followed by front faces with depth writes disabled."""
        # self.transparentShader.bind()
        # shader.bind()
        self.mazeModel.bindVAO()
        shader.setModelMatrix(self.modelMatrix)
        shader.setViewMatrix(camera.get_view_matrix())
        shader.setProjectionMatrix(camera.get_projection_matrix())
        shader.setAlphaValue(self.alpha_value)
        # Disable depth writes while both passes are drawn.
        glDepthMask(GL_FALSE);
        glEnable(GL_CULL_FACE)
        # Pass 1: cull front faces, i.e. draw only the back faces.
        glCullFace(GL_FRONT)
        glDrawElements(GL_TRIANGLES, self.mazeModel.indicesCount, GL_UNSIGNED_INT, None)
        # Pass 2: cull back faces, i.e. draw only the front faces.
        glCullFace(GL_BACK)
        glDrawElements(GL_TRIANGLES, self.mazeModel.indicesCount, GL_UNSIGNED_INT, None)
        glDisable(GL_CULL_FACE)
        glDepthMask(GL_TRUE)
    def __getModelMatrix(self, position):
        """Return a 4x4 translation matrix placing the maze at *position*."""
        return pyrr.matrix44.create_from_translation(pyrr.Vector3(position))
| VolodymyrVakhniuk/Pacman | src/Core/MazeCore.py | MazeCore.py | py | 1,878 | python | en | code | 1 | github-code | 36 |
def twoSum(nums, target):
    """Return indices [i, j] of two entries in nums that sum to target.

    Single pass with a value->index map: O(n) time, O(n) space.  The
    index found later in the scan comes first in the result, matching
    the original implementation.  Returns None when no pair exists.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    seen = {}
    for i, value in enumerate(nums):
        remaining = target - value
        if remaining in seen:
            return [i, seen[remaining]]
        seen[value] = i
# def isPalindrome(x):
# """
# :type x: int
# :rtype: bool
# """
# try:
# if x == int(str(x)[::-1]):
# return True
# else:
# return False
# except:
# return False
#
# print(isPalindrome(525))
def search(nums, target):
    """Binary search: return the index of target in sorted nums, or -1.

    Debug print statements from the original were removed.

    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    left, right = 0, len(nums) - 1
    while left <= right:
        mid = (left + right) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    return -1
# left, right = 0, len(nums) - 1
# while left <= right:
# pivot = left + (right - left) // 2
# if nums[pivot] == target:
# return pivot
# if target < nums[pivot]:
# right = pivot - 1
# else:
# left = pivot + 1
# return -1
# def main():
# a = [-1, 0, 3, 5, 9, 12]
# b = 13
# print(search(a, b))
#
# main()
#
#
# def firstBadVersion(self, n):
# """
# :type n: int
# :rtype: int
# """
# left = 0
# right = n
#
# while (left <= right):
# mid = (left + right) // 2
# if isBadVersion(mid):
# if not isBadVersion(mid -1):
# return mid
# else:
# right = mid - 1
# else:
# left = mid + 1
# def searchInsert(self, nums, target):
# """
# :type nums: List[int]
# :type target: int
# :rtype: int
# """
# def containsDuplicate(nums):
# """
# :type nums: List[int]
# :rtype: bool
# """
# seen = {}
# if len(nums) == 1:
# return False
# else:
# for i, value in enumerate(nums):
# print("i: ",i)
# print("value", value)
# print("num len: ", len(nums))
# print("seen: ", seen)
# if value in seen and i <= len(nums):
# return True
# else:
# seen[value] = i
#
#
# print(containsDuplicate([1, 2, 3, 4, 5, 6, 7]))
def insertionSort(nums):
    """Sort nums in place with insertion sort, then print the result."""
    for pos in range(1, len(nums)):
        current = nums[pos]  # element being inserted into the sorted prefix
        j = pos
        # Shift larger sorted elements one slot right until the gap is
        # where `current` belongs.
        while j > 0 and nums[j - 1] > current:
            nums[j] = nums[j - 1]
            j -= 1
        nums[j] = current
    print(nums)
# insertionSort([1, 3, 7, 4, 5, 2, 6,8, 13, 8,3, 9])
# The key idea: while the element being inserted is smaller than the element
# before it, keep shifting that element one slot to the right; once the
# condition fails, the insertion point is found and one more element of the
# array counts as processed.
# The outer loop scans the array from the second element to the end.
# `key` holds the element currently being inserted; its value never changes
# while comparing backwards through the already-sorted prefix.
# The while loop shifts larger sorted elements rightward, moving toward the
# front of the array until the correct slot for `key` is reached.
#
# Given a sorted array of distinct integers and a target value
# return the index if the target is found.
# If not, return the index where it would be if it were inserted in order.
# You must write an algorithm with O(log n) runtime complexity.
def searchPosition(nums, target):
    """Return the index of target in sorted nums, or the index where it
    would be inserted to keep nums sorted (bisect_left semantics).

    O(log n).  Fixes the original's crash on an empty list (it indexed
    nums[0] unconditionally) and replaces the ad-hoc `pos` bookkeeping
    with a standard half-open binary search, which returns the same
    position for sorted distinct inputs.

    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    left, right = 0, len(nums)
    while left < right:
        mid = (left + right) // 2
        if nums[mid] < target:
            left = mid + 1
        else:
            right = mid
    return left
print(searchPosition([1, 3, 5, 6], 4))
| mrdaiking/leetcode_challenge | main.py | main.py | py | 4,854 | python | vi | code | 0 | github-code | 36 |
2358896940 | """empty message
Revision ID: bc7b1dd477a1
Revises: d7ad318a76d9
Create Date: 2021-08-24 19:22:40.497985
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bc7b1dd477a1'
down_revision = 'd7ad318a76d9'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add ``aboutme.user_id`` with a FK to ``user.id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('aboutme', sa.Column('user_id', sa.Integer(), nullable=True))
    # NOTE(review): passing None lets the backend auto-name the FK constraint;
    # SQLite cannot ADD CONSTRAINT outside batch mode -- confirm the target DB.
    op.create_foreign_key(None, 'aboutme', 'user', ['user_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the FK constraint and the ``user_id`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) fails on most backends because
    # the auto-generated constraint name is unknown -- verify this downgrade
    # actually runs against the target database.
    op.drop_constraint(None, 'aboutme', type_='foreignkey')
    op.drop_column('aboutme', 'user_id')
    # ### end Alembic commands ###
| nickcao123456/blog | migrations/versions/bc7b1dd477a1_.py | bc7b1dd477a1_.py | py | 788 | python | en | code | 0 | github-code | 36 |
14032964317 | from django.http import JsonResponse, HttpResponse
from .models import Template, Field
import re
from .utils import check_db
def post(request):
    """Detect the types of the submitted GET fields and look up a template.

    Returns the matched Template as a plain-text response, or the
    name->detected-type mapping as JSON when no field matches any stored
    Field row.
    """
    check_db()  # ensure reference data is present before querying
    form = type_form(request.GET)  # map each GET parameter name to its detected type
    name = ""
    for name_f, val in form.items():
        res = Field.objects.filter(name_field=name_f, type_field=val).values_list()
        if not res:
            continue
        # NOTE(review): res[0][1] is assumed to be the template foreign key
        # (second column of the values_list tuple) -- confirm against the
        # Field model.  If several fields match different templates, the
        # last match wins.
        name = Template.objects.get(id=res[0][1])
    if not name:
        return JsonResponse(form)
    return HttpResponse(name)
def validate(val):
    """Classify a string value as 'email', 'date', 'phone' or 'text'.

    Patterns are tried in order; the first full match wins.
    """
    validators = {
        # BUGFIX: the dot before the TLD was unescaped, so any character
        # matched there (e.g. "a@bXcom" classified as email).
        'email': r'^\S+@\w+\.\w{2,4}$',
        'date': r'^\d\d\.\d\d\.\d{4}$',       # DD.MM.YYYY
        'phone': r'^79\s*\d{2}\s*\d{3}\s*\d{2}\s*\d{2}$',  # Russian mobile, spaces allowed
    }
    for kind, regex in validators.items():
        if re.fullmatch(regex, val):
            return kind
    return 'text'
def type_form(d):
    """Map each field name in the submitted form to its detected value type."""
    # QueryDict.dict() collapses multi-valued keys to their last value.
    plain = d.dict()
    return {field: validate(raw) for field, raw in plain.items()}
| krlns/forms_template | test_task/app/views.py | views.py | py | 924 | python | en | code | 0 | github-code | 36 |
20543243144 | import jax
import numpy as np
from jax import lax
from jax import numpy as jnp
def gaussian(x, sigma):
    """Evaluate the normal pdf with mean 0 and standard deviation sigma at x."""
    normalization = sigma * np.sqrt(2 * np.pi)
    return np.exp(-(x**2) / (2 * sigma**2)) / normalization
def make_gaussian_kernel(n, sigma, dx=0.001):
    """Return a length-n 1-D Gaussian kernel normalized to sum to 1.

    The Gaussian is sampled on a fine symmetric grid of spacing dx and
    integrated (trapezoid rule) over each of the n unit cells, so each
    entry is the cell-averaged density rather than a point sample.
    """
    assert n % 2 == 1  # the kernel needs a central cell
    fine_grid = np.arange((-n + dx) / 2, n / 2, dx)
    samples = gaussian(fine_grid, sigma).reshape((n, -1))
    cell_integrals = np.trapz(samples, dx=dx, axis=-1)
    # Force exact unit mass (it would hold anyway for n >> sigma).
    return cell_integrals / np.sum(cell_integrals)
def compute_radial_distance_grid(rmax, ndim):
    """Return an ndim-cube of side 2*rmax+1 holding the Euclidean distance
    of each cell midpoint from the central cell."""
    n = 2 * rmax + 1
    # Squared offset of each bin midpoint along one axis.
    offsets_sq = np.arange(-rmax, rmax + 1) ** 2
    # Sum the per-axis squared offsets via broadcasting.
    total = np.zeros((n,) * ndim)
    for axis in range(ndim):
        shape = [1] * ndim
        shape[axis] = n
        total = total + offsets_sq.reshape(shape)
    return np.sqrt(total)
def make_spherical_top_hat(rmax, ndim, normalize=True):
    """Return an ndim top-hat kernel: 1 inside radius rmax, 0 outside.

    When normalize is True the kernel is scaled to sum to 1.
    """
    distances = compute_radial_distance_grid(rmax, ndim)
    hat = (distances <= rmax).astype(float)
    if normalize:
        hat = hat / hat.sum()
    return hat
def conv3d(grid, kernel, padding="SAME"):
    """Convolve a 3-D grid with a 3-D kernel using lax.conv_general_dilated."""
    assert grid.ndim == 3
    assert kernel.ndim == 3
    # Prepend the batch and feature axes the lax convolution expects
    # (N, C, D, H, W) for the input and (O, I, D, H, W) for the kernel.
    batched = grid[jnp.newaxis, jnp.newaxis]
    weights = kernel[jnp.newaxis, jnp.newaxis]
    out = lax.conv_general_dilated(batched,
                                   weights,
                                   window_strides=(1, 1, 1),
                                   padding=padding)
    # Drop the singleton batch/feature axes again.
    return jnp.squeeze(out)
def conv3d_separable(grid, kernels, padding="SAME"):
    """Apply a separable convolution: one 1-D kernel per spatial axis.

    Each kernel is reshaped so it lies along its own axis (singleton
    elsewhere) and applied with conv3d in turn.
    """
    ndim = grid.ndim
    assert len(kernels) == ndim
    for axis, kernel in enumerate(kernels):
        length, = kernel.shape
        kernel_shape = np.ones(ndim, dtype=int)
        kernel_shape[axis] = length
        grid = conv3d(grid, kernel.reshape(kernel_shape), padding)
    return jnp.squeeze(grid)
| cshallue/recon-cnn | recon/smoothing.py | smoothing.py | py | 2,316 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.