| seq_id (string, len 4–11) | text (string, len 113–2.92M) | repo_name (string, len 4–125, nullable) | sub_path (string, len 3–214) | file_name (string, len 3–160) | file_ext (string, 18 classes) | file_size_in_byte (int64, 113–2.92M) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, 0–179k, nullable) | dataset (string, 3 classes) | pt (string, 78 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74051998556 | import csv
import re
import json
INPUT_FNAME = "../webapp/schemas/uploader/v5 CSH Project - Data Specifications - Case Charge Data.tsv"
OUTPUT_FNAME = '../webapp/schemas/uploader/case-charge.json'
PKEY_REGEX = r'\* Row identifier is a combination of (.*) and should be unique to each row'
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
def type_convert(raw_type):
if raw_type in ['text', 'varchar', 'text-uppercase'] or 'char(' in raw_type:
return 'string'
elif raw_type == 'timestamp with timezone':
return 'string'
else:
return raw_type
def constraints(column_name, raw_type, desc, nullable):
constraints = {}
if raw_type == 'timestamp with timezone':
constraints['datetime_with_timezone_hour_only'] = True
if nullable == 'NO':
constraints['required'] = True
enum = enum_from_desc(desc)
if enum:
constraints['enum'] = enum
return constraints
def enum_from_desc(desc):
    match = re.search(r'\((.*, .*)\)$', desc)
if match:
        raw_list = match.group(1).split(', ')
cleaned_list = [
row.split(' = ')[0]
for row in raw_list
]
return cleaned_list
else:
return None
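# Illustrative example (hypothetical description text): a call like
#   enum_from_desc('Charge severity (F = felony, M = misdemeanor)')
# returns ['F', 'M'], while a description without a trailing
# "(..., ...)" list returns None.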
if __name__ == '__main__':
schema = {'fields': []}
with open(INPUT_FNAME) as f:
found_fields = False
reader = csv.reader(f, delimiter='\t')
for row in reader:
if not found_fields and row[0] != 'Data Field Short Description':
if row[0].startswith('* Row identifier'):
match = re.search(PKEY_REGEX, row[0])
if match:
                        pkey_string = match.group(1)
schema['primaryKey'] = pkey_string.split(' + ')
continue
elif not found_fields:
found_fields = True
else:
schema_type = type_convert(row[2])
field_constraints = constraints(row[1], row[2], row[3], row[7])
field = {
'name': row[1],
'type': schema_type,
}
if field_constraints:
field['constraints'] = field_constraints
if schema_type == 'date':
field['format'] = DEFAULT_DATE_FORMAT
schema['fields'].append(field)
with open(OUTPUT_FNAME, 'w') as f:
json.dump(schema, f, indent=4)
print(schema)
| dssg/matching-tool | scripts/development/schema_convert.py | schema_convert.py | py | 2,465 | python | en | code | 7 | github-code | 50 |
74851067356 | from __future__ import division, absolute_import, print_function
from six import string_types
import warnings
from datetime import datetime, timedelta
import numpy as np
from math import sqrt, sin, cos, atan2
from astropy.io import fits
from astropy.wcs.wcs import WCS
import astropy.units as u
import astropy.coordinates as coord
import time
import sys
import astropy.utils.data
from astroquery.query import suspend_cache
try:
from astroquery.vizier import Vizier
except ImportError as e:
print(str(e))
warnings.warn('astroquery not available, .fits.getCatalogStars cannot be used')
def readHeader(filePath):
""" Return the primary FITS header. """
return fits.getheader(filePath, checksum=True)
def writeHeader(filePath, header, overwrite=False):
"""
Creates a new file containing the given FITS header.
:param filePath: Path where FITS header will be written
:param header: FITS header to write
:param overwrite: Raises an error if False and filePath already exists
"""
fits.writeto(filePath, None, header, clobber=overwrite, checksum=True)
def getPixelScale(header):
"""
Returns the pixel scale in degrees/pixel based on the CD matrix of the WCS header.
SIP distortion coefficients (if present) are not considered.
"""
assert header['CUNIT1'] == 'deg' and header['CUNIT2'] == 'deg'
cd11 = header['CD1_1']
cd21 = header['CD2_1']
scale = sqrt(cd11**2 + cd21**2)
return scale
def getRotationAngle(header):
"""
Returns the rotation (roll) angle in degrees based on the CD matrix of the WCS header.
Angle is in [-180,180].
"""
assert header['CUNIT1'] == 'deg' and header['CUNIT2'] == 'deg'
cd11 = header['CD1_1']
cd21 = header['CD2_1']
rho_a = atan2(cd21, cd11)
return np.rad2deg(rho_a)
def cd11cd21(scale, rotation):
"""
Calculates CD11 and CD21 from the given pixel scale and rotation.
:param scale: pixel scale in degrees/pixel
:param rotation: rotation angle in degrees within [-180,180]
:rtype: tuple (cd11, cd21)
"""
rho = np.deg2rad(rotation)
cd11 = scale * cos(rho)
cd21 = scale * sin(rho)
return cd11, cd21
def setCdMatrix(header, scale, rotation):
"""
Sets the CD matrix from the given pixel scale and rotation.
:param header:
:param scale: pixel scale in degrees/pixel
:param rotation: rotation angle in degrees within [-180,180]
"""
cd11, cd21 = cd11cd21(scale, rotation)
header['CD1_1'] = cd11
header['CD1_2'] = -cd21
header['CD2_1'] = cd21
header['CD2_2'] = cd11
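# Sketch of the round-trip relation the CD-matrix helpers above rely on
# (assuming CUNIT1/CUNIT2 in degrees and no skew):
#   setCdMatrix(header, scale=0.001, rotation=30.0)
#   getPixelScale(header)    -> 0.001  (sqrt(CD1_1**2 + CD2_1**2))
#   getRotationAngle(header) -> 30.0   (atan2(CD2_1, CD1_1) in degrees)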
def getRadius(header, extend=0):
"""
Returns the radius in degrees of the circle which encloses the image.
Degrees are based on the CD matrix of the WCS header.
SIP distortion coefficients (if present) are not considered.
:param header: must also contain IMAGEW and IMAGEH
:param extend: how much to extend the circle in percent [0,1]
"""
diagPx = sqrt(header['IMAGEW']**2 + header['IMAGEH']**2)
radiusPx = diagPx/2 * (1+extend)
radiusDeg = getPixelScale(header) * radiusPx
return radiusDeg
def getCenterRADec(header):
"""
Returns RA,Dec for the image center.
:param header: must also contain IMAGEW and IMAGEH
:rtype: tuple (ra,dec) in degrees, ra is [0,360], dec is [-90,90]
"""
w = header['IMAGEW']
h = header['IMAGEH']
del header['NAXIS'] # emits a warning in WCS constructor if present (as it's 0 for .wcs files)
return WCS(header).all_pix2world(w/2, h/2, 0, ra_dec_order=True)
def setCenterRADec(header, ra, dec):
"""
Set the WCS reference point celestial coordinates.
The reference point is in the image center.
:param header: FITS header
:param ra: in degrees within [0,360]
:param dec: in degrees within [-90,90]
"""
assert 0 <= ra <= 360
assert -90 <= dec <= 90
w = header['IMAGEW']
h = header['IMAGEH']
header['CRPIX1'] = int(w//2 + 1) # FITS is 1-based
header['CRPIX2'] = int(h//2 + 1)
header['CRVAL1'] = ra
header['CRVAL2'] = dec
def readQuadMatch(fitsMatchPath):
"""
:param fitsMatchPath: path to .match file written by astrometry.net
:rtype: array of pixel coordinates of the stars in the quad, shape (n,2) in [x,y] order
"""
with fits.open(fitsMatchPath) as hdulist:
data = hdulist[1].data[0]
starCount = data.field('DIMQUADS')
starPxCoords = data.field('QUADPIX').reshape(-1,2)[:starCount]
return starPxCoords
def readCorr(corrPath):
"""
Return corresponding sources and reference stars as found by astrometry.net
from the given .corr file.
:param str corrPath: path to .corr file written by astrometry.net
:rtype: tuple (xField, yField, xIndex, yIndex)
"""
with fits.open(corrPath) as hdulist:
data = hdulist[1].data
xField = data.field('field_x')
yField = data.field('field_y')
xIndex = data.field('index_x')
yIndex = data.field('index_y')
return xField, yField, xIndex, yIndex
def readXy(fitsXyPath, sort=False, sortKey='FLUX', sortReverse=True, retSortField=False):
"""
X,Y position of sources/stars with origin (0,0)
.axy = extracted sources from image, includes flux
.xyls = stars from reference catalog, no flux
:raise KeyError: if sort is True and the sortKey doesn't exist
"""
with fits.open(fitsXyPath) as hdulist:
data = hdulist[1].data
x = data.field('X')-1 # FITS has (1,1) origin
y = data.field('Y')-1
if sort:
flux = data.field(sortKey)
fluxSort = np.argsort(flux)
if sortReverse:
fluxSort = fluxSort[::-1]
x = x[fluxSort]
y = y[fluxSort]
if sort and retSortField:
return x,y,flux
else:
return x,y
def recomputeXylsPixelPositions(originalXylsPath, originalWcsPath, newWcsPathOrHeader):
"""
Return pixel coordinates valid for `newWcsPathOrHeader` for
the reference stars found in `originalXylsPath` (belonging to `originalWcsPath`).
:rtype: tuple (x,y) with x and y being ndarrays
"""
# Step 1: compute RA,Dec of reference stars (as this is not stored in .xyls)
originalWCS = WCS(readHeader(originalWcsPath))
x, y = readXy(originalXylsPath)
ra, dec = originalWCS.all_pix2world(x, y, 0)
# Step 2: compute pixel positions of reference stars in new WCS solution
if isinstance(newWcsPathOrHeader, string_types):
newWCS = WCS(readHeader(newWcsPathOrHeader))
else:
newWCS = WCS(newWcsPathOrHeader)
    # all_world2pix raised a NoConvergence error.
# As we don't use SIP, we don't need to use all_world2pix.
# wcs_world2pix doesn't support any distortion correction.
xNew, yNew = newWCS.wcs_world2pix(ra, dec, 0)
return xNew,yNew
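# Minimal usage sketch (file names are hypothetical):
#   x, y = recomputeXylsPixelPositions('field.xyls', 'field.wcs', 'refined.wcs')
# maps the reference stars of the original solution to pixel coordinates
# valid for the refined WCS solution via their RA,Dec.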
def getCatalogStars(header, limit=500, limitFactor=2.5, maxVmag=None, retVmag=False, retry=1):
"""
Queries the Vizier catalog and retrieves stars for the sky area
as defined by the given WCS header.
:param header: FITS WCS header, must include IMAGEW and IMAGEH
:param limit: maximum number of stars to return (optional)
    :param limitFactor: how many more stars to query for;
        the search region is a circle, so to reach the desired
        limit of returned stars after filtering out stars outside
        the image bounds, more stars have to be queried for initially.
:param maxVmag: maximum magnitude of stars (optional)
:param retVmag: if true, include Vmag in the result tuple
:param retry: how many times to retry in case of errors (e.g. network problems)
:rtype: tuple (x, y) or (x, y, vmag) sorted by decreasing brightness, origin (0,0)
Note that vmag is a masked array and can contain masked values.
"""
column_filters = {}
if maxVmag:
column_filters['VTmag'] = '<' + str(maxVmag)
w, h = header['IMAGEW'], header['IMAGEH']
# Step 1: query stars in tycho-2 online catalog, ordered by Vmag
# We add a small border here. This is useful for
# circling stars in an image, such that half circles
# are drawn at the image corners instead of suddenly disappearing
# circles.
catalog = 'I/259/tyc2'
centerRa, centerDec = getCenterRADec(header)
border = 0.01 * w
radiusBorder = getPixelScale(header)*border
radius = getRadius(header) + radiusBorder
if limit:
# we have to query more stars as our search region is a circle
# and we are filtering stars out afterwards
row_limit = int(limitFactor*limit)
else:
row_limit = -1
print('Querying Vizier...')
v = Vizier(columns=['_RAJ2000', '_DEJ2000', '+VTmag'],
column_filters=column_filters,
row_limit=row_limit)
try:
result = v.query_region(coord.SkyCoord(ra=centerRa, dec=centerDec,
unit=(u.deg, u.deg),
frame='icrs'),
radius=radius*u.deg, catalog=catalog)[0]
except Exception as e:
if retry > 0:
print(repr(e))
print('retrying...')
time.sleep(2)
# astroquery may have stored a corrupt response in its cache,
# so we try again without using the cache
# see https://github.com/astropy/astroquery/issues/465
with suspend_cache(Vizier):
return getCatalogStars(header, limit, limitFactor, maxVmag, retVmag, retry-1)
print('Vizier query_region: ra={}, dec={}, radius={}, column_filters={}, row_limit={}, catalog={}'.
format(centerRa, centerDec, radius, column_filters, row_limit, catalog),
file=sys.stderr)
raise
vmag = result['VTmag']
ra = result['_RAJ2000']
dec = result['_DEJ2000']
print(len(vmag), 'stars received')
# Step 2: compute pixel coordinates for stars
wcs = WCS(header)
x, y = wcs.wcs_world2pix(ra, dec, 0)
# Step 3: remove stars outside the image bounds
# As above, we leave a small border here.
inside = (-border <= y) & (y < h+border) & (-border <= x) & (x < w+border)
x = x[inside]
y = y[inside]
vmag = vmag[inside]
print(len(vmag), 'stars left after filtering')
if limit and len(vmag) < limit:
print('NOTE: limit of {} not reached, debug info follows'.format(limit), file=sys.stderr)
print('Vizier query_region: ra={}, dec={}, radius={}, column_filters={}, row_limit={}, catalog={}'.
format(centerRa, centerDec, radius, column_filters, row_limit, catalog),
file=sys.stderr)
print('filter: border={}, width={}, height={}'.format(border,w,h))
# Step 4: apply limit by removing the faintest stars
if limit:
x = x[:limit]
y = y[:limit]
vmag = vmag[:limit]
if retVmag:
return x, y, vmag
else:
return x, y
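# Usage sketch (assumes a WCS header that also carries IMAGEW and IMAGEH):
#   header = readHeader('photo.wcs')  # hypothetical path
#   x, y, vmag = getCatalogStars(header, limit=200, maxVmag=9, retVmag=True)
# returns up to 200 Tycho-2 stars inside the image, brightest first.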
def writeXyls(path, x, y, vmag=None, clobber=False):
"""
Writes an .xyls file as produced by astrometry.net.
The input data can be retrieved using getCatalogStars.
    :param path: path of the .xyls file to write
:param x: x coordinates of stars, origin 0
:param y: y coordinates of stars, origin 0
:param vmag: if masked array, then masked values become nan
:param clobber: overwrite output file if it already exists
"""
assert len(x) == len(y)
if vmag is not None:
assert len(x) == len(vmag)
x = x+1
y = y+1
colX = fits.Column(name='X', format='1D', array=x)
colY = fits.Column(name='Y', format='1D', array=y)
cols = [colX, colY]
if vmag is not None:
colVmag = fits.Column(name='Vmag', format='1D', array=vmag)
cols.append(colVmag)
tbhdu = fits.BinTableHDU.from_columns(cols)
prihdr = fits.Header()
prihdr['AN_FILE'] = ('XYLS', 'Astrometry.net file type')
prihdu = fits.PrimaryHDU(header=prihdr)
thdulist = fits.HDUList([prihdu, tbhdu])
thdulist.writeto(path, checksum=True, clobber=clobber)
def getNoradId(header):
"""
    Returns the NORAD ID as an integer from the NORADID card of a FITS header,
or None if the card doesn't exist.
"""
noradId = header.get('NORADID')
if noradId is not None:
noradId = int(noradId)
return noradId
def setNoradId(header, noradId):
noradId = str(int(noradId))
if header.get('NORADID') is None:
header['HISTORY'] = 'NORADID added by auromat Python library'
header['NORADID'] = (noradId, 'NORAD ID of spacecraft')
def getPhotoTime(header):
"""
    Return the photo time as found in DATE-OBS, or None if not present.
:rtype: datetime.datetime | None
"""
dateobs = header.get('DATE-OBS')
if dateobs is None:
return None
else:
try:
d = datetime.strptime(dateobs, '%Y-%m-%dT%H:%M:%S.%f')
        except ValueError:  # no fractional seconds in the timestamp
d = datetime.strptime(dateobs, '%Y-%m-%dT%H:%M:%S')
return d
def getShiftedPhotoTime(header):
"""
Returns the corrected photo time or if not available, the
original photo time.
:rtype: datetime.datetime | None
"""
_, d, _ = getShiftedSpacecraftPosition(header)
if d is None:
d = getPhotoTime(header)
return d
def getSpacecraftPosition(header):
"""
Returns the spacecraft position at the original photo time
in cartesian GCRS coordinates.
:rtype: tuple([x,y,z], date)
"""
date = getPhotoTime(header)
x = header.get('POSX')
if x is None or date is None:
return None, None
y = header['POSY']
z = header['POSZ']
return np.array([x,y,z]), date
def setSpacecraftPosition(header, xyz, date):
"""
Add the spacecraft position at the original photo time
in cartesian GCRS coordinates to header cards
POSX, POSY, POSZ, DATE-OBS.
:param ndarray xyz: [x,y,z] position of spacecraft at 'date'
:param datetime date: the original photo time
"""
x,y,z = xyz
if header.get('POSX') is None:
header['HISTORY'] = 'POS* & DATE-OBS added by auromat Python library'
header['POSX'] = (x, 'X coordinate of spacecraft in GCRS at DATE-OBS')
header['POSY'] = (y, 'Y coordinate of spacecraft in GCRS at DATE-OBS')
header['POSZ'] = (z, 'Z coordinate of spacecraft in GCRS at DATE-OBS')
dateStr = date.strftime('%Y-%m-%dT%H:%M:%S.%f')
header['DATE-OBS'] = (dateStr, 'EXIF timestamp of the photograph')
def getShiftedSpacecraftPosition(header):
"""
Returns the spacecraft position at the corrected photo time
in cartesian GCRS coordinates.
:rtype: tuple([x,y,z], datetime date, timedelta delta)
"""
date = getPhotoTime(header)
shift = header.get('DATESHIF')
x = header.get('POSXSHIF')
if x is None or date is None or shift is None:
return None, None, None
y = header['POSYSHIF']
z = header['POSZSHIF']
delta = timedelta(seconds=shift)
shiftedDate = date + delta
return np.array([x,y,z]), shiftedDate, delta
def setShiftedSpacecraftPosition(header, xyz, shiftedDate):
"""
Add the spacecraft position at the given shifted photo time
in cartesian GCRS coordinates to header cards
POSXSHIF, POSYSHIF, POSZSHIF, DATESHIF.
:param ndarray xyz: [x,y,z] position of spacecraft at 'date'
:param datetime shiftedDate: the shifted photo time
"""
x,y,z = xyz
date = getPhotoTime(header)
if date is None:
raise ValueError('DATE-OBS needs to be set before setting the shifted date')
delta = (shiftedDate - date).total_seconds()
if header.get('POSXSHIF') is None:
header['HISTORY'] = 'POS*SHIF & DATESHIF added by auromat Python library'
header['POSXSHIF'] = (x, 'X coordinate of spacecraft in GCRS at DATESHIF')
header['POSYSHIF'] = (y, 'Y coordinate of spacecraft in GCRS at DATESHIF')
header['POSZSHIF'] = (z, 'Z coordinate of spacecraft in GCRS at DATESHIF')
header['DATESHIF'] = (delta, 'DATE-OBS shift in seconds')
| esa/auromat | auromat/fits.py | fits.py | py | 16,299 | python | en | code | 17 | github-code | 50 |
3598408884 | # Import dependencies
import sqlite3
# Create the database connection and cursor
conn = sqlite3.connect('Sensors.db')
curs = conn.cursor()
# Print out data from table BME_DATA
for row in curs.execute("SELECT * FROM BME_DATA ORDER BY TIME_STAMP DESC LIMIT 2000"):
    print(row)
conn.close() | slavisha84/ETL_PROJECT | Testing_DB.py | Testing_DB.py | py | 287 | python | en | code | 0 | github-code | 50 |
32710713295 | import abc
from abc import ABC
from minerl.herobraine.hero.handlers.translation import TranslationHandler
from minerl.herobraine.hero.handler import Handler
from minerl.herobraine.hero import handlers
from minerl.herobraine.hero.handlers import POVObservation, CameraAction, KeybasedCommandAction
from minerl.herobraine.hero.mc import INVERSE_KEYMAP
from minerl.herobraine.env_spec import EnvSpec
from typing import List
KEYBOARD_ACTIONS = [
"forward",
"back",
"left",
"right",
"jump",
"sneak",
"sprint",
"attack",
"use",
"drop",
"inventory"
]
class HumanControlEnvSpec(EnvSpec, ABC):
"""
A simple base environment from which all other simple envs inherit.
"""
def __init__(self, name, *args, resolution=(640, 480), **kwargs):
self.resolution = resolution
super().__init__(name, *args, **kwargs)
def create_observables(self) -> List[TranslationHandler]:
return [
POVObservation(self.resolution),
]
def create_actionables(self) -> List[TranslationHandler]:
"""
Simple envs have some basic keyboard control functionality, but
not all.
"""
return [
KeybasedCommandAction(k, INVERSE_KEYMAP[k]) for k in KEYBOARD_ACTIONS
] + [
KeybasedCommandAction(f"hotbar.{i}", INVERSE_KEYMAP[str(i)]) for i in range(1, 10)
] + [CameraAction()]
def create_monitors(self) -> List[TranslationHandler]:
        return []  # No monitors by default!
def create_agent_start(self) -> List[Handler]:
return [handlers.LowLevelInputsAgentStart()]
| sihangw/minerl | minerl/herobraine/env_specs/human_controls.py | human_controls.py | py | 1,635 | python | en | code | null | github-code | 50 |
41535672559 | from typing import List, Tuple
SIZE_OF_FIELD = 4
def read_input() -> Tuple[int, List[List[str]]]:
buttons_count = int(input())
field = []
for _ in range(SIZE_OF_FIELD):
field.append(list(input()))
return buttons_count, field
def count_win_rounds(buttons_count: int, field: List[List[str]]) -> int:
symbols = {}
for row in field:
for symbol in row:
            symbols[symbol] = symbols.get(symbol, 0) + 1
return sum(
1
for key in symbols.keys()
if symbols[key] <= buttons_count * 2 and key != "."
)
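# Illustrative example: with max_buttons = 1 and the field
#   .1.2
#   ..3.
#   11..
#   ....
# symbol '1' occurs 3 times (more than 1 * 2, so it cannot win) while
# '2' and '3' occur once each, so count_win_rounds returns 2.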
if __name__ == "__main__":
max_buttons, game_field = read_input()
print(count_win_rounds(max_buttons, game_field))
# time complexity O(N x M x len(keys)) => O(4 x 4 x 10(max keys)) => O(1)
# space complexity O(10(keys) x 10(values)) => O(1)
# ID = 79440406
| and-volkov/yap.algorithms | sprint15/task2.py | task2.py | py | 896 | python | en | code | 0 | github-code | 50 |
73821537114 | import DataProcessing.ModuleReanalysisData as Mre
import DataProcessing.ModuleFeatures as Ml
folder_data_tot = './data/'
folderLUT = folder_data_tot
foldersaving=folder_data_tot+'Xy/'
pkl_inputfile = folder_data_tot+'tracks_IBTRACKS_1979_after.pkl'
size_grid = 1
size_crop = 11
levtype = 'pl' # or 'sfc'
flag_write_ys = False # write the target (y)
flag_write_X = True # write the input data (X)
possible_levels = Mre.open_level_list('pl', folderLUT=folderLUT)
levelist = [lev for lev in possible_levels if int(lev)>90] # no level smaller than 100 hPa
levelist = levelist[1::2]
LUTfilename=folderLUT+'list_params_nums_'+levtype+'.txt'
shortnames, ids, names, namesCDF=Mre.open_LUT_list_params(LUTfilename)
total_list_params=shortnames
list_params = ['r', 'vo','w', 'z','u','v']
history = 0  # or 1, or 2: number of historical time steps to also store (1 = 6h behind stored, 2 = 6h and 2h behind stored)
folderpath=folder_data_tot+'ERA_interim/'\
+'grid_'+str(size_grid)+'/'+levtype+'/'+'_'.join(total_list_params)+'/'
# check if params in list params:
if not set(list_params).issubset(shortnames):
print('Warning! list_params is not a subset of possible parameters!')
#check if levelist in possible levels:
if levelist:
possible_levels = Mre.open_level_list('pl', folderLUT=folderLUT)
if not set(levelist).issubset(possible_levels):
print('Warning! levelist is not a subset of possible levels!')
Ml.load_data_storm_interim( list_params, size_crop=size_crop, levtype=levtype, pkl_inputfile=pkl_inputfile,
folder_data=folderpath, folder_saving=foldersaving, folderLUT=folderLUT,
levels=levelist, flag_write_ys=flag_write_ys, flag_write_X=flag_write_X,
history=history)
| sophiegif/FusionCNN_hurricanes | scripts_data_collect_process/script_make_img_features_database.py | script_make_img_features_database.py | py | 1,786 | python | en | code | 20 | github-code | 50 |
26385476925 | from store.models import Product, Category
from users.models import Consultant
imprimerie_category = Category.objects.get(name="Imprimerie")
sup_publi_category = Category.objects.get(name="Supports publicitaires")
mane_category = Category.objects.get(name="Objets publicitaires")
articles_promotionnels= Category.objects.get(name="Articles promotionnels")
con1 = Consultant.objects.get(pk=2)
con2 = Consultant.objects.get(pk=3)
con3 = Consultant.objects.get(pk=4)
products_data = [
# {
# "category": imprimerie_category,
# "created_by": con1,
# "name": "Affiches publicitaires",
# "description": "Affiches grand format pour la promotion d'événements et de produits.",
# "price": "19.99",
# "discount_price": "18.99",
# "image": "products/Affiches-publicitaires..png",
# "created_at": "2023-11-15T23:11:28.816Z",
# "slug": "affiches-publicitaires",
# "updated": "2023-11-15T23:13:32.809Z"
# },
# {
# "category": imprimerie_category,
# "created_by": con1,
# "name": "Flyers publicitaires",
# "description": "Flyers imprimés pour la distribution lors d'événements et de campagnes.",
# "price": "9.99",
# "discount_price": "18.99",
# "image": "products/Flyers-publicitaires_.png",
# "created_at": "2023-11-15T23:11:28.816Z",
# "slug": "flyers-publicitaires",
# "updated": "2023-11-15T23:14:07.032Z"
# },
# {
# "category": imprimerie_category,
# "created_by": con1,
# "name": "Catalogues promotionnels",
# "description": "Conception et impression de catalogues mettant en valeur vos produits.",
# "price": "29.99",
# "discount_price": "18.99",
# "image": "products/Catalogues-promotionnels..png",
# "created_at": "2023-11-15T23:11:28.816Z",
# "slug": "catalogues-promotionnels",
# "updated": "2023-11-15T23:14:59.484Z"
# },
# {
# "category": imprimerie_category,
# "created_by": con1,
# "name": "Packaging sur mesure",
#         "description": "Créer un emballage personnalisé pour vos produits avec une impression de qualité",
# "price": "14.99",
# "discount_price": "18.99",
# "image": "products/Packaging-sur-mesure..png",
# "created_at": "2023-11-15T23:11:28.816Z",
# "slug": "packaging-sur-mesure",
# "updated": "2023-11-15T23:14:43.337Z"
# },
# {
# "category": sup_publi_category,
# "created_by": con1,
# "name": "Drapeaux publicitaires",
#         "description": "Drapeaux personnalisés pour attirer l'attention à l'extérieur de votre entreprise.",
# "price": "49.99",
# "discount_price": "18.99",
# "image": "products/Drapeaux-publicitaires..png",
# "created_at": "2023-11-15T23:18:10.848Z",
# "slug": "drapeaux-publicitaires",
# "updated": "2023-11-15T23:19:43.291Z"
# },
# {
# "category": sup_publi_category,
# "created_by": con1,
#         "name": "Signalétique intérieure",
#         "description": "Signalétique personnalisée pour guider les clients dans vos locaux.",
# "price": "29.99",
# "discount_price": "18.99",
# "image": "products/Signaletique-interior.png",
# "created_at": "2023-11-15T23:18:10.848Z",
# "slug": "signaletique-interieure",
# "updated": "2023-11-15T23:20:09.030Z"
# },
# {
# "category": sup_publi_category,
# "created_by": con1,
# "name": "Stands d'exposition",
# "description": "Conception et construction de stands d'exposition sur mesure pour les salons.",
# "price": "149.99",
# "discount_price": "18.99",
# "image": "products/Stands-d-exposition..png",
# "created_at": "2023-11-15T23:18:10.864Z",
# "slug": "stands-dexposition",
# "updated": "2023-11-15T23:18:49.061Z"
# },
# {
# "category": sup_publi_category,
# "created_by": con1,
# "name": "Structures gonflables",
# "description": "Structures publicitaires gonflables pour des événements uniques.",
# "price": "99.99",
# "discount_price": "18.99",
# "image": "products/Structures-gonflables..png",
# "created_at": "2023-11-15T23:18:10.864Z",
# "slug": "structures-gonflables",
# "updated": "2023-11-15T23:19:12.589Z"
# },
{
"category": mane_category,
"created_by": con3,
"name": "Systèmes de projection",
"description": "Projecteurs haute réolution pour des présentations de qualité",
"price": "299.99",
"discount_price": "18.99",
"image": "products/Systemes-de-projection..png",
"created_at": "2023-11-15T23:22:31.281Z",
"slug": "systemes-de-projection",
"updated": "2023-11-15T23:23:45.806Z"
},
{
"category": mane_category,
"created_by": con3,
"name": "Supports de conférence",
"description": "Supports et accessoires pour une préentation efficace en salle de conféence.",
"price": "49.99",
"discount_price": "18.99",
"image": "products/Supports-de-conference-personnalises.png",
"created_at": "2023-11-15T23:22:31.282Z",
"slug": "supports-de-conference",
"updated": "2023-11-15T23:23:27.134Z"
},
{
"category": mane_category,
"created_by": con3,
"name": "Solutions de visioconférence",
"description": "Equipements pour des réunions virtuelles et des webinaires.",
"price": "199.99",
"discount_price": "18.99",
"image": "products/Solutions-de-visioconference..png",
"created_at": "2023-11-15T23:22:31.284Z",
"slug": "solutions-de-visioconference",
"updated": "2023-11-15T23:23:11.826Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "Stylos personnalisés",
"description": "Des stylos personnalisés avec le logo de votre entreprise, parfaits pour les cadeaux promotionnels.",
"price": "19.99",
"discount_price": "18.99",
"image": "products/Stylos-personnalises..png",
"created_at": "2023-11-15T23:46:10.092Z",
"slug": "stylos-personnalises",
"updated": "2023-11-15T23:46:59.421Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "T-shirts promotionnels",
"description": "T-shirts imprimés avec votre logo pour les événements d'entreprise et les campagnes de marketing.",
"price": "29.99",
"discount_price": "18.99",
"image": "products/T-shirts-imprimes-avec-votre-logo..png",
"created_at": "2023-11-15T23:46:10.110Z",
"slug": "t-shirts-promotionnels",
"updated": "2023-11-15T23:46:37.671Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "Porte-clé personnalisés",
"description": "Des stylos personnalisés avec le logo de votre entreprise, parfaits pour les cadeaux promotionnels..",
"price": "19.99",
"discount_price": "18.99",
"image": "products/Porte-clss-personnalises.png",
"created_at": "2023-11-15T23:51:41.705Z",
"slug": "porte-cles-personnalises",
"updated": "2023-11-15T23:54:21.249Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "Sacs fourre-tout personnalisés",
"description": "T-shirts imprimés avec votre logo pour les événements d'entreprise et les campagnes de marketing.",
"price": "29.99",
"discount_price": "18.99",
"image": "products/Emarketing-bags.png",
"created_at": "2023-11-15T23:51:41.715Z",
"slug": "sacs-fourre-tout-personnalises",
"updated": "2023-11-15T23:54:00.242Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "Calendriers d'entreprise",
"description": "Calendriers personnalisés affichant vos produits ou services toute l'année.",
"price": "19.99",
"discount_price": "18.99",
"image": "products/Calendriers-d-entreprise..png",
"created_at": "2023-11-15T23:51:41.715Z",
"slug": "calendriers-dentreprise",
"updated": "2023-11-15T23:54:10.010Z"
},
{
"category": articles_promotionnels,
"created_by": con1,
"name": "Verres et mugs personnalisés",
"description": "Verres, tasses et mugs avec votre logo pour les cadeaux d'entreprise.",
"price": "29.99",
"discount_price": "18.99",
"image": "products/Verres-et-mugs-personnalises..png",
"created_at": "2023-11-15T23:51:41.719Z",
"slug": "verres-et-mugs-personnalises",
"updated": "2023-11-15T23:53:02.147Z"
}
]
for product_data in products_data:
product = Product(**product_data)
product.save() | Aleks512/Ventalis | products.py | products.py | py | 8,955 | python | en | code | 0 | github-code | 50 |
34536155371 | # -*- coding: utf-8 -*-
import codecs
import csv
import requests
from bs4 import BeautifulSoup
def getHTML(url):
r = requests.get(url)
return r.content
def parseHTML(html):
soup = BeautifulSoup(html,'html.parser')
body = soup.body
company_middle = body.find('div',attrs={'class':'middle'})
company_list_ct = company_middle.find('div',attrs={'class':'list-ct'})
    company_list = []  # modified
for company_ul in company_list_ct.find_all('ul',attrs={'class':'company-list'}):
for company_li in company_ul.find_all('li'):
company_url = company_li.a['href']
company_info = company_li.get_text()
            company_list.append([company_info, company_url])  # modified
    return company_list  # modified
def writeCSV(file_name,data_list):
    with open(file_name, 'w', newline='') as f:  # newline='' avoids blank rows with csv on Windows
writer = csv.writer(f)
for data in data_list:
writer.writerow(data)
URL = 'http://www.cninfo.com.cn/cninfo-new/information/companylist'
html = getHTML(URL)
data_list = parseHTML(html)  # modified
writeCSV('F://test.csv',data_list)
| zhanghanxuan123/Python_study_code | demo01/Netdemo06.py | Netdemo06.py | py | 1,118 | python | en | code | 1 | github-code | 50 |
170087305 | from tkinter import ttk
import tkinter
# Auxiliary constants
WINDOW_NAME = "Calculator"
ROOT_DISPLAY_STATE = "readonly"
CLEAR_OPERATOR = "C"
ERASE_OPERATOR = "←"
ADD_OPERATOR = "+"
SUBTRACT_OPERATOR = "-"
MULTIPLICATION_OPERATOR = "×"
DIVISION_OPERATOR = "÷"
DOT = "."
RESULT_OPERATOR = "="
C_0 = "0"
C_1 = "1"
C_2 = "2"
C_3 = "3"
C_4 = "4"
C_5 = "5"
C_6 = "6"
C_7 = "7"
C_8 = "8"
C_9 = "9"
WIDTH_SIZE = 320
HEIGHT_SIZE = 150
COLUMN_1_POSITION = 0
COLUMN_2_POSITION = 80
COLUMN_3_POSITION = 160
COLUMN_4_POSITION = 240
ROW_1_POSITION = 30
ROW_2_POSITION = 60
ROW_3_POSITION = 90
ROW_4_POSITION = 120
EMPTY_STRING = ""
C2_0 = 0
C2_1 = 1
C__1 = -1
#Global variables
accum = C2_0
flag = False
operator_to_use = EMPTY_STRING
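# Helper below: drops a trailing decimal point from the display string
# (e.g. '3.' becomes '3') before the value is parsed as a number.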
def is_dot_last_char(string):
aux_string = string.get()
if aux_string.endswith(DOT):
erase_last(string)
return string
def add(number, string):
global flag
if flag:
string.set(EMPTY_STRING)
flag = False
aux_string = string.get() + number
result = aux_string.count(DOT) == C2_1
if aux_string.isdecimal() or result:
string.set(aux_string)
def erase_last(string):
aux_string = string.get()
aux_string_size = len(aux_string)
if aux_string_size > C2_0:
string.set(aux_string[:C__1])
def clear(string):
string.set(EMPTY_STRING)
global accum, flag, operator_to_use
accum = C2_0
flag = False
operator_to_use = EMPTY_STRING
def operation(operator_received, string):
global accum, operator_to_use
if accum == C2_0:
aux_string = is_dot_last_char(string)
accum = float(aux_string.get())
operator_to_use = operator_received
string.set(EMPTY_STRING)
def show_result(string):
global accum, flag
if operator_to_use != EMPTY_STRING:
aux_string = is_dot_last_char(string)
value = float(aux_string.get())
if operator_to_use == ADD_OPERATOR:
result = accum + value
elif operator_to_use == SUBTRACT_OPERATOR:
result = accum - value
elif operator_to_use == MULTIPLICATION_OPERATOR:
result = accum * value
elif operator_to_use == DIVISION_OPERATOR:
result = accum / value
string.set(result)
flag = True
accum = C2_0
else:
clear(string)
def main():
#window setting
root = tkinter.Tk()
root.config(width=WIDTH_SIZE, height=HEIGHT_SIZE)
root.title(WINDOW_NAME)
root.resizable(C2_0, C2_0)
#Display setting
string = tkinter.StringVar()
string.set(EMPTY_STRING)
display = ttk.Entry(textvariable=string, state=ROOT_DISPLAY_STATE)
display.place(x=C2_0, y=C2_0)
    # Number buttons settings
button_7 = ttk.Button(root, text=C_7, command=lambda:add(C_7, string))
button_7.place(x=COLUMN_1_POSITION, y=ROW_1_POSITION)
button_4 = ttk.Button(root, text=C_4, command=lambda:add(C_4, string))
button_4.place(x=COLUMN_1_POSITION, y=ROW_2_POSITION)
button_1 = ttk.Button(root, text=C_1, command=lambda:add(C_1, string))
button_1.place(x=COLUMN_1_POSITION, y=ROW_3_POSITION)
button_0 = ttk.Button(root, text=C_0, command=lambda:add(C_0, string))
button_0.place(x=COLUMN_1_POSITION, y=ROW_4_POSITION)
button_8 = ttk.Button(root, text=C_8, command=lambda:add(C_8, string))
button_8.place(x=COLUMN_2_POSITION, y=ROW_1_POSITION)
button_5 = ttk.Button(root, text=C_5, command=lambda:add(C_5, string))
button_5.place(x=COLUMN_2_POSITION, y=ROW_2_POSITION)
button_2 = ttk.Button(root, text=C_2, command=lambda:add(C_2, string))
button_2.place(x=COLUMN_2_POSITION, y=ROW_3_POSITION)
button_dot = ttk.Button(root, text=DOT, command=lambda:add(DOT, string))
button_dot.place(x=COLUMN_2_POSITION, y=ROW_4_POSITION)
button_9 = ttk.Button(root, text=C_9, command=lambda:add(C_9, string))
button_9.place(x=COLUMN_3_POSITION, y=ROW_1_POSITION)
button_6 = ttk.Button(root, text=C_6, command=lambda:add(C_6, string))
button_6.place(x=COLUMN_3_POSITION, y=ROW_2_POSITION)
button_3 = ttk.Button(root, text=C_3, command=lambda:add(C_3, string))
button_3.place(x=COLUMN_3_POSITION, y=ROW_3_POSITION)
button_result = ttk.Button(root, text=RESULT_OPERATOR, command=lambda:show_result(string))
button_result.place(x=COLUMN_3_POSITION, y=ROW_4_POSITION)
    # Operator buttons settings
add_button = ttk.Button(root, text=ADD_OPERATOR, command=lambda:operation(ADD_OPERATOR, string))
add_button.place(x=COLUMN_4_POSITION, y=ROW_1_POSITION)
subtract_button = ttk.Button(root, text=SUBTRACT_OPERATOR, command=lambda:operation(SUBTRACT_OPERATOR, string))
subtract_button.place(x=COLUMN_4_POSITION, y=ROW_2_POSITION)
multiplication_button = ttk.Button(root, text=MULTIPLICATION_OPERATOR, command=lambda:operation(MULTIPLICATION_OPERATOR, string))
multiplication_button.place(x=COLUMN_4_POSITION, y=ROW_3_POSITION)
division_button = ttk.Button(root, text=DIVISION_OPERATOR, command=lambda:operation(DIVISION_OPERATOR, string))
division_button.place(x=COLUMN_4_POSITION, y=ROW_4_POSITION)
clean_button = ttk.Button(root, text=CLEAR_OPERATOR, command=lambda:clear(string))
clean_button.place(x=COLUMN_3_POSITION, y=C2_0)
erase_button = ttk.Button(root, text=ERASE_OPERATOR, command=lambda:erase_last(string))
erase_button.place(x=COLUMN_4_POSITION, y=C2_0)
root.mainloop()
if __name__ == "__main__":
main()
| FimesX/Calculator-v1.0 | main.py | main.py | py | 5,470 | python | en | code | 0 | github-code | 50 |
42983694220 | from odoo import models, fields, api
class ConvertToEmployee(models.TransientModel):
_name = "covert.to.employee"
_description = "Employee List"
ROLES = [
('developer', 'Developer'),
('tester', 'Tester'),
('analyst', 'Analyst'),
('trainer', 'Trainer')
]
trainee_name = fields.Many2one('trainee.master', string="Trainee")
role = fields.Selection(ROLES, string='Role to Convert')
def convert_to_employee(self):
trainee_name = self.trainee_name
trainee_name.status = 'employed'
@api.model
def create(self, vals):
# import pdb
# pdb.set_trace()
value_of_logs = {
'employee_name': self.env['trainee.master'].browse(vals.get('trainee_name')).name,
'role': vals.get('role')
}
self.env['employee.list'].create(value_of_logs)
return super(ConvertToEmployee, self).create(vals)
| kunalchambhare/js_code | bista_training/wizard/convert_to_employee_wiz.py | convert_to_employee_wiz.py | py | 932 | python | en | code | 1 | github-code | 50 |
39317579296 | """
Various utilities to ease writing tests around Django and HTML responses.
"""
import os
import hashlib
from django.contrib.sites.models import Site
from django.template.response import TemplateResponse
from django.test.html import parse_html
from django.urls import reverse
from pyquery import PyQuery as pq
from .factories import create_image_file
# A dummy password that should pass form validation
VALID_PASSWORD_SAMPLE = "Azerty12345678"
# A dummy blank GIF file in byte value to simulate an uploaded file like with
# 'django.core.files.uploadedfile.SimpleUploadedFile'
DUMMY_GIF_BYTES = (
b'\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04'
b'\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02'
b'\x02\x4c\x01\x00\x3b'
)
def get_website_url(site_settings=None):
"""
    A shorthand to retrieve the full website URL according to the Site ID and
    HTTP protocol settings.
Keyword Arguments:
site_settings (django.conf.settings): Settings object, if not given the method
will be unable to determine if HTTPS is enabled or not, so it will always
            return an HTTP URL.
Returns:
string: Full website URL.
"""
domain = Site.objects.get_current().domain
protocol = "http://"
if site_settings and site_settings.HTTPS_ENABLED:
protocol = "https://"
return "{}{}".format(protocol, domain)
def get_relative_path(site_url, url):
"""
From given URL, retrieve the relative path (URL without domain and starting
slash).
Arguments:
site_url (string): Website URL to remove from given ``url``
argument.
url (string): Full URL (starting with http/https) to make relative to
website URL.
Returns:
        string: URL path relative to the website URL.
"""
if url.startswith(site_url):
return url[len(site_url):]
return url
def get_admin_add_url(model):
"""
Return the right admin URL for add form view for given class.
Arguments:
model (Model object): A model object to use to find its admin
add form view URL.
Returns:
string: Admin add form view URL path.
"""
url_pattern = "admin:{app}_{model}_add"
return reverse(url_pattern.format(
app=model._meta.app_label,
model=model._meta.model_name
))
def get_admin_change_url(obj):
"""
Return the right admin URL for a change view for given object.
Arguments:
obj (Model object): A model object instance to use to find its admin
change view URL.
Returns:
string: Admin change view URL path.
"""
url_pattern = "admin:{app}_{model}_change"
return reverse(url_pattern.format(
app=obj._meta.app_label,
model=obj._meta.model_name
), args=[
obj.pk
])
def get_admin_list_url(model):
"""
Return the right admin URL for a list view for given class.
Arguments:
model (Model object): A model object to use to find its admin
list view URL.
Returns:
string: Admin list view URL path.
"""
url_pattern = "admin:{app}_{model}_changelist"
return reverse(url_pattern.format(
app=model._meta.app_label,
model=model._meta.model_name
))
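# Usage sketch for the three admin URL helpers above (model and app names are
# hypothetical; exact paths depend on the project's admin registration):
#   get_admin_add_url(Article)     -> "/admin/blog/article/add/"
#   get_admin_change_url(article)  -> "/admin/blog/article/42/change/"
#   get_admin_list_url(Article)    -> "/admin/blog/article/"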
def decode_response_or_string(content):
"""
    Shorthand to get an HTML string from either a TemplateResponse (as
    returned by the Django test client) or a simple string, so you can pass
    either one without caring about the content type.
Arguments:
        content (TemplateResponse or string): If content is a string it is
            returned as-is. If content is a TemplateResponse, the byte string
            from its ``content`` attribute is decoded.
Returns:
string: HTML string.
"""
if isinstance(content, TemplateResponse):
return content.content.decode()
return content
def html_element(content):
"""
    Shorthand to use Django HTML parsing on given content.
    This is most useful for comparisons of HTML parts.
Arguments:
content (TemplateResponse or string): HTML content to parse.
Returns:
django.test.html.Element: A Python object structure able to perform
comparaison on a semantical way. See ``django.test.html.parse_html`` for
more details.
"""
return parse_html(
decode_response_or_string(content)
)
def html_pyquery(content):
"""
    Shorthand to use PyQuery parsing on given content.
    This is most useful for digging into complex HTML content. PyQuery is
    basically a wrapper around ``lxml.etree``; it provides a more intuitive
    API (similar to jQuery) to traverse elements, but when you reach a node's
    content it returns a ``lxml.html.HtmlElement`` object, which has a less
    intuitive API.
Arguments:
content (TemplateResponse or string): HTML content to parse.
Returns:
pyquery.PyQuery: A PyQuery object.
"""
return pq(
decode_response_or_string(content),
parser='html'
)
def compact_form_errors(form):
"""
Build a compact dict of field errors without messages.
    This helper keeps error assertions easy to write, since full messages may
    be long and may be translated, which makes them harder to test.
Arguments:
form (django.forms.Form): A bounded form.
Returns:
dict: A dict of invalid fields, each item is indexed by field name and
value is a list of error codes.
"""
errors = {}
for name, validationerror in form.errors.as_data().items():
errors[name] = [item.code for item in validationerror]
return errors
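# Example of the compacted structure (form and error codes are hypothetical
# and depend on the field validators in use):
#   form = SomeForm(data={"email": "not-an-email"})
#   form.is_valid()
#   compact_form_errors(form)  # -> {"email": ["invalid"]}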
def sum_file_object(fileobj):
"""
Return a hash checksum for given file object using "Black2b" algorithm.
Arguments:
fileobj (object): Any file valid object with ``getvalue`` or ``read`` method.
Returns:
string: Checksum for file object.
"""
algorithm = hashlib.blake2b()
    # Some file-like objects such as BytesIO may not return the full content
    # from "read()" (e.g. if the read position is not at the start), so prefer
    # "getvalue()" when available
if hasattr(fileobj, "getvalue"):
content = fileobj.getvalue()
else:
content = fileobj.read()
algorithm.update(content)
return algorithm.hexdigest()
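# Usage sketch: equal content yields equal Blake2b digests, which is what
# tests typically compare:
#   import io
#   sum_file_object(io.BytesIO(b"abc")) == sum_file_object(io.BytesIO(b"abc"))  # True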
def get_test_source(storage, *args, **kwargs):
"""
Shortcut helper to create a dummy image, save it to the FS and get its
final path from storage.
Arguments:
storage (django.core.files.storage.Storage): Storage class to use to
save file to the file system.
Keyword Arguments:
destination_dir (string): relative directory path where to save file
            in the storage directory. Defaults to "pil".
        *args: Any other positional arguments are passed to
``create_image_file``.
        **kwargs: Any other keyword arguments are passed to
``create_image_file``.
Returns:
django.core.files.File: File object with the right final file path.
"""
destination_dir = kwargs.pop("destination_dir", "pil")
image = create_image_file(*args, **kwargs)
destination = os.path.join(destination_dir, image.name)
source_filepath = storage.save(destination, image)
# Trick to update name to final destination since File object is not
# automatically updated to the final filepath during storage.
image.name = source_filepath
return image
| emencia/cmsplugin-blocks | cmsplugin_blocks/utils/tests.py | tests.py | py | 7,647 | python | en | code | 2 | github-code | 50 |
19949969236 | from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
data=input("enter the name of the dataset file:")
df=pd.read_csv(data)
number=LabelEncoder()
df['class']=number.fit_transform(df['class'].astype('str'))
X=df.iloc[:,:df.shape[1]-1]
Y=df.iloc[:,df.shape[1]-1]
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.3,random_state=41)
regr = linear_model.LinearRegression()
regr.fit(X_train,Y_train)
sc=regr.score(X_test,Y_test)
y_pred = regr.predict(X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(Y_test,y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(Y_test,y_pred))
print('R^2 score (model.score): \n', sc)
| not4win/5th-sem-soft-computing | sc lab1/skl.py | skl.py | py | 951 | python | en | code | 0 | github-code | 50 |
28569627767 | import json
import logging
import yaml
from alarm import AlarmManager
from filtered_metrics import get_filtered_metrics
from paramiko_ssh_client import ParamikoSSHClient
from server_metrics_aggregator import ServerMetricsAggregator
with open('server_config.yaml', 'r') as config_file:
server_config = yaml.safe_load(config_file)
def main():
aggregated_metrics = []
for server in server_config['servers']:
server_name = server.get('name')
ssh_client = ParamikoSSHClient(server['address'], server['username'], server['password'])
server_aggregator = ServerMetricsAggregator(ssh_client)
metrics = server_aggregator.aggregate_metrics()
server_data = {server_name: metrics}
aggregated_metrics.append(server_data)
alarm_manager = AlarmManager(server)
alarm_manager.check_alarms(server_data)
filtered_metrics = get_filtered_metrics(aggregated_metrics, server_config)
print(json.dumps(filtered_metrics))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# print(json.dumps(server_config, indent=2))
main()
| aakashjangidme/server_monitor_raw | main.py | main.py | py | 1,125 | python | en | code | 0 | github-code | 50 |
21229650655 | from math import sqrt, pi, sin, cos
from supervisor import Supervisor
from basic import AvoidObstacles, GoToGoal, AOAndGTG
from ..geometry import Pose2D
class K3Supervisor(Supervisor):
def __init__(self):
Supervisor.__init__(self)
self._controllers = [
AvoidObstacles(),
GoToGoal(),
AOAndGTG()
]
self.set_current_controller(2)
self._prev_ticks = {
'left': 0,
'right': 0
}
self.goal = (0, 0)
self.reached_goal = False
def set_current_controller(self, controller_id):
self._current_controller = self._controllers[controller_id]
def execute(self, time_delta):
"""
Select and execute the current controller.
See also controller/execute
"""
x_distance = self._state_estimate.x - self.goal[0]
y_distance = self._state_estimate.y - self.goal[1]
if sqrt(x_distance ** 2 + y_distance ** 2) > 0.02:
outputs = self._current_controller.execute(
self._robot,
self._state_estimate,
time_delta,
x_g=self.goal[0],
y_g=self.goal[1],
v=0.1,
d_c=0.08,
d_s=0.1)
w_r, w_l = self._robot.dynamics.uni_to_diff(outputs['v'], outputs['w'])
self._robot.set_wheel_speeds(w_r, w_l)
else:
self.reached_goal = True
self._robot.set_wheel_speeds(0, 0)
self._update_odometry()
def _update_odometry(self):
"""
Approximate the location of the robot.
This method should be called from the
execute function every iteration. The location of the robot is
updated based on the difference to the previous wheel encoder
ticks. This is only an approximation.
_state_estimate is updated with the new location and the
measured wheel encoder tick counts are stored in _prev_ticks.
"""
# Get wheel encoder ticks from the robot
right_ticks = self._robot.encoders[0].ticks
left_ticks = self._robot.encoders[1].ticks
prev_right_ticks = self._prev_ticks['right']
prev_left_ticks = self._prev_ticks['left']
self._prev_ticks['right'] = right_ticks
self._prev_ticks['left'] = left_ticks
# Previous estimate
x = self._state_estimate.x
y = self._state_estimate.y
theta = self._state_estimate.theta
# Compute odometry here
m_per_tick = (2 * pi * self._robot.wheel_radius) / self._robot.encoders[0].ticks_per_rev
d_right = (right_ticks - prev_right_ticks) * m_per_tick
d_left = (left_ticks - prev_left_ticks) * m_per_tick
d_center = (d_right + d_left) / 2
phi = (d_right - d_left) / self._robot.wheel_base_length
theta_p = theta + phi
x_p = x + d_center * cos(theta)
y_p = y + d_center * sin(theta)
# Update your estimate of (x,y,theta)
self._state_estimate = Pose2D(x_p, y_p, theta_p)
| dgchurchill/python-simiam | simiam/controllers/khepera3.py | khepera3.py | py | 3,311 | python | en | code | 1 | github-code | 50 |
1685678996 | from typing import List
import paddle
import paddle.nn as nn
from paddle3d.ops import pointnet2_ops
def voxel_query(max_range: int, radius: float, nsample: int, xyz: paddle.Tensor, \
new_xyz: paddle.Tensor, new_coords: paddle.Tensor, point_indices: paddle.Tensor):
"""
Args:
        max_range: int, max range of voxels to be grouped
        radius: float, radius of the balls
        nsample: int, maximum number of features in the balls
        xyz: (N1 + N2, 3) xyz coordinates of the features
        new_xyz: (M1 + M2, 3) centers of the ball query
        new_coords: (M1 + M2, 4), [batch_id, z, y, x] coordinates of keypoints
        point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels
    Returns:
        idx: (M1 + M2, nsample) tensor with the indices of the features that form the query balls
        empty_ball_mask: (M1 + M2) boolean mask marking queries that found no points
"""
z_range, y_range, x_range = max_range
idx = pointnet2_ops.voxel_query_wrapper(new_xyz, xyz, new_coords, point_indices, \
radius, nsample, z_range, y_range, x_range)
empty_ball_mask = (idx[:, 0] == -1)
idx[empty_ball_mask] = 0
return idx, empty_ball_mask
class VoxelQueryAndGrouping(nn.Layer):
def __init__(self, max_range: int, radius: float, nsample: int):
"""
Args:
radius: float, radius of ball
nsample: int, maximum number of features to gather in the ball
"""
super().__init__()
self.max_range, self.radius, self.nsample = max_range, radius, nsample
def forward(self, new_coords: paddle.Tensor, xyz: paddle.Tensor,
xyz_batch_cnt: paddle.Tensor, new_xyz: paddle.Tensor,
new_xyz_batch_cnt: paddle.Tensor, features: paddle.Tensor,
voxel2point_indices: paddle.Tensor):
"""
Args:
new_coords: (M1 + M2 ..., 3) centers voxel indices of the ball query
xyz: (N1 + N2 ..., 3) xyz coordinates of the features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
new_xyz: (M1 + M2 ..., 3) centers of the ball query
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
features: (N1 + N2 ..., C) tensor of features to group
voxel2point_indices: (B, Z, Y, X) tensor of points indices of voxels
Returns:
new_features: (M1 + M2, C, nsample) tensor
"""
assert xyz.shape[0] == xyz_batch_cnt.sum(
), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape),
                                           str(xyz_batch_cnt))
assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
batch_size = xyz_batch_cnt.shape[0]
# idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius,
self.nsample, xyz, new_xyz,
new_coords, voxel2point_indices)
idx1 = idx1.reshape([batch_size, -1, self.nsample])
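        # Convert the global point indices returned by the query into
        # per-sample (local) indices by subtracting the cumulative point
        # counts of the preceding samples in the batch.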
count = 0
for bs_idx in range(batch_size):
idx1[bs_idx] -= count
count += xyz_batch_cnt[bs_idx]
idx1 = idx1.reshape([-1, self.nsample])
idx1[empty_ball_mask1] = 0
idx = idx1
empty_ball_mask = empty_ball_mask1
grouped_xyz = pointnet2_ops.grouping_operation_stack(
xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)
# grouped_features: (M1 + M2, C, nsample)
grouped_features = pointnet2_ops.grouping_operation_stack(
features, xyz_batch_cnt, idx, new_xyz_batch_cnt)
return grouped_features, grouped_xyz, empty_ball_mask
| PaddlePaddle/Paddle3D | paddle3d/models/common/pointnet2_stack/voxel_query_utils.py | voxel_query_utils.py | py | 3,663 | python | en | code | 479 | github-code | 50 |
18191728823 | import json
import time
import boto3
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('JobTable')
s3 = boto3.client('s3', region_name='us-east-1')
sqs = boto3.resource('sqs', region_name='us-east-1')
queue = sqs.get_queue_by_name(QueueName='JobQueue')
JOB_SIZE = 3
bucket_name = 'render-files-bucket'
def validate_input(filename, start_frame, end_frame):
if len(filename) < 7 or filename[-6:] != '.blend':
return {
'statusCode': 400,
'body': 'Bad Request: File must be a Blender .blend file.'
}
elif not start_frame.isnumeric() or not end_frame.isnumeric():
return {
'statusCode': 400,
'body': 'Bad Request: Start and end frames must be integers.'
}
elif int(start_frame) > int(end_frame):
return {
'statusCode': 400,
'body': 'Bad Request: Start frame must be less than end frame.'
}
else:
return None
def file_in_s3(filename):
keys = s3.list_objects_v2(Bucket=bucket_name, Prefix=filename)
return 'Contents' in keys
def split_job(filename, start_frame, end_frame):
    # Frame ranges are computed relative to the requested start frame
    start, end = int(start_frame), int(end_frame)
    n_frames = end - start + 1
    n_jobs = n_frames // JOB_SIZE
    jobs = []
    for i in range(n_jobs):
        s = start + i * JOB_SIZE
        e = s + JOB_SIZE - 1
        job = {'type': 'render', 'file': filename, 'start': str(s), 'end': str(e)}
        jobs.append(job)
    if n_frames % JOB_SIZE > 0:
        # Remaining frames that don't fill a whole batch
        s = start + n_jobs * JOB_SIZE
        job = {'type': 'render', 'file': filename, 'start': str(s), 'end': str(end)}
        jobs.append(job)
    return jobs
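# Illustrative example: split_job('scene.blend', '1', '8') with JOB_SIZE = 3
# yields the frame ranges 1-3, 4-6 and 7-8 as three render messages.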
def submit_render_job(filename, start_frame, end_frame):
# Check that the file is actually in S3
if not file_in_s3(filename):
return {
'statusCode': 500,
'body': 'Internal Server Error: Blender file not found in S3.'
}
# Split job into batch jobs
jobs = split_job(filename, start_frame, end_frame)
batches = dict()
# Send jobs to workers via SQS queue
for job in jobs:
queue.send_message(MessageBody=json.dumps(job))
batches[job['start'] + '-' + job['end']] = 'Processing'
# Upload a job spec to DynamoDB
response = table.put_item(
Item={
'file': filename,
'range': start_frame + '-' + end_frame,
'job_status': 'Waiting',
'batches': batches
}
)
# Return the status
return {
'statusCode': 200,
'body': f'Submitted render job at time {time.time()}.'
}
def get_status(query_result):
if query_result['job_status'] == 'Finished':
return {
'statusCode': 200,
'body': 'Sequencing complete: MP4 file ready in S3'
}
batches = query_result['batches']
complete_batches = sum([1 if status == 'Complete' else 0 for status in batches.values()])
percent_render = (complete_batches / len(batches)) * 100
return {
'statusCode': 200,
'body': '%d/%d batches rendered (%.2f%%)' % (complete_batches, len(batches), percent_render)
}
def lambda_handler(event, context):
# Extract the request data
filename = str(event['file'])
start_frame = str(event['start'])
end_frame = str(event['end'])
# Validate the input
result = validate_input(filename, start_frame, end_frame)
if result != None:
return result
# Check to see if the given file is being processed
response = table.get_item(Key={'file': filename})
if not 'Item' in response:
result = submit_render_job(filename, start_frame, end_frame)
else:
result = get_status(response['Item'])
return result
| PaoloMura/render-farm | server/lambda_server.py | lambda_server.py | py | 3,714 | python | en | code | 0 | github-code | 50 |
21217707169 | import rhinoscriptsyntax as rs
import random
rs.EnableRedraw(True)
#definition for placing random points in x,y, and z ranges
def placePt(x_range,y_range,z_range):
x = random.uniform(0,x_range)
y = random.uniform(0,y_range)
z = random.uniform(0,z_range)
pt = [x,y,z]
return pt
#initializing first point, making empty pts array, appending first point to pts array
pts = []
ptZero = (0,0,0)
pts.append(ptZero)
#initializing lines array so the lines can be filleted later
lines = []
for i in range(10):
pt = rs.AddPoint(placePt(100,100,100))
pts.append(pt)
    #draw a line from the previous point to the new point on every pass
    line = rs.AddLine(pts[i-1],pts[i])
    lines.append(line)
#if i >= 2:
# fillet = rs.AddFilletCurve(lines[i-1],lines[i])
| wloka-1/python | line to follow next points.py | line to follow next points.py | py | 736 | python | en | code | 0 | github-code | 50 |
30265484508 | import os
import pprint
os.system('clear')
ruta = '/home/teo/codigo/curso_21_22/viernes/programacion/python/funciones_miercoles.txt'
dic_salida = {}
def modo1():
clave = 0
# Leer archivo
with open(ruta) as archivo:
for l in archivo:
#Procesar fila a fila
fila = l[:-1:].split(',')
#Recorrer lista y llenar dict
for nombre in fila:
dic_salida[clave] = nombre
clave += 1
def modo2():
clave = 0
texto = open(ruta,'r').readlines()
for archivo in texto:
dic_salida[clave] = archivo
clave += 1
return dic_salida
pprint.pprint(modo2())
| teo-core/curso_21_22 | fichero_a_dict.py | fichero_a_dict.py | py | 674 | python | es | code | 3 | github-code | 50 |
14189568262 | import pygame
import time
import random
pygame.init()
screenWidth=800
screenHeight=600
window=pygame.display.set_mode([screenWidth,screenHeight])
pygame.display.set_caption("ENDLESS")
black=(0,0,0)
green=(0,255,0)
blue=(0,0,255)
red=(255,0,0)
runRight=[pygame.image.load('sonic run 1 flip.gif'),pygame.image.load('sonic run 2 flip.gif'),pygame.image.load('sonic run 3 flip.gif'),pygame.image.load('sonic run 4 flip.gif'),pygame.image.load('sonic run 5 flip.gif'),pygame.image.load('sonic run 6 flip.gif')]
Jump=[pygame.image.load('Spin1.gif'),pygame.image.load('Spin2.gif'),pygame.image.load('Spin3.gif'),pygame.image.load('Spin4.gif')]
Fireball=[pygame.image.load('fireball1.png'),pygame.image.load('fireball2.png'),pygame.image.load('fireball3.png'),pygame.image.load('fireball4.png'),pygame.image.load('fireball5.png')]
saw=[pygame.image.load('saw1.png'),pygame.image.load('saw2.png'),pygame.image.load('saw3.png'),pygame.image.load('saw4.png'),pygame.image.load('saw5.png'),pygame.image.load('saw6.png'),pygame.image.load('saw7.png')]
Axe=[pygame.image.load('axe1.png'),pygame.image.load('axe2 s.png'),pygame.image.load('axe2.png'),pygame.image.load('axe3 s.png'),pygame.image.load('axe3.png'),pygame.image.load('axe4 s.png'),pygame.image.load('axe4.png'),pygame.image.load('axe5 s.png'),pygame.image.load('axe5.png'),pygame.image.load('axe5 s.png'),pygame.image.load('axe4.png'),pygame.image.load('axe4 s.png'),pygame.image.load('axe3.png'),pygame.image.load('axe3 s.png'),pygame.image.load('axe2.png'),pygame.image.load('axe2 s.png')]
target=[pygame.image.load('Meteor explosion1.png'),pygame.image.load('Meteor explosion2.png'),pygame.image.load('Meteor explosion3.png'),pygame.image.load('Meteor explosion4.png'),pygame.image.load('Meteor explosion5.png'),pygame.image.load('Meteor explosion6.png'),pygame.image.load('Meteor explosion7.png'),pygame.image.load('Meteor explosion8.png'),pygame.image.load('Meteor explosion9.png'),pygame.image.load('Meteor explosion10.png'),pygame.image.load('Meteor explosion11.png'),pygame.image.load('Meteor explosion12.png'),pygame.image.load('Meteor explosion13.png'),pygame.image.load('Meteor explosion14.png'),pygame.image.load('Meteor explosion15.png'),pygame.image.load('Meteor explosion16.png'),pygame.image.load('Meteor explosion17.png')]
Meteor=[pygame.image.load('Meteor1.png'),pygame.image.load('Meteor2.png'),pygame.image.load('Meteor3.png'),pygame.image.load('Meteor4.png'),pygame.image.load('Meteor5.png'),pygame.image.load('Meteor6.png'),pygame.image.load('Meteor7.png')]#,pygame.image.load('Meteor8.png')]
Spikes=[pygame.image.load('spikes.png')]
bg1=pygame.image.load('Green_Hill_Zone2.png')
Keys=pygame.image.load('Key.png')
bg3=[pygame.image.load('snow1.png'),pygame.image.load('snow2.png'),pygame.image.load('snow3.png'),pygame.image.load('snow4.png'),pygame.image.load('snow5.png')]
bg1=pygame.transform.scale(bg1,(screenWidth+screenWidth+screenWidth+screenWidth+screenWidth-48,screenHeight))
CurrentScore=0
Stats=open("TEST SCORE.txt","r")
lines=Stats.readlines()
Stats.close()
# Each stats line has the form "Label: value ", so take the value after ':'
TimesPlayed=int(lines[1].split(':')[1])
if TimesPlayed>=1:
    HighScore=int(lines[0].split(':')[1])
    KeysCollected=int(lines[2].split(':')[1])
    # play time is stored in seconds (frames/15); convert back to frame ticks
    PlayTime=int(float(lines[3].split(':')[1])*15)
    print(TimesPlayed,"TIMES PLAYED TEST 2")
else:
    HighScore=0
    KeysCollected=0
    PlayTime=0
X=300
Y=380
playerposX=300
clock=pygame.time.Clock()
isJump=False
jumpCount=10
Run=True
slide=True
FireBall=True
Snow=True
spin=True
KEY=True
AXE=True
METEOR=True
walkCount=0
snowCount=0
spinCount=0
slideCount=0
fbCount=0
axeCount=0
meteorCount=0
targetCount=0
explosionCount=0
i=0
n=0
obstaclemove=-500
obstaclemovefb=-500
axemove=-500
keymove=-500
meteormoveX= 800
meteormoveY= 0
def retry():
global obstaclemove,obstaclemovefb,axemove,meteormoveY,meteormoveX,meteorCount,X,Y,Run,isJump,jumpCount,playerposX,KeysCollected,CurrentScore,HighScore,TimesPlayed,PlayTime,keymove
isJump=False
Run=False
X=300
Y=380
playerposX=0
meteormoveY=-500
meteormoveX=-500
obstaclemove=-500
obstaclemovefb=-500
keymove=-500
axemove=-500
pygame.font.init()
header=pygame.font.SysFont(None,140)
font=pygame.font.SysFont(None,55)#25 is the size of the font
Score=pygame.font.SysFont(None,55)
header_text=header.render('GAME OVER',False,(255,0,0))
Score_text=Score.render(('SCORE: '+str(CurrentScore)),False,(255,0,0))
HScore_text=Score.render(('HIGH SCORE: '+str(HighScore)),False,(255,0,0))
screen_textRetry=font.render('PRESS c TO PLAY AGAIN OR q TO QUIT',False,(255,0,0))
gameOver=True
while gameOver:
window.fill(black)
window.blit(header_text,(100,screenHeight/8))
window.blit(Score_text,(40,screenHeight/3))
window.blit(HScore_text,(485,screenHeight/3))
window.blit(screen_textRetry,(40,screenHeight/2))
pygame.display.update()
Stats=open("TEST SCORE.txt","w+")
Stats.write("HighScore: "+str(HighScore)+" "+"\n")
Stats.write("Number of times played: "+str(TimesPlayed)+" "+"\n")
Stats.write("Number of Keys collected: "+str(KeysCollected)+" "+"\n")
Stats.write("Total Play Time: "+str(PlayTime/15)+" "+"\n")
Stats.close()
for event in pygame.event.get():
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_q:
pygame.quit()
quit()
                if event.key==pygame.K_c:
CurrentScore=0
Run=True
gameStart=True
jumpCount=10
gameLoop()
def gameLoop():
    global isJump,Run,Y,X,jumpCount,i,n,obstaclemove,obstaclemovefb,axemove,meteormoveY,meteormoveX,playerposX,CurrentScore,HighScore,KeysCollected,PlayTime,TimesPlayed
def obstacle():
global obstaclemove,obstaclemovefb,meteormoveY,meteormoveX,meteorCount,axemove,spinCount,fbCount,axeCount,targetCount,explosionCount,HighScore,TimesPlayed,KEY,keymove,KeysCollected
if spinCount +1>=7:
spinCount=0
if spin:
Saw=window.blit(saw[spinCount//2],(obstaclemove,400))
spinCount +=1
if fbCount +1>=4:
fbCount=0
if FireBall:
fireball=window.blit(Fireball[fbCount//2],(obstaclemovefb,200))
fbCount +=1
if KEY:
window.blit(Keys,(keymove,400))
if axeCount +1>=16:
axeCount=0
if AXE:
            axe=window.blit(Axe[axeCount],(axemove,-15))
axeCount +=1
if meteorCount +1>8:
meteorCount=0
if targetCount +1>16:
targetCount=0
if METEOR:
            window.blit(Spikes[0],(obstaclemove,400))
meteor=window.blit(Meteor[meteorCount//2],(meteormoveX,meteormoveY))
            Target=window.blit(target[targetCount],(210,400))
targetCount +=1
meteorCount +=1
if meteormoveY>=380 and meteormoveX<=300:
meteorCount=8
if meteorCount +1>10:
meteorCount +=1
if meteorCount==10:
meteormoveY=0
meteormoveX=800
obstaclemove=obstaclemove-1
obstaclemovefb=obstaclemovefb-100
keymove=keymove-50
axemove=axemove-20
meteormoveX=meteormoveX-59
meteormoveY=meteormoveY+60
if screenWidth+obstaclemove<=0:
obstacleplacement=random.randint(screenWidth,screenWidth+screenWidth)
obstaclemove=obstacleplacement
if obstaclemovefb<=320 and obstaclemovefb>=220 and Y<=233 or obstaclemove<=330 and obstaclemove>=220 and Y<=400 and Y>=380:
TimesPlayed=int(TimesPlayed)
TimesPlayed=TimesPlayed+1
print(TimesPlayed)
if CurrentScore>HighScore:
HighScore=CurrentScore
retry()
if keymove<=330 and 290<=keymove and Y>=380:
KeysCollected=int(KeysCollected)
KeysCollected=KeysCollected+1
keymove=-500
obstaclemove=obstaclemove-50
while screenWidth+obstaclemove<=0 and screenWidth+obstaclemovefb<=0 and screenWidth+keymove<=0 and screenWidth+axemove<=0:
obstacleplacement=random.randint(screenWidth,screenWidth+screenWidth)
obstaclemove=obstacleplacement
obstacleplacementfb=random.randint(screenWidth,screenWidth+screenWidth)
obstaclemovefb=obstacleplacementfb
keyplacement=random.randint(screenWidth,screenWidth+500)
keymove=keyplacement
axeplacement=random.randint(screenWidth,screenWidth)
axemove=axeplacement
meteorplacementX=800
meteorplacementY=0
meteormoveX=meteorplacementX
meteormoveY=meteorplacementY
def BackgroundChange0():
global n
window.blit(bg1,[n,0])
n=n-15
window.blit(bg1,[screenWidth+screenWidth+screenWidth+screenWidth+screenWidth+n,0])
n=n-50
if screenWidth+screenWidth+screenWidth+screenWidth+screenWidth+n<=0:
n=0
def redrawGameWindow():
global walkCount,jumpCount,i,Slide,playerposX
if walkCount +1>=12:
walkCount=0
if Run:
window.blit(runRight[walkCount//2],(playerposX,380))
walkCount +=1
else:
window.blit(Jump[i],(X,Y))
pygame.display.update()
gameStart=True
gameRun=False
while gameStart:
playerposX=0
while playerposX<300:
playerposX=playerposX+5
redrawGameWindow()
BackgroundChange0()
clock.tick(15)
else:
gameStart=False
gameRun=True
while gameRun:
PlayTime=PlayTime+1
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                gameRun=False
            elif event.type==pygame.KEYDOWN and event.key==pygame.K_c:
                gameLoop()
keys= pygame.key.get_pressed()
if not (isJump):
if keys[pygame.K_SPACE]:
isJump=True
Run=False
Slide=False
walkCount=0
else:
if jumpCount>=-10:
neg=1
if jumpCount<0:
neg=-1
Y -=(jumpCount**2)*0.5*neg
i=i+1
if i>=4:
i=0
jumpCount -=1
else:
isJump=False
Run=True
Slide=False
jumpCount=10
i=0
clock.tick(15)
redrawGameWindow()
BackgroundChange0()
obstacle()
HighScore=int(HighScore)
TimesPlayed=int(TimesPlayed)
print(TimesPlayed,"TIMES PLAYED TEST 9")
if n==0:
CurrentScore=CurrentScore+1
if CurrentScore>HighScore:
HighScore=CurrentScore
Stats=open("TEST SCORE.txt","w+")
Stats.write("HighScore: "+str(HighScore)+" "+"\n")
Stats.write("Number of times played: "+str(TimesPlayed)+" "+"\n")
Stats.write("Number of Keys collected: "+str(KeysCollected)+" "+"\n")
Stats.write("Total Play Time: "+str(PlayTime/15)+" "+"\n")
Stats.close()
pygame.quit()
quit()
gameLoop()
| TDAF2509/Python-Libraries | PYTHON/Endless for my own testing/Older versions/endless adding keys.py | endless adding keys.py | py | 12,577 | python | en | code | 0 | github-code | 50 |
74562549275 | #
# @lc app=leetcode.cn id=783 lang=python3
#
# [783] Minimum Distance Between BST Nodes (二叉搜索树节点最小距离)
#
from sbw import *  # local helper module; assumed to provide TreeNode and Optional
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def minDiffInBST(self, root: Optional[TreeNode]) -> int:
vals=[]
def traverse(node):
if not node:
return
traverse(node.left)
vals.append(node.val)
traverse(node.right)
traverse(root)
ret=10**5
for i in range(1,len(vals)):
ret=min(ret,vals[i]-vals[i-1])
if ret==0:
return 0
return ret
# @lc code=end
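# Inorder traversal of a BST yields values in sorted order, so the minimum
# absolute difference must occur between adjacent entries of `vals`; the
# solution runs in O(n) time with O(n) extra space.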
| StBinge/leetcode | 783.二叉搜索树节点最小距离.py | 783.二叉搜索树节点最小距离.py | py | 789 | python | en | code | 0 | github-code | 50 |
42246359724 |
from geoip import geolite2
from socket import *
from googlesearch import *
from hashlib import *
import pyfiglet
from termcolor import colored
import os
from time import sleep
screen=pyfiglet.figlet_format('No System Is Safe ')
print('''
Follow Me in Telegram https://t.me/System_Hac
**************************************************
''')
print(screen)
print('\033[34m','''
[1] Locate IP
[2] Get Dns From IP
[3] Get IP from Dns
[4] Get Port From Number
[5] Search in Dorks about Sqli
[6] hash
''')
user=input(' Choice Number > ')
def locate():
if user =="1":
os.system('clear')
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(2)
user1=input("Enter a {Victim IP} > ")
ip=geolite2.lookup(user1)
print(ip)
locate()
def dns():
if user=="2":
os.system('clear')
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
sleep(2)
        user2=input("Enter a {IP} > ")
        get=gethostbyaddr(user2)
        print(get)
dns()
def IP():
if user =="3":
os.system('clear')
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
sleep(2)
user3=input("Enter a {DNS} > ")
GETIP=gethostbyname(user3)
print(GETIP)
IP()
def port():
if user =="4":
os.system('clear')
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(2)
user4=input("Enter a Number > ")
port_num=getservbyport(int(user4))
print(port_num)
port()
def sqli():
if user =="5":
os.system('clear')
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(1)
print('\033[31m','Loading ... ')
sleep(2)
for i in search("php?id="):
print('>>>>>>>',i)
sqli()
def hashm():
if user =="6":
os.system('clear')
user6=input(" Enter a Text :> " )
sleep(1)
print('\033[31m','Now You see Encryption ! ')
sleep(1)
print('\033[36m','Choose ($!$)')
sleep(2)
print('''
[1] md5
[2] sha1
[3] sha224
[4] sha256
[5] sha384
[6] sha512
[7] blake2b
[8] blake2s
''')
user9=input(" Choose any Encryption type : ")
if user9 =="1":
MD5=md5(user6.encode()).hexdigest()
print(MD5)
if user9 =="2":
SHa1=sha1(user6.encode()).hexdigest()
print(SHa1)
if user9=="3":
SHA224=sha224(user6.encode()).hexdigest()
print(SHA224)
if user9=="4":
SHA256=sha256(user6.encode()).hexdigest()
print(SHA256)
if user9=="5":
SHA384=sha384(user6.encode()).hexdigest()
print(SHA384)
if user9=="6":
SHA512=sha512(user6.encode()).hexdigest()
print(SHA512)
if user9=="7":
bLAKE2b=blake2b(user6.encode()).hexdigest()
print(bLAKE2b)
if user9=="8":
BLAKE2s=blake2s(user6.encode()).hexdigest()
print(BLAKE2s)
hashm()
| systemhacked1/system-info-networkp-ip | oneinall.py | oneinall.py | py | 3,982 | python | en | code | 1 | github-code | 50 |
36118534420 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
from ckip_transformers import __version__
from ckip_transformers.nlp import CkipWordSegmenter, CkipPosTagger, CkipNerChunker
def main():
# Show version
print(__version__)
# Initialize drivers
print("Initializing drivers ... WS")
ws_driver = CkipWordSegmenter(model="bert-base")
print("Initializing drivers ... POS")
pos_driver = CkipPosTagger(model="bert-base")
print("Initializing drivers ... NER")
ner_driver = CkipNerChunker(model="bert-base")
print("Initializing drivers ... done")
print()
# Input text
text = [
"傅達仁今將執行安樂死,卻突然爆出自己20年前遭緯來體育台封殺,他不懂自己哪裡得罪到電視台。",
"美國參議院針對今天總統布什所提名的勞工部長趙小蘭展開認可聽證會,預料她將會很順利通過參議院支持,成為該國有史以來第一位的華裔女性內閣成員。",
"空白 也是可以的~",
]
# Run pipeline
print("Running pipeline ... WS")
ws = ws_driver(text)
print("Running pipeline ... POS")
pos = pos_driver(ws)
print("Running pipeline ... NER")
ner = ner_driver(text)
print("Running pipeline ... done")
print()
# Show results
for sentence, sentence_ws, sentence_pos, sentence_ner in zip(text, ws, pos, ner):
print(sentence)
print(pack_ws_pos_sentece(sentence_ws, sentence_pos))
for entity in sentence_ner:
print(entity)
print()
# Pack word segmentation and part-of-speech results
def pack_ws_pos_sentece(sentence_ws, sentence_pos):
assert len(sentence_ws) == len(sentence_pos)
res = []
for word_ws, word_pos in zip(sentence_ws, sentence_pos):
res.append(f"{word_ws}({word_pos})")
return "\u3000".join(res)
if __name__ == "__main__":
main()
| ckiplab/ckip-transformers | example/example.py | example.py | py | 1,900 | python | en | code | 573 | github-code | 50 |
14894536637 | # Old table parsing info
# 20120614: removed pp distance to next player
# 20140209: removed score rank
from bs4 import BeautifulSoup
import os
import csv
SNAPSHOTS_DIR = "snapshots"
# helpers for locale-dependent number formatting
def clean_int(s):
    # strip thousands separators (',', '.', non-breaking space)
    return int(''.join(c for c in s if c not in ",.\xa0"))
def clean_float(s):
return float(s.replace('\xa0', '').replace('%', '').replace(',', '.'))
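# Quick illustration of the cleaners above (values are made up):
# clean_int("1,234,567") -> 1234567; clean_float("98,76%") -> 98.76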
def extract_table_info(html_doc, date_str):
soup = BeautifulSoup(html_doc, "lxml")
result = []
# old site table
table_soup = soup.find("table", class_="beatmapListing")
if table_soup:
table_rows = table_soup.find_all("tr")
table_header, player_rows = table_rows[0], table_rows[1:]
# unused for now
table_has_score_rank = "Score" in table_header.get_text()
for player_row in player_rows:
#print(player_row)
row_dict = dict()
row_dict["date"] = date_str
# Get two letter country code from flag img url
img_flag = player_row.find("img", class_="flag")
img_url = img_flag["src"]
assert img_url.endswith(".gif")
row_dict["country"] = img_url.split('/')[-1][:-4]
player_url = player_row.find("a")["href"]
row_dict["username"] = player_row.find("a").text
row_dict["id"] = int(player_url.split('/')[-1])
cells = player_row.find_all("td")
row_dict["rank"] = int(cells[0].get_text()[1:])
row_dict["accuracy"] = clean_float(cells[2].get_text())
# playcount and level
player_row_pl = cells[3].get_text().split(' ')
row_dict["playcount"] = clean_int(player_row_pl[0])
pp_text = cells[4].get_text().strip()
row_dict["pp"] = clean_int(pp_text.split("pp")[0])
result.append(row_dict)
return result
# new site table
table_soup = soup.find("table", class_="ranking-page-table")
if table_soup:
table_rows = table_soup.find_all("tr")
player_rows = table_rows[1:]
for player_row in player_rows:
row_dict = dict()
row_dict["date"] = date_str
cells = player_row.find_all("td")
row_dict["rank"] = int(cells[0].get_text().strip()[1:])
# Starting from 20180317, country is also linked in cell
player_url = cells[1].find_all("a")[-1]["href"]
row_dict["id"] = int(player_url.split('/')[-1])
flag_span = cells[1].find("span", class_="flag-country")
flag_style = flag_span["style"]
str_idx = flag_style.find(".png")
row_dict["country"] = flag_style[str_idx-2:str_idx].lower()
row_dict["username"] = cells[1].get_text().strip()
row_dict["accuracy"] = clean_float(cells[2].get_text().strip())
row_dict["playcount"] = clean_int(cells[3].get_text().strip())
row_dict["pp"] = clean_int(cells[4].get_text().strip())
result.append(row_dict)
return result
# Somehow not new site or old site
raise Exception("Can't find table")
def main():
all_rows = []
for snapshot_filename in sorted(os.listdir(SNAPSHOTS_DIR)):
date_str = os.path.splitext(snapshot_filename)[0]
#if date_str < "20170617": continue # testing
snapshot_path = os.path.join(SNAPSHOTS_DIR, snapshot_filename)
print(snapshot_path)
with open(snapshot_path) as f:
html_doc = f.read()
extracted_rows = extract_table_info(html_doc, date_str)
all_rows.extend(extracted_rows)
col_names = ("date", "rank", "country", "username", "id", "accuracy",
"playcount", "pp")
with open("player_history.csv", 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=col_names)
writer.writeheader()
writer.writerows(all_rows)
if __name__ == "__main__":
main() | jxu/osu-player-history | extract_table.py | extract_table.py | py | 3,970 | python | en | code | 1 | github-code | 50 |
33020146610 | #!/usr/bin/env python3
#
# A lot of this code is based on one of these two projects:
# https://github.com/kumina/python_container_demo_app
# https://github.com/yurishkuro/opentracing-tutorial/tree/master/python
import os
import sys
import http.server
import prometheus_client
import json
import signal
import threading
import logging
import requests
from jaeger_client import Config
from opentracing.ext import tags
from opentracing.propagation import Format
# This is for Prometheus metrics
REQUEST_LATENCY = prometheus_client.Histogram(
'router_request_latency_seconds',
'Time it took to process incoming HTTP requests, in seconds.')
BACKEND_LATENCY = prometheus_client.Histogram(
'router_backend_latency_seconds',
'Time spent waiting on backends to respond to requests, in seconds.')
class Router(http.server.BaseHTTPRequestHandler):
@REQUEST_LATENCY.time()
def do_GET(s):
try:
span_ctx = s.server.tracer.extract(Format.HTTP_HEADERS, dict(s.headers))
except:
span_ctx = None
with s.server.tracer.start_active_span('handle_request', child_of=span_ctx) as scope:
scope.span.log_kv({'event': 'request-response', 'path': s.path})
if s.path == '/healthz/live':
s.liveness_check()
elif s.path == '/healthz/ready':
s.readiness_check()
else:
s.default_response()
def get_remote(s, service):
url = 'http://%s/' % (service)
span = s.server.tracer.active_span
span.set_tag(tags.HTTP_METHOD, 'GET')
span.set_tag(tags.HTTP_URL, url)
span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)
headers = {}
        s.server.tracer.inject(span, Format.HTTP_HEADERS, headers)
r = requests.get(url, headers=headers)
assert r.status_code == 200
return r.text
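    # The inject() call in get_remote populates `headers` with the trace
    # context that the downstream service extracts in do_GET(); with Jaeger's
    # default propagation this is an 'uber-trace-id' header, while the b3
    # setting used for Istio (see main) produces X-B3-* style headers.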
def default_response(s):
with s.server.tracer.start_active_span('prepare_headers') as scope:
s.send_response(200)
s.send_header('Content-Type', 'text/html')
s.end_headers()
with s.server.tracer.start_active_span('get_backend_data') as scope:
year = s.get_remote('year')
scope.span.log_kv({'event': 'get-year', 'year': str(year)})
month = s.get_remote('month')
scope.span.log_kv({'event': 'get-month', 'month': str(month)})
day = s.get_remote('day')
scope.span.log_kv({'event': 'get-day', 'day': str(day)})
hour = s.get_remote('hour')
scope.span.log_kv({'event': 'get-hour', 'hour': str(hour)})
minute = s.get_remote('minute')
scope.span.log_kv({'event': 'get-minute', 'minute': str(minute)})
second = s.get_remote('second')
scope.span.log_kv({'event': 'get-second', 'second': str(second)})
cur_date = '%s-%s-%s' % (day, month, year)
cur_time = '%s:%s:%s' % (hour, minute, second)
scope.span.log_kv({'event': 'format-timestamps', 'date': cur_date, 'time': cur_time})
with s.server.tracer.start_active_span('write_response') as scope:
s.wfile.write(b'''
<!DOCTYPE html>
<html>
<head>
<title>Distributed Time</title>
</head>
<body>
<h1>Distributed Time</h1>
<p>The current date is: %s</p>
<p>The current time is: %s</p>
</body>
</html>''' % (str.encode(cur_date), str.encode(cur_time)))
def liveness_check(s):
with s.server.tracer.start_active_span('liveness_check') as scope:
s.send_response(200)
s.send_header('Content-Type', 'text/html')
s.end_headers()
s.wfile.write(b'''Ok.''')
def readiness_check(s):
with s.server.tracer.start_active_span('readiness_check') as scope:
scope.span.log_kv({'event': 'check-if-ready', 'ready.var': str(s.server.ready)})
if s.server.ready:
s.send_response(200)
s.send_header('Content-Type', 'text/plain')
s.end_headers()
s.wfile.write(b'''Ok.''')
else:
# The actual response does not really matter, as long as it's not
# a HTTP 200 status.
s.send_response(503)
s.send_header('Content-Type', 'text/plain')
s.end_headers()
s.wfile.write(b'''Not ready yet.''')
def log_message(self, format, *args):
with self.server.tracer.start_active_span('log') as scope:
log = { 'router':
{
'client_ip': self.address_string(),
'timestamp': self.log_date_time_string(),
'message': format%args
}
}
print(json.dumps(log))
if __name__ == '__main__':
listen_port = int(os.getenv('LISTEN_PORT', 80))
prom_listen_port = int(os.getenv('PROM_LISTEN_PORT', 8080))
prometheus_client.start_http_server(prom_listen_port)
jaeger_agent_host = os.getenv('JAEGER_AGENT_HOST', 'localhost')
istio = int(os.getenv('ISTIO', 0))
logging.getLogger('').handlers = []
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
# Set up the tracer
cfg = {
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
'local_agent': {
'reporting_host': jaeger_agent_host,
},
}
if istio > 0:
cfg['propagation'] = 'b3'
config = Config(
config=cfg,
service_name='router',
validate=True,
)
# this call also sets opentracing.tracer
tracer = config.initialize_tracer()
httpd = http.server.HTTPServer(('0.0.0.0', listen_port), Router)
httpd.ready = True
httpd.tracer = tracer
# Simple handler function to show that we we're handling the SIGTERM
def do_shutdown(signum, frame):
global httpd
log = { 'router': { 'message': 'Graceful shutdown.' } }
print(json.dumps(log))
threading.Thread(target = httpd.shutdown).start()
sys.exit(0)
signal.signal(signal.SIGTERM, do_shutdown)
# Forever serve requests. Or at least until we receive the proper signal.
httpd.serve_forever()
| kumina/jaeger-demo | time-app/router.py | router.py | py | 6,541 | python | en | code | 0 | github-code | 50 |
74248898074 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
import struct
from .base import CharsetBase
class UTF8(CharsetBase):
title = 'CODE TABLE OF UTF-8'
description = [
u"UTF-8(8-bit Unicode Transformation Format)是一种针对Unicode的可变长度字符编码,也是一种前缀码。它可以用来表示Unicode标准中的任何字符,且其编码中的第一个字节仍与ASCII兼容,这使得原来处理ASCII字符的软件无须或只须做少部分修改,即可继续使用。因此,它逐渐成为电子邮件、网页及其他存储或发送文字的应用中,优先采用的编码。",
u'UTF-8使用一至六个字节为每个字符编码(尽管如此,2003年11月UTF-8被RFC 3629重新规范,只能使用原来Unicode定义的区域,U+0000到U+10FFFF,也就是说最多四个字节)',
]
detail = [
u'128个US-ASCII字符只需一个字节编码(Unicode范围由U+0000至U+007F)。ASCII字符范围,字节由零开始,0zzzzzzz(00-7F)',
u'带有附加符号的拉丁文、希腊文、西里尔字母、亚美尼亚语、希伯来文、阿拉伯文、叙利亚文及它拿字母则需要两个字节编码(Unicode范围由U+0080至U+07FF)。第一个字节由110开始,接着的字节由10开始,110yyyyy(C0-DF) 10zzzzzz(80-BF)',
u'其他基本多文种平面(BMP)中的字符(这包含了大部分常用字,如大部分的汉字)使用三个字节编码(Unicode范围由U+0800至U+FFFF)。第一个字节由1110开始,接着的字节由10开始,1110xxxx(E0-EF) 10yyyyyy 10zzzzzz',
u'其他极少使用的Unicode 辅助平面的字符使用四至六字节编码(Unicode范围由U+10000至U+1FFFFF使用四字节,Unicode范围由U+200000至U+3FFFFFF使用五字节,Unicode范围由U+4000000至U+7FFFFFFF使用六字节)。将由11110开始,接着的字节由10开始,11110www(F0-F7) 10xxxxxx 10yyyyyy 10zzzzzz',
u'对上述提及的第四种字符而言,UTF-8使用四至六个字节来编码似乎太耗费资源了。但UTF-8对所有常用的字符都可以用三个字节表示,而且它的另一种选择,UTF-16编码,对前述的第四种字符同样需要四个字节来编码,所以要决定UTF-8或UTF-16哪种编码比较有效率,还要视所使用的字符的分布范围而定。',
u'对于UTF-8编码中的任意字节B,如果B的第一位为0,则B独立的表示一个字符(ASCII码);',
u'如果B的第一位为1,第二位为0,则B为一个多字节字符中的一个字节(非ASCII字符);',
u'如果B的前两位为1,第三位为0,则B为两个字节表示的字符中的第一个字节;',
u'如果B的前三位为1,第四位为0,则B为三个字节表示的字符中的第一个字节;',
u'如果B的前四位为1,第五位为0,则B为四个字节表示的字符中的第一个字节;',
u'因此,对UTF-8编码中的任意字节,根据第一位,可判断是否为ASCII字符;根据前二位,可判断该字节是否为一个字符编码的第一个字节;根据前四位(如果前两位均为1),可确定该字节为字符编码的第一个字节,并且可判断对应的字符由几个字节表示;根据前五位(如果前四位为1),可判断编码是否有错误或数据传输过程中是否有错误。',
]
wiki = 'https://zh.wikipedia.org/wiki/UTF-8'
encoding = 'utf-8'
define = {
'panel0': {
'desc': ['One Bytes'],
'range': (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f),
},
'panel1': {
'desc': ['Two Bytes'],
'range': (0x00, 0x00, 0x00, 0x00, 0xc0, 0xdf, 0x80, 0xbf),
},
'panel2': {
'desc': ['Three Bytes'],
'range': (0x00, 0x00, 0xe0, 0xef, 0x80, 0xbf, 0x80, 0xbf),
},
'panel3': {
'desc': ['Four Bytes'],
'range': (0xf0, 0xf7, 0x80, 0xbf, 0x80, 0xbf, 0x80, 0xbf),
},
}
category = {
'panel0': [
{
'name': 'Control',
'range': ((0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f),),
'desc': 'Control',
},
{
'name': 'Text',
'range': ((0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x7f),),
'desc': 'Text',
},
],
}
def __init__(self, errors=None):
super(UTF8, self).__init__(errors=errors)
def get_category(self, b_code):
code, = struct.unpack('>L', b_code)
f = code >> 24 & 0xff
s = code >> 16 & 0xff
t = code >> 8 & 0xff
fo = code & 0xff
for k, v in self.category.items():
for vv in v:
for f1, f2, s1, s2, t1, t2, fo1, fo2 in vv['range']:
if f1 <= f <= f2 and s1 <= s <= s2 and t1 <= t <= t2 and fo1 <= fo <= fo2:
return vv['name']
return None
def do_panel_as_html(self, define, err_ch):
f1, f2, s1, s2, t1, t2, fo1, fo2 = define['range']
if f1 == f2 == s1 == s2 == t1 == t2 == 0x0:
return self.panel_as_html_1B(define, err_ch)
elif f1 == f2 == s1 == s2 == 0x0:
return self.panel_as_html_2B(define, err_ch)
elif f1 == f2 == 0x0:
return self.panel_as_html_3B(define, err_ch)
else:
return self.panel_as_html_4B(define, err_ch)
return []
def panel_as_html_1B(self, define, err_ch):
html = []
m1, m2, n1, n2, x1, x2, y1, y2 = define['range']
x1, x2 = (y1 >> 4 & 0xf, y2 >> 4 & 0xf)
y1, y2 = (y1 & 0xf, y2 & 0xf)
html.append('<table>')
html.append(
'<tr>' +
'<td></td>' +
''.join(['<td>%02X</td>' % y for y in range(y1, y2 + 1)]) +
'</tr>'
)
for x in range(x1, x2 + 1):
row = []
row.append('<td>%02X</td>' % x)
for y in range(y1, y2 + 1):
b_code = struct.pack('>B', x << 4 | y)
ch = b_code.decode(self.encoding, errors='ignore')
if not ch:
ch = err_ch
name = self.get_category(b'\x00\x00\x00' + b_code)
if name:
row.append('<td class="%s">%s</td>' % (name, ch))
else:
row.append('<td>%s</td>' % ch)
html.append('<tr>' + ''.join(row) + '</tr>')
html.append('</table>')
return html
def panel_as_html_2B(self, define, err_ch):
# two
html = []
m1, m2, n1, n2, x1, x2, y1, y2 = define['range']
html.append('<table>')
html.append(
'<tr>' +
'<td></td>' +
''.join(['<td>%02X</td>' % y for y in range(y1, y2 + 1)]) +
'</tr>'
)
for x in range(x1, x2 + 1):
row = []
row.append('<td>%02X</td>' % x)
for y in range(y1, y2 + 1):
b_code = struct.pack('>2B', x, y)
ch = b_code.decode(self.encoding, errors='ignore')
if not ch:
ch = err_ch
name = self.get_category(b'\x00\x00' + b_code)
if name:
row.append('<td class="%s">%s</td>' % (name, ch))
else:
row.append('<td>%s</td>' % ch)
html.append('<tr>' + ''.join(row) + '</tr>')
html.append('</table>')
return html
def panel_as_html_3B(self, define, err_ch):
html = []
m1, m2, x1, x2, y1, y2, z1, z2 = define['range']
for x in range(x1, x2 + 1):
html.append('<h4>%02X table</h4>' % x)
html.append('<table>')
html.append(
'<tr>' +
'<td></td>' +
''.join(['<td>%02X</td>' % z for z in range(z1, z2 + 1)]) +
'</tr>'
)
for y in range(y1, y2 + 1):
row = []
row.append('<td>%02X</td>' % y)
for z in range(z1, z2 + 1):
b_code = struct.pack('>3B', x, y, z)
ch = b_code.decode(self.encoding, errors='ignore')
if not ch:
ch = err_ch
name = self.get_category(b'\x00' + b_code)
if name:
row.append('<td class="%s">%s</td>' % (name, ch))
else:
row.append('<td>%s</td>' % ch)
html.append('<tr>' + ''.join(row) + '</tr>')
html.append('</table>')
return html
def panel_as_html_4B(self, define, err_ch):
html = []
a1, a2, x1, x2, y1, y2, z1, z2 = define['range']
for a in range(a1, a2 + 1):
for x in range(x1, x2 + 1):
html.append('<h4>%02X %02X table</h4>' % (a, x))
html.append('<table>')
html.append(
'<tr>' +
'<td></td>' +
''.join(['<td>%02X</td>' % z for z in range(z1, z2 + 1)]) +
'</tr>'
)
for y in range(y1, y2 + 1):
row = []
row.append('<td>%02X</td>' % y)
for z in range(z1, z2 + 1):
b_code = struct.pack('>4B', a, x, y, z)
ch = b_code.decode(self.encoding, errors='ignore')
if not ch:
ch = err_ch
name = self.get_category(b_code)
if name:
row.append('<td class="%s">%s</td>' % (name, ch))
else:
row.append('<td>%s</td>' % ch)
html.append('<tr>' + ''.join(row) + '</tr>')
html.append('</table>')
return html
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Output UTF-8 code table to HTML.')
parser.add_argument('--panel', help='UTF-8 encoding panel: 1, 2, 3, 4')
parser.add_argument('--code', dest='char', help='output UTF-8 code for character')
parser.add_argument('--char', dest='code', help='output character for UTF-8 code')
args = parser.parse_args()
utf8 = UTF8()
if args.char:
chars = args.char.decode('utf-8')
code = utf8.codes(chars)
print('UTF-8 code:')
print(code)
elif args.code:
if '-' in args.code:
codes = args.code.split('-')
elif ',' in args.code:
codes = args.code.split(',')
print(''.join(utf8.chars(codes)))
elif args.zone:
html = utf8.as_html(panel=args.panel)
print(html.encode('utf-8'))
else:
parser.print_help()
| liuyug/charset | charset/utf8.py | utf8.py | py | 10,972 | python | zh | code | 5 | github-code | 50 |
29900411158 | # Copyright 2017, Digi International Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Changes:
# _3.py 11/10/18 RWV - Added local storage of 64 bit address to avoid need to get it with each message.
# _4.py 11/10/18 RWV - changed to packet receive method to allow additional reception info to be exposed
# _4A.py     2/16/19  RWV - Added OLED display of RSSI
import array
from digi.xbee.devices import XBeeDevice
from datetime import timedelta
from datetime import datetime
from digi.xbee.packets.base import DictKeys
import os
import re
import signal
from time import sleep
# Globals
# Serial port where local module is connected to
PORT = "/dev/XB0"
# Baud rate of local module.
BAUD_RATE = 9600
stop_requested = False
LOGFILE = ""
version = "Rev 5A 2/16/20"
def log(caption):
global LOGFILE
now = datetime.now()
now_str = datetime.strftime(now, "%a %Y-%m-%d %H:%M:%S")
LOGFILE.write( now_str + ": " + caption + "\r\n" )
LOGFILE.flush()
def sig_handler(signum, frame):
log("handling signal: %s" % signum)
global stop_requested
stop_requested = True
def main():
device = XBeeDevice(PORT, BAUD_RATE)
global LOGFILE
global spinner
global spinstr
global stop_requested
spinner = 0
spinstr = "|/-\\"
logfile_name = "/home/pi/HHS_logfile.txt"
LOGFILE = open(logfile_name, "a+")
mypid = os.getpid()
pidfile_name = "/home/pi/HHS_pidfile.txt"
PIDFILE = open(pidfile_name, "w+")
PIDFILE.write( str(mypid) + "\n" )
PIDFILE.flush()
ver_str = " " + version + " "*(41-len(version)-1)
log(" +-----------------------------------------+")
log(" | XBee Python Library Receive Data Sample |")
log(" | w OLED disp |")
log(" |" + ver_str + "|")
log(" +-----------------------------------------+\n")
try:
device.open()
device.set_sync_ops_timeout(60)
global D1
D1 = ""
# def data_receive_callback(xbee_message):
# # print ("got interrupt!")
# D3 = ""
# global D1
# if (len(D1)==0):
# D1 = str(xbee_message.remote_device.get_64bit_addr())
# D2 = xbee_message.data.decode()
# # sleep(0.3)
# # RawRssi = device.get_parameter("DB")
# # Rssi = -1 * struct.unpack("=B", RawRssi )[0]
# # D3 = " %4.0f dBm" % Rssi
# # sleep(0.3)
# print("From %s >> %s %s" % (D1,D2,D3))
# device.add_data_received_callback(data_receive_callback)
def packet_received_callback(packet):
global spinner
global spinstr
packet_dict = packet.to_dict()
# import pdb; pdb.set_trace()
api_data = packet_dict[DictKeys.FRAME_SPEC_DATA][DictKeys.API_DATA]
data = api_data[DictKeys.RF_DATA]
            datastr = array.array('B',data).tobytes().decode('ascii')
# print ("datastr=" + datastr)
# rssi = api_data[DictKeys.RSSI]
rssi = -99
address64 = api_data[DictKeys.X64BIT_ADDR].hex()
# extract and diplay RSSI on OLED display
p = re.compile(r' ([^ ]*) dBm')
try:
last_rssi = p.search(datastr).group(1)
except AttributeError:
last_rssi = 0
log ("Dodged Attribute Error! Possible inability to match dBm search string")
# print(last_rssi)
RSSI = int(last_rssi) # convert to number
file = open("oled.txt","w")
# spinner |/-\
sc = spinstr[spinner % 4]
spinner += 1
file.write("RSSI: " + str(RSSI) + " dBm " + sc + "\n")
Xstr =""
XS = 21 + int((RSSI+30)/4)
# file.write(str(XS)+"\n")
for j in range(1,XS):
Xstr = Xstr + "*"
file.write(str(Xstr)+"\n")
file.close()
log("from: {}, RSSI: {}, Data: {}".format(address64, rssi, datastr))
device.add_packet_received_callback(packet_received_callback)
stop_requested = False
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
log("Waiting for data...\n")
        while(not stop_requested):
            sleep(2)
        log("main loop completed\n")
finally:
if device is not None and device.is_open():
device.close()
LOGFILE.close()
if __name__ == '__main__':
main()
| voreckr/HHS-Rocket-Project | ReceiveDataSample_HHS_6A.py | ReceiveDataSample_HHS_6A.py | py | 5,536 | python | en | code | 0 | github-code | 50 |
18781707691 | import Create_csv as cc
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
class ML:
def __init__(self):
pass
def preprocess(self, a, y, t):
cc.Create_csv()
df1 = pd.read_csv('/home/karan/PycharmProjects/coll_proj/data.csv')
df1.loc[df1["d_type"] == 'Deadly', 'class'] = 0
df1.loc[df1["d_type"] == 'Not deadly', 'class'] = 1
df1.drop(columns=['name', 'd_type'], inplace=True)
df1.loc[df1["disease"] == 'Stroke', 'dis'] = 2
df1.loc[df1["disease"] == 'Cancer', 'dis'] = 3
df1.loc[df1["disease"] == 'Heart Disease', 'dis'] = 4
df1.loc[df1["disease"] == 'Malaria', 'dis'] = 5
df1.loc[df1["disease"] == 'Diabetes', 'dis'] = 6
df1.loc[df1["disease"] == 'Tumor', 'dis'] = 7
df1.loc[df1["disease"] == 'Asthma', 'dis'] = 8
df1.loc[df1["disease"] == 'Diarrhea', 'dis'] = 9
df1.loc[df1["disease"] == 'Depression', 'dis'] = 10
df1.loc[df1["disease"] == 'Flu', 'dis'] = 11
df1_dis = df1['disease']
df1.drop(columns='disease', inplace=True)
        X = np.array(df1.drop(['class'], axis=1))
        Y = np.array(df1['class'])
        X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
        clf, X_train1, X_test1, y_train1, y_test1, accuracy = self.train(X_train, X_test, y_train, y_test)
        clf1, X_train, X_test, y_train, y_test, accuracy = self.train(X_train1, X_test1, y_train1, y_test1)
        # a=age, y=year, t=type
        pred = self.predict1(clf1, a, y, t)
        return pred, accuracy
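    # Illustrative call (argument values are hypothetical):
    #   pred, accuracy = ML().preprocess(a=45, y=2015, t=1)  # age, year, disease-type code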
def train(self,X_train, X_test, y_train, y_test):
clf = LogisticRegression()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
# print(accuracy)
return clf,X_train, X_test, y_train, y_test, accuracy
def predict1(self,clf,a,y,t):
test_measures = np.array([[a, y, t]])
prediction = clf.predict(test_measures)
# print(prediction)
return prediction | karandoshi98/First-Python-Project-Disease-Survey | ml.py | ml.py | py | 2,129 | python | en | code | 1 | github-code | 50 |
75043703196 | #!/usr/bin/python
import time
import roslib
roslib.load_manifest('raw_script_server')
import rospy
import actionlib
from raw_script_server.msg import *
from simple_script_server import *
sss = simple_script_server()
## Script server class which inherits from script class.
#
# Implements actionlib interface for the script server.
#
class script_server():
## Initializes the actionlib interface of the script server.
#
def __init__(self):
self.ns_global_prefix = "/script_server"
self.script_action_server = actionlib.SimpleActionServer(self.ns_global_prefix, ScriptAction, self.execute_cb, False)
self.script_action_server.start()
#------------------- Actionlib section -------------------#
## Executes actionlib callbacks.
#
# \param server_goal ScriptActionGoal
#
def execute_cb(self, server_goal):
server_result = ScriptActionResult().result
if server_goal.function_name == "stop":
handle01 = sss.stop(server_goal.component_name)
elif server_goal.function_name == "move":
handle01 = sss.move(server_goal.component_name,server_goal.parameter_name,mode=server_goal.mode)
else:
rospy.logerr("function <<%s>> not supported", server_goal.function_name)
self.script_action_server.set_aborted(server_result)
return
server_result.error_code = handle01.get_error_code()
if server_result.error_code == 0:
rospy.logdebug("action result success")
self.script_action_server.set_succeeded(server_result)
else:
rospy.logerr("action result error")
self.script_action_server.set_aborted(server_result)
## Main routine for running the script server
#
if __name__ == '__main__':
rospy.init_node('script_server')
script_server()
rospy.loginfo("script_server is running")
rospy.spin()
| RC4Group4/ResearchCamp4 | raw_command_tools/raw_script_server/src/script_server.py | script_server.py | py | 1,737 | python | en | code | 1 | github-code | 50 |
44878419809 | from plone import api
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.dexterity.interfaces import IDexterityFTI
from ploneconf.core.content.person import IPerson
from ploneconf.core.content.person import Person
from ploneconf.core.testing import PLONECONF_CORE_INTEGRATION_TESTING
from z3c.relationfield import RelationValue
from zope.component import createObject
from zope.component import getUtility
from zope.component import queryUtility
from zope.intid.interfaces import IIntIds
import unittest
class PersonIntegrationTest(unittest.TestCase):
layer = PLONECONF_CORE_INTEGRATION_TESTING
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer["portal"]
setRoles(self.portal, TEST_USER_ID, ["Manager"])
def create_person(self) -> Person:
person = api.content.create(
container=self.portal.speakers,
type="Person",
id="ada-lovelace",
title="Ada Lovelace",
)
return person
def create_activity(self, portal_type: str, person: Person):
container = self.portal.schedule
intids = getUtility(IIntIds)
person_value = RelationValue(intids.getId(person))
return api.content.create(
container=container,
type=portal_type,
id=f"a-{portal_type.lower()}",
title=f"A {portal_type}",
presenters=[
person_value,
],
)
def test_fti(self):
fti = queryUtility(IDexterityFTI, name="Person")
self.assertTrue(fti)
def test_factory(self):
fti = queryUtility(IDexterityFTI, name="Person")
factory = fti.factory
obj = createObject(factory)
self.assertTrue(IPerson.providedBy(obj))
def test_adding(self):
folder = self.portal.speakers
folder.invokeFactory("Person", "Person")
self.assertTrue(IPerson.providedBy(folder["Person"]))
def test_activities(self):
person = self.create_person()
self.assertEqual(len(person.activities), 0)
self.create_activity("Keynote", person)
self.assertEqual(len(person.activities), 1)
self.create_activity("Talk", person)
self.assertEqual(len(person.activities), 2)
self.create_activity("Training", person)
self.assertEqual(len(person.activities), 3)
def test_labels(self):
person = self.create_person()
self.assertEqual(len(person.activities), 0)
self.create_activity("Keynote", person)
self.create_activity("Talk", person)
self.create_activity("Training", person)
self.assertEqual(len(person.labels), 3)
self.assertIn("keynote-speaker", person.labels)
self.assertIn("speaker", person.labels)
self.assertIn("instructor", person.labels)
| cleberjsantos/2021.ploneconf.org | api/src/ploneconf.core/src/ploneconf/core/tests/test_content_person.py | test_content_person.py | py | 2,904 | python | en | code | null | github-code | 50 |
39503259202 | import pika
import pymongo
import json
# Connect to MongoDB
mongo_client = pymongo.MongoClient("mongodb://admin:secret@localhost:27017/")
mongo_db = mongo_client["project"]
patients_collection = mongo_db["Patient"]
# Define the queue names
register_queue = 'register_patient'
lookup_queue = 'lookup_patient'
all_patients_queue = 'all_patients'
# Connect to RabbitMQ
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
# Set up the queues
channel.queue_declare(queue=register_queue)
channel.queue_declare(queue=lookup_queue)
channel.queue_declare(queue=all_patients_queue)
# Set up RabbitMQ consumers for patient registration, lookup, and list requests
def register_patient(ch, method, properties, body):
patient_data = json.loads(body)
print(patient_data)
patients_collection.insert_one(patient_data)
print("Registered new patient:", patient_data)
ch.basic_ack(delivery_tag=method.delivery_tag)
def lookup_patient(ch, method, properties, body):
phone = body.decode()
patients = []
for patient in patients_collection.find({"phone": phone}):
patient['_id'] = str(patient['_id']) # convert ObjectId to string
patients.append(patient)
response_body = json.dumps(patients)
    ch.basic_publish(exchange='', routing_key=properties.reply_to, body=response_body)
ch.basic_ack(delivery_tag=method.delivery_tag)
def list_patients(ch, method, properties, body):
patients = []
for patient in patients_collection.find():
patient['_id'] = str(patient['_id']) # convert ObjectId to string
patients.append(patient)
response_body = json.dumps(patients)
    ch.basic_publish(exchange='', routing_key=properties.reply_to, body=response_body)
ch.basic_ack(delivery_tag=method.delivery_tag)
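# Minimal client-side sketch of the RPC pattern above (defined for reference,
# never called here; queue names match this worker, the rest is illustrative).
def example_lookup_client(phone):
    conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    ch = conn.channel()
    # exclusive auto-named queue that receives the reply
    reply = ch.queue_declare(queue='', exclusive=True).method.queue
    ch.basic_publish(exchange='', routing_key=lookup_queue,
                     properties=pika.BasicProperties(reply_to=reply),
                     body=phone)
    # a real client would consume/poll until the reply arrives
    method, properties, body = ch.basic_get(queue=reply, auto_ack=True)
    conn.close()
    return json.loads(body) if body else None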
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='register_patient', on_message_callback=register_patient)
channel.basic_consume(queue='lookup_patient', on_message_callback=lookup_patient)
channel.basic_consume(queue='all_patients', on_message_callback=list_patients)
print("Patient worker started...")
channel.start_consuming()
| Ahkh3e/RabbitMQProject | Workers/patient_worker.py | patient_worker.py | py | 2,188 | python | en | code | 0 | github-code | 50 |
28201624930 | # -*- coding: utf-8 -*-
def getFetchedTitle(s):
end = (len(s)-2)
if s[end:] == ",\n":
return s[0:end]
else:
        # last word of the list
return s
def filterTagNameSequence(tags, coll):
    coll_ = [] # instantiate an empty collection
for el in coll:
        # filter down to the elements that contain the tag:
        t = el(tags[0])
        # if the list is not empty, add all its elements to the new collection
for el_ in t:
coll_.append(el_)
if len(tags) > 1:
return filterTagNameSequence(tags[1:], coll_)
else:
        # return the new tag list
return coll_
def getParagraphById(body, title_span_id):
title_span = body.find(id=title_span_id)
if title_span == None:
return []
title = title_span.parent
nextTitle = title.find_next_sibling(title.name)
# look for the string that matches title and return all the html
# until nextTitle is found.
paragraph = [title]
for el in title.find_next_siblings():
if el == nextTitle:
break
else:
paragraph.append(el)
return paragraph
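# Illustrative use (names are hypothetical): given a BeautifulSoup `body`,
# getParagraphById(body, "Testo") collects the heading whose child span has
# id="Testo" plus every sibling element up to the next heading of the same level.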
def songFileName(artista, traccia):
return artista + "_" + traccia + ".lyr"
| salvioner/lyra | dataset/fetch.py | fetch.py | py | 1,244 | python | it | code | 0 | github-code | 50 |
71935869596 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import datetime as dt
import struct
import os
import logging
from scipy.io import loadmat
from netCDF4 import Dataset
import netCDF4 as nc4
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
import time
from sys import exit
import numba as nb
"""
Created on Wed Jun 9 14:12:26 2021
@author: conwayek
"""
def F_isrf_convolve_fft(w1,s1,w2,isrf_w,isrf_dw0,isrf_lut0,ISRFtype,fitisrf):
"""
astropy.covolute.convolve_fft-based convolution using wavelength-dependent isrf
w1:
high-resolution wavelength
s1:
high-resolution spectrum
w2:
low-resolution wavelength
isrf_w:
center wavelength grid of isrf lut
isrf_dw:
wavelength grid on which isrfs are defined
isrf_lut:
instrument spectral response function look up table
"""
from astropy.convolution import convolve_fft
from scipy.interpolate import RegularGridInterpolator
from scipy import interpolate,optimize
if(fitisrf == False):
        if isrf_lut0.shape != (len(isrf_w),len(isrf_dw0)):
            raise ValueError('isrf_lut dimension incompatible!')
# make sure w1 and isrf_dw have the same resolution
w1_step = np.median(np.diff(w1))
isrf_dw_min = np.min(isrf_dw0)
isrf_dw_max = -isrf_dw_min
isrf_dw = np.linspace(isrf_dw_min,isrf_dw_max,int((isrf_dw_max-isrf_dw_min)/w1_step)+1)
isrf_lut = np.zeros((len(isrf_w),len(isrf_dw)))
for (iw,w) in enumerate(isrf_w):
if(ISRFtype == 'ISRF'):
interp_func = interpolate.interp1d(isrf_dw0,isrf_lut0[iw,:])
isrf_lut[iw,:] = interp_func(isrf_dw)
elif(ISRFtype == 'GAUSS'):
                popt, pcov = optimize.curve_fit(gaussian, isrf_dw0, isrf_lut0[iw,:])
isrf_lut[iw,:] = gaussian(isrf_dw,popt[0])
elif(ISRFtype == 'SUPER'):
popt, pcov = optimize.curve_fit(supergaussian, isrf_dw0, isrf_lut0[iw,:])
isrf_lut[iw,:] = supergaussian(isrf_dw,popt[0],popt[1])
else:
print('ISRF Function Not Coded: ONLY ISRF, GAUSS and SUPER Allowed')
exit()
# note that the isrf is flipped: convolution is the mirror-image of kernel averaging
s2_fft_lut = np.array([convolve_fft(s1,isrf_lut[iw,::-1]) for (iw,w) in enumerate(isrf_w)])
inter_func = RegularGridInterpolator((isrf_w,w1),s2_fft_lut,bounds_error=False)
return inter_func((w2,w2))
elif( (fitisrf == True) and ISRFtype == 'SQUEEZE'):
        if isrf_lut0.shape != (len(isrf_w),len(isrf_dw0[0,:])):
            raise ValueError('isrf_lut dimension incompatible!')
# make sure w1 and isrf_dw have the same resolution
s2_fft_lut = np.zeros(( len(isrf_w),len(s1) ))
for (iw,w) in enumerate(isrf_w):
w1_step = np.median(np.diff(w1))
isrf_dw_min = np.min(isrf_dw0[iw,:])
isrf_dw_max = -isrf_dw_min
isrf_dw = np.linspace(isrf_dw_min,isrf_dw_max,int((isrf_dw_max-isrf_dw_min)/w1_step)+1)
isrf_lut = np.zeros((len(isrf_w),len(isrf_dw)))
interp_func = interpolate.interp1d(isrf_dw0[iw,:],isrf_lut0[iw,:])
isrf_lut[iw,:] = interp_func(isrf_dw)
s2_fft_lut[iw,:] = np.array([convolve_fft(s1,isrf_lut[iw,::-1])])
inter_func = RegularGridInterpolator((isrf_w,w1),s2_fft_lut,bounds_error=False)
return inter_func((w2,w2))
elif( (fitisrf == True) and ISRFtype == 'SUPER'):
        if (isrf_lut0.shape) != (2,len(isrf_w)):
            raise ValueError('isrf_lut dimension incompatible!')
# make sure w1 and isrf_dw have the same resolution
w1_step = np.median(np.diff(w1))
isrf_dw_min = np.min(isrf_dw0)
isrf_dw_max = -isrf_dw_min
isrf_dw = np.linspace(isrf_dw_min,isrf_dw_max,int((isrf_dw_max-isrf_dw_min)/w1_step)+1)
isrf_lut = np.zeros((len(isrf_w),len(isrf_dw)))
for (iw,w) in enumerate(isrf_w):
isrf_lut[iw,:] = supergaussian(isrf_dw,isrf_lut0[0,iw],isrf_lut0[1,iw])
# note that the isrf is flipped: convolution is the mirror-image of kernel averaging
s2_fft_lut = np.array([convolve_fft(s1,isrf_lut[iw,::-1]) for (iw,w) in enumerate(isrf_w)])
inter_func = RegularGridInterpolator((isrf_w,w1),s2_fft_lut,bounds_error=False)
return inter_func((w2,w2))
else:
#IN THIS CASE, ISRF_LUT0 ARE THE FITTED HWHM: NO NEED TO FIT THE DATA
        if len(isrf_lut0) != (len(isrf_w)):
            raise ValueError('isrf_lut dimension incompatible!')
# make sure w1 and isrf_dw have the same resolution
w1_step = np.median(np.diff(w1))
isrf_dw_min = np.min(isrf_dw0)
isrf_dw_max = -isrf_dw_min
isrf_dw = np.linspace(isrf_dw_min,isrf_dw_max,int((isrf_dw_max-isrf_dw_min)/w1_step)+1)
isrf_lut = np.zeros((len(isrf_w),len(isrf_dw)))
for (iw,w) in enumerate(isrf_w):
isrf_lut[iw,:] = gaussian(isrf_dw,isrf_lut0[iw])
# note that the isrf is flipped: convolution is the mirror-image of kernel averaging
s2_fft_lut = np.array([convolve_fft(s1,isrf_lut[iw,::-1]) for (iw,w) in enumerate(isrf_w)])
inter_func = RegularGridInterpolator((isrf_w,w1),s2_fft_lut,bounds_error=False)
return inter_func((w2,w2))
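# Minimal usage sketch for F_isrf_convolve_fft (all numbers below are
# synthetic placeholders, not instrument values): convolve a fine-grid
# spectrum with a tabulated Gaussian ISRF and sample it on a coarser grid.
def _demo_isrf_convolution():
    w1 = np.arange(1595.0, 1605.0, 0.005)              # high-res wavelength grid
    s1 = 1.0 + 0.1 * np.sin(2.0 * np.pi * w1 / 0.5)    # synthetic spectrum
    w2 = np.arange(1596.0, 1604.0, 0.1)                # coarse detector grid
    isrf_w = np.linspace(1595.0, 1605.0, 5)            # ISRF center wavelengths
    isrf_dw0 = np.linspace(-0.5, 0.5, 201)             # ISRF wavelength offsets
    isrf_lut0 = np.array([gaussian(isrf_dw0, 0.05) for _ in isrf_w])
    return F_isrf_convolve_fft(w1, s1, w2, isrf_w, isrf_dw0, isrf_lut0,
                               'ISRF', False)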
#SUPER GAUSSIAN ISRF
def supergaussian(x, w, k):
from math import gamma
awk = k/( 2.0 * w * gamma(1/k) )
return awk * np.exp(-(abs(x/w) )**k)
#GAUSSIAN ISRF
def gaussian(x,width):
return (1.0/(width*np.sqrt(2*np.pi) ) ) * np.exp(-0.5 * (x**2) / (width**2) )
def FWHM(x,y):
hmx = half_max_x(x,y)
fwhm = hmx[1] - hmx[0]
return fwhm
def lin_interp(x, y, i, half):
return x[i] + (x[i+1] - x[i]) * ((half - y[i]) / (y[i+1] - y[i]))
def half_max_x(x, y):
half = max(y)/2.0
signs = np.sign(np.add(y, -half))
zero_crossings = (signs[0:-2] != signs[1:-1])
zero_crossings_i = np.where(zero_crossings)[0]
return [lin_interp(x, y, zero_crossings_i[0], half),
lin_interp(x, y, zero_crossings_i[1], half)]
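# Self-check sketch (not called during fitting): for the gaussian above the
# analytic relation is FWHM = 2*sqrt(2*ln(2))*width, which the numerical
# FWHM() helper should reproduce on a sufficiently fine grid.
def _check_gaussian_fwhm(width=0.1):
    x = np.linspace(-1.0, 1.0, 20001)
    numeric = FWHM(x, gaussian(x, width))
    analytic = 2.0 * np.sqrt(2.0 * np.log(2.0)) * width
    return numeric, analytic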
def fitspectra(Data,l1FitPath,whichBand,solarRefFile,calidata,o2spectraReffile,ciaspectraReffile,co2spectraReffile,h2ospectraReffile,ch4spectraReffile,fitSZA,SZA,ALT,l1DataDir,frameDateTime,pixlimitX,
isrf_lut,isrf_w,isrf_dw0,wavelength,pixlimit,fitisrf,ISRF,xtol,ftol,xtrackaggfactor):
from scipy import interpolate,optimize
from lmfit import minimize, Parameters, Parameter,fit_report
from scipy.interpolate import RegularGridInterpolator
import math
from skimage.measure import block_reduce
#NUMBER OF FRAMES CAN BE USED TO GET FIRST GUESS ON THE VALUE OF SOLAR SCALING
numframes = int(Data.shape[2])
radiance = np.nanmean(Data,axis=2)
radiance = radiance*1e-14
    #aggregate the data in the xtrack dimension
norm_2d = block_reduce(np.ones(radiance.shape), block_size=(xtrackaggfactor, 1),func=np.mean )
# Valid pixels
valpix = np.zeros(radiance.shape)
idv = np.logical_and(np.isfinite(radiance),radiance>0.0)
valpix[idv] = 1.0
valpix_agg = block_reduce(valpix, block_size=(xtrackaggfactor,1),func=np.mean )
valpix_agg = valpix_agg / norm_2d
# Coadd radiance data
rad_obs = block_reduce(radiance,block_size=(xtrackaggfactor,1),func=np.mean)
rad_obs = rad_obs / norm_2d
# Coadd wvl data
wavelength = block_reduce(wavelength,block_size=(xtrackaggfactor,1),func=np.mean)
wavelength = wavelength / norm_2d
# aggregate ISRF data in the same way
norm_3d = block_reduce(np.ones(isrf_lut.shape), block_size=(xtrackaggfactor,1,1),func=np.mean )
# Valid pixels
valpix = np.zeros(isrf_lut.shape)
idv = np.logical_and(np.isfinite(isrf_lut),isrf_lut>0.0)
valpix[idv] = 1.0
valpix_agg = block_reduce(valpix, block_size=(xtrackaggfactor,1,1),func=np.mean )
valpix_agg = valpix_agg / norm_3d
# Coadd radiance data
isrf_lut = block_reduce(isrf_lut,block_size=(xtrackaggfactor,1,1),func=np.mean)
isrf_lut = isrf_lut / norm_3d
##############################
#SOLAR DATA
ncid = Dataset(solarRefFile, 'r')
if(whichBand == 'CH4'):
f = (ncid['Wavelength'][:] > 1590) & (ncid['Wavelength'][:] < 1700)
refWavelength = ncid['Wavelength'][:][f].data
refSolarIrradiance = ncid['SSI'][:][f].data
else:
f = (ncid['Wavelength'][:] > 1245) & (ncid['Wavelength'][:] < 1310)
refWavelength = ncid['Wavelength'][:][f].data
refSolarIrradiance = ncid['SSI'][:][f].data
ncid.close()
##############################
##############################
# CROSS SECTIONAL DATA
#UT XSECTIONS + US ATM FILE
#US ST. ATM. DATA
usatmospath = os.path.join(calidata,str('AFGLUS_atmos.txt'))
uspressure = np.loadtxt(usatmospath,usecols=0)
usheight = np.loadtxt(usatmospath,usecols=1)
ustemperature = np.loadtxt(usatmospath,usecols=2)
usair = np.loadtxt(usatmospath,usecols=3)
usCH4 = np.loadtxt(usatmospath,usecols=4)
usCO2 = np.loadtxt(usatmospath,usecols=5)
usH2O = np.loadtxt(usatmospath,usecols=6)
specwave = refWavelength
if(whichBand == 'O2'):
#ncid = Dataset(o2spectraReffile, 'r')
specwave = np.arange(refWavelength[0],refWavelength[-1],0.001)
h2opath = os.path.join(calidata,str('h2o_lut_HITRAN2020_5e-3cm-1.nc'))
o2path = os.path.join(calidata,'o2_lut_HIT2020_5e-3cm-1.nc')
###################################################
new = Dataset(h2opath, 'r')
TH2O = new.variables['CrossSection'][:,:,:].data
PressH2O = new.variables['Pressure'][:].data
WvlH2O = new.variables['Wavelength'][:].data
TempH2O = new.variables['Temperature'][:].data
WvlH2O = 1.0e7/WvlH2O
WvlH2O = np.flip(WvlH2O)
TH2O = np.flip(TH2O,axis=2)
new.close()
###################################################
new = Dataset(o2path, 'r')
Temp = new.variables['Temperature'][:].data
Press = new.variables['Pressure'][:].data
Wvl = new.variables['Wavelength'][:].data
TO2 = new.variables['CrossSection'][:,:,:].data
Wvl = 1.0e7/Wvl
Wvl = np.flip(Wvl)
TO2 = np.flip(TO2,axis=2)
new.close()
##########
file = os.path.join(calidata,str('O2_CIA_296K_all.nc'))
data = Dataset(file,'r')
WvlCIA = data['Wavelength'][:]
TCIA = data['XSection'][:]
data.close()
fnO2 = RegularGridInterpolator((Press,Temp,Wvl), TO2)
fnH2O = RegularGridInterpolator((PressH2O,TempH2O,WvlH2O), TH2O)
wgtO2 = np.zeros(len(Wvl))
wgtH2O = np.zeros(len(WvlH2O))
wgtCIA = np.zeros(len(WvlCIA))
columntotalO2 = 0.0
columntotalH2O = 0.0
columntotalCIA = 0.0
xO2 = fnO2((400,250,Wvl))
for j in range(len(Wvl)):
wgtO2[j] = wgtO2[j] + xO2[j]
for i in range( len(usheight) ):
if(usheight[i] <= ALT):
"""
xO2 = fnO2((uspressure[i],ustemperature[i],Wvl)) * abs(1.0/np.cos(SZA) + 1)
for j in range(len(Wvl)):
wgtO2[j] = wgtO2[j] + xO2[j]
columntotalO2 = columntotalO2 + usair[i]
"""
xH2O = fnH2O((uspressure[i],ustemperature[i],WvlH2O)) * usH2O[i] * abs(1.0/np.cos(SZA) + 1.0)
columntotalH2O = columntotalH2O + usH2O[i]
#xCIA = fnCIA(ustemperature[i],WvlCIA) * abs(1.0/np.cos(SZA) + 1.0)
#for j in range(len(WvlCIA)):
#wgtCIA[j] = wgtCIA[j] + xCIA[j]
for j in range(len(WvlH2O)):
wgtH2O[j] = wgtH2O[j] + xH2O[j]
else:
"""
xO2 = fnO2((uspressure[i],ustemperature[i],Wvl)) * abs(1.0/np.cos(SZA) )
for i in range(len(Wvl)):
wgtO2[i] = wgtO2[i] + xO2[i]
columntotalO2 = columntotalO2 + usair[i]
"""
xH2O = fnH2O((uspressure[i],ustemperature[i],WvlH2O)) * usH2O[i] * abs(1.0/np.cos(SZA))
columntotalH2O = columntotalH2O + usH2O[i]
#xCIA = fnCIA(ustemperature[i],WvlCIA) * abs(1.0/np.cos(SZA) )
#for j in range(len(WvlCIA)):
#wgtCIA[j] = wgtCIA[j] + xCIA[j]
for j in range(len(WvlH2O)):
wgtH2O[j] = wgtH2O[j] + xH2O[j]
wgtH2O = wgtH2O/columntotalH2O
wgtCIA = TCIA#wgtCIA
wgtO2 = wgtO2
yH2O = interpolate.interp1d(WvlH2O,wgtH2O)
yO2 = interpolate.interp1d(Wvl,wgtO2)
yCIA = interpolate.interp1d(WvlCIA,wgtCIA)
H2O = np.zeros(len(specwave))
O2 = np.zeros(len(specwave))
CIA = np.zeros(len(specwave))
for i in range(len(specwave)):
H2O[i] = yH2O(specwave[i])
O2[i] = yO2(specwave[i])
CIA[i] = yCIA(specwave[i])
#o2path = os.path.join(calidata,spectraReffile)
##o2path = os.path.join(calidata,str('o2_lut_1200-1750nm_0p02fwhm_1e21vcd_mesat.nc'))
#new = Dataset(o2path, 'r')
#TO2 = new.variables['CrossSection'][:,:,:].data
#Temp = new.variables['Temperature'][:].data
#Press = new.variables['Pressure'][:].data
#Wvl = new.variables['Wavelength'][:].data
#
#
#fn = RegularGridInterpolator((Press,Temp,Wvl), TO2)
#
#wgtO2 = np.zeros(len(Wvl))
#columntotal = 0.0
#for i in range( len(usheight) ):
# if(usheight[i] <= 58.0):
# x = fn((uspressure[i],ustemperature[i],Wvl)) * usair[i]
# wgtO2 = wgtO2 + x
# columntotal = columntotal + usair[i]
#wgtO2 = wgtO2/columntotal
#
## wgtO2 = fn((1013,288,Wvl))
#y = interpolate.interp1d(Wvl,wgtO2)
#O2 = np.zeros(len(specwave))
#for i in range(len(specwave)):
# O2[i] = y(specwave[i])
#CIA = ncid.variables['CIAO2AIR'][:]
##CIA = ncid.variables['CIATCCON'][:]
#ncid.close()
else:
h2opath = os.path.join(calidata,str('h2o_lut_HITRAN2020_5e-3cm-1.nc'))
co2path = os.path.join(calidata,str('co2_lut_1200-1750nm_0p02fwhm_1e21vcd_mesat.nc'))
ch4path = os.path.join(calidata,str('ch4_lut_HITRAN2020_5e-3cm-1_g0_update.nc'))
new = Dataset(h2opath, 'r')
TH2O = new.variables['CrossSection'][:,:,:].data
TempH2O = new.variables['Temperature'][:].data
PressH2O = new.variables['Pressure'][:].data
WvlH2O = new.variables['Wavelength'][:].data
WvlH2O = 1.0e7/WvlH2O
WvlH2O = np.flip(WvlH2O)
TH2O = np.flip(TH2O,axis=2)
new.close()
new = Dataset(ch4path, 'r')
TCH4 = new.variables['CrossSection'][:,:,:].data
TempCH4 = new.variables['Temperature'][:].data
PressCH4 = new.variables['Pressure'][:].data
WvlCH4 = new.variables['Wavelength'][:].data
WvlCH4 = 1.0e7/WvlCH4
WvlCH4 = np.flip(WvlCH4)
TCH4 = np.flip(TCH4,axis=2)
new.close()
new = Dataset(co2path, 'r')
TCO2 = new.variables['CrossSection'][:,:,:].data
TempCO2 = new.variables['Temperature'][:].data
PressCO2 = new.variables['Pressure'][:].data
WvlCO2 = new.variables['Wavelength'][:].data
new.close()
fnCH4 = RegularGridInterpolator((PressCH4,TempCH4,WvlCH4), TCH4)
fnCO2 = RegularGridInterpolator((PressCO2,TempCO2,WvlCO2), TCO2)
fnH2O = RegularGridInterpolator((PressH2O,TempH2O,WvlH2O), TH2O)
wgtCH4 = np.zeros(len(WvlCH4))
wgtCO2 = np.zeros(len(WvlCO2))
wgtH2O = np.zeros(len(WvlH2O))
columntotalCH4 = 0.0
columntotalCO2 = 0.0
columntotalH2O = 0.0
if(fitSZA):
for i in range( len(usheight) ):
if(usheight[i] <= ALT):
xCH4 = fnCH4((uspressure[i],ustemperature[i],WvlCH4)) * usCH4[i] * (1.0/np.cos(SZA) + 1.0)
wgtCH4 = wgtCH4 + xCH4
columntotalCH4 = columntotalCH4 + usCH4[i]
###
xCO2 = fnCO2((uspressure[i],ustemperature[i],WvlCO2)) * usCO2[i] * (1.0/np.cos(SZA) + 1.0)
wgtCO2 = wgtCO2 + xCO2
columntotalCO2 = columntotalCO2 + usCO2[i]
###
xH2O = fnH2O((uspressure[i],ustemperature[i],WvlH2O)) * usH2O[i] * (1.0/np.cos(SZA) + 1.0)
wgtH2O = wgtH2O + xH2O
columntotalH2O = columntotalH2O + usH2O[i]
else:
xCH4 = fnCH4((uspressure[i],ustemperature[i],WvlCH4)) * usCH4[i] * (1.0/np.cos(SZA) )
wgtCH4 = wgtCH4 + xCH4
columntotalCH4 = columntotalCH4 + usCH4[i]
###
xCO2 = fnCO2((uspressure[i],ustemperature[i],WvlCO2)) * usCO2[i] * (1.0/np.cos(SZA))
wgtCO2 = wgtCO2 + xCO2
columntotalCO2 = columntotalCO2 + usCO2[i]
###
xH2O = fnH2O((uspressure[i],ustemperature[i],WvlH2O)) * usH2O[i] * (1.0/np.cos(SZA))
wgtH2O = wgtH2O + xH2O
columntotalH2O = columntotalH2O + usH2O[i]
else:
for i in range( len(usheight) ):
if(usheight[i] <= 58.0):
xCH4 = fnCH4((uspressure[i],ustemperature[i],WvlCH4)) * usCH4[i]
wgtCH4 = wgtCH4 + xCH4
columntotalCH4 = columntotalCH4 + usCH4[i]
###
xCO2 = fnCO2((uspressure[i],ustemperature[i],WvlCO2)) * usCO2[i]
wgtCO2 = wgtCO2 + xCO2
columntotalCO2 = columntotalCO2 + usCO2[i]
###
xH2O = fnH2O((uspressure[i],ustemperature[i],WvlH2O)) * usH2O[i]
wgtH2O = wgtH2O + xH2O
columntotalH2O = columntotalH2O + usH2O[i]
wgtCO2 = wgtCO2/columntotalCO2
wgtCH4 = wgtCH4/columntotalCH4
wgtH2O = wgtH2O/columntotalH2O
yCH4 = interpolate.interp1d(WvlCH4,wgtCH4)
CH4 = np.zeros(len(specwave))
yCO2 = interpolate.interp1d(WvlCO2,wgtCO2)
CO2 = np.zeros(len(specwave))
yH2O = interpolate.interp1d(WvlH2O,wgtH2O)
H2O = np.zeros(len(specwave))
for i in range(len(specwave)):
CH4[i] = yCH4(specwave[i])
CO2[i] = yCO2(specwave[i])
H2O[i] = yH2O(specwave[i])
"""
plt.plot(specwave,CH4)
plt.savefig('ch4_start.png')
plt.close()
plt.plot(specwave,CO2)
plt.savefig('co2_start.png')
plt.close()
plt.plot(specwave,H2O)
plt.savefig('h2o_start.png')
plt.close()
"""
##############################
# DEFINE CROSS TRACK POSITIONS TO FIT
counter = 0
if(whichBand == 'CH4'):
headerStr='MethaneAIR_L1B_CH4_'
l1FitISRFPath = os.path.join(l1DataDir,headerStr
+np.min(frameDateTime).strftime('%Y%m%dT%H%M%S')+'_'
+np.max(frameDateTime).strftime('%Y%m%dT%H%M%S')+'_'
+str('ISRF_proxy_fit_CH4.txt'))
f = open(l1FitPath,"w")
g = open(l1FitISRFPath,"w")
else:
headerStr='MethaneAIR_L1B_O2_'
l1FitISRFPath = os.path.join(l1DataDir,headerStr
+np.min(frameDateTime).strftime('%Y%m%dT%H%M%S')+'_'
+np.max(frameDateTime).strftime('%Y%m%dT%H%M%S')+'_'
+str('ISRF_proxy_fit_O2.txt'))
f = open(l1FitPath,"w")
g = open(l1FitISRFPath,"w")
#THE FITTING STARTS HERE
start = 0
#for j in range (pixlimitX[0],pixlimitX[1]):
wavelength_raw = wavelength
for j in range(rad_obs.shape[0]):
xTrack = j
error=False
        isrf_lut0 = isrf_lut[xTrack,:,:]
# isrf = np.transpose(isrf_lut, [2,1,0] )
# isrf_lut0 = isrf[xTrack,:,:]
#OBSERVED DATA
wavelength_obs = wavelength_raw[xTrack,:].squeeze()
radiance_obs = rad_obs[xTrack,:].squeeze()
        if math.isnan( wavelength_obs[pixlimit[0]]):
            print(str(xTrack)+ " has NaN values in the wavelength window")
        elif math.isnan( radiance_obs[pixlimit[0]]):
            print(str(xTrack)+ " has NaN values in the radiance window")
        elif math.isnan( wavelength_obs[pixlimit[1]]):
            print(str(xTrack)+ " has NaN values in the wavelength window")
        elif math.isnan( radiance_obs[pixlimit[1]]):
            print(str(xTrack)+ " has NaN values in the radiance window")
else:
print('FITTING TRACK: '+str(xTrack))
# GET THE GRID OF INTEREST FOR INTERPOLATION
gridwidth = 1*(pixlimit[1] - pixlimit[0])
d = np.linspace(wavelength_obs[pixlimit[0]],wavelength_obs[pixlimit[1]], gridwidth+1 )
##############################
# DEFINE FITTING VARIABLES:
##############################
params = Parameters()
#BASELINE CORRECTION
if(start == 0):
if(whichBand == 'CH4'):
f0 = Parameter('par_f'+str(0),0.1402161187)
params.add(f0)
sc_solar = Parameter('scale_solar',0.00573476*numframes,min=0)
params.add(sc_solar)
f1 = Parameter('baseline0',-0.12)
f2 = Parameter('baseline1',8.1097e-05)
params.add(f1)
params.add(f2)
else:
f0 = Parameter('par_f'+str(0),0.149264)
params.add(f0)
sc_solar = Parameter('scale_solar',0.0153476*numframes,min=0)
params.add(sc_solar)
f1 = Parameter('baseline0', -1.34526326)
f2 = Parameter('baseline1',0.00106887)
#f3 = Parameter('baseline2',-6.3130e-04)
params.add(f1)
params.add(f2)
#params.add(f3)
#b1 = Parameter('al0',1.0)
# b2 = Parameter('al1',0.0)
# b3 = Parameter('al2',0)
# params.add(b1)
# params.add(b2)
# params.add(b3)
if(whichBand == 'CH4'):
sc_co2 = Parameter('scale_co2',6.7378e+22,max=1e24, min=1)
sc_ch4 = Parameter('scale_ch4', 2.9832e+20,max=1e24, min=1)
sc_h2o = Parameter('scale_h2o', 1.5151e+23,max=1e25, min=1)
params.add(sc_co2)
params.add(sc_ch4)
params.add(sc_h2o)
else:
sc_o2 = Parameter('scale_o2',1.13e+25,max=1e28, min=1)
params.add(sc_o2)
#sc_cia = Parameter('scale_cia',8e+24,max=1e28, min=1)
#params.add(sc_cia)
if(fitisrf == True):
if(ISRF == 'SQUEEZE'):
# p1 = Parameter('squeeze',1.0,min=0.6,max=1.3)
# params.add(p1)
wavelength = np.zeros((len(isrf_dw0),len(isrf_w) ))
width = np.ones(len(isrf_w))
sharp = np.ones(len(isrf_w))
for i in range(len(isrf_w)):
wavelength[:,i] = isrf_w[i] + isrf_dw0[:]
d_max = np.max(d)
d_min = np.min(d)
count=0
index = []
for i in range(len(isrf_w)):
if((d_min < wavelength[-1,i]) and (d_max > wavelength[0,i]) ):
index.append(i)
p1 = Parameter('squeeze'+str(count),0.9381123,max=1.2,min=0.8)
params.add(p1)
p2 = Parameter('sharp'+str(count),1.051526,max=1.2,min=0.8)
params.add(p2)
count=count+1
else:
pass
else:
wavelength = np.zeros((len(isrf_dw0),len(isrf_w) ))
width = np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
wavelength[:,i] = isrf_w[i] + isrf_dw0[:]
d_max = np.max(d)
d_min = np.min(d)
if(ISRF == 'GAUSS'):
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(gaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
count=0
index = []
for i in range(len(isrf_w)):
if((d_min < wavelength[-1,i]) and (d_max > wavelength[0,i]) ):
index.append(i)
p1 = Parameter('width'+str(count),0.1445,max=1,min=0.01)
params.add(p1)
count=count+1
else:
pass
elif(ISRF == 'SUPER'):
shape = np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(supergaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
shape[i] = popt[1]
isrf_super = np.vstack((width,shape))
count=0
index = []
for i in range(len(isrf_w)):
if((d_min < wavelength[-1,i]) and (d_max > wavelength[0,i]) ):
p1 = Parameter('width'+str(count),width[i],max=1,min=0.01)
p2 = Parameter('shape'+str(count),shape[i],max=5,min=0.01)
params.add(p1)
params.add(p2)
count=count+1
index.append(i)
else:
pass
else:
index = None
#####
                # temporary holders for fit parameters - updated upon successful iteration
f0CH4=0.0
b0CH4=1.0
b1CH4=0.0
solarCH4=1.0
scaleco2=1e21
scalech4=1e23
scaleh2o=1e21
#######
f0O2=0.0
b0O2=1.0
b1O2=0.0
b2O2=0.0
solarO2=1.0
scaleo2=1e23
else:
if(whichBand == 'CH4'):
f0 = Parameter('par_f'+str(0),f0CH4)
params.add(f0)
sc_solar = Parameter('scale_solar',solarCH4,min=0)
params.add(sc_solar)
f1 = Parameter('baseline0',b0CH4)
f2 = Parameter('baseline1',b1CH4)
params.add(f1)
params.add(f2)
sc_co2 = Parameter('scale_co2',scaleco2,max=1e24, min=1)
sc_ch4 = Parameter('scale_ch4', scalech4,max=1e24, min=1)
sc_h2o = Parameter('scale_h2o', scaleh2o,max=1e25, min=1)
params.add(sc_co2)
params.add(sc_ch4)
params.add(sc_h2o)
else:
f0 = Parameter('par_f'+str(0),f0O2)
params.add(f0)
sc_solar = Parameter('scale_solar',solarO2,min=0)
params.add(sc_solar)
f1 = Parameter('baseline0',b0O2)
f2 = Parameter('baseline1',b1O2)
#f3 = Parameter('baseline2',b2O2)
params.add(f1)
params.add(f2)
#params.add(f3)
sc_o2 = Parameter('scale_o2',scaleo2,max=1e28, min=1)
params.add(sc_o2)
#sc_cia = Parameter('scale_cia',8e+24,max=1e28, min=1)
#params.add(sc_cia)
if(fitisrf == True):
if(ISRF == 'SQUEEZE'):
#p1 = squeeze
# params.add(p1)
for i in range(len(index)):
p1 = Parameter('squeeze'+str(i),width[index[i]],max=1.2,min=0.8)
params.add(p1)
p2 = Parameter('sharp'+str(i),sharp[index[i]],max=1.2,min=0.8)
params.add(p2)
if(ISRF == 'GAUSS'):
for i in range(len(index)):
p1 = Parameter('width'+str(i),width[index[i]],max=1,min=0.01)
params.add(p1)
elif(ISRF == 'SUPER'):
for i in range(len(index)):
p1 = Parameter('width'+str(i),width[index[i]],max=1,min=0.01)
p2 = Parameter('shape'+str(i),shape[index[i]],max=5,min=0.01)
                            params.add(p1)
                            params.add(p2)
else:
index = None
fit_kws={'xtol':float(xtol),'ftol':float(ftol)}
##############################
# FIT THE DATA
#first_fit = time.time()
if(whichBand == 'CH4'):
lsqFit = minimize( spectrumResidual_CH4, params, args=(specwave,CH4,CO2,H2O,\
radiance_obs,wavelength_obs,xTrack,pixlimit,fitisrf,ISRF,isrf_lut0,
isrf_w,isrf_dw0,index,refWavelength,refSolarIrradiance),method='leastsq',max_nfev=1000,**fit_kws)
else:
lsqFit = minimize( spectrumResidual_O2, params, args=(specwave,O2,CIA,\
radiance_obs,wavelength_obs,xTrack,pixlimit,fitisrf,ISRF,isrf_lut0,
isrf_w,isrf_dw0,index,refWavelength,refSolarIrradiance),method='leastsq',max_nfev=1000,**fit_kws)
#second_fit = time.time()
#delta = (second_fit - first_fit)
#print('time for that fit = ', delta)
##############################
##############################
# GET THE WAVELENGTH SHIFTS
##############################
p = np.zeros(1)
for i in range(0,1):
p[i] = lsqFit.params['par_f'+str(i)].value
##############################
##############################
# PRINT THE RESULTS OF THE FIT
print(fit_report(lsqFit))
##############################
if(whichBand == 'O2'):
if((lsqFit.params['scale_o2'].stderr == None) or (lsqFit.params['scale_solar'].stderr == None) or \
(lsqFit.params['par_f'+str(0)].stderr == None) ):
error=True
elif(whichBand == 'CH4'):
if((lsqFit.params['scale_co2'].stderr == None) or (lsqFit.params['scale_solar'].stderr == None) or \
(lsqFit.params['par_f'+str(0)].stderr == None) ):
error=True
else:
pass
print(error,whichBand)
if(whichBand == 'CH4' and error == False):
errorch4 = 100*lsqFit.params['scale_ch4'].stderr/lsqFit.params['scale_ch4'].value
errorco2 = 100*lsqFit.params['scale_co2'].stderr/lsqFit.params['scale_co2'].value
errorh2o = 100*lsqFit.params['scale_h2o'].stderr/lsqFit.params['scale_h2o'].value
errorsolar = 100*lsqFit.params['scale_solar'].stderr/lsqFit.params['scale_solar'].value
errorf0 = 100*lsqFit.params['par_f'+str(0)].stderr/lsqFit.params['par_f'+str(0)].value
errorb0CH4=100.0*lsqFit.params['baseline0'].stderr/lsqFit.params['baseline0'].value
errorb1CH4=100.0*lsqFit.params['baseline1'].stderr/lsqFit.params['baseline1'].value
elif(whichBand == 'O2' and error == False):
erroro2 = 100*lsqFit.params['scale_o2'].stderr/lsqFit.params['scale_o2'].value
#errorcia = 100*lsqFit.params['scale_cia'].stderr/lsqFit.params['scale_cia'].value
errorsolar = 100*lsqFit.params['scale_solar'].stderr/lsqFit.params['scale_solar'].value
errorf0 = 100*lsqFit.params['par_f'+str(0)].stderr/lsqFit.params['par_f'+str(0)].value
errorb0o2=100.0*lsqFit.params['baseline0'].stderr/lsqFit.params['baseline0'].value
errorb1o2=100.0*lsqFit.params['baseline1'].stderr/lsqFit.params['baseline1'].value
#errorb2o2=100.0*lsqFit.params['baseline2'].stderr/lsqFit.params['baseline2'].value
else:
pass
            if(fitisrf == True and ISRF in ('GAUSS', 'SUPER')):
                if(index is not None):
                    widtherr = np.zeros(len(index))
                    for i in range(len(index)):
                        if(lsqFit.params['width'+str(i)].stderr is not None):
                            widtherr[i] = 100.0*lsqFit.params['width'+str(i)].stderr/lsqFit.params['width'+str(i)].value
if(whichBand == 'CH4' and error == False):
if( (abs(errorch4) >= 100.0) or (abs(errorco2) >= 100.0) or (abs(errorh2o) >= 100.0) or (abs(errorf0) >= 100.0) or (abs(errorsolar) >= 100.0) ):
error=True
elif(whichBand == 'O2' and error == False):
if( (abs(erroro2) >= 100.0) or (abs(errorf0) >= 100.0) or (abs(errorsolar) >= 100.0) ):
error=True
else:
pass
# STORE FIT RESULTS
if( error == True):
pass
else:
counter = counter + 1
start = 1
if(whichBand == 'CH4'):
f0CH4=lsqFit.params['par_f'+str(0)].value
solarCH4=lsqFit.params['scale_solar'].value
b0CH4=lsqFit.params['baseline0'].value
b1CH4=lsqFit.params['baseline1'].value
scaleco2=lsqFit.params['scale_co2'].value
scalech4=lsqFit.params['scale_ch4'].value
scaleh2o=lsqFit.params['scale_h2o'].value
f.write(str(int(xtrackaggfactor))+' '+str(int(j))+' '+str(f0CH4)+' '+str(solarCH4)+' '+str(b0CH4)+' '+str(b1CH4)+' '+str(scalech4)+' '+str(scaleco2)+' '+str(scaleh2o)+'\n')
f.flush()
if(ISRF=='GAUSS'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['width'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
elif(ISRF=='SQUEEZE'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['squeeze'+str(k)].value)+' '+str(lsqFit.params['sharp'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
elif(ISRF=='SUPER'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['width'+str(k)].value)+' '+str(lsqFit.params['shape'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
else:
f0O2=lsqFit.params['par_f'+str(0)].value
solarO2=lsqFit.params['scale_solar'].value
b0O2=lsqFit.params['baseline0'].value
b1O2=lsqFit.params['baseline1'].value
#b2O2=lsqFit.params['baseline2'].value
scaleo2=lsqFit.params['scale_o2'].value
#f.write(str(int(xtrackaggfactor))+' '+str(int(j))+' '+str(f0O2)+' '+str(solarO2)+' '+str(b0O2)+' '+str(b1O2)+' '+str(b2O2)+' '+str(scaleo2)+'\n')
f.write(str(int(xtrackaggfactor))+' '+str(int(j))+' '+str(f0O2)+' '+str(solarO2)+' '+str(b0O2)+' '+str(b1O2)+' '+str(scaleo2)+'\n')
f.flush()
if(ISRF=='GAUSS'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['width'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
elif(ISRF=='SQUEEZE'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['squeeze'+str(k)].value)+' '+str(lsqFit.params['sharp'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
elif(ISRF=='SUPER'):
g.write(str(int(xtrackaggfactor))+' '+str(int(j)))
g.flush()
for k in range(len(index)):
g.write(' '+str(index[k])+' '+str(lsqFit.params['width'+str(k)].value)+' '+str(lsqFit.params['shape'+str(k)].value))
g.flush()
g.write('\n')
g.flush()
f.close()
g.close()
time.sleep(30)
return()
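# Row format of the fit log written above (one row per cross-track index j):
#   CH4 band: xtrackagg j wvl_shift solar_scale baseline0 baseline1 N_CH4 N_CO2 N_H2O
#   O2 band:  xtrackagg j wvl_shift solar_scale baseline0 baseline1 N_O2
# where the N_* values are the fitted molecular column scale factors.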
###################################################
"""
**************************************************
"""
###################################################
# CH4 CHANNEL FORWARD MODEL
###################################################
def spectrumResidual_CH4(params,specwave,CH4,CO2,H2O,radiance_obs,wavelength_obs,xTrack,pixlimit,fitisrf,ISRF,isrf_lut0,
isrf_w,isrf_dw0,index,refWavelength,refSolarIrradiance):
from scipy import interpolate,optimize
# WAVELENGTH GRID OF INTEREST
gridwidth = pixlimit[1] - pixlimit[0]
d = np.linspace(wavelength_obs[pixlimit[0]],wavelength_obs[pixlimit[1]], gridwidth+1 )
# CONVOLVED SPECTRA IN THE INTERVAL D[:]
a0 = params['baseline0'].value
a1 = params['baseline1'].value
baseline = np.zeros(gridwidth+1)
for i in range(gridwidth+1):
baseline[i] = a0 + (a1 * d[i])
#NOW WE NEED TO CREATE THE SHIFTED SPECTRUM:
newobs_wavelength = np.zeros(gridwidth+1)
# Set up polynomial coefficients to fit ncol<-->wvl
p = np.zeros(1)
for i in range(0,1):
p[i] = params['par_f'+str(i)].value
# UPDATE OBSERVED WAVELENGTHS/SPECTRA
radnew = np.zeros(gridwidth+1)
idx_finite = np.isfinite(radiance_obs)
radinterp = interpolate.interp1d(wavelength_obs[idx_finite],radiance_obs[idx_finite])
Ioriginal = np.zeros(gridwidth+1)
for i in range(gridwidth+1):
newobs_wavelength[i] = d[i] + p[0]
radnew[i] = radinterp(newobs_wavelength[i])
Ioriginal[i] = radinterp(d[i])
if(fitisrf == True):
if( ISRF == 'SQUEEZE'):
width=np.ones(len(isrf_w))
sharp=np.ones(len(isrf_w))
isrf_dw0_new = np.zeros((len(isrf_w),len(isrf_dw0)) )
#CREATE NEW dw0 grids
for i in range(len(isrf_w)):
isrf_dw0_new[i,:] = isrf_dw0
for i in range(len(index)):
width[index[i]] = params['squeeze'+str(i)].value
sharp[index[i]] = params['sharp'+str(i)].value
fwhm0 = FWHM(isrf_dw0,isrf_lut0[index[i],:])
isrf_lut0[index[i],:] = (isrf_lut0[index[i],:])** sharp[index[i]]
fwhm1 = FWHM(isrf_dw0,isrf_lut0[index[i],:])
stretchfactor = fwhm0/fwhm1
isrf_dw0_new[index[i],:] = isrf_dw0 * width[index[i]]*stretchfactor
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
ICH4 = F_isrf_convolve_fft(specwave,CH4,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
ICO2 = F_isrf_convolve_fft(specwave,CO2,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
IH2O = F_isrf_convolve_fft(specwave,H2O,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
elif( ISRF == 'GAUSS'):
width=np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(gaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
for i in range(len(index)):
width[index[i]] = params['width'+str(i)].value
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
ICH4 = F_isrf_convolve_fft(specwave,CH4,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
ICO2 = F_isrf_convolve_fft(specwave,CO2,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
IH2O = F_isrf_convolve_fft(specwave,H2O,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
else:
            width = np.zeros(len(isrf_w))
            shape = np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(supergaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
shape[i] = popt[1]
isrf_super = np.vstack((width,shape))
for i in range(len(index)):
width[index[i]] = params['width'+str(i)].value
shape[index[i]] = params['shape'+str(i)].value
                #UPDATE ISRF WITH NEW SUPERGAUSSIAN PARAMETERS
isrf_super[0,index[i]] = width[index[i]]
isrf_super[1,index[i]] = shape[index[i]]
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
ICH4 = F_isrf_convolve_fft(specwave,CH4,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
ICO2 = F_isrf_convolve_fft(specwave,CO2,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
IH2O = F_isrf_convolve_fft(specwave,H2O,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
else:
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
ICH4 = F_isrf_convolve_fft(specwave,CH4,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
ICO2 = F_isrf_convolve_fft(specwave,CO2,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
IH2O = F_isrf_convolve_fft(specwave,H2O,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
#InewFIT = interpolate.splrep(newobs_wavelength,radnew)
#Inew = interpolate.splev(newobs_wavelength,InewFIT,der=0)
#CREATE SIMULATED SPECTRA
ISOLAR = ISOLAR * params['scale_solar'].value
ICH4 = np.exp(-ICH4 * params['scale_ch4'].value)
ICO2 = np.exp(-ICO2 * params['scale_co2'].value )
IH2O = np.exp(-IH2O * params['scale_h2o'].value )
Isim = np.zeros(gridwidth+1)
Isim = (ICH4 + ICO2 + IH2O ) * ISOLAR + baseline
residual = np.zeros(gridwidth + 1)
residual = Isim - Ioriginal
return ( residual)
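# Forward-model summary for the CH4 band as implemented above:
#   Isim = [exp(-xsec_CH4*N_CH4) + exp(-xsec_CO2*N_CO2) + exp(-xsec_H2O*N_H2O)] * Isolar + baseline
# i.e. the fitted scale_* parameters act as effective slant columns N, and the
# per-gas transmittances are summed (not multiplied) before the solar scaling
# and the linear baseline are applied.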
###################################################
"""
**************************************************
"""
###################################################
# O2 CHANNEL FORWARD MODEL
###################################################
def spectrumResidual_O2(params,specwave,O2,CIA,radiance_obs,wavelength_obs,xTrack,pixlimit,fitisrf,ISRF,isrf_lut0,
isrf_w,isrf_dw0,index,refWavelength,refSolarIrradiance):
from scipy import interpolate,optimize
gridwidth = 1*(pixlimit[1] - pixlimit[0])
d = np.linspace(wavelength_obs[pixlimit[0]],wavelength_obs[pixlimit[1]], gridwidth+1 )
a0 = params['baseline0'].value
a1 = params['baseline1'].value
#a2 = params['baseline2'].value
# b1 = params['al1'].value
# b2 = params['al2'].value
# albedo = np.zeros(width+1)
baseline = np.zeros(gridwidth+1)
for i in range(gridwidth+1):
# albedo[i] = 1.0 + (b1 * (d[i] - 1250)) + (b2 * (d[i] - 1250.0)**2)
#baseline[i] = a0 + (a1 * d[i]) + (a2*d[i]*d[i])
baseline[i] = a0 + (a1 * d[i]) #+ (a2*d[i]*d[i])
#NOW WE NEED TO CREATE THE SHIFTED SPECTRUM:
newobs_wavelength = np.zeros(gridwidth+1)
# Set up polynomial coefficients to fit ncol<-->wvl
p = np.zeros(1)
for i in range(0,1):
p[i] = params['par_f'+str(i)].value
# UPDATE OBSERVED WAVELENGTHS/SPECTRA
radnew = np.zeros(gridwidth+1)
idx_finite = np.isfinite(radiance_obs)
IobsFIT = interpolate.splrep(wavelength_obs[idx_finite],radiance_obs[idx_finite])
Ioriginal = interpolate.splev(d,IobsFIT,der=0)
radinterp = interpolate.interp1d(wavelength_obs[idx_finite],radiance_obs[idx_finite])
for i in range(gridwidth+1):
newobs_wavelength[i] = d[i] + p[0]
radnew = radinterp(newobs_wavelength)
#InewFIT = interpolate.splrep(newobs_wavelength,radnew)
#Inew = interpolate.splev(newobs_wavelength,InewFIT,der=0)
if(fitisrf == True):
if( ISRF == 'SQUEEZE'):
width=np.ones(len(isrf_w))
sharp=np.ones(len(isrf_w))
isrf_dw0_new = np.zeros((len(isrf_w),len(isrf_dw0)) )
#CREATE NEW dw0 grids
for i in range(len(isrf_w)):
isrf_dw0_new[i,:] = isrf_dw0
for i in range(len(index)):
width[index[i]] = params['squeeze'+str(i)].value
sharp[index[i]] = params['sharp'+str(i)].value
fwhm0 = FWHM(isrf_dw0,isrf_lut0[index[i],:])
isrf_lut0[index[i],:] = (isrf_lut0[index[i],:])** sharp[index[i]]
fwhm1 = FWHM(isrf_dw0,isrf_lut0[index[i],:])
stretchfactor = fwhm0/fwhm1
isrf_dw0_new[index[i],:] = isrf_dw0 * width[index[i]]*stretchfactor
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
IO2 = F_isrf_convolve_fft(specwave,O2,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
ICIA = F_isrf_convolve_fft(specwave,CIA,newobs_wavelength,isrf_w,isrf_dw0_new,isrf_lut0,ISRF,fitisrf)
elif( ISRF == 'GAUSS'):
width=np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(gaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
for i in range(len(index)):
width[index[i]] = params['width'+str(i)].value
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
IO2 = F_isrf_convolve_fft(specwave,O2,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
ICIA = F_isrf_convolve_fft(specwave,CIA,newobs_wavelength,isrf_w,isrf_dw0,width,ISRF,fitisrf)
else:
            width = np.zeros(len(isrf_w))
            shape = np.zeros(len(isrf_w))
for i in range(len(isrf_w)):
popt, pcov = optimize.curve_fit(supergaussian,isrf_dw0,isrf_lut0[i,:])
width[i] = popt[0]
shape[i] = popt[1]
isrf_super = np.vstack((width,shape))
for i in range(len(index)):
width[index[i]] = params['width'+str(i)].value
shape[index[i]] = params['shape'+str(i)].value
                #UPDATE ISRF WITH NEW SUPERGAUSSIAN PARAMETERS
isrf_super[0,index[i]] = width[index[i]]
isrf_super[1,index[i]] = shape[index[i]]
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
ICIA = F_isrf_convolve_fft(specwave,CIA,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
IO2 = F_isrf_convolve_fft(specwave,O2,newobs_wavelength,isrf_w,isrf_dw0,isrf_super,ISRF,fitisrf)
else:
ISOLAR = F_isrf_convolve_fft(refWavelength,refSolarIrradiance,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
IO2 = F_isrf_convolve_fft(specwave,O2,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
ICIA = F_isrf_convolve_fft(specwave,CIA,newobs_wavelength,isrf_w,isrf_dw0,isrf_lut0,ISRF,fitisrf)
#CREATE SIMULATED SPECTRA
ISOLAR = ISOLAR * params['scale_solar'].value
IO2 = np.exp(-IO2 * params['scale_o2'].value )
ICIA = np.exp(-ICIA * (params['scale_o2'].value**2)/1E20 ) #np.exp(-ICIA* params['scale_cia'].value )
Isim = np.zeros(gridwidth+1)
Isim = (( IO2 + ICIA ) *ISOLAR) + baseline
residual = Isim - Ioriginal
#plt.plot(newobs_wavelength,Ioriginal,label='Iobs')
#plt.plot(newobs_wavelength,Isim,label='Isim')
#plt.legend()
#plt.savefig('test_o2_fit.png')
#plt.close()
#exit()
return ( residual)
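# In the O2 band the collision-induced absorption (CIA) optical depth above is
# tied to the O2 fit as scale_o2**2/1e20, reflecting the quadratic dependence of
# CIA on the O2 column, so no independent CIA scale parameter is fitted here.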
| conwayek/MethaneAIR_L0-L1B | wavecal_routines.py | wavecal_routines.py | py | 52,743 | python | en | code | 1 | github-code | 50 |
6302595863 | import pytest
from FeedAutofocus import Client, fetch_indicators_command, get_indicators_command
from CommonServerPython import *
INDICATORS = [
"d4da1b2d5554587136f2bcbdf0a6a1e29ab83f1d64a4b2049f9787479ad02fad",
"19.117.63.253",
"19.117.63.253:8080",
"domaintools.com",
"flake8.pycqa.org/en/latest",
"19.117.63.253/28",
"2001:db8:85a3:8d3:1319:8a2e:370:7348",
"2001:db8:85a3:8d3:1319:8a2e:370:7348/32",
"19.117.63.253:28/other/path"
]
TYPES = [
"File",
"IP",
"URL",
"Domain",
"URL",
"CIDR",
"IPv6",
"IPv6CIDR",
"URL"
]
@pytest.fixture()
def auto_focus_client():
return Client(api_key="a", insecure=False, proxy=None, indicator_feeds=['Daily Threat Feed'])
def test_url_format(auto_focus_client):
url1 = "https://autofocus.paloaltonetworks.com/IOCFeed/{ID}/{Name}"
url2 = "autofocus.paloaltonetworks.com/IOCFeed/{ID2}/{Name2}"
assert auto_focus_client.url_format(url1) == "https://autofocus.paloaltonetworks.com/api/v1.0/IOCFeed/{ID}/{Name}"
assert auto_focus_client.url_format(url2) == "https://autofocus.paloaltonetworks.com/api/v1.0/IOCFeed/{ID2}/{Name2}"
@pytest.mark.parametrize('tlp_color', ['', None, 'AMBER'])
def test_feed_tags_param(mocker, auto_focus_client, tlp_color):
"""Unit test
Given
- fetch indicators command
- command args
- command raw response
- tlp_color
When
- mock the feed tags param.
    - mock the Client's daily_custom_http_request.
    Then
    - run the fetch indicators command using the Client
    Validate the value of the tags field.
    Validate the value of the trafficlightprotocol indicator field.
"""
mocker.patch.object(auto_focus_client, 'daily_custom_http_request', return_value=INDICATORS)
indicators = fetch_indicators_command(auto_focus_client, ['test_tag'], tlp_color)
assert indicators[0].get('fields').get('tags') == ['test_tag']
if tlp_color:
assert indicators[0].get('fields').get('trafficlightprotocol') == tlp_color
else:
assert not indicators[0].get('fields').get('trafficlightprotocol')
INDICATORS_CLASSIFICATION_DATA = [
(
"1.1.1.1/path", FeedIndicatorType.URL
),
(
"1.1.1.1:8080", FeedIndicatorType.URL
),
(
"19.117.63.253:28/other/path", FeedIndicatorType.URL
),
(
"19.117.63.253:28/path", FeedIndicatorType.URL
),
(
'1.1.1.1/7', FeedIndicatorType.CIDR
),
(
"1.1.1.1/7/server/somestring/something.php?fjjasjkfhsjasofds=sjhfhdsfhasld", FeedIndicatorType.URL
),
(
"1.1.1.1/7/server", FeedIndicatorType.URL
),
(
"d4da1b2d5554587136f2bcbdf0a6a1e29ab83f1d64a4b2049f9787479ad02fad", FeedIndicatorType.File
),
(
"domaintools.com", FeedIndicatorType.Domain
),
(
"test.test.com", FeedIndicatorType.Domain
),
(
"flake8.pycqa.org/en/latest", FeedIndicatorType.URL
),
(
"19.117.63.253/28", FeedIndicatorType.CIDR,
),
(
"2001:db8:85a3:8d3:1319:8a2e:370:7348", FeedIndicatorType.IPv6
),
(
"2001:db8:85a3:8d3:1319:8a2e:370:7348/path/path", FeedIndicatorType.URL
),
(
"2001:db8:85a3:8d3:1319:8a2e:370:7348/32", FeedIndicatorType.IPv6CIDR
),
(
"2001:db8:85a3:8d3:1319:8a2e:370:7348/path", FeedIndicatorType.URL
),
(
"2001:db8:85a3:8d3:1319:8a2e:370:7348/32/path", FeedIndicatorType.URL
)
]
@pytest.mark.parametrize('indicator, expected_indicator_type', INDICATORS_CLASSIFICATION_DATA)
def test_indicator_classified_to_the_correct_type(mocker, auto_focus_client, indicator, expected_indicator_type):
"""
Given
- an indicator as string.
When
- trying to find the indicator type.
Then
- the indicator is classified correctly.
"""
mocker.patch.object(auto_focus_client, 'daily_custom_http_request')
assert auto_focus_client.find_indicator_type(indicator=indicator) == expected_indicator_type
def test_get_indicators_command(mocker, auto_focus_client):
"""
Given
- indicators list
When
- getting all the indicators
Then
- make sure the indicator type and values are returned correctly.
"""
mocker.patch.object(auto_focus_client, 'daily_custom_http_request', return_value=INDICATORS)
_, _, indicators = get_indicators_command(auto_focus_client, {}, feed_tags=[], tlp_color=None)
    for i in range(len(INDICATORS)):
assert indicators[i]['type'] == TYPES[i]
assert indicators[i]['value'] in INDICATORS[i]
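# These tests exercise only the FeedAutofocus module plus CommonServerPython and
# can be run with pytest (sketch; the exact invocation depends on the repo's
# demisto test tooling):
#   pytest FeedAutofocus_test.py -q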
| demisto/content | Packs/AutoFocus/Integrations/FeedAutofocus/FeedAutofocus_test.py | FeedAutofocus_test.py | py | 4,567 | python | en | code | 1,023 | github-code | 50 |
3765447154 | from typing import *
class Solution:
def xorOperation(self, n: int, start: int) -> int:
"""
Time: O(n)
Space: O(1)
"""
result = start
nums = [start]
for i in range(1, n):
nums.append(start + 2*i)
for i in range(1, n):
result ^= nums[i]
return result | rajpatel5/LeetCode | Python Solutions/Easy/1486.py | 1486.py | py | 398 | python | en | code | 0 | github-code | 50 |
45567581669 | import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
BayesianRidge,
)
from sklearn.svm import SVR, SVC
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import (
accuracy_score,
roc_auc_score,
confusion_matrix,
classification_report,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import make_pipeline
import pickle
import codecs
import os
from log import client_logger
import json
def overview(ctx):
def detail(vn):
v = ctx.get(vn)
if v is not None:
client_logger.debug(f"{vn} = {v}, shape {np.shape(v)}")
else:
client_logger.debug(f"{vn} = {v}")
vns = [
"X",
"y",
"X_train",
"y_train",
"X_test",
"y_test",
"cv_score",
"mean_accuracy_score",
"y_pred",
]
client_logger.debug(f"ins_id = {ctx.get('ins_id')}")
for vn in vns:
detail(vn)
def configure(ctx, ins_id, opts: dict):
start_time = opts["time"]
interval = opts["sample_interval_nsecs"] + opts["round_interval_nsecs"]
X = []
uts = opts["result"]
for i in range(len(uts)):
X.append(start_time + i * interval)
X = np.array([(x, y) for x, y in zip(X, uts)])
# if "X" in ctx:
# X = np.vstack((ctx["X"], X))
y = []
for ut in np.array(uts):
if ut >= 10:
y.append(1) # busy
elif ut >= 1 and ut < 10:
# either busy or idle
y.append(int.from_bytes(os.urandom(1), "little") & 1)
else:
y.append(0) # idle
y = np.array(y)
# if "y" in ctx:
# y = np.hstack((ctx["y"], y))
ctx["X"] = X
ctx["y"] = y
return ctx
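# Labeling rule used by configure(): utilization >= 10 -> busy (1),
# utilization < 1 -> idle (0), and 1 <= utilization < 10 -> a coin flip taken
# from the low bit of os.urandom, so the ambiguous middle band gets a random class.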
def train(ctx):
# Prepare dataset
if ctx["offline"]:
X_train = load_from_local(ctx["ins_id"], "X_train")
y_train = load_from_local(ctx["ins_id"], "y_train")
else:
X_train = ctx["X"]
y_train = ctx["y"]
# X_train, X_test, y_train, y_test = train_test_split(X, y)
ctx["X_train"] = X_train
ctx["y_train"] = y_train
# ctx["X_test"] = X_test
# ctx["y_test"] = y_test
# Training with different models
models = {
# "linear": LinearRegression(),
"svc": SVC(kernel="poly"),
"logistic": LogisticRegression(solver="liblinear"),
"knn": KNeighborsClassifier(n_neighbors=6),
"ann": MLPClassifier(activation="relu", alpha=0.005)
# "bayesian": BayesianRidge(),
}
ctx["models"] = {}
for model_name, model in models.items():
# client_logger.info(f"model {model_name} is training ...")
model.fit(X_train, y_train)
ctx["models"][model_name] = pickle.dumps(model)
return ctx
def cv_score(ctx):
X_train = ctx["X_train"]
y_train = ctx["y_train"]
ins_id = ctx["ins_id"]
ctx["cv_score"] = {}
for model_name, model in ctx["models"].items():
model = pickle.loads(model)
model_score = np.mean(cross_val_score(model, X_train, y_train))
client_logger.info(
f"ins {ins_id} model {model_name} cv_score {model_score}"
)
ctx["cv_score"][model_name] = model_score
return ctx
def predict(ctx):
ins_id = ctx["ins_id"]
if ctx["offline"]:
X_test = load_from_local(ins_id, "X_test")
y_test = load_from_local(ins_id, "y_test")
else:
X_test = ctx["X_test"]
y_test = ctx["y_test"]
ctx["y_pred"] = {}
ctx["mean_accuracy_score"] = {}
ctx["roc_auc_score"] = {}
ctx["confusion_matrix"] = {}
ctx["accuracy"] = {}
ctx["clf_report"] = {}
ctx["clf_report_str"] = {}
for model_name, model in ctx["models"].items():
model = pickle.loads(model)
y_pred = model.predict(X_test)
model_score = model.score(X_test, y_test)
ctx["mean_accuracy_score"][model_name] = model_score
try:
ras = roc_auc_score(y_test, y_pred)
except ValueError:
ras = 0
try:
acc_score = accuracy_score(y_test, y_pred)
except ValueError:
acc_score = 0
ctx["roc_auc_score"][model_name] = ras
cm = confusion_matrix(y_test, y_pred)
ctx["confusion_matrix"][model_name] = cm
ctx["accuracy"][model_name] = acc_score
clf_report = classification_report(y_test, y_pred, output_dict=True)
ctx["clf_report"][model_name] = clf_report
clf_report_str = classification_report(y_test, y_pred)
ctx["clf_report_str"][model_name] = clf_report_str
mean_accuracy_score_on_train = ctx["mean_accuracy_score"][model_name]
client_logger.info(
" ".join(
[
f"ins {ins_id} model {model_name}",
f"mean_accuracy_score on train {mean_accuracy_score_on_train}",
f"mean_accuracy_score on test {model_score}",
f"accuracy {acc_score}",
f"roc_auc_score {ras}",
f"clf_report {clf_report}",
f"confiusion_matrx {cm}",
]
)
)
y_pred = np.round(y_pred)
ctx["y_pred"][model_name] = y_pred
save(ctx)
return ctx
def save(ctx):
def gen_fname(stem, suffix=".txt"):
prefix = f"{ctx['ins_id']}"
os.makedirs(f"data/{prefix}", exist_ok=True)
return f"data/{prefix}/{stem}{suffix}"
np.savetxt(gen_fname("X_train"), ctx["X_train"])
np.savetxt(gen_fname("y_train"), ctx["y_train"])
np.savetxt(gen_fname("y_test"), ctx["y_test"])
np.savetxt(gen_fname("X_test"), ctx["X_test"])
for model in ctx["models"].keys():
np.save(gen_fname(f"model-{model}", ".npy"), ctx["models"][model])
with open(gen_fname(f"clf_report-{model}.json"), "w") as f:
json.dump(ctx['clf_report'][model], f, indent=2)
for metric in [
"y_pred",
"mean_accuracy_score",
"cv_score",
"roc_auc_score",
"confusion_matrix",
"accuracy",
]:
stem = f"{metric}-{model}"
y = ctx[metric][model]
if len(np.shape(ctx[metric][model])) == 0:
y = np.array([y])
np.savetxt(gen_fname(stem), y)
def select_best(ctx):
models = [k for k in ctx["models"].keys()]
evaluations = []
for model in models:
evaluations.append(
{
"ins_id": ctx["ins_id"],
"model": model,
"mean_accuracy_score": ctx["mean_accuracy_score"][model],
"roc_auc_score": ctx["roc_auc_score"][model],
}
)
client_logger.info(f"ins_id = {ctx['ins_id']}")
for evaluation in evaluations:
evaluation["total"] = (
evaluation["mean_accuracy_score"] + evaluation["roc_auc_score"]
)
best_score = sorted(evaluations, key=lambda item: item["total"])[-1]
return (best_score["model"], best_score["mean_accuracy_score"])
def load_from_local(ins_id: str, selector, prefix=None, suffix=".txt"):
if prefix is None:
prefix = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "data"
)
instances = []
for p in os.listdir(prefix):
        if os.path.isdir(os.path.join(prefix, p)):
instances.append(p)
if ins_id not in instances:
return None
path = os.path.join(prefix, ins_id)
    if isinstance(selector, str):
        path = os.path.join(path, selector + suffix)
        return np.loadtxt(path)
    elif isinstance(selector, list):
        ret = []
        for token in selector:
            ret.append(np.loadtxt(os.path.join(path, token + suffix)))
return ret
else:
return None
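# Minimal usage sketch (assumes a data/<ins_id>/ directory previously written by
# save(); the instance id "demo" below is hypothetical):
if __name__ == "__main__":
    X_demo = load_from_local("demo", "X_train")
    if X_demo is not None:
        client_logger.debug(f"loaded X_train with shape {np.shape(X_demo)}")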
| t1anchen/peer-py | client/e2e/ml.py | ml.py | py | 8,012 | python | en | code | 1 | github-code | 50 |
71786589594 | # My own solution.
n, k = map(int, input().split())
cnt = [[0] * 201 for _ in range(201)] # 0 may be used as an addend, so at least one representation always exists.
cnt[0] = [0] + [1] * 200
for i in range(1, 201):
cnt[i][1] = 1
for i in range(1, 201):
for j in range(1, 201):
for h in range(i+1):
cnt[i][j] += cnt[i - h][j - 1]
cnt[i][j] %= 1000000000
print(cnt[n][k])
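# The version above recomputes an O(n) inner sum for every dp cell, giving
# O(n^2 * k) time; the version below collapses that sum into a prefix relation
# and runs in O(n * k).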
# Based on someone else's code (much faster).
n, k = map(int, input().split())
dp = [[0] * (k + 1) for _ in range(n + 1)]
dp[0][0] = 1
for i in range(n + 1):
for j in range(1, k + 1):
dp[i][j] = (dp[i - 1][j] + dp[i][j - 1]) % 1000000000 # ***
print(dp[n][k])
# *** explanation: the number of ways to express n as a sum of k integers equals
# (ways to express n-1 with k integers) + (ways to express n with k-1 integers).
# The first term works because adding 1 to any representation of n-1 using k integers yields a representation of n using k integers.
# The second term works because appending a 0 (rather than adding to an existing term) to each representation of n with k-1 integers completes it. | JH-TT/Coding_Practice | BaekJoon/Dynamic/2225.py | 2225.py | py | 1,165 | python | ko | code | 0 | github-code | 50
35995079594 | from django import forms
from core.constants import *
from core.models import Coach, StartingLineup
from core.utilities import populate_quarter
class PlayerEntryForm(forms.Form):
profile_image = forms.ImageField(required=False)
player_number = forms.IntegerField(min_value=0, max_value=99)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
position = forms.CharField(widget=forms.Select(choices=POSITION_CHOICES))
class_standing = forms.CharField(
widget=forms.Select(choices=CLASS_STANDING_CHOICES), required=False)
weight_pounds = forms.IntegerField(min_value=0, required=False)
height_feet = forms.IntegerField(min_value=0, required=False)
height_inches = forms.IntegerField(min_value=0, required=False)
major = forms.CharField(max_length=100, required=False)
hometown = forms.CharField(max_length=100, required=False)
def roster_player_form_factory(request, roster=None, **kwargs):
# If the user just lands on the page with a GET request, return an empty form.
if request.method == "GET":
return PlayerEntryForm(**kwargs)
# Determine all the valid player numbers for the roster.
player_numbers = [player.player_number for player in
roster.player_set.iterator()]
class __PlayerEntryForm(PlayerEntryForm):
def clean_player_number(self):
if self.cleaned_data["player_number"] in player_numbers:
self.add_error("player_number", forms.ValidationError(
"Added player is already in the selected roster!"))
if "player_number" in self.cleaned_data:
return self.cleaned_data["player_number"]
else:
return
# Return with the new form and pass it the POST request.
return __PlayerEntryForm(request.POST, **kwargs)
class RosterEntryForm(forms.Form):
school = forms.CharField(max_length=100)
team_name = forms.CharField(max_length=50)
# Create a form that restricts available players to those within
# the coach's roster.
def starting_lineup_form_factory(request, default=False):
# Get this coach user's roster.
coach = Coach.objects.filter(user=request.user).first()
roster = coach.roster
player_set = roster.player_set.all()
# Partition the roster into four core positions.
attack_ids, mid_ids, defend_ids, goalie_ids = [], [], [], []
for player in player_set:
if player.position in "ATT":
attack_ids.append(player.id)
elif player.position in ["MID", "SSDM", "LSM", "FOGO"]:
mid_ids.append(player.id)
elif player.position in "DEF":
defend_ids.append(player.id)
else:
goalie_ids.append(player.id)
# Convert to querysets.
attack_set = player_set.filter(id__in=attack_ids)
mid_set = player_set.filter(id__in=mid_ids)
defend_set = player_set.filter(id__in=defend_ids)
goalie_set = player_set.filter(id__in=goalie_ids)
# Create the form using the created querysets.
# A lacrosse team can only have ten players on the field.
class StartingLineupForm(forms.Form):
attackmen = forms.ModelMultipleChoiceField(queryset=attack_set,
help_text="Ctrl Click - Select 3 Attackmen")
midfielders = forms.ModelMultipleChoiceField(queryset=mid_set,
help_text="Ctrl Click - Select 3 Midfielders")
defensemen = forms.ModelMultipleChoiceField(queryset=defend_set,
help_text="Ctrl Click - Select 3 Defensemen")
goalie = forms.ModelChoiceField(widget=forms.Select,
queryset=goalie_set)
def clean_attackmen(self):
attackmen = self.cleaned_data["attackmen"]
if not default:
                if len(attackmen) != 3:
self.add_error("attackmen", forms.ValidationError(
"You must select exactly three attackmen."))
return attackmen
def clean_midfielders(self):
midfielders = self.cleaned_data["midfielders"]
if not default:
                if len(midfielders) != 3:
self.add_error("midfielders", forms.ValidationError(
"You must select exactly three midfielders."))
return midfielders
def clean_defensemen(self):
defensemen = self.cleaned_data["defensemen"]
if not default:
                if len(defensemen) != 3:
self.add_error("defensemen", forms.ValidationError(
"You must select exactly three defensemen."))
return defensemen
def clean(self):
# Populate the list of substitutes players with those remaining.
cleaned_data = super().clean()
attackmen = list(self.cleaned_data["attackmen"])
midfielders = list(self.cleaned_data["midfielders"])
defensemen = list(self.cleaned_data["defensemen"])
goalie = [self.cleaned_data["goalie"]]
starting_players = attackmen + midfielders + defensemen + goalie
cleaned_data["substitutes"] = [player for player in player_set if
player not in starting_players]
return cleaned_data
if default:
form = StartingLineupForm()
else:
form = StartingLineupForm(request.POST)
return form
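# Typical view-side usage (sketch; the view wiring below is an assumption, not
# part of this module):
#   form = starting_lineup_form_factory(request, default=(request.method == "GET"))
#   if request.method == "POST" and form.is_valid():
#       data = form.cleaned_data  # includes the computed "substitutes" list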
# Define Scorebook modal forms below.
class CreateScorebookForm(forms.Form):
home_school = forms.CharField(max_length=100)
home_team_name = forms.CharField(max_length=50)
visiting_school = forms.CharField(max_length=100)
visiting_team_name = forms.CharField(max_length=50)
# time_created = forms.TimeField()
class ScorebookScoreForm(forms.Form):
minutes = forms.IntegerField(min_value=0, max_value=90)
seconds = forms.IntegerField(min_value=0, max_value=59)
quarter = forms.CharField(widget=forms.HiddenInput(), required=False)
goal_jersey = forms.IntegerField(min_value=0)
assist_jersey = forms.IntegerField(min_value=0, required=False)
def clean(self):
cleaned_data = super(ScorebookScoreForm, self).clean()
return populate_quarter(cleaned_data)
def running_score_form_factory(request, scorebook=None, roster=None, **kwargs):
# If the user just lands on the page with a GET request, return an empty form.
if request.method == "GET":
return ScorebookScoreForm(**kwargs)
# Parse the POST request to see if the home/visiting roster should be used.
if roster:
roster = roster
elif "home" in str(request.POST).lower():
roster = scorebook.home_coach.roster
else:
roster = scorebook.visiting_coach.roster
# Determine all the valid player numbers for the roster.
player_numbers = [player.player_number for player in
roster.player_set.iterator()]
# Define a child of the ScoreForm which validates the player numbers.
class __ScorebookScoreForm(ScorebookScoreForm):
def clean_goal_jersey(self):
if self.cleaned_data["goal_jersey"] not in player_numbers:
self.add_error("goal_jersey", forms.ValidationError(
"Goal jersey is not in the selected roster!"))
if "goal_jersey" in self.cleaned_data:
return self.cleaned_data["goal_jersey"]
else:
return
def clean_assist_jersey(self):
if self.cleaned_data["assist_jersey"] is None:
return
elif self.cleaned_data["assist_jersey"] not in player_numbers:
self.add_error("assist_jersey", forms.ValidationError(
"Assist jersey is not in the selected roster!"))
if "assist_jersey" in self.cleaned_data:
return self.cleaned_data["assist_jersey"]
else:
return
def clean(self):
cleaned_data = super().clean()
if "goal_jersey" in cleaned_data and "assist_jersey" in cleaned_data:
if cleaned_data["assist_jersey"] == cleaned_data[
"goal_jersey"] is not None:
self.add_error("assist_jersey", forms.ValidationError(
"The same player cannot be marked as the goal and assist jersey!"))
if not self.errors:
if cleaned_data["goal_jersey"]:
player = roster.player_set.filter(
player_number=self.cleaned_data[
"goal_jersey"]).first()
player.statistics.shots += 1
player.statistics.goals += 1
player.statistics.save()
if cleaned_data["assist_jersey"]:
player = roster.player_set.filter(
player_number=self.cleaned_data[
"assist_jersey"]).first()
player.statistics.assists += 1
player.statistics.save()
# Set quarter played.
players = [
roster.player_set.filter(
player_number=self.cleaned_data["goal_jersey"]).first(),
roster.player_set.filter(
player_number=self.cleaned_data[
"assist_jersey"]).first()
]
for player in players:
if player:
if cleaned_data["quarter"] == "I":
player.statistics.first_quarter = True
elif cleaned_data["quarter"] == "II":
player.statistics.second_quarter = True
elif cleaned_data["quarter"] == "III":
player.statistics.third_quarter = True
elif cleaned_data["quarter"] == "IV":
player.statistics.fourth_quarter = True
elif cleaned_data["quarter"] == "OT":
player.statistics.overtime = True
player.statistics.save()
# Return with the new form and pass it the POST request.
return __ScorebookScoreForm(request.POST, **kwargs)
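# Pattern note: the factory returns a throwaway subclass whose clean_* methods
# close over the roster selected from the POST payload, so jersey-number
# validation (and the statistics updates in clean()) always run against the
# correct team's roster.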
# Abstract Penalty Form.
class ScorebookPenaltyForm(forms.Form):
minutes = forms.IntegerField(min_value=0, max_value=90)
seconds = forms.IntegerField(min_value=0, max_value=59)
quarter = forms.CharField(widget=forms.HiddenInput(), required=False)
player_number = forms.IntegerField(min_value=0)
infraction = forms.CharField()
def clean(self):
cleaned_data = super(ScorebookPenaltyForm, self).clean()
return populate_quarter(cleaned_data)
# Personal Foul Penalty Form.
class ScorebookPersonalFoulForm(ScorebookPenaltyForm):
infraction = forms.CharField(widget=forms.Select(choices=PERSONAL_FOULS))
# Technical Foul Penalty Form
class ScorebookTechnicalFoulForm(ScorebookPenaltyForm):
infraction = forms.CharField(widget=forms.Select(choices=TECHNICAL_FOULS))
def penalty_form_factory(request, scorebook=None, is_personal=True, roster=None,
**kwargs):
# If the user just lands on the page with a GET request, return an empty form.
if request.method == "GET":
if is_personal:
return ScorebookPersonalFoulForm(**kwargs)
else:
return ScorebookTechnicalFoulForm(**kwargs)
# Parse the POST request to see if the home/visiting roster should be used.
if roster:
roster = roster
elif "home" in str(request.POST).lower():
roster = scorebook.home_coach.roster
else:
roster = scorebook.visiting_coach.roster
# Determine all the valid player numbers for the roster.
player_numbers = [player.player_number for player in
roster.player_set.iterator()]
# Define a child of the PenaltyForm which validates the player numbers.
if is_personal:
form = ScorebookPersonalFoulForm
else:
form = ScorebookTechnicalFoulForm
class __ScorebookPenaltyForm(form):
def clean_player_number(self):
if self.cleaned_data["player_number"] not in player_numbers:
self.add_error("player_number", forms.ValidationError(
"Penalized player is not in the selected roster!"))
if "player_number" in self.cleaned_data:
return self.cleaned_data["player_number"]
else:
return
# Return with the new form and pass it the POST request.
return __ScorebookPenaltyForm(request.POST, **kwargs)
class ScorebookTimeoutForm(forms.Form):
minutes = forms.IntegerField(min_value=0, max_value=90)
seconds = forms.IntegerField(min_value=0, max_value=59)
quarter = forms.CharField(widget=forms.HiddenInput(), required=False)
def timeout_form_factory(request, scorebook=None, timeouts=None, **kwargs):
# If the user just lands on the page with a GET request, return an empty form.
if request.method == "GET":
return ScorebookTimeoutForm(**kwargs)
# Parse the POST request to see if the home/visiting timeouts should be used.
if timeouts:
timeouts = timeouts.iterator()
elif "home" in str(request.POST).lower():
timeouts = scorebook.timeouts.home.iterator()
else:
timeouts = scorebook.timeouts.visiting.iterator()
# Determine how many timeouts have been made each half.
quarters = [timeout.quarter for timeout in timeouts]
first_half = quarters.count("I") + quarters.count("II")
second_half = quarters.count("III") + quarters.count("IV")
overtime = quarters.count("OT")
class __ScorebookTimeoutForm(ScorebookTimeoutForm):
def clean(self):
cleaned_data = super().clean()
cleaned_data = populate_quarter(cleaned_data)
quarter = cleaned_data["quarter"]
if quarter in ["I", "II"] and first_half >= 2:
self.add_error("minutes", forms.ValidationError(
"Team cannot have more than two timeouts in the first half!"))
if quarter in ["III", "IV"] and second_half >= 2:
self.add_error("minutes", forms.ValidationError(
"Team cannot have more than two timeouts in the second half!"))
if quarter in "OT" and overtime >= 1:
self.add_error("minutes", forms.ValidationError(
"Team cannot have more than one timeout in overtime!"))
# Return with the new form and pass it the POST request.
return __ScorebookTimeoutForm(request.POST, **kwargs)
class ScorebookPlayerForm(forms.Form):
player_number = forms.IntegerField(min_value=0)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
position = forms.CharField(widget=forms.Select(choices=POSITION_CHOICES))
def player_form_factory(request, scorebook=None, roster=None, **kwargs):
# If the user just lands on the page with a GET request, return an empty form.
if request.method == "GET":
return ScorebookPlayerForm(**kwargs)
# Parse the POST request to see if the home/visiting roster should be used.
if roster:
roster = roster
elif "home" in str(request.POST).lower():
roster = scorebook.home_coach.roster
else:
roster = scorebook.visiting_coach.roster
# Determine all the valid player numbers for the roster.
player_numbers = [player.player_number for player in
roster.player_set.iterator()]
class __ScorebookPlayerForm(ScorebookPlayerForm):
def clean_player_number(self):
if self.cleaned_data["player_number"] in player_numbers:
self.add_error("player_number", forms.ValidationError(
"Added player is already in the selected roster!"))
if "player_number" in self.cleaned_data:
return self.cleaned_data["player_number"]
else:
return
# Return with the new form and pass it the POST request.
return __ScorebookPlayerForm(request.POST, **kwargs)
class ScorebookImportLineup(forms.Form):
lineup = forms.ModelChoiceField(queryset=StartingLineup.objects.all())
| ch0164/lacrosse_scoreboard | core/forms.py | forms.py | py | 16,663 | python | en | code | 0 | github-code | 50 |
34918108394 |
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
from pyproj import Geod
app = Flask('My_orto')
CORS(app)
@app.route('/')
def hi():
return 'hi'
# @app.route('/get', methods = ['get', 'post'])
# def get():
# parameters_map = request.get_json(force = True)
# parameters_map['a'] = parameters_map['a']*2
# return parameters_map
@app.route("/orto", methods = ['post'])
def ortoHandler():
body = request.get_json()
response = { 'points': orto(body['startPos1'], body['startPos2'], body['n']) }
return response
def orto (start_pos1, start_pos2, n) :
lon0 = start_pos1[0]
lat0 = start_pos1[1]
lon1 = start_pos2[0]
lat1 = start_pos2[1]
n_extra_points = n
geoid = Geod(ellps="WGS84")
extra_points = geoid.npts(lon0, lat0, lon1, lat1, n_extra_points)
return extra_points
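# Example request against /orto (sketch; the coordinates are illustrative):
#   POST /orto  {"startPos1": [-73.78, 40.64], "startPos2": [2.55, 49.01], "n": 10}
# responds with {"points": [...]}, the n intermediate (lon, lat) pairs along the
# WGS84 great circle (orthodrome) between the two endpoints.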
app.run(port = 7000, debug = True)
| artkon2712/TestTask | test.py | test.py | py | 977 | python | en | code | 0 | github-code | 50 |
767651859 | import math
def IMT(weight, height):
    bmi = weight / (math.pow(height, 2))
    if 18.5 <= bmi <= 25:
        return "Optimal mass"
    elif bmi < 18.5:
        return "Insufficient mass"
    else:
        return "Excess mass"
weight = float(input())
height = float(input())
print(IMT(weight, height))
| VisteN2203/PythonBreedACourseForAdvanced.py | 2.0 Повторяем основные конструкции языка Python/2.1 Часть 1/main 2.1-2 title - Индекс массы тела.py | main 2.1-2 title - Индекс массы тела.py | py | 341 | python | ru | code | 0 | github-code | 50 |
13875478564 | # reference/source:
import numpy as np
import numpy.matlib as matlib  # required by the matrix-based helpers below
from copy import deepcopy
import matplotlib.pyplot as plt
E = 2.718281828459045
def funTest(x,args=np.array([])):
return pow(E,x) - 2*x
def dfunTest(x,args=np.array([])):
return pow(E,x) - 2
def quadraticInterpolation(a,h,h0,g0):
numerator=g0*a**2
denominator=2*(g0*a+h0-h)
if abs(denominator)<1e-12:#indicates that a is almost 0
return a
return numerator/denominator
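# Derivation note: the value returned above is the minimizer of the quadratic
# q(t) interpolating q(0)=h0, q'(0)=g0 and q(a)=h, i.e.
#   t* = g0*a**2 / (2*(g0*a + h0 - h)).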
def cubicInterpolation(a0,h0,a1,h1,h,g):
mat=matlib.matrix([[a0**2,-a1**2],[-a0**3,a1**3]])
vec=matlib.matrix([[h1-h-g*a1],[h0-h-g*a0]])
ab=mat*vec/(a0**2*a1**2*(a1-a0))
a=ab[0,0]
b=ab[1,0]
if abs(a)<1e-12:#a=0 and cubic function is a quadratic one
return -g/(2*b)
return (-b+np.sqrt(b**2-3*a*g))/(3*a)
def cubicInterpolationHermite(a0,h0,g0,a1,h1,g1):
d1=g0+g1-3*(h1-h0)/(a1-a0)
d2=np.sign(a1-a0)*np.sqrt(d1**2-g0*g1)
res=a1-(a1-a0)*(g1+d2-d2)/(g1-g0+2*d2)
return res
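# This is the standard cubic-Hermite interpolation step for line searches
# (see e.g. Nocedal & Wright, "Numerical Optimization", interpolation in
# Sec. 3.5): fit a cubic to (a0,h0,g0) and (a1,h1,g1) and return its minimizer.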
def zoom(fun,dfun,theta,args,d,a_low,a_high,c1=1e-3,c2=0.9,max_iter=1e4):
if a_low>a_high:
print('low:%f,high:%f'%(a_low,a_high))
raise Exception('Invalid interval of stepsize in zoom procedure')
eps=1e-16
h=fun(theta,args) #h(0)=f(x)
g=np.sum(dfun(theta,args)*d.T) #h'(0)=f'(x)^Td
k=0
h_low=fun(theta+a_low*d,args)
h_high=fun(theta+a_high*d,args)
if h_low>h+c1*a_low*g:
raise Exception('Left endpoint violates Armijo condition in zoom procedure')
while k<max_iter and abs(a_high-a_low)>=eps:
a_new=(a_low+a_high)/2
h_new=fun(theta+a_new*d,args)
if h_new>h+c1*a_new*g or h_new>h_low:
a_high=a_new
h_high=h_new
else:
g_new=np.sum(dfun(theta+a_new*d,args)*d.T)
if abs(g_new)<=-c2*g: #satisfy Wolfe condition
return a_new
if g_new*(a_high-a_low)>=0:
a_high=a_new
h_high=h_new
else:
a_low=a_new
h_low=h_new
k+=1
return a_low #a_low definitely satisfy Armijo condition
class LineSearch():
@staticmethod
def graidentDescent(fun,dfun,theta,_type="ArmijoBackTrack",show=False,maxiter=10,):
x,y,y_ = [theta[0]],[fun(theta)[0]],[dfun(theta)[0]]
i = 0
eps = 1e-6
while i < maxiter:
last_theta = deepcopy(theta)
d = -dfun(theta)
if _type == "WolfLineSearch":
stepsize = LineSearch.WolfeLineSearch(fun,dfun,theta,d)
elif _type == "ArmijoBackTrack":
stepsize = LineSearch.ArmijoBacktrack(fun,dfun,theta,d)
elif _type == "ArmijoLineSearch":
stepsize = LineSearch.ArmijoLineSearch(fun,dfun,theta,d)
else:
stepsize = LineSearch.WolfeLineSearch(fun,dfun,theta,d)
# print("d,stepsize,theta:",d,stepsize,theta)
if abs(d) < eps or abs(stepsize)==0: break
theta = last_theta + stepsize*d
if theta > 1: theta = np.array([1])
if theta < 0: theta = np.array([0])
i = i + 1
x.append(theta[0]),y.append(fun(theta)[0]),y_.append(dfun(theta)[0])
# print(x)
# print(y)
# print(y_)
# print("Final x:%0.6f" % theta)
# print("Final y:%0.6f" % fun(theta))
# print("Final y':%0.6f" % dfun(theta))
# print("Iteration Times':%s" % i)
if show==True:
plt.subplot(1,2,1)
plot_X = np.linspace(-10, 3, 256, endpoint=True)
plt.plot(plot_X,fun(plot_X))
plt.subplot(1,2,2)
plt.plot(np.array(x),np.array(y))
plt.show()
return {"x":theta[0], "y":fun(theta)[0], "y_":dfun(theta)[0], "times": i}
@staticmethod
def ArmijoBacktrack(fun,dfun,theta,d,args=np.array([]),stepsize=1,tau=0.5,c1=1e-3):
slope = np.sum(dfun(theta,args)*d.T)
obj_old = fun(theta,args)
theta_new = theta + stepsize*d
obj_new = fun(theta_new,args)
while obj_new > obj_old + c1*stepsize*slope:
stepsize *= tau
theta_new = theta + stepsize*d
obj_new = fun(theta_new,args)
return stepsize
@staticmethod
def ArmijoLineSearch(fun,dfun,theta,d,args=np.array([]),a0=1,c1=1e-3,a_min=1e-7,max_iter=1e5):
eps=1e-6
c1=min(c1,0.5) #c1 should<=0.5
a_pre=h_pre=g_pre=0
a_cur=a0
f_val=fun(theta,args) #h(0)=f(0)
g_val=np.sum(dfun(theta,args)*d.T) #h'(0)=f'(x)^Td
h_cur=g_cur=0
k=0
while a_cur>a_min and k<max_iter:
h_cur=fun(theta+a_cur*d,args)
g_cur=np.sum(dfun(theta+a_cur*d,args)*d.T)
if h_cur<=f_val+c1*a_cur*g_val: #meet Armijo condition
return a_cur
if not k: #k=0,use quadratic interpolation
a_new=quadraticInterpolation(a_cur,h_cur,f_val,g_val)
else: #k>0,use cubic Hermite interpolation
a_new=cubicInterpolationHermite(a_pre,h_pre,g_pre,a_cur,h_cur,g_cur)
if abs(a_new-a_cur)<eps or abs(a_new)<eps: #safeguard procedure
a_new=a_cur/2
a_pre=a_cur
a_cur=a_new
h_pre=h_cur
g_pre=g_cur
k+=1
return a_min #failed search
@staticmethod
def WolfeLineSearch(fun,dfun,theta,d,args=np.array([]),a0=1,c1=1e-4,c2=0.9,a_min=1e-7,max_iter=1e5):
eps=1e-16
c1=min(c1,0.5)
a_pre=0
a_cur=a0
f_val=fun(theta,args) #h(0)=f(x)
        g_val=np.sum(dfun(theta,args)*d.T) #h'(0)=f'(x)^Td
        h_pre=f_val
k=0
while k<max_iter and abs(a_cur-a_pre)>=eps:
h_cur=fun(theta+a_cur*d,args) #f(x+ad)
if h_cur>f_val+c1*a_cur*g_val or h_cur>=h_pre and k>0:
return zoom(fun,dfun,theta,args,d,a_pre,a_cur,c1,c2)
g_cur=np.sum(dfun(theta+a_cur*d,args)*d.T)
if abs(g_cur)<=-c2*g_val:#satisfy Wolfe condition
return a_cur
if g_cur>=0:
return zoom(fun,dfun,theta,args,d,a_pre,a_cur,c1,c2)
a_new=quadraticInterpolation(a_cur,h_cur,f_val,g_val)
a_pre=a_cur
a_cur=a_new
h_pre=h_cur
k+=1
return a_min
class Newton(object):
@staticmethod
def BFGS(fun,dfun,theta,args,H=None,mode=0,eps=1e-12,max_iter=1e4):
x_pre=x_cur=theta
g=dfun(x_cur,args)
I=matlib.eye(theta.size)
        if H is None: #initialize H as an identity matrix
H=I
k=0
while k<max_iter and np.sum(np.abs(g))>eps:
d=-g*H
            # the source this was adapted from presumably used a standalone
            # lineSearch(fun,dfun,theta,args,d,a0,mode) helper; dispatching on
            # `mode` to the staticmethods above is an assumption
            if mode:
                step=LineSearch.WolfeLineSearch(fun,dfun,x_pre,d,args)
            else:
                step=LineSearch.ArmijoLineSearch(fun,dfun,x_pre,d,args)
x_cur=x_pre+step*d
s=step*d
y=dfun(x_cur,args)-dfun(x_pre,args)
ys=np.sum(y*s.T)
if abs(ys)<eps:
return x_cur
change=(ys+np.sum(y*H*y.T))*(s.T*s)/(ys**2)-(H*y.T*s+s.T*y*H)/ys
H+=change
g=dfun(x_cur,args)
x_pre=x_cur
k+=1
return x_cur
@staticmethod
def LBFGS(fun,dfun,theta,args,mode=0,eps=1e-12,max_iter=1e4):
x_pre=x_cur=theta
s_arr=[]
y_arr=[]
Hscale=1
k=0
while k<max_iter:
g=dfun(x_cur,args)
            d=Newton.LBFGSSearchDirection(y_arr,s_arr,Hscale,-g)
            # assumption: `mode` selects the line search (the class itself is not callable)
            step=(LineSearch.ArmijoLineSearch if mode else LineSearch.WolfeLineSearch)(fun,dfun,x_pre,d,args)
s=step*d
x_cur=x_pre+s
y=dfun(x_cur,args)-dfun(x_pre,args)
ys=np.sum(y*s.T)
if np.sum(np.abs(s))<eps:
return x_cur
x_pre=x_cur
k+=1
            y_arr,s_arr,Hscale=Newton.LBFGSUpdate(y,s,y_arr,s_arr)
return x_cur
@staticmethod
def LBFGSSearchDirection(y_arr,s_arr,Hscale,g):
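        # two-loop recursion: apply the implicit inverse Hessian to g using the
        # stored (s, y) history without ever forming the matrix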
histNum=len(s_arr)#number of update data stored
if not histNum:
return g
dim=s_arr[0].size
a_arr=[0 for i in range(histNum)]
rho=[0 for i in range(histNum)]
q=g
for i in range(1,histNum+1):
s=s_arr[histNum-i]
y=y_arr[histNum-i]
rho[histNum-i]=1/np.sum(s*y.T)
a_arr[i-1]=rho[histNum-i]*np.sum(s*q.T)
q-=(a_arr[i-1]*y)
P=Hscale*q
for i in range(histNum,0,-1):
y=y_arr[histNum-i]
s=s_arr[histNum-i]
beta=rho[histNum-i]*np.sum(y*P.T)
P+=s*(a_arr[i-1]-beta)
return P
@staticmethod
def LBFGSUpdate(y,s,oldy,olds,m=1e2):
eps=1e-12
        Hscale=np.sum(y*s.T)/np.sum(y*y.T) # gamma = (s'y)/(y'y), scale to initialize H_{k-m}
if Hscale<eps:#skip update
return oldy,olds,Hscale
cur_m=len(oldy)
if cur_m>=m:
oldy.pop(0)
olds.pop(0)
oldy.append(copy.deepcopy(y))
olds.append(copy.deepcopy(s))
return oldy,olds,Hscale
if __name__ == '__main__':
LineSearch.graidentDescent(fun=funTest,dfun=dfunTest,theta=np.array([-5]))
| seanys/Transportation-and-Optimization-Notes | User-Equilibrium-Project&Line-Search/line_search.py | line_search.py | py | 9,042 | python | en | code | 5 | github-code | 50 |
from tkinter import *
b=str()
rt=Tk()
rt.title("Simple Calculator")
rt.geometry("300x150")
enter= StringVar()
entry= Entry(rt,width='5',textvariable=enter ,font=('Courier',25))
entry.place(x=100,y=10)
def put_data(a):
global b
b+=a
enter.set(b)
def get_sol():
    global b
    b = str(eval(b))
    enter.set(b)
def clear():
    global b
    entry.delete(0, END)
    b = ''
but1= Button(rt,text="1",fg='red',width='10',height='1',command=lambda:put_data('1'))
but1.place(x=40,y=60)
but2= Button(rt,text="2",fg='red',width='10',height='1',command=lambda:put_data('2'))
but2.place(x=40,y=85)
but3= Button(rt,text="3",fg='red',width='10',height='1',command=lambda:put_data('3'))
but3.place(x=40,y=110)
butx= Button(rt,text="*",fg='red',width='10',height='1',command=lambda:put_data('*'))
butx.place(x=190,y=110)
butx= Button(rt,text="=",fg='red',width='10',height='1',command=lambda:get_sol())
butx.place(x=190,y=85)
butx= Button(rt,text="AC",fg='red',width='10',height='1',command=lambda:clear())
butx.place(x=190,y=60)
rt.mainloop() | TheRexishere/Python | Simple_Calculator GUI.py | Simple_Calculator GUI.py | py | 1,107 | python | en | code | 0 | github-code | 50 |
32657924659 | import logging
from datetime import datetime
from datetime import timedelta
import time
class StatsHandler(object):
def __init__(self, start):
"""
:param start: time when this object is initialised, which is basically when the MainHAndler starts
:return: None
"""
# start time stored as timestamp
self.start_time = time.mktime(start.timetuple())
# running sum of content length transmitting received from origin server
self.bytes_transmitted = 0
self.counters = {"404": 0,
"200": 0,
"206": 0,
"416": 0,
"500": 0
}
self.get_requests_handled = 0
def get_start_time(self):
"""return start time in UTC"""
return datetime.fromtimestamp(self.start_time)
def requests_handled(self):
return self.get_requests_handled
def get_up_time(self):
"""return total up time as a string of the form Hours:1, Minutes:44, Seconds:30"""
current_time = datetime.now()
# str of timedelta returns a string [D day[s], ][H]H:MM:SS[.UUUUUU]
uptime = str(current_time - self.get_start_time())
l1 = uptime.split(",")
if len(l1) > 1:
days = l1[0]
hhmmss = l1[1].split(":")
else:
days = ""
hhmmss = l1[0].split(":")
uptime_string = "Uptime is {0} {1} Hours and {2} Minutes".format(days, hhmmss[0], hhmmss[1])
return uptime_string
def get_bytes_transmitted(self):
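        # running byte total converted to megabytes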
return self.bytes_transmitted/1000000
def get_counters(self):
return self.counters
def set_bytes_transmitted(self, bytes_count):
if bytes_count:
self.bytes_transmitted += int(bytes_count)
def set_requests_handled(self, count = 1):
"""
Captures number of get requests received
:param count: represents count of incoming get requests
:return: None
"""
self.get_requests_handled += count
def set_response_type_counter(self, response_code):
"""
:param response_code: HTTP response code
:return: None
"""
counter = self.counters.get(response_code, None)
if counter is not None:
self.counters[response_code] = counter + 1
else:
# response code not found in counter dict
pass
# def get(self):
# allstats = {}
# allstats["Start Time"] = self.start_time
# allstats["Bytes Transmitted"] = self.bytes_transmitted
# allstats["Number of Get Requests Handled"] = self.get_requests_handled
#
# self.render("templates/stats.html", stats=allstats, counters = self.counters) | purbashacg9/asyncproxy | handlers/statshandler.py | statshandler.py | py | 2,809 | python | en | code | 0 | github-code | 50 |
25535439580 | f = open('contacts.txt')
#n = int(input().strip())
n = int(f.readline().strip())
db = dict()
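# prefix -> count map stands in for a trie: "find" is then a single dict lookup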
for _ in range(n):
#query = input().strip().split()
query = f.readline().strip().split()
    if query[0] == 'add':
        # count every prefix of the added name
        for i in range(1, len(query[1]) + 1):
            prefix = query[1][:i]
            db[prefix] = db.get(prefix, 0) + 1
    else:
        print(db.get(query[1], 0))
18253389039 | from django.conf.urls import url
from ..views import (FrFragasvarListView, FrFragasvarCreateView, FrFragasvarDetailView,
FrFragasvarUpdateView, FrFragasvarDeleteView)
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^create/$', # NOQA
login_required(FrFragasvarCreateView.as_view()),
name="fr_fragasvar_create"),
url(r'^(?P<pk>\d+)/update/$',
login_required(FrFragasvarUpdateView.as_view()),
name="fr_fragasvar_update"),
url(r'^(?P<pk>\d+)/delete/$',
login_required(FrFragasvarDeleteView.as_view()),
name="fr_fragasvar_delete"),
url(r'^(?P<pk>\d+)/$',
FrFragasvarDetailView.as_view(),
name="fr_fragasvar_detail"),
url(r'^$',
FrFragasvarListView.as_view(),
name="fr_fragasvar_list"),
]
| ISOF-ITD/djangoapps | sprakfragan/urls/fr_fragasvar_urls.py | fr_fragasvar_urls.py | py | 844 | python | en | code | 0 | github-code | 50 |
2899665166 | # import csv
# exampleFile = open('report.csv')
# exampleReader = csv.reader(exampleFile)
# # exampleData = list(exampleReader)
# # print(exampleData)
# # print("\n")
# # print("\n")
# for row in exampleReader:
# print('Row #' + str(exampleReader.line_num) + ' ' + str(row))
#
# print("\n")
# print("\n")
import csv
with open('report.csv') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
print('Row #'
+ str(spamreader.line_num)
+ ' ' + str(row))
| kamalkschauhan/Python | CSVExtract.py | CSVExtract.py | py | 513 | python | en | code | 0 | github-code | 50 |
70316996636 | '''
This scripts speaks a random greeting every minute on your Sonos speaker system.
Please see: https://github.com/OH-Jython-Scripters/lucid/blob/master/README.md
To use this, you should set up astro.py as described
here https://github.com/OH-Jython-Scripters/lucid/blob/master/Script%20Examples/astro.py
It also assumes that you've set up an openHAB a contact items to represent the presence of
persons to be greeted. Each item should belong to the item group "G_Presence_Family"
'''
from lucid.rules import rule, addRule
from lucid.triggers import CronTrigger
from lucid.speak import tts
from lucid.utils import greeting, PRIO
import random
@rule
class SayHello(object):
def getEventTriggers(self):
return [
CronTrigger(EVERY_MINUTE), # Runs every minute
]
def execute(self, modules, inputs):
        greetings = [greeting(), 'Hello', 'How are you', 'How are you doing', 'Good to see you', 'Long time no see', "It's been a while"]
peopleAtHome = []
for member in itemRegistry.getItem('G_Presence_Family').getAllMembers():
if member.state == OPEN: peopleAtHome.append(member.label)
random.shuffle(peopleAtHome)
msg = random.choice(greetings)
for i in range(len(peopleAtHome)):
person = peopleAtHome[i]
msg += ' '+person
if i+2 == len(peopleAtHome):
msg +=' and'
elif i+1 == len(peopleAtHome):
msg +='.'
elif i+2 < len(peopleAtHome):
msg +=','
#tts(msg, PRIO['HIGH'], ttsRoom='Kitchen', ttsVol=42, ttsLang='en-GB', ttsVoice='Brian')
tts(msg, PRIO['HIGH'], ttsRoom='Kitchen', ttsVol=42, ttsLang='en-IN', ttsVoice='Aditi')
#tts(msg, PRIO['HIGH'], ttsRoom='Kitchen', ttsVol=42, ttsLang='en-US', ttsVoice='Matthew')
#tts(msg, None, ttsRoom='All', ttsLang='de-DE', ttsVoice='Vicki')
#tts(msg) # Also works if you accept the defaults
addRule(SayHello())
| openhab-scripters/lucid | Script Examples/greetings.py | greetings.py | py | 1,995 | python | en | code | 3 | github-code | 50 |
30238304712 | import torch
import torch.nn as nn
import torch.nn.functional as F
from text_lab.text_bert import LEAM
from text_lab.channelwise_lstm import cw_lstm_model
class fusion_layer(nn.Module):
def __init__(self,embedding_dim,fusion_dim,dropout,ngram,output_dim = 25):
super(fusion_layer, self).__init__()
self.lab_encoder = cw_lstm_model(ngram,fusion_dim)
self.text_encoder = LEAM(fusion_dim,embedding_dim, output_dim, dropout, ngram)
self.feature_number = 2
self.class_number = output_dim
self.drop_out = nn.Dropout(dropout)
self.sigmoid = nn.Sigmoid()
self.flatten = nn.Flatten()
self.fc = nn.Sequential(
nn.Linear(2*fusion_dim,fusion_dim),
nn.Dropout(dropout))
self.norm2 =nn.LayerNorm(64)
self.dense = nn.Sequential(
(nn.Linear(256,128,bias=True)),
(nn.LeakyReLU()),
(nn.Linear(128,64,bias=True)),
(nn.LeakyReLU()))
self.clss = self.classification_layer(1088,25)
self.text_fc = nn.Sequential(
nn.Linear(fusion_dim, output_dim)
)
self.avgpooling = nn.AvgPool2d(kernel_size=(2, 1))
def classification_layer(self,in_channels,out_channels):
clss = nn.Linear(in_channels,out_channels,bias=True)
return clss
def forward(self,text_x,label_token,task_token,lab_x,length,fixed_label_embedding,fixed_task_embedding,Fixed,Flatten,mode='fusion'):
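        # encode free text with LEAM and the lab time series with a channel-wise
        # LSTM, then fuse the two predictions before the final sigmoid head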
text_pred,weights,c,t,u,weighted_embed = self.text_encoder(text_x,label_token,task_token)
if Fixed:
lab_predict,fused_score,g,u1 = self.lab_encoder(lab_x,length,fixed_label_embedding,fixed_task_embedding)
else:
lab_predict,fused_score,g,u1 = self.lab_encoder(lab_x,length,c,t)
f_x = torch.cat((lab_predict,text_pred),1)
output = self.sigmoid(self.drop_out(self.text_fc(self.avgpooling(f_x).squeeze(1))))
c_o = c
c = self.text_encoder.dropout(self.text_encoder.fc(c))
return output,c,t,u,weights,fused_score,text_pred,c_o,g,u1
| finnickniu/LDAM | text_lab/fusion_cls.py | fusion_cls.py | py | 2,106 | python | en | code | 2 | github-code | 50 |
39205244861 | from ..interface import Contract, ContractNotRespected
from ..syntax import (add_contract, W, contract_expression, O, S, ZeroOrMore,
                      Group, add_keyword, Keyword)
from .compositions import or_contract
class Tuple(Contract):
def __init__(self, length=None, elements=None, where=None):
Contract.__init__(self, where)
self.length = length
self.elements = elements
assert elements is None or isinstance(elements, list)
if elements:
for e in elements:
assert isinstance(e, Contract)
def check_contract(self, context, value, silent):
if not isinstance(value, tuple):
error = 'Expected a tuple, got %r.' % value.__class__.__name__
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
if self.length is not None:
self.length._check_contract(context, len(value), silent)
if self.elements is not None:
if len(value) != len(self.elements):
error = ('Expected a tuple of length '
'%s, got %r of length %s.' %
(len(self.elements), value, len(value)))
raise ContractNotRespected(contract=self, error=error,
value=value, context=context)
for i in range(len(value)):
self.elements[i]._check_contract(context, value[i], silent)
def __repr__(self):
return 'Tuple(%r,%r)' % (self.length, self.elements)
def __str__(self):
s = 'tuple'
if self.length is not None:
s += '[%s]' % self.length
def rep(x):
from .compositions import And
if isinstance(x, And):
return "(%s)" % x
else:
return "%s" % x
if self.elements is not None:
s += '(%s)' % ",".join(rep(x) for x in self.elements)
return s
@staticmethod
def parse_action(s, loc, tokens):
where = W(s, loc)
length = tokens.get('length', [None])[0]
# elements = tokens.get('elements', [None])[0]
if 'elements' in tokens:
elements = list(tokens['elements'])
else:
elements = None
assert elements is None or length is None
assert length is None or isinstance(length, Contract), ("Wrong type %r"
% length)
if elements:
for e in elements:
assert isinstance(e, Contract), ("Wrong type %s (%r)"
% (type(e), e))
return Tuple(length, elements, where=where)
# if you use contract instead of simple_contract, it will be matched as And
inside = (S('(') - contract_expression - S(')')) | or_contract
inside.setName('Any contract for tuple elements (use parenthesis for AND)')
elements = (Group(S('(') - inside - ZeroOrMore(S(',')
- inside) - S(')'))('elements'))
elements.setName('Tuple elements contract.')
length = Group(S('[') - contract_expression - S(']'))('length')
length.setName('Tuple length contract.')
tuple_contract = Keyword('tuple') - O(length | elements)
tuple_contract.setName('tuple contract')
add_contract(tuple_contract.setParseAction(Tuple.parse_action))
add_keyword('tuple')
| AndreaCensi/contracts | src/contracts/library/tuple.py | tuple.py | py | 3,461 | python | en | code | 392 | github-code | 50 |
5622639185 | #Contains telegram functions
#requires telegram user credentials
from difflib import SequenceMatcher
import json
from API_keys import *
from telethon.tl.types import InputPeerUser
from telethon import TelegramClient
from telethon import functions, types
import distance
from Friday_Functions import *
class Methods:
def __init__(self):
        self.api_id = api_id
        self.api_hash = api_hash
        self.phone = phone
#authorize user using phone number
async def authorize(self, client):
await client.connect()
if not await client.is_user_authorized():
await client.send_code_request(phone)
await client.sign_in(phone, input('Enter the code: '))
await client.disconnect()
#matches the user specified contact to the contact in the contacts list and sends message
async def sendUserMessage(self, response):
isSent = False
client = TelegramClient('session', api_id, api_hash)
await client.connect()
result = await client(functions.contacts.GetContactsRequest(
hash=0
))
for user in result.users:
try:
s = SequenceMatcher(None, response, user.first_name)
if s.ratio() > 0.75 or distance.levenshtein(response, user.first_name) < 3:
speak("What do you wanna send?")
message = takeCommand()
isSent = True
receiver = InputPeerUser(user.id, 0)
await client.send_message(receiver, message, parse_mode='html')
else:
pass
except Exception:
pass
await client.disconnect()
if isSent:
speak("Message sent successfully")
else:
speak("Could not find that user in your contacts") | Rohith-JN/Friday | Telethon.py | Telethon.py | py | 1,859 | python | en | code | 1 | github-code | 50 |
29337299891 | import argparse
import os
import json
import cv2
import numpy as np
from sklearn.cluster import KMeans
from utils import *
DEFAULT_LABELS = "mask_labels.json"
VERBOSE = False
parser = argparse.ArgumentParser()
parser.add_argument("--head_img", type=str, default=r"sample_img\head", help="Path to folder with head img")
parser.add_argument("--head_mask", type=str, default=r"sample_img\head_mask_lip",
help="Path to folder with head segmentation mask img")
parser.add_argument("--body_img", type=str, default=r"sample_img\body", help="Path to folder with body img")
parser.add_argument("--body_mask", type=str, default=r"sample_img\body_mask_atr",
help="Path to folder with body segmentation mask img")
parser.add_argument("--output_dir", type=str, default=r"output", help="Output path")
args = parser.parse_args()
def get_mask(mask_img, del_labels, verbose=False, verbose_name=""):
"""
:param mask_img: image of mask (ATR or LIP)
:param del_labels: name of labels to delete from label dict [labels in dict should be in RGB format]
:param verbose: save intermediate results in output folder if True
:param verbose_name: name of saved intermediate file
:return: mask img of 1 and 0
"""
f = open(DEFAULT_LABELS)
labels = json.load(f)
f.close()
for key in labels[del_labels]:
current_color = np.array(labels[del_labels][key])
current_color = current_color[::-1]
idxs = np.where(np.all(mask_img == current_color, axis=-1))
if len(idxs) == 2:
mask_img[idxs[0], idxs[1]] = 0
mask_img[np.any(mask_img != [0, 0, 0], axis=-1)] = [1, 1, 1]
if verbose:
cv2.imwrite(os.path.join(args.output_dir, verbose_name + "_skin_mask.png"), 255 * mask_img)
print(f"Skin mask saved: {verbose_name}_skin_mask.png")
return mask_img
def get_skin_color(img, mask):
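    # cluster the pixels under the mask with k-means; get_color (from utils) is
    # assumed to pick the dominant skin-like cluster centroid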
target_pixels = img[mask != [0, 0, 0]] # pixels under mask
target_pixels = target_pixels.reshape((len(target_pixels) // 3, 3))
clt = KMeans(n_clusters=4)
clt.fit(target_pixels)
hist = centroid_histogram(clt)
skin_color = get_color(hist, clt.cluster_centers_)
return np.int16(skin_color)
def skin_mask_refinement(skin_color, img, mask, verbose=False):
skin_color_hsv = cv2.cvtColor(np.uint8([[skin_color]]), cv2.COLOR_BGR2HSV)[0][0]
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
blurred_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
blurred_mask = cv2.pyrUp(255 * blurred_mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
blurred_mask = cv2.morphologyEx(blurred_mask, cv2.MORPH_DILATE, kernel)
for i in range(0, 15):
blurred_mask = cv2.medianBlur(blurred_mask, 7)
blurred_mask = cv2.pyrDown(blurred_mask)
_, blurred_mask = cv2.threshold(blurred_mask, 150, 255, cv2.THRESH_BINARY)
blurred_mask = np.clip(blurred_mask / 255.0, 0, 1).astype(np.uint8)
if skinRange(skin_color_hsv):
Hue = 10
Saturation = 65
Value = 50
result = make_lower_upper(skin_color_hsv, Hue, Saturation, Value)
if (result[0]):
# print("I'm here")
lower1 = result[1]
upper1 = result[2]
lower2 = result[3]
upper2 = result[4]
color_skinMask1 = cv2.inRange(img_HSV, lower1, upper1)
color_skinMask2 = cv2.inRange(img_HSV, lower2, upper2)
color_skinMask = cv2.bitwise_or(color_skinMask1, color_skinMask2)
else:
lower = result[1]
upper = result[2]
color_skinMask = cv2.inRange(img_HSV, lower, upper)
final_mask = blurred_mask * color_skinMask
if verbose:
cv2.imwrite(os.path.join(args.output_dir, "03_refined_skin_mask.png"), final_mask)
print("Refined mask saved.")
return final_mask
else:
print("Skin color out of default range. No mask refinement.")
final_mask = blurred_mask
return final_mask
if __name__ == "__main__":
os.makedirs(args.output_dir, exist_ok=True)
# check input arguments
for arg in vars(args):
if arg != "output_dir":
if not os.path.exists(getattr(args, arg)):
print("Cannot find folder {}: {}".format(arg, getattr(args, arg)))
exit()
elif len(os.listdir(getattr(args, arg))) < 1:
print("Folder is empty: {}".format(getattr(args, arg)))
exit()
mask_body_fname = os.path.join(args.body_mask, os.listdir(args.body_mask)[0])
assert os.path.isfile(mask_body_fname)
mask_body = cv2.imread(mask_body_fname, -1)
    mask_body = get_mask(mask_img=mask_body, del_labels="LIP_DEL" if args.body_mask.endswith("_lip") else "ATR_DEL", verbose=VERBOSE, verbose_name="01_body")
mask_face_fname = os.path.join(args.head_mask, os.listdir(args.head_mask)[0])
assert os.path.isfile(mask_face_fname)
mask_face = cv2.imread(mask_face_fname, -1)
    mask_face = get_mask(mask_img=mask_face, del_labels="LIP_DEL" if args.head_mask.endswith("_lip") else "ATR_DEL", verbose=VERBOSE, verbose_name="02_head")
img_face_fname = os.path.join(args.head_img, os.listdir(args.head_img)[0])
assert os.path.isfile(img_face_fname)
img_face = cv2.imread(img_face_fname, -1)
if img_face.shape[-1] > 3:
trans_mask = img_face[:, :, 3] == 0
img_face[trans_mask] = [255, 255, 255, 255]
img_face = cv2.cvtColor(img_face, cv2.COLOR_BGRA2BGR)
skin_color_face_BGR = get_skin_color(img_face, mask_face)
img_body_fname = os.path.join(args.body_img, os.listdir(args.body_img)[0])
assert os.path.isfile(img_body_fname)
img_body = cv2.imread(img_body_fname, -1)
if img_body.shape[-1] > 3:
trans_mask = img_body[:, :, 3] == 0
img_body[trans_mask] = [255, 255, 255, 255]
img_body = cv2.cvtColor(img_body, cv2.COLOR_BGRA2BGR)
skin_color_body_BGR = get_skin_color(img_body, mask_body)
mask_body = skin_mask_refinement(skin_color=skin_color_body_BGR, img=img_body, mask=mask_body, verbose=VERBOSE)
# Change the color maintaining the texture.
res_img = doDiff(img=np.copy(img_body),
target_color=skin_color_face_BGR,
src_color=skin_color_body_BGR,
size=img_body.shape)
# Get the two images ie. the skin and the background.
no_skin_img = cv2.bitwise_and(img_body, img_body, mask=cv2.bitwise_not(mask_body))
skin_img = cv2.bitwise_and(res_img, res_img, mask=mask_body)
if VERBOSE:
cv2.imwrite(os.path.join(args.output_dir, "04_body_img.png"), img_body)
cv2.imwrite(os.path.join(args.output_dir, "05_no_skin_img.png"), no_skin_img)
cv2.imwrite(os.path.join(args.output_dir, "06_skin_img.png"), skin_img)
cv2.imwrite(os.path.join(args.output_dir, "07_color_corrected.png"), res_img)
print("Skin/no skin img saved")
skin_swap = cv2.add(no_skin_img, skin_img)
cv2.imwrite(os.path.join(args.output_dir, os.path.basename(img_body_fname)), skin_swap)
print("Finished")
| GreasyGoose/MADE_final_project | skin_color_correction/skin_correct.py | skin_correct.py | py | 7,133 | python | en | code | 0 | github-code | 50 |
17682180983 | '''
combines the distance to Nash equilibrium per time slot for all the runs
'''
import csv
import argparse
from numpy import median
from utility_method import saveToCSV, saveToTxt, computeMovingAverage
parser = argparse.ArgumentParser(description='Combines the distance to Nash equilibrium per time slot for all the runs.')
parser.add_argument('-d', dest="root_dir", required=True, help='root directory where data of all runs are stored')
parser.add_argument('-r', dest="num_run", required=True, help='number of simulation runs')
parser.add_argument('-t', dest="num_time_slot", required=True, help='number of time slots in each simulation run')
parser.add_argument('-w', dest="rolling_avg_window", required=True, help='window for rolling average')
args = parser.parse_args()
rootDir = args.root_dir
numRun = int(args.num_run)
numTimeSlot = int(args.num_time_slot)
window = int(args.rolling_avg_window)
def combineDistanceToNashEquilibrium(rootDir, numRun, numTimeSlot):
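    """Average the per-time-slot distance over all runs, and count how many
    time slots each run spent exactly at the Nash equilibrium."""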
distanceToNashEquilibriumPerTimeSlot = [0] * numTimeSlot
numTimeSlotAtNashEquilibriumPerRun = []
for runIndex in range(1, numRun + 1):
numTimeSlotAtNashEquilibrium = 0
filename = rootDir + "run" + str(runIndex) + "/distanceToNashEquilibrium.csv"
        with open(filename, newline='') as f:
            fileReader = csv.reader(f)
            count = 0
            for row in fileReader: # accumulate the distance to Nash equilibrium per time slot
                if count != 0:
                    distance = float(row[0])
                    distanceToNashEquilibriumPerTimeSlot[count - 1] += distance
                    if distance == 0: numTimeSlotAtNashEquilibrium += 1
                count += 1
numTimeSlotAtNashEquilibriumPerRun.append(numTimeSlotAtNashEquilibrium)
for i in range(len(distanceToNashEquilibriumPerTimeSlot)):
distanceToNashEquilibriumPerTimeSlot[i] = distanceToNashEquilibriumPerTimeSlot[i]/numRun
return distanceToNashEquilibriumPerTimeSlot, numTimeSlotAtNashEquilibriumPerRun
def main():
global rootDir, numRun, numTimeSlot, window
avgDistanceToNashEquilibriumPerTimeSlot, numTimeSlotAtNashEquilibriumPerRun = combineDistanceToNashEquilibrium(rootDir, numRun, numTimeSlot)
avgDistanceToNashEquilibriumPerTimeSlot = computeMovingAverage(avgDistanceToNashEquilibriumPerTimeSlot, window)
print("avgDistanceToNashEquilibriumPerTimeSlot:", avgDistanceToNashEquilibriumPerTimeSlot)
avgDistanceToNashEquilibriumPerTimeSlot = [[x] for x in avgDistanceToNashEquilibriumPerTimeSlot]
saveToCSV(rootDir + "distanceToNashEquilibrium.csv", ["average_distance"], avgDistanceToNashEquilibriumPerTimeSlot)
print("numTimeSlotAtNashEquilibriumPerRun:", numTimeSlotAtNashEquilibriumPerRun)
saveToTxt(rootDir + "timeSpendAtNashEquilibrium.txt", str(numTimeSlotAtNashEquilibriumPerRun) + "\n" + "Time spent at Nash equilibrium per run:-\n"
+ "\tAverage: " + str(sum(numTimeSlotAtNashEquilibriumPerRun)/len(numTimeSlotAtNashEquilibriumPerRun))
+ "\t(" + str((sum(numTimeSlotAtNashEquilibriumPerRun)/len(numTimeSlotAtNashEquilibriumPerRun))*100/numTimeSlot) + "%)" + "\n"
+ "\tMedian: " + str(median(numTimeSlotAtNashEquilibriumPerRun))
+ "\t(" + str(median(numTimeSlotAtNashEquilibriumPerRun)*100/numTimeSlot) + "%)" + "\n"
+ "\tMinimum: " + str(min(numTimeSlotAtNashEquilibriumPerRun)) + "\n"
+ "\t(" + str(min(numTimeSlotAtNashEquilibriumPerRun) * 100 / numTimeSlot) + "%)" + "\n"
+ "\tMaximum: " + str(max(numTimeSlotAtNashEquilibriumPerRun))
+ "\t(" + str(max(numTimeSlotAtNashEquilibriumPerRun) * 100 / numTimeSlot) + "%)")
if __name__ == "__main__": main() | anuja-meetoo/Co-Bandit | combineDistanceToNashEquilibrium.py | combineDistanceToNashEquilibrium.py | py | 3,772 | python | en | code | 0 | github-code | 50 |
1742327708 | #======================================================
# Configuration file for the ensemble storm track segmentation
# Contains the parameters used.
#
# Author: Montgomery Flora (Git username : monte-flora)
# Email : monte.flora@noaa.gov
#======================================================
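# Each dict below defines one identification pass: an intensity-threshold range,
# a minimum object area, and a merging distance between neighbouring objects.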
param_set = [ {'min_thresh': 0,
'max_thresh': 100,
'data_increment': 1,
'delta': 0,
'area_threshold': 800,
'dist_btw_objects': 125 },
{'min_thresh': 30,
'max_thresh': 100,
'data_increment': 1,
'delta': 0,
'area_threshold': 400,
'dist_btw_objects': 30 },
{'min_thresh': 50,
'max_thresh': 100,
'data_increment': 1,
'delta': 0,
'area_threshold': 250,
'dist_btw_objects': 30 },
]
config = {
'deterministic' : {
'params' : ('single_threshold', {'bdry_thresh':10.0}),
'qc_params' : [('min_area',10.)]
},
'ensemble' : {
'params' : {'params' : param_set},
'ensemble_size' : 18
}
}
| NOAA-National-Severe-Storms-Laboratory/frdd-wofs-ml-severe | wofs_ml_severe/conf/segmentation_config.py | segmentation_config.py | py | 1,366 | python | en | code | 0 | github-code | 50 |
33289854164 | import werkzeug
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
werkzeug.cached_property = werkzeug.utils.cached_property
from flask_restplus import Api, Resource
from datetime import datetime
from joblib import load
from bs4 import BeautifulSoup
import requests
import json
import pickle
import joblib
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from flask_cors import CORS
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
api = Api()
app = Flask(__name__)
#CORS(app, resources = {r"/api/*":{}})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data_base.db'
app.config['SECRET_KEY'] = "secret key"
db = SQLAlchemy(app)
api.__init__(app)
CORS(app, resources={r'/*': {'origins': '*'}})
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String(350), nullable=False)
news_id = db.Column(db.Integer, nullable=False)
user_name = db.Column(db.String(80), nullable=False)
toxic_num = db.Column(db.Integer, nullable=False)
pub_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
def __init__(self, comment, news_id, user_name, toxic_num=0):
self.user_name = user_name
self.comment = comment
self.news_id = news_id
self.toxic_num = toxic_num
@property
def serialize(self):
return {'user_name': self.user_name, 'comment': self.comment, 'news_id': self.news_id,
'toxic_num': self.toxic_num, 'date': self.pub_date}
class News(db.Model):
id = db.Column(db.Integer, primary_key=True)
descript = db.Column(db.String(350), nullable=False)
title = db.Column(db.String(80), nullable=False)
news_link = db.Column(db.String(80), nullable=False)
date = db.Column(db.String(80), nullable=False)
descript = db.Column(db.String(80), nullable=False)
img_link = db.Column(db.String(80), nullable=False)
def __init__(self, title, news_link, date, descript, img_link):
self.descript = descript
self.title = title
self.date = date
self.img_link = img_link
self.news_link = news_link
@property
def serialize(self):
return {
'id': self.id,
            'descript': self.descript,
'title': self.title,
'date': self.date,
'img_link': self.img_link,
'news_link': self.news_link,
}
db.create_all()
#################################################
class Bs:
def __init__(self):
self.url = "https://www.npr.org/sections/world/"
def crawl(self, db):
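        # scrape the NPR world-news listing and store any headlines not already in the DB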
html = requests.get(self.url)
bsobj = BeautifulSoup(html.text, 'lxml')
news = []
titles = []
news_links = []
dates = []
descripts = []
img_links = []
for i in bsobj.find_all('div', class_="item-info-wrap"):
news.append(list(i.children))
for n in news:
titles.append(n[1].find('h2', class_="title").get_text())
news_links.append(list(n[1].find('h2', class_="title").children)[0]['href'])
dates.append(n[1].find('span', class_="date").get_text().replace("\x95", " : "))
descripts.append(n[1].find('p', class_="teaser").get_text().replace("\x95", " : "))
for i in bsobj.find_all('div', class_="item-image"):
img_links.append(list(i.children)[1].find('img')['data-original'])
for title, news_link, date, descript, img_link in zip(titles, news_links, dates, descripts, img_links):
old_news = News.query.filter(News.title.contains(title)).all()
if not old_news:
new_news = News(title=title, news_link=news_link, date=date, descript=descript, img_link=img_link)
db.session.add(new_news)
db.session.commit()
#################################################
stemmer = PorterStemmer()
stopwords = set(stopwords.words("english"))
def prep(txt):
tmp = [stemmer.stem(t) for t in word_tokenize(txt) if t not in stopwords]
tmp = " ".join(tmp)
return tmp
#################################################
b_soup = Bs()
model = joblib.load('my_model.pkl')
vectorize = joblib.load('vectorize.pkl')
loaded_vec = CountVectorizer(decode_error="replace", vocabulary=vectorize)
class GetComment(Resource):
def get(self, idx=1):
sql_result = Comment.query.filter_by(news_id=idx).with_entities(Comment.user_name, Comment.comment,
Comment.toxic_num, Comment.pub_date).all()
res = [{'user_name': r[0],
'comment': r[1],
'toxic_num': r[2],
'date': r[3]
} for r in sql_result]
return jsonify(res)
def post(self, idx=1):
data = api.payload
user_name = data['user_name']
comment = data['text']
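        # score the new comment with the pre-trained toxicity classifier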
        toxic_num = model.predict(loaded_vec.transform([prep(comment)]))
        new_comment = Comment(comment=comment, news_id=idx, user_name=user_name, toxic_num=int(toxic_num[0]))
db.session.add(new_comment)
db.session.commit()
sql_result = Comment.query.filter_by(news_id=idx).with_entities(Comment.user_name, Comment.comment, Comment.toxic_num, Comment.pub_date).all()
res = [{'user_name': r[0],
'comment': r[1],
'toxic_num': r[2],
'date': r[3]
} for r in sql_result]
return jsonify(res)
class GetNews(Resource):
def get(self):
b_soup.crawl(db)
sql_result = News.query.with_entities(News.news_link, News.img_link, News.date, News.title, News.descript, News.id).all()
res = [{'news_link': r[0],
'img_link': r[1],
'date': r[2],
'title': r[3],
'descript': r[4],
'id': r[5]
} for r in sql_result]
return jsonify(res)
api.add_resource(GetComment, '/api/comment/<idx>', methods=['GET', 'POST'])
api.add_resource(GetNews, '/api/news/', methods=['GET'])
############################################
@app.route('/index')
def hello_world():
return "first page"
if __name__ == '__main__':
app.run(debug=True)
| saminbassiri/news_and_comment_API | app.py | app.py | py | 6,594 | python | en | code | 0 | github-code | 50 |
26210548918 | import logging
from typing import Iterable, List
import torch
import openai
from tqdm import tqdm
from ..formatter import WhisperTimestampsFormatter
from .base import Engine
from .settings import SYMAI_CONFIG
from pathlib import Path
from openai import OpenAI
class TTSEngine(Engine):
def __init__(self):
super().__init__()
config = SYMAI_CONFIG
self.api_key = config['TEXT_TO_SPEECH_ENGINE_API_KEY']
self.model_id = config['TEXT_TO_SPEECH_ENGINE_MODEL']
self.tokens = []
self.text = []
self.client = OpenAI(api_key=self.api_key)
def command(self, wrp_params):
super().command(wrp_params)
if 'TEXT_TO_SPEECH_ENGINE_API_KEY' in wrp_params:
self.api_key = wrp_params['TEXT_TO_SPEECH_ENGINE_API_KEY']
if 'TEXT_TO_SPEECH_ENGINE_MODEL' in wrp_params:
self.model_id = wrp_params['TEXT_TO_SPEECH_ENGINE_MODEL']
def forward(self, **kwargs) -> List[str]:
prompt = str(kwargs['prompt'])
voice = str(kwargs['voice']).lower()
path = str(kwargs['path'])
input_handler = kwargs.get("input_handler")
if input_handler is not None:
input_handler((prompt, voice, path))
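        # synthesize speech with the OpenAI text-to-speech endpoint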
rsp = self.client.audio.speech.create(
model=self.model_id,
voice=voice,
input=prompt
)
output_handler = kwargs['output_handler'] if 'output_handler' in kwargs else None
if output_handler:
output_handler(rsp)
metadata = {}
if 'metadata' in kwargs and kwargs['metadata']:
metadata['kwargs'] = kwargs
metadata['input'] = (prompt, voice, path)
metadata['output'] = rsp
metadata['model'] = self.model_id
metadata['voice'] = voice
metadata['path'] = path
rsp.stream_to_file(path)
return [rsp], metadata
def prepare(self, args, kwargs, wrp_params):
assert 'voice' in wrp_params, "TTS requires voice selection."
assert 'path' in wrp_params, "TTS requires path selection."
voice = str(wrp_params['voice'])
audio_file = str(wrp_params['path'])
wrp_params['voice'] = voice
wrp_params['path'] = audio_file
| kpister/prompt-linter | data/scraping/repos/ExtensityAI~symbolicai/symai~backend~engine_text_to_speech.py | symai~backend~engine_text_to_speech.py | py | 2,275 | python | en | code | 0 | github-code | 50 |
33849826084 | # -*- coding: utf-8 -*-
import io
from core.evaluation.labels import Label
from core.source.synonyms import SynonymsCollection
class OpinionCollection:
""" Collection of sentiment opinions between entities
"""
def __init__(self, opinions, synonyms):
assert(isinstance(opinions, list) or isinstance(opinions, type(None)))
assert(isinstance(synonyms, SynonymsCollection))
self.__opinions = [] if opinions is None else opinions
self.__synonyms = synonyms
self.__by_synonyms = self.__create_index()
def __add_synonym(self, value):
if self.__synonyms.IsReadOnly:
raise Exception((u"Failed to add '{}'. Synonym collection is read only!".format(value)).encode('utf-8'))
self.__synonyms.add_synonym(value)
def __create_index(self):
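        # index opinions by their synonym-group id pair for O(1) synonymous lookups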
index = {}
for opinion in self.__opinions:
OpinionCollection.__add_opinion(opinion, index, self.__synonyms, check=True)
return index
@classmethod
def from_file(cls, filepath, synonyms):
assert(isinstance(synonyms, SynonymsCollection))
opinions = []
with io.open(filepath, "r", encoding='utf-8') as f:
for i, line in enumerate(f.readlines()):
if line == '\n':
continue
args = line.strip().split(',')
assert(len(args) >= 3)
entity_left = args[0].strip()
entity_right = args[1].strip()
sentiment = Label.from_str(args[2].strip())
o = Opinion(entity_left, entity_right, sentiment)
opinions.append(o)
return cls(opinions, synonyms)
def has_synonymous_opinion(self, opinion, sentiment=None):
assert(isinstance(opinion, Opinion))
assert(sentiment is None or isinstance(sentiment, Label))
if not opinion.has_synonym_for_left(self.__synonyms):
return False
if not opinion.has_synonym_for_right(self.__synonyms):
return False
s_id = opinion.create_synonym_id(self.__synonyms)
if s_id in self.__by_synonyms:
f_o = self.__by_synonyms[s_id]
return True if sentiment is None else f_o.sentiment == sentiment
return False
def get_synonymous_opinion(self, opinion):
assert(isinstance(opinion, Opinion))
s_id = opinion.create_synonym_id(self.__synonyms)
return self.__by_synonyms[s_id]
def add_opinion(self, opinion):
assert(isinstance(opinion, Opinion))
if not opinion.has_synonym_for_left(self.__synonyms):
self.__add_synonym(opinion.value_left)
if not opinion.has_synonym_for_right(self.__synonyms):
self.__add_synonym(opinion.value_right)
self.__add_opinion(opinion, self.__by_synonyms, self.__synonyms)
self.__opinions.append(opinion)
def save(self, filepath):
sorted_ops = sorted(self.__opinions, key=lambda o: o.value_left + o.value_right)
        with io.open(filepath, 'w', encoding='utf-8') as f:
for o in sorted_ops:
f.write(o.to_unicode())
f.write(u'\n')
@staticmethod
def __add_opinion(opinion, collection, synonyms, check=True):
key = opinion.create_synonym_id(synonyms)
assert(isinstance(key, unicode))
if check:
if key in collection:
raise Exception(u"'{}->{}' already exists in collection".format(
opinion.value_left, opinion.value_right).encode('utf-8'))
if key in collection:
return False
collection[key] = opinion
return True
def iter_sentiment(self, sentiment):
assert(isinstance(sentiment, Label))
for o in self.__opinions:
if o.sentiment == sentiment:
yield o
def __len__(self):
return len(self.__opinions)
def __iter__(self):
for o in self.__opinions:
yield o
class Opinion:
""" Source opinion description
"""
def __init__(self, value_left, value_right, sentiment):
assert(isinstance(value_left, unicode))
assert(isinstance(value_right, unicode))
assert(isinstance(sentiment, Label))
assert(',' not in value_left)
assert(',' not in value_right)
self.value_left = value_left.lower()
self.value_right = value_right.lower()
self.sentiment = sentiment
def to_unicode(self):
return u"{}, {}, {}, current".format(
self.value_left,
self.value_right,
self.sentiment.to_str())
def create_synonym_id(self, synonyms):
assert(isinstance(synonyms, SynonymsCollection))
return u"{}_{}".format(
synonyms.get_synonym_group_index(self.value_left),
synonyms.get_synonym_group_index(self.value_right))
def has_synonym_for_left(self, synonyms):
assert(isinstance(synonyms, SynonymsCollection))
return synonyms.has_synonym(self.value_left)
def has_synonym_for_right(self, synonyms):
assert(isinstance(synonyms, SynonymsCollection))
return synonyms.has_synonym(self.value_right)
| nicolay-r/attitudes-extraction-ds | core/source/opinion.py | opinion.py | py | 5,195 | python | en | code | 3 | github-code | 50 |
11588513111 | import logging
from tqdm import tqdm
import os
import re
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import codecs
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
class PDFExtract:
    def extractPdfText(self, filePath=''):
        results = ''
        f = open(filePath, 'rb')
        parser = PDFParser(f)
        doc = PDFDocument()
        parser.set_document(doc)
        doc.set_parser(parser)
        doc.initialize()
        if not doc.is_extractable:
            f.close()
            raise PDFTextExtractionNotAllowed
        else:
            rsrcmgr = PDFResourceManager()
            laparams = LAParams()
            device = PDFPageAggregator(rsrcmgr, laparams=laparams)
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            for page in doc.get_pages():
                interpreter.process_page(page)
                layout = device.get_result()
                for x in layout:
                    if isinstance(x, LTTextBoxHorizontal):
                        results += x.get_text()
        f.close()
        return results
def preprocess(self,data):
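        # lowercase, strip punctuation/digits, drop stopwords and 1-char tokens,
        # and keep only sentences with more than one remaining token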
sent_list_0 = sent_tokenize(data)
punc = '[\W\d]'
sent_list_1 = []
for sent in sent_list_0:
sent_list_1.append(re.sub(punc, ' ', sent.lower()))
stop = set(stopwords.words('english'))
sent_list_2 = []
for sent in sent_list_1:
sent_list_2.append([w for w in word_tokenize(
sent) if w not in stop and len(w) > 1])
sent_list_3 = []
for sent in sent_list_2:
if len(sent) > 1:
sent_list_3.append(sent)
return sent_list_3
def get_file_list(self,root, ftype=".pdf"):
FileList = []
FileName = []
for dirPath, dirNames, fileNames in os.walk(root):
for f in fileNames:
if f.find(ftype) > -1:
FileList.append(os.path.join(dirPath, f))
FileName.append(f.replace(ftype, ""))
if len(FileList) > 0:
a = zip(FileList, FileName)
a = sorted(a, key=lambda t: t[1])
FileList, FileName = zip(*a)
return list(FileList), list(FileName)
    def pdftoText(self, year):
        logging.getLogger("pdfminer").setLevel(logging.ERROR)
        # TAAI tracks: 0 = Domestic, 1 = International, 2 = Special Session
        for track in range(3):
            FilePath, FileName = self.get_file_list('./pdf/%s/%s/' % (year, track))
            for i in tqdm(range(len(FilePath))):
                sent_list = self.preprocess(self.extractPdfText(FilePath[i]))
                with codecs.open('./txt/%s/%s/%s.txt' % (year, track, FileName[i]),
                                 'w', encoding='utf-8') as output:
                    for sent in sent_list:
                        output.write(' '.join(sent))
| taai-taiwan/academic-search | PDFExtract.py | PDFExtract.py | py | 3,925 | python | en | code | 0 | github-code | 50 |
27773455016 | from keras.preprocessing.image import ImageDataGenerator
def load_images():
image_path='C:/Users/subhankar nath/desktop/neural_network/flowers_classification/flower_photos'
data_gen= ImageDataGenerator(rescale=1.0/255)
data= data_gen.flow_from_directory(image_path, target_size=(64,64), batch_size=32)
return data
| SubhankarNath/neural_network | flowers_utils.py | flowers_utils.py | py | 354 | python | en | code | 0 | github-code | 50 |
3118044744 | test_pangram_positive = [
"The quick brown fox jumps over the lazy dog.",
"Waltz, bad nymph, for quick jigs vex.",
"Glib jocks quiz nymph to vex dwarf.",
"Sphinx of black quartz, judge my vow.",
"How quickly daft jumping zebras vex!",
"The five boxing wizards jump quickly.",
"Jackdaws love my big sphinx of quartz.",
"Pack my box with five dozen liquor jugs."
]
test_pangram_negative = [
"My name is Ashish.",
"I love programming.",
"Am from Delhi and brown fox jumps over the dog."
]
def check_pangram(in_str):
    # a pangram contains every letter of the alphabet at least once
    letters = set(ch for ch in in_str.lower() if ch.isalpha())
    return len(letters) == 26
for i in test_pangram_positive:
print(check_pangram(i))
for i in test_pangram_negative:
print(check_pangram(i))
| ashishjain1547/public_lessons_in_python | Ch 7 - Problems on Strings/Level 1/4_Check if given String is Pangram or not/script - using list, sorted, set and join.py | script - using list, sorted, set and join.py | py | 884 | python | en | code | 0 | github-code | 50 |
72649944156 | import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Embedding, Masking, Input, Bidirectional, LSTM, Dense, Dropout, concatenate
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from gensim.models import KeyedVectors
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l1_l2
from utils.constants import ROBERTA_MAX_TOKS
from utils.misc import write_txt
from transformers import RobertaTokenizer, RobertaConfig, TFRobertaModel
from utils.transformers_embedding import BCBEmbedder
import os
import re
import tensorflow as tf
def make_model(emb_path,
sentence_length,
meta_shape,
tags,
train_sent,
l1_l2_pen,
n_units,
n_dense,
dropout,
test_sent=None, # not used
ALdir=None, # not used
embeddings=None, # not used
emb_filename = None # not used
):
# instatiate vectorizer
vectorizer = TextVectorization(max_tokens=None, # unlimited vocab size
output_sequence_length=sentence_length,
standardize=None) # this is CRITICAL --
# default will strip '_' and smash multi-word-expressions together
vectorizer.adapt(np.array(train_sent.fillna("")))
cr_embed = KeyedVectors.load(emb_path, mmap='r')
# get the vocabulary from our vectorized text
vocab = vectorizer.get_vocabulary()
# make a dictionary mapping words to their indices
word_index = dict(zip(vocab, range(len(vocab))))
# create an embedding matrix (embeddings mapped to word indices)
# adding 1 ensures that there is a row of zeros in the embedding matrix
# for words in the text that are not in the embeddings vocab
num_words = len(word_index) + 1
embedding_len = 300
embedding_matrix = np.zeros((num_words, embedding_len))
# add embeddings for each word to the matrix
hits = 0
misses = 0
for word, i in word_index.items():
# by default, words not found in embedding index will be all-zeros
if word in cr_embed.wv.vocab:
embedding_matrix[i] = cr_embed.wv.word_vec(word)
hits += 1
else:
misses += 1
print("Converted %s words (%s misses)" % (hits, misses))
# load embeddings matrix into an Embeddings layer
cr_embed_layer = Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
embeddings_initializer=keras.initializers.Constant(
embedding_matrix),
trainable=False)
# make the actual model
nlp_input = Input(shape=(sentence_length,), name='nlp_input')
meta_input = Input(shape=(meta_shape,), name='meta_input')
emb = cr_embed_layer(nlp_input)
mask = Masking(mask_value=0.)(emb)
lstm = Bidirectional(LSTM(n_units, kernel_regularizer=l1_l2(l1_l2_pen)))(mask)
    # start from the LSTM output so the model still builds when n_dense == 0
    x = lstm
    for i in range(n_dense):
        d = Dense(n_units, activation='relu',
                  kernel_regularizer=l1_l2(l1_l2_pen))(x)
        x = Dropout(dropout)(d)
    penultimate = concatenate([x, meta_input])
out = [Dense(3, activation='softmax', name=t)(penultimate) for t in tags]
model = Model(inputs=[nlp_input, meta_input], outputs=out)
return model, vectorizer
def make_transformers_model(emb_path,
sentence_length,
meta_shape,
tags,
train_sent,
l1_l2_pen,
n_units,
n_dense,
dropout,
ALdir,
embeddings,
test_sent,
emb_filename
):
'''
creates or loads the embeddings, and builds a model off of that
:return: a model, for training. in place of the vectorizer is a dict with embeddings to feed the model
'''
if "final_test" in emb_filename:
# the final test embeddings are the same for all the batches
ALdir = ALdir[:-3] + "01/"
cond1 = os.path.exists(f"{ALdir}processed_data/{embeddings}/{emb_filename}")
cond2 = os.path.exists(f"{ALdir}processed_data/{embeddings}/{re.sub('_tr', '_va', emb_filename)}")
if not cond1 & cond2:
embedding_tokname = re.sub('embeddings', 'antiClobberToken', emb_filename)
write_txt("I am a token. Why must we anthropomorphize everything?",
f"{ALdir}processed_data/{embeddings}/{embedding_tokname}")
emb = BCBEmbedder(model_type = embeddings)
tr = emb(train_sent.tolist())
np.save(f"{ALdir}processed_data/{embeddings}/{emb_filename}", tr)
if test_sent is not None:
va = emb(test_sent.tolist())
np.save(f"{ALdir}processed_data/{embeddings}/{re.sub('_tr', '_va', emb_filename)}", va)
else:
va = None
os.remove(f"{ALdir}processed_data/{embeddings}/{embedding_tokname}")
else:
tr = np.load(f"{ALdir}processed_data/{embeddings}/{emb_filename}")
if test_sent is not None:
va = np.load(f"{ALdir}processed_data/{embeddings}/{re.sub('_tr', '_va', emb_filename)}")
else:
va = None
structured_input = Input(shape=meta_shape, name='inp_str')
embedding_input = Input(shape=768, name='inp_emb')
drp = Dropout(dropout)(embedding_input)
for i in range(n_dense):
d = Dense(n_units, activation='relu',
kernel_regularizer=l1_l2(l1_l2_pen))(drp)
drp = Dropout(dropout)(d)
penultimate = concatenate([drp, structured_input])
out = [Dense(3, activation='softmax', name=t)(penultimate) for t in tags]
model = Model(inputs=[embedding_input, structured_input], outputs=out)
return model, dict(tr = tr, va = va)
def make_roberta_model(meta_shape,
tags,
l1_l2_pen,
n_units,
n_dense,
dropout,
average_embeddings_no_train=True,
emb_path=None,
sentence_length=None,
train_sent=None,
):
'''outputs a tokenizer and a roberta model'''
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
structured_input = Input(shape=meta_shape, name='inp_str')
input_ids = Input(shape=ROBERTA_MAX_TOKS, dtype=tf.int32, name='inp_ids')
attention_mask = Input(shape=ROBERTA_MAX_TOKS, dtype=tf.int32, name='inp_attnmask')
config = RobertaConfig.from_pretrained('roberta-base', output_hidden_states=False)
encoder = TFRobertaModel.from_pretrained('roberta-base', config=config)
if average_embeddings_no_train == True:
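        # mean-pool the token embeddings (skipping the leading <s> token) rather than using the CLS vector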
embedding = encoder(input_ids,
attention_mask=attention_mask,
)[0][:, 1:, :]
eavg = tf.reduce_mean(embedding, axis=1)
drp = Dropout(dropout)(eavg)
else:
embedding = encoder(input_ids,
attention_mask=attention_mask,
)[0][:, 0, :]
drp = Dropout(dropout)(embedding)
for i in range(n_dense):
d = Dense(n_units, activation='relu',
kernel_regularizer=l1_l2(l1_l2_pen))(drp)
drp = Dropout(dropout)(d)
penultimate = concatenate([drp, structured_input])
out = [Dense(3, activation='softmax', name=t)(penultimate) for t in tags]
model = Model(inputs=[input_ids, attention_mask, structured_input], outputs=out)
if average_embeddings_no_train == True:
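        # freeze the encoder weights; 'bert' is a substring of the RoBERTa layer name, so this matches it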
for n, l in enumerate(model.layers):
if 'bert' in l.name:
model.layers[n].trainable = False
return model, tokenizer
if __name__ == "__main__":
pass
| cranedroesch/frailtyclassifier | utils/prefit.py | prefit.py | py | 8,093 | python | en | code | 0 | github-code | 50 |
def is_int_num(num):
    try:
        int(num)
    except (ValueError, TypeError):
        return False
    return True


if __name__ == '__main__':
    one_num = input("Please enter an integer: ")
    print(is_int_num(one_num))
| shiqi0128/My_scripts | python_study/Py28_0410_file_exception/lm_08_examples.py | lm_08_examples.py | py | 282 | python | en | code | 0 | github-code | 50 |
3513243398 | import tkinter as tk
from board import *
from game import Game
class CheckersBoard:
def __init__(self, master, array):
self.master = master
self.canvas = tk.Canvas(master,width=400, height=400)
self.canvas.pack()
self.square_size = 50
self.colors = {
"dark": "#2C3E50",
"light": "#EAEAEA"
}
self.selected_piece = None
self.canvas.bind("<Button-1>", self.on_square_clicked)
self.array = array
# draw the checkerboard
for row in range(8):
for col in range(8):
if (row+col) % 2 == 0:
self.canvas.create_rectangle(col*50, row*50, (col+1)*50, (row+1)*50, fill=self.colors['light'])
else:
self.canvas.create_rectangle(col*50, row*50, (col+1)*50, (row+1)*50, fill=self.colors['dark'])
    def update_board(self, array):
        self.canvas.delete('piece')  # clear any previous pieces on the board
        for row in range(8):
            for col in range(8):
                piece = array[row][col]
                if piece != 0 and piece.color in ('red', 'white'):
                    x0, y0 = col*50 + 10, row*50 + 10
                    x1, y1 = (col+1)*50 - 10, (row+1)*50 - 10
                    self.canvas.create_oval(x0, y0, x1, y1, fill=piece.color, tags='piece')
                    if piece.king:
                        # mark kings with a yellow ring drawn as two half-arcs
                        self.canvas.create_arc(x0, y0, x1, y1, start=90, extent=180, style=tk.ARC, width=4, outline='yellow', tags='piece')
                        self.canvas.create_arc(x0, y0, x1, y1, start=270, extent=180, style=tk.ARC, width=4, outline='yellow', tags='piece')
def on_square_clicked(self, event): #selects and tells location of piece
self.row = event.y // self.square_size
self.col = event.x // self.square_size
piece = self.array[self.row][self.col]
print(f"you have selected the piece at ({self.row},{self.col})")
# root = tk.Tk()
# array = [
# ['white', 0, 'white', 0, 'white', 0, 'white', 0],
# [0, 'white', 0, 'white', 0, 'white', 0, 'white'],
# ['white', 0, 'white', 0, 'white', 0, 'white', 0],
# [0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0],
# [0, 'red', 0, 'red', 0, 'red', 0, 'red'],
# ['red', 0, 'red', 0, 'red', 0, 'red', 0],
# [0, 'red', 0, 'red', 0, 'red', 0, 'red']
# ]
# board = CheckersBoard(root, array)
# board.update_board(array)
# root.mainloop()
| daniasalman63/CIProject_checkers | mycheckers(with_compulsory_jump_move)/GUI.py | GUI.py | py | 3,424 | python | en | code | 0 | github-code | 50 |
36719698309 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'adison'
# @Time : 2017/12/18
import random
import datetime
from functools import reduce
from app.models import db, User, Article, Source, Category, Tag, Comment, Role, Permission
from .helpers import get_category_ids
from ..ext import redis
from config import Config
def utility_processor():
"""自定义模板处理器"""
def archives():
"""
        Return the list of months elapsed from the first article to now.
"""
archives = redis.lrange(Config.CACHE_KEY % "archives", 0, -1)
if not archives:
begin_post = Article.query.order_by('created').first()
now = datetime.datetime.now()
begin_s = begin_post.created if begin_post else now
end_s = now
begin = begin_s
end = end_s
total = (end.year - begin.year) * 12 - begin.month + end.month
archives = [begin]
date = begin
for i in range(total):
if date.month < 12:
date = datetime.datetime(date.year, date.month + 1, 1)
else:
date = datetime.datetime(date.year + 1, 1, 1)
archives.append(date)
archives.reverse()
for archive in archives:
redis.lpush(Config.CACHE_KEY % "archives", archive)
return archives
def category_child_lists(parent=None):
"""
        Return the list of child categories.
        :param parent:
            the parent category, `None` or a `Category` instance
"""
if parent is None:
return
return Category.query.filter_by(parent=parent).order_by(Category.order).all()
def category_lists():
"""
        Return the list of top-level categories.
"""
_query = Category.query.filter(Category.parent_id.is_(None)).order_by(Category.order)
return _query.all()
def category_lists_count():
"""
        Return the list of categories with their article counts.
"""
cate_list = Category.query.all()
return [{"category": cate, "count": cate.articles.count()} for cate in cate_list]
def tag_lists(limit=None):
"""
        Return the list of tags.
        :param limit:
            number of tags to return, `None` or a positive integer
"""
_query = Tag.query
if isinstance(limit, int):
_query = _query.limit(limit)
return _query.all()
def get_related_articles(article_id, limit=10):
"""
        Return related articles for the given article,
        selected by shared tags.
        :param article_id:
            article ID, a positive integer
        :param limit:
            number to return, a positive integer, default 10
"""
        # collect the IDs of all articles sharing a tag with this article
article = Article.query.get(article_id)
if article:
ids = db.session.query('article_id') \
.from_statement('SELECT article_id FROM '
'article_tags WHERE tag_id IN '
'(SELECT tag_id FROM article_tags '
'WHERE article_id=:article_id)') \
.params(article_id=article_id).all()
article_ids = [_id[0] for _id in ids]
article_ids = list(set(article_ids))
if article_id in article_ids:
article_ids.remove(article_id)
random_ids = random.sample(article_ids, min(limit, len(article_ids)))
if article_ids:
return Article.query.public().filter(Article.id.in_(random_ids)).all()
return None
def get_latest_articles(limit=10):
"""
        Return the latest articles.
        :param limit:
            number to return, a positive integer, default 10
"""
_query = Article.query.public()
return _query.limit(int(limit)).all()
def get_top_articles(days=365, limit=10):
"""
        Return the most-viewed articles.
        :param days:
            look-back window in days, e.g. 7 for a week or 30 for a month; default one year
        :param limit:
            number to return, a positive integer, default 10
"""
criteria = []
_start = datetime.date.today() - datetime.timedelta(days)
criteria.append(Article.created >= _start)
q = reduce(db.and_, criteria)
return Article.query.public().filter(q) \
.order_by(Article.hits.desc()) \
.limit(int(limit)).all()
def get_articles_by_category(category_id=0, limit=10):
"""
        Return articles under the given category.
        :param category_id:
            category id, a positive integer
        :param limit:
            number to return, an integer
"""
_query = Article.query.public()
category = Category.query.filter_by(category_id=category_id).first()
if category:
_query = _query.filter_by(category_id=category.id)
return _query.limit(int(limit)).all()
return dict(
Article=Article,
Category=Category,
Tag=Tag,
archives=archives,
get_category_ids=get_category_ids,
get_latest_articles=get_latest_articles,
get_top_articles=get_top_articles,
get_related_articles=get_related_articles,
get_articles_by_category=get_articles_by_category,
category_lists=category_lists,
category_lists_count=category_lists_count,
category_child_lists=category_child_lists,
tag_lists=tag_lists,
)
| adisonhuang/flask-blog | app/utils/processors.py | processors.py | py | 5,516 | python | en | code | 100 | github-code | 50 |
7745509253 | #!/usr/bin/env python
import sys
last_key = None
this_value = 0
final_key = None
final_value = 0

for input_line in sys.stdin:
    input_line = input_line.strip()
    this_key, value = input_line.split("\t", 1)
    value = int(value)
    if last_key == this_key:
        this_value += value
    else:
        # the previous key's running total is complete; test it before resetting
        if last_key is not None and this_value > final_value:
            final_key = last_key
            final_value = this_value
        last_key = this_key
        this_value = value

# the last key group is never followed by a key change, so check it here
if last_key is not None and this_value > final_value:
    final_key = last_key
    final_value = this_value

print("%s\t%d" % (final_key, final_value))
33419576755 | from random import randint, choice
import typing
from discord import Embed
from discord.ext import commands
COLOR = 0xff9933
class Roll(commands.Cog):
"""Introduces some different kinds of random chance."""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def flip(self, ctx):
"""Flip a coin."""
await ctx.channel.send(embed=Embed(
title="Flipped a coin for {}:".format(ctx.author.display_name),
description="It came up {}!".format(choice(["heads", "tails"])),
color=COLOR))
@commands.command()
# , input_str: typing.Optional[str]):
async def roll(self, ctx, maximum: int = 6):
"""Roll some dice."""
await ctx.channel.send(embed=Embed(
title="Rolled a {}-sider for {}:".format(
maximum, ctx.author.display_name
),
description="It was a {}!".format(randint(1, maximum)),
color=COLOR))
# bits = []
# while any([v in input_str for v in ["+", "-"]]):
# bit, add, input_str = input_str.partition(input_str[index])
@commands.command()
async def choose(self, ctx, *, input_str):
"""Choose from a list."""
options = input_str.split(sep="/")
await ctx.channel.send(embed=Embed(
title="Made a choice for {}:".format(ctx.author.display_name),
description="From {} options: {}".format(
len(options),
choice(options)),
color=COLOR))
def setup(bot):
bot.add_cog(Roll(bot))
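# Loading sketch (assumed entry-point wiring): the bot would register this cog
# with `bot.load_extension("extensions.Roll")`, which invokes setup() above.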
| SeeWhatSticks/stick_bot | extensions/Roll.py | Roll.py | py | 1,585 | python | en | code | 0 | github-code | 50 |
21955506801 | import subprocess
import boto3
import click
import os
import webbrowser
from frontend import create_frontend_html_file
@click.command()
@click.option('--bucket-name',
default='localhost',
help='Specify existing S3 bucket to upload to (defaults to localhost)')
def deploy(bucket_name: str):
if bucket_name == 'localhost':
localhost_base_url = 'http://localhost:3000'
with open('index.html', 'w') as html_file:
html_file.write(create_frontend_html_file(f'{localhost_base_url}/fib/'))
subprocess.Popen('sam local start-api'.split(), stdout=subprocess.PIPE)
webbrowser.open(f'file://{os.getcwd()}/index.html', new=2)
return
_deploy_to_aws_frankfurt(bucket_name)
def _deploy_to_aws_frankfurt(bucket_name: str):
package_command = f'sam package --output-template-file packaged.yaml --s3-bucket {bucket_name}'
deploy_command = 'sam deploy --template-file packaged.yaml \
--stack-name ashwin-app \
--capabilities CAPABILITY_IAM \
--region eu-central-1'
print('Packaging application...')
subprocess.Popen(package_command.split(), stdout=subprocess.PIPE).wait()
print('Deploying application (this could take a few minutes)...')
subprocess.Popen(deploy_command.split(), stdout=subprocess.PIPE).wait()
_deploy_frontend_to_s3_bucket(bucket_name)
url = f'http://{bucket_name}.s3-website.eu-central-1.amazonaws.com/'
print(f'Deployed application at: {url}')
webbrowser.open(url, new=2)
def _deploy_frontend_to_s3_bucket(bucket_name: str):
s3 = boto3.resource('s3')
with open('index.html', 'w') as html_file:
        # get_api_urls() returns only the FactorialApi URL, so unpack one value.
        factorial_url = get_api_urls()
html_file.write(create_frontend_html_file(factorial_url))
website_configuration = {
'ErrorDocument': {'Key': 'error.html'},
'IndexDocument': {'Suffix': 'index.html'},
}
s3.meta.client.upload_file('index.html', bucket_name, 'index.html', ExtraArgs={'ContentType': 'text/html'})
boto3.client('s3').put_bucket_website(Bucket=bucket_name, WebsiteConfiguration=website_configuration)
def get_api_urls():
client = boto3.client('cloudformation')
for outputkey_dict in client.describe_stacks(StackName='ashwin-app')['Stacks'][0]['Outputs']:
if outputkey_dict['OutputKey'] == 'FactorialApi':
return outputkey_dict['OutputValue']
if __name__ == '__main__':
deploy()
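# Usage sketch (assumed): `python manage.py` serves the frontend against a local
# SAM API; `python manage.py --bucket-name my-bucket` packages and deploys to AWS.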
| ashwinkumar01/aws_sam_simple_app | manage.py | manage.py | py | 2,504 | python | en | code | 0 | github-code | 50 |
33784838407 | import mmcv
import time
import torch
import warnings
from mmcv.runner.builder import RUNNERS
from mmcv.runner.iter_based_runner import IterBasedRunner, IterLoader
from mmcv.runner.utils import get_host_info
@RUNNERS.register_module()
class IterBasedSSLRunner(IterBasedRunner):
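    """Iteration-based MMCV runner for semi-supervised training.

    Each train step draws one labeled and one unlabeled batch, prefixes their
    keys with ``lab_``/``unlab_``, and passes the merged dict to
    ``model.train_step``.
    """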
def train(self, lab_data_loader, unlab_data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = lab_data_loader
# TODO: I don't know if the above causes hidden problems
self._epoch = lab_data_loader.epoch
lab_data_batch = next(lab_data_loader)
unlab_data_batch = next(unlab_data_loader)
data_batch = dict()
data_batch.update(
{f'lab_{k}': v for k, v in lab_data_batch.items()})
data_batch.update(
{f'unlab_{k}': v for k, v in unlab_data_batch.items()})
data_batch['img_metas'] = lab_data_batch['img_metas']
# print(data_batch)
self.call_hook('before_train_iter')
outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('model.train_step() must return a dict')
if 'log_vars' in outputs:
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_train_iter')
self._inner_iter += 1
self._iter += 1
@torch.no_grad()
def val(self, data_loader, **kwargs):
torch.cuda.empty_cache() # only thing different here
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
data_batch = next(data_loader)
self.call_hook('before_val_iter')
outputs = self.model.val_step(data_batch, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('model.val_step() must return a dict')
if 'log_vars' in outputs:
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_val_iter')
self._inner_iter += 1
def run(self, data_loaders, workflow, max_iters=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, iters) to specify the
running order and iterations. E.g, [('train', 10000),
('val', 1000)] means running 10000 iterations for training and
1000 iterations for validation, iteratively.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(workflow) == 1
assert len(data_loaders) == 2 # Labeled & Unlabeled
lab_data_loader, unlab_data_loader = data_loaders
lab_iter_loader = IterLoader(lab_data_loader)
unlab_iter_loader = IterLoader(unlab_data_loader)
if max_iters is not None:
warnings.warn(
'setting max_iters in run is deprecated, '
'please set max_iters in runner_config', DeprecationWarning)
self._max_iters = max_iters
assert self._max_iters is not None, (
'max_iters must be specified during instantiation')
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('Hooks will be executed in the following order:\n%s',
self.get_hook_info())
self.logger.info('workflow: %s, max: %d iters', workflow,
self._max_iters)
self.call_hook('before_run')
self.call_hook('before_epoch')
while self.iter < self._max_iters:
for i, flow in enumerate(workflow):
self._inner_iter = 0
mode, iters = flow
if not isinstance(mode, str) or not hasattr(self, mode):
raise ValueError(
'runner has no method named "{}" to run a workflow'.
format(mode))
iter_runner = getattr(self, mode)
for _ in range(iters):
assert mode == 'train'
if mode == 'train' and self.iter >= self._max_iters:
break
iter_runner(lab_iter_loader, unlab_iter_loader, **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_epoch')
self.call_hook('after_run') | Divadi/DetMatch | mmdet3d/core/runner/iter_based_ssl_runner.py | iter_based_ssl_runner.py | py | 4,696 | python | en | code | 32 | github-code | 50 |
20922587230 | import gzip
import io
import lz4.frame
import struct
import sys
from .event import Event
import proio.proto as proto
from .writer import magic_bytes
class Reader(object):
"""
Reader for proio files
This class can be used with the `with` statement, and it also may be used
as an iterator that sequentially iterates all events. A filename may be
omitted in favor of specifying `fileobj`.
:param string filename: name of input file to read
:param fileobj: file object to read from
:example:
.. code-block:: python
with proio.Reader('input.proio') as reader:
for event in reader:
...
"""
    def __init__(self, filename = None, fileobj = None):
        self._close_file = False
        if filename is None:
            if fileobj is not None:
                self._stream_reader = fileobj
            else:
                self._stream_reader = io.BytesIO(b'')
        else:
            self._stream_reader = open(filename, 'rb')
            self._close_file = True
        self._bucket_reader = io.BytesIO(b'')
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def __iter__(self):
return self
def __next__(self):
"""
:return: the next event
:rtype: Event
"""
event = self._read_from_bucket(True)
if event is None:
raise StopIteration
return event
if sys.version_info[0] == 2:
def next(self):
return self.__next__()
    def close(self):
        """
        closes the underlying input file object if this Reader opened it.
        """
        if self._close_file:
            self._stream_reader.close()
def next_header(self):
"""
returns the next event header. This is useful for scanning the
stream/file.
:return: the next event header
:rtype: Event
"""
self._read_bucket(sys.maxsize)
return self._bucket_header
def skip(self, n_events):
"""
skips the next `n_events` events.
:param int n_events: number of events to skip
:return: number of events skipped
:rtype: int
"""
try:
bucket_evts_left = self._bucket_header.nEvents - self._bucket_evts_read
except AttributeError:
bucket_evts_left = 0
n_skipped = 0
if n_events > bucket_evts_left:
n_skipped += bucket_evts_left
while True:
n = self._read_bucket(n_events - n_skipped)
if n == 0:
break
n_skipped += n
while n_skipped < n_events:
if self._read_from_bucket(False) == True:
n_skipped += 1
else:
break
return n_skipped
def seek_to_start(self):
"""
seeks, if possible, to the start of the input file object. This can be
used along with :func:`skip` to directly access events.
"""
if self._stream_reader.seekable():
self._stream_reader.seek(0, 0)
self._bucket_reader = io.BytesIO(b'')
self._bucket_header = None
self._bucket_evts_read = 0
def _read_from_bucket(self, do_unmarshal = True):
proto_size_buf = self._bucket_reader.read(4)
if len(proto_size_buf) != 4:
self._read_bucket()
proto_size_buf = self._bucket_reader.read(4)
if len(proto_size_buf) != 4:
return
proto_size = struct.unpack("I", proto_size_buf)[0]
proto_buf = self._bucket_reader.read(proto_size)
if len(proto_buf) != proto_size:
return
self._bucket_evts_read += 1
if do_unmarshal:
event_proto = proto.Event.FromString(proto_buf)
return Event(proto_obj = event_proto)
return True
def _read_bucket(self, max_skip_events = 0):
self._bucket_evts_read = 0
events_skipped = 0
self._bucket_header = None
n = self._sync_to_magic()
if n < len(magic_bytes):
return events_skipped
header_size = struct.unpack("I", self._stream_reader.read(4))[0]
header_string = self._stream_reader.read(header_size)
if len(header_string) != header_size:
return events_skipped
self._bucket_header = proto.BucketHeader.FromString(header_string)
if self._bucket_header.nEvents > max_skip_events:
bucket = self._stream_reader.read(self._bucket_header.bucketSize)
else:
self._bucket_reader = io.BytesIO(b'')
events_skipped = self._bucket_header.nEvents
try:
self._stream_reader.seek(self._bucket_header.bucketSize, 1)
except OSError:
self._stream_reader.read(self._bucket_header.bucketSize)
return events_skipped
if len(bucket) != self._bucket_header.bucketSize:
return events_skipped
if self._bucket_header.compression == proto.BucketHeader.GZIP:
self._bucket_reader = gzip.GzipFile(fileobj = io.BytesIO(bucket), mode = 'rb')
elif self._bucket_header.compression == proto.BucketHeader.LZ4:
try:
uncomp_bytes, _ = lz4.frame.decompress(bucket)
except ValueError:
uncomp_bytes = lz4.frame.decompress(bucket)
self._bucket_reader = io.BytesIO(uncomp_bytes)
else:
self._bucket_reader = io.BytesIO(bucket)
return events_skipped
def _sync_to_magic(self):
n_read = 0
while True:
magic_byte = self._stream_reader.read(1)
if len(magic_byte) != 1:
return -1
n_read += 1
if magic_byte == magic_bytes[0]:
goodSeq = True
for i in range(1, len(magic_bytes)):
magic_byte = self._stream_reader.read(1)
if len(magic_byte) != 1:
return -1
n_read += 1
if magic_byte != magic_bytes[i]:
goodSeq = False
break
if goodSeq:
break
return n_read
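# Random-access sketch (assumed file name): seek_to_start() plus skip() jumps
# directly to the N-th event.
#
# with Reader('input.proio') as reader:
#     reader.skip(100)
#     event = next(reader)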
| decibelcooper/proio | py-proio/proio/reader.py | reader.py | py | 6,390 | python | en | code | 2 | github-code | 50 |
28590851793 | # Instructions
# You are going to write a program that tests the compatibility between two people.
# To work out the love score between two people:
# variables
t_true_count = 0
r_true_count = 0
u_true_count = 0
e_true_count = 0
l_love_count = 0
o_love_count = 0
v_love_count = 0
e_love_count = 0
true_total_count = 0
love_total_count = 0
love_digit = ""
# Take both people's names and count how many times the letters in the word TRUE occur.
name_person1 = input("Please enter your name --->").lower()
name_person2 = input("Please enter your SO's name --->").lower()
# Combining strings
combined_name = name_person1 + name_person2
# Then count how many times the letters in the word LOVE occur.
t_true_count = combined_name.count("t")
r_true_count = combined_name.count("r")
u_true_count = combined_name.count("u")
e_true_count = combined_name.count("e")
l_love_count = combined_name.count("l")
o_love_count = combined_name.count("o")
v_love_count = combined_name.count("v")
e_love_count = combined_name.count("e")
true_total_count = t_true_count + r_true_count + u_true_count + e_true_count
love_total_count = l_love_count + o_love_count + v_love_count + e_love_count
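# Worked example (hypothetical input): for "Angela Yu" and "Jack Bauer" the
# combined name contains t:0 r:1 u:2 e:2 (TRUE total 5) and l:1 o:0 v:0 e:2
# (LOVE total 3), giving a love score of 53.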
# print("TRUE -----------------")
# print(t_true_count)
# print(r_true_count)
# print(u_true_count)
# print(e_true_count)
# print(" LOVE-----------------")
# print(l_love_count)
# print(o_love_count)
# print(v_love_count)
# print(e_love_count)
# print("TOTAL-----------------")
# print(true_total_count)
# print(love_total_count)
# Then combine these numbers to make a 2 digit number.
love_digit = str(true_total_count) + str(love_total_count)
love_digit = int(love_digit)
# print(type(love_digit))
# print(love_digit)
# For Love Scores less than 10 or greater than 90, the message should be:
# "Your score is **x**, you go together like coke and mentos."
if love_digit < 10 or love_digit > 90:
print(f"Your score is {love_digit}, you go together like coke and mentos")
# For Love Scores between 40 and 50, the message should be:
# "Your score is **y**, you are alright together."
elif love_digit > 40 and love_digit < 50:
print(f"Your score is {love_digit}, you are alright together.")
# Otherwise, the message will just be their score. e.g.:
# "Your score is **z**."
else:
print(f"Your score is {love_digit}") | robsdata/100daysofcode_python-2023 | day_01-10/day-3/love-calculator.py | love-calculator.py | py | 2,340 | python | en | code | 2 | github-code | 50 |
74283401435 | from django.shortcuts import render
from .models import About, Skills, Edeucation, Experience
# Create your views here.
def home(request):
about=About.objects.last()
coding_skills=Skills.objects.filter(type='Coding')
design_skills=Skills.objects.filter(type='Design')
edeucation=Edeucation.objects.all()
experience=Experience.objects.all()
return render(request,'home.html',{
'about':about,
'coding_skills':coding_skills,
'design_skills':design_skills,
'edeucation':edeucation,
'experience':experience
})
| Hammuda007/django-blog | about/views.py | views.py | py | 582 | python | en | code | 2 | github-code | 50 |
25156324356 | import matplotlib.pyplot as plt
import ai
from ai.examples.diffusion.model import DiffusionMLP
EVAL_BS = 1000
def run(outpath, device='cpu', n_steps=5000, train_bs=32, sample_interval=500):
ds = ai.data.toy.moons(n=8000, include_labels=False, mult=2.)
model = DiffusionMLP(2).init().to(device)
opt = ai.opt.AdamW(model, lr=1e-3, grad_clip=True)
trial = ai.Trial(
outpath,
clean=True,
sampler=_save_samples,
sample_interval=sample_interval,
)
ai.Trainer(
ai.train.Diffusion(),
ds.iterator(train_bs, device, train=True),
).train(model, opt, trial.hook(), steplimit=n_steps)
def _save_samples(dir, step, model):
dir = dir / str(step)
dir.mkdir(parents=True, exist_ok=True)
for i, frame in enumerate(model.sample(EVAL_BS, frame_rate=10)):
plt.figure(figsize=(10, 10))
plt.scatter(frame[:, 0], frame[:, 1])
plt.xlim(-6, 6)
plt.ylim(-6, 6)
plt.savefig(dir / f'{i:04}.png')
plt.close()
if __name__ == '__main__':
ai.run(run)
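# Invocation sketch (assumes ai.run exposes run()'s parameters as CLI flags):
#   python main.py ./out --device cuda --n_steps 5000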
| calvinpelletier/ai | examples/diffusion/main.py | main.py | py | 1,071 | python | en | code | 0 | github-code | 50 |
36488985083 | def roman_to_integer(roman):
roman_numerals = {
'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000
}
result = 0
prev_value = 0
for numeral in reversed(roman):
value = roman_numerals[numeral]
if value < prev_value:
result -= value
else:
result += value
prev_value = value
return result
roman_numeral = "IX"
integer_value = roman_to_integer(roman_numeral)
print(integer_value)
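# Extra sanity check (added): subtractive pairs across magnitudes.
assert roman_to_integer("MCMXCIV") == 1994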
| Leelamanikanta01/assigment1_python | Assignment.py | Assignment.py | py | 481 | python | en | code | 0 | github-code | 50 |
74459541275 | from flask_restful import Resource
from flask_jwt import jwt_required
from models.request import RequestModel
from models.client import ClientModel
from models.parser import Parser
class Request(Resource):
"""Request endpoint for url/request"""
@jwt_required()
def post(self):
"""Post endpoint for adding requests into the database.
Headers: {Authorization: JWT jwt_token, ContentType: application/json}
Body must be json with priority, target_date, product_area, client_name,
        title and description fields. If the database has no matching client_name,
        returns a json message and a 400 error. If an error occurs while inserting
        into the database, returns a json message and a 500 error. On successful
        insert, returns json of the request and a 201 code."""
parser = Parser()
parser.required_fields(
'priority',
'target_date',
'product_area',
'client_name',
'title',
'description')
data = parser.parse_args()
if not ClientModel.select(data['client_name']):
return {
'message': 'Client does not exist'}, 400
request = RequestModel(**data)
client_name = data['client_name']
priority = data['priority']
update_list = []
try:
while RequestModel.select_same_priority(client_name, priority):
update_list.append(
RequestModel.select_same_priority(
client_name, priority))
priority += 1
request.save_to_db()
for req in update_list:
req.priority += 1
req.save_to_db()
        except Exception:
return {'message': 'Something went wrong'}, 500
return request.json(), 201
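# Example call against the Request endpoint (host, token and field values are
# illustrative assumptions):
#   curl -X POST http://localhost:5000/request \
#        -H "Authorization: JWT <token>" -H "Content-Type: application/json" \
#        -d '{"priority": 1, "target_date": "2019-06-01", "product_area": "Billing",
#             "client_name": "Client A", "title": "New report", "description": "..."}'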
class RequestID(Resource):
"""Request endpoint for url/request/<int:request_id>"""
@jwt_required()
def get(self, request_id):
"""Get endpoint for request with matching id in the database.
Headers: {Authorization: JWT jwt_token, ContentType: application/json}
If no request with ID found will return json message 404 error. If found
returns json of request with matching ID and 200 code."""
request = RequestModel.select_by_id(request_id)
if request:
return request.json(), 200
return {'message': 'Request not found'}, 404
@jwt_required()
def delete(self, request_id):
"""Get endpoint for request with matching id in the database.
Headers: {Authorization: JWT jwt_token, ContentType: application/json}
If no request with ID found will return json message and 404 error. If
found and deleted returns 200 code."""
request = RequestModel.select_by_id(request_id)
if request:
request.delete_from_db()
return {'message': 'Request deleted'}, 200
return {'message': 'Request not found'}, 404
class RequestList(Resource):
"""Request endpoint for url/requests"""
@jwt_required()
def get(self):
"""Get endpoint for all requests.
Headers: {Authorization: JWT jwt_token, ContentType: application/json}
Returns json object of all requests and 200 code."""
return {'requests': [request.json()
for request in RequestModel.find_all()]}, 200
| Connor13C/BriteCore_Demo | resources/request.py | request.py | py | 3,431 | python | en | code | 0 | github-code | 50 |
13505764959 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 14:23:31 2020
Script to create directory structure for any project.
@author: rt2
"""
import os
dirs = ["input", "src", "models", "notebooks"]
def main():
for dirName in dirs:
try:
# Create target Directory
os.makedirs(dirName)
print("Directory " , dirName , " Created ")
except FileExistsError:
print("Directory " , dirName , " already exists")
if __name__ == "__main__":
main()
| rahul-trip/DataSc | supporting_scripts/create_dir_struct.py | create_dir_struct.py | py | 511 | python | en | code | 0 | github-code | 50 |
14085193648 | import requests
# Read RFID tag IDs from stdin and report each scan to the backend.
while True:
    a = input()
    c = requests.get(
        "https://zhuan-ti-hou-duan.onrender.com/bookTouchShelf",
        params={"rfid": a, "touchShelf": "A1"},
    )
    print(c)  # Response object, e.g. <Response [200]>
    print(a)
| kw404/rfid_bookshelf | RFID_code/import requests.py | import requests.py | py | 235 | python | en | code | 0 | github-code | 50 |
5003032733 | from browser import html, window
from typing import Callable, Literal
ACCENTS = Literal['primary', 'secondary', 'success', 'danger', 'warning', 'info', 'light', 'dark', 'link']
########################################################################
class MDCObject():
""""""
# ----------------------------------------------------------------------
def __init__(self, view, mdc, element, name='MDCObject'):
"""Constructor"""
self.__mdc__ = mdc
self.__view__ = view
self.__element__ = element
# ----------------------------------------------------------------------
def __getattr__(self, attr):
""""""
# check first for existence in view model, the method in view model can use MDC call
if hasattr(self.__view__, attr):
def inset(*args, **kwargs):
# must be a classmethod
return getattr(self.__view__, attr)(self.__element__, *args, **kwargs)
return inset
# else, define in the view models then look up in MDC (foundation is not defined here)
elif hasattr(self.__mdc__, attr):
return getattr(self.__mdc__, attr)
# ----------------------------------------------------------------------
def __getitem__(self, item):
""""""
# views can define and use a __getitem__ as a shortcut
return self.__view__[item]
########################################################################
class Base:
""""""
NAME = None
CSS_classes = {}
MDC_optionals = {}
# ----------------------------------------------------------------------
@classmethod
def render(cls, locals_, kwargs, attach_now=True):
""""""
context = locals_.copy()
if 'self' in context:
context.pop('self')
if 'kwargs' in context:
context.pop('kwargs')
context.update(**kwargs)
cls.html_element = cls.__html__(**context)
if attach_now:
cls.attach()
if 'Class' in kwargs:
if cls.html_element.class_name:
cls.html_element.class_name += ' {Class}'.format(
**kwargs)
else:
cls.html_element.class_name = '{Class}'.format(**kwargs)
if 'id' in kwargs:
cls.html_element.attrs['id'] = kwargs['id']
else:
cls.ID = cls.new_id()
kwargs['id'] = cls.ID
if 'style' in kwargs:
if kwargs['style']:
cls.html_element.style = kwargs['style']
if 'attr' in kwargs:
for attr in kwargs['attr']:
cls.html_element.attrs[attr] = kwargs['attr'][attr]
for arg in kwargs:
if arg.startswith('on_'):
# print(f"{arg.replace('on_', '')}, {kwargs[arg]}")
cls.html_element.addEventListener(arg.replace('on_', ''), kwargs[arg])
return cls.html_element
# ----------------------------------------------------------------------
@classmethod
def render_html(cls, code, context):
""""""
classes = []
for key in cls.CSS_classes.keys():
if context.get(key, False):
classes.append(cls.CSS_classes[key])
context['CSS_classes'] = ' '.join(classes)
for key in cls.MDC_optionals.keys():
if context.get(key, False):
try:
# context[key] = cls.MDC_optionals[key].format(**context)
context[key] = eval(f"""f'''{cls.MDC_optionals[key]}'''""", context)
# context[key] = eval(f'f"{cls.MDC_optionals[key]}"', context)
except:
context[key] = cls.MDC_optionals[key]
else:
context[key] = ''
# code = code.format(**context)
code = eval(f"""f'''{code}'''""", context)
return cls.render_str(code)
# ----------------------------------------------------------------------
@classmethod
def render_str(cls, code):
""""""
return html.DIV(code.strip()).children[0]
# ----------------------------------------------------------------------
    @classmethod
    def bind(cls, element, evt, callback: Callable):
        """Attach a DOM event listener to ``element``."""
        element.addEventListener(evt, callback)
# ----------------------------------------------------------------------
@classmethod
def attach(cls):
""""""
if cls.NAME is None:
cls.html_element.bs = MDCObject(cls, None, cls.html_element)
cls.bs = cls.html_element.bs
return
cls.html_element.bs = MDCObject(cls, None, cls.html_element)
cls.bs = cls.html_element.bs
cls.bs = getattr(window.bootstrap, cls.NAME).new(cls.html_element)
cls.html_element.bs = MDCObject(cls, cls.bs, cls.html_element)
# ----------------------------------------------------------------------
@classmethod
def new_id(cls):
""""""
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
import random
return ''.join([random.choice(ascii_lowercase) for l in range(2**5)])
# ----------------------------------------------------------------------
@classmethod
def get_id(cls, element=None):
""""""
return cls.ID
| UN-GCPDS/radiant-framework | radiant/static/modules/brython/bootstrap/base.py | base.py | py | 5,345 | python | en | code | 5 | github-code | 50 |
28477546383 | import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import glob
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score
import random
import cv2
import sys
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
tumor = []
healthy = []
for f in glob.iglob("./data/brain_tumor_dataset/yes/*.png"):
img = cv2.imread(f)
img = cv2.resize(img,(128,128))
b, g, r = cv2.split(img)
img = cv2.merge([r,g,b])
tumor.append(img)
for f in glob.iglob("./data/brain_tumor_dataset/no/*.png"):
img = cv2.imread(f)
img = cv2.resize(img,(128,128))
b, g, r = cv2.split(img)
img = cv2.merge([r,g,b])
healthy.append(img)
healthy = np.array(healthy)
tumor = np.array(tumor)
All = np.concatenate((healthy, tumor))
#print(healthy.shape)
#print(tumor.shape)
#print(np.random.choice(10, 5, replace=False))
def plot_random(healthy, tumor, num=5):
healthy_imgs = healthy[np.random.choice(healthy.shape[0], num, replace=False)]
tumor_imgs = tumor[np.random.choice(tumor.shape[0], num, replace=False)]
plt.figure(figsize=(16,9))
for i in range(num):
plt.subplot(1, num, i+1)
plt.title('healthy')
plt.imshow(healthy_imgs[i])
#plt.show()
plt.figure(figsize=(16,9))
for i in range(num):
plt.subplot(1, num, i+1)
plt.title('tumor')
plt.imshow(tumor_imgs[i])
#plt.show()
#plot_random(healthy, tumor, num=5)
# Dataset wrapper and training pipeline
class MRIDataset(Dataset):
def __init__(self, healthy_data, tumor_data):
self.healthy_data = healthy_data
self.tumor_data = tumor_data
def __len__(self):
return len(self.healthy_data) + len(self.tumor_data)
def __getitem__(self, idx):
if idx < len(self.healthy_data):
img = self.healthy_data[idx]
label = 0 # 0 for healthy
else:
img = self.tumor_data[idx - len(self.healthy_data)]
label = 1 # 1 for tumor
# Convert image to tensor
img = torch.tensor(img, dtype=torch.float32).permute(2, 0, 1) / 255.0
return img, label
# Split data into train and validation sets
train_size = 0.8
train_healthy = healthy[:int(len(healthy) * train_size)]
train_tumor = tumor[:int(len(tumor) * train_size)]
val_healthy = healthy[int(len(healthy) * train_size):]
val_tumor = tumor[int(len(tumor) * train_size):]
# Create train and validation datasets using the new MRIDataset class
train_dataset = MRIDataset(train_healthy, train_tumor)
val_dataset = MRIDataset(val_healthy, val_tumor)
# Define model architecture
class MRIClassifier(nn.Module):
def __init__(self):
super(MRIClassifier, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 16 * 16, 128)
self.fc2 = nn.Linear(128, 2)
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = self.pool(nn.functional.relu(self.conv3(x)))
x = x.view(-1, 64 * 16 * 16)
x = nn.functional.relu(self.fc1(x))
x = self.fc2(x)
return x
# Create model instance and optimizer
model = MRIClassifier()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Define loss function
criterion = nn.CrossEntropyLoss()
# Define data loaders using the new MRIDataset class
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
# Train the model
num_epochs = 30
for epoch in range(num_epochs):
# Train the model on the training set
model.train()
train_loss = 0
train_acc = 0
for images, labels in train_loader:
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item() * images.size(0)
_, preds = torch.max(outputs, 1)
train_acc += torch.sum(preds == labels.data)
train_loss /= len(train_loader.dataset)
train_acc = train_acc.double() / len(train_loader.dataset)
# Evaluate the model on the validation set
model.eval()
val_loss = 0
val_acc = 0
with torch.no_grad():
for images, labels in val_loader:
outputs = model(images)
loss = criterion(outputs, labels)
val_loss += loss.item() * images.size(0)
_, preds = torch.max(outputs, 1)
val_acc += torch.sum(preds == labels.data)
val_loss /= len(val_loader.dataset)
val_acc = val_acc.double() / len(val_loader.dataset)
print(f"Epoch {epoch+1}/{num_epochs}: Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
| nikunjt0/Brain-MRI-Tumor-Detector | trainingModel.py | trainingModel.py | py | 5,047 | python | en | code | 0 | github-code | 50 |
10214224472 | import json
import argparse
import contextlib
import sh
import subprocess
import pathlib
from allennlp.commands.train import train_model
from allennlp.common import Params
# from smbop.dataset_readers.spider_basic_pkl import SmbopSpiderDatasetReader
from smbop.dataset_readers.spider_retriever_nn import SmbopSpiderRetrieverDatasetReader
from smbop.dataset_readers.spider_retriever_nn_val import SmbopSpiderRetrieverValDatasetReader
# from smbop.vocabulary.vocab_retriever import RetriverSpiderVocabulary
# from smbop.models.smbop_vanilla import SmbopParser
# from smbop.models.cbr_smbop import CBRSmbopParser
# from smbop.models.smbop_pretrain import SmbopSimPretrained
from smbop.models.retriever_soft_no_leafs import SmbopSimPretrained
# from smbop.models.retriever_soft import SmbopSimPretrained
from smbop.modules.relation_transformer import RelationTransformer
from smbop.modules.lxmert import LxmertCrossAttentionLayer
from smbop.data_loaders.data_loader_retriever import RetriverMultiProcessDataLoader
import namegenerator
import os
import torch
def to_string(value):
if isinstance(value, list):
return [to_string(x) for x in value]
elif isinstance(value, bool):
return "true" if value else "false"
else:
return str(value)
def run():
parser = argparse.ArgumentParser(allow_abbrev=True)
parser.add_argument("--name", nargs="?")
parser.add_argument("--force", action="store_true",
help="""If True, we will overwrite the serialization
directory if it already exists.""")
parser.add_argument("--gpu", type=str, default="0")
parser.add_argument("--recover", action="store_true",
help= """If True, we will try to recover a training run
from an existing serialization directory.
This is only intended for use when something
actually crashed during the middle of a run.
For continuing training a model on new data,
see Model.from_archive.""")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--detect_anomoly", action="store_true") #IDK
parser.add_argument("--profile", action="store_true") #IDK: some sort of debugging funct
parser.add_argument("--is_oracle", action="store_true")
parser.add_argument("--tiny_dataset", action="store_true")
parser.add_argument("--load_less", action="store_true")
parser.add_argument("--cntx_rep", action="store_true")
parser.add_argument("--cntx_beam", action="store_true")
parser.add_argument("--disable_disentangle_cntx", action="store_true")
parser.add_argument("--disable_cntx_reranker", action="store_true")
parser.add_argument("--disable_value_pred", action="store_true")
parser.add_argument("--disable_use_longdb", action="store_true")
parser.add_argument("--uniquify", action="store_true")
parser.add_argument("--use_bce", action="store_true")
parser.add_argument("--tfixup", action="store_true")
parser.add_argument("--train_as_dev", action="store_true")
parser.add_argument("--disable_amp", action="store_true")
parser.add_argument("--disable_utt_aug", action="store_true")
parser.add_argument("--should_rerank", action="store_true")
parser.add_argument("--use_treelstm", action="store_true")
parser.add_argument("--disable_db_content", action="store_true",
help="Run with this argument (once) before pre-proccessing to reduce the pre-proccessing time by half \
This argument causes EncPreproc to not perform IR on the largest tables. ")
parser.add_argument("--lin_after_cntx", action="store_true")
parser.add_argument("--optimizer", type=str, default="adam")
parser.add_argument("--rat_layers", type=int, default=8)
parser.add_argument("--beam_size", default=30, type=int)
parser.add_argument("--base_dim", default=32, type=int)
parser.add_argument("--num_heads", default=8, type=int)
parser.add_argument("--beam_encoder_num_layers", default=1, type=int)
parser.add_argument("--tree_rep_transformer_num_layers", default=1, type=int)
parser.add_argument("--dropout", default=0.1, type=float)
parser.add_argument("--rat_dropout", default=0.2, type=float)
parser.add_argument("--lm_lr", default=3e-6, type=float)
parser.add_argument("--lr", type=float, default=0.000186)
parser.add_argument("--batch_size", default=20, type=int)
parser.add_argument("--grad_acum", default=4, type=int)
parser.add_argument("--max_steps", default=60000, type=int)
parser.add_argument("--power", default=0.5, type=float)
parser.add_argument("--temperature", default=1.0, type=float)
parser.add_argument("--grad_clip", default=-1, type=float)
parser.add_argument("--grad_norm", default=-1, type=float)
default_dict = {k.option_strings[0][2:]: k.default for k in parser._actions}
args = parser.parse_args()
diff = "_".join(
[
f"{key}{value}"
for key, value in vars(args).items()
if (key != "name" and value != default_dict[key])
]
) #vars which differ from default
ext_vars = {}
for key, value in vars(args).items():
if key.startswith("disable"):
new_key = key.replace("disable_", "")
ext_vars[new_key] = to_string(not value)
else:
ext_vars[key] = to_string(value)
    print(ext_vars)  # all args stringified; disable_* flags inverted into their positive names
default_config_file = "configs/retriever.jsonnet" # NOTE: defaults.jsonnet was original value
    overrides_dict = {}
    if args.profile:
        # Nest the dict properly; indexing overrides_dict["trainer"] directly
        # would raise a KeyError.
        overrides_dict["trainer"] = {"num_epochs": 1}
experiment_name_parts = []
experiment_name_parts.append(namegenerator.gen())
if diff:
experiment_name_parts.append(diff)
if args.name:
experiment_name_parts.append(args.name)
experiment_name = "_".join(experiment_name_parts)
print(f"experiment_name: {experiment_name}")
ext_vars["experiment_name"] = experiment_name
overrides_json = json.dumps(overrides_dict)
settings = Params.from_file(
default_config_file,
ext_vars=ext_vars,
params_overrides=overrides_json,
)
print(settings)
prefix = ""
# prefix = "/home/ohadr/"
prefix = "/mnt/infonas/data/alirehan/smbop/try_train/"
assert not pathlib.Path(f"{prefix}experiments/{experiment_name}").exists()
# sh.ln("-s", f"{prefix}/experiments/{experiment_name}", f"experiments/{experiment_name}")
pathlib.Path(f"backup").mkdir(exist_ok=True)
pathlib.Path(f"cache").mkdir(exist_ok=True)
# pathlib.Path(f"experiments/{experiment_name}").mkdir(exist_ok=True)
# subprocess.check_call(
# f"git ls-files | tar Tzcf - backup/{experiment_name}.tgz", shell=True
# )
    # `cntx` must be defined on every path; default to a no-op context
    # (a profiler context could be substituted here when --profile is set).
    cntx = contextlib.nullcontext()
    with cntx:
train_model(
params=settings,
serialization_dir=f"{prefix}experiments/{experiment_name}",
recover=args.recover,
force=True,
)
if __name__ == "__main__":
# os.environ['CUDA_VISIBLE_DEVICE']="2"
# os.environ['NVIDIA_VISIBLE_DEVICE']="2"
print(torch.cuda.get_device_name(0))
run()
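# Example invocation (flag values are illustrative):
#   python exec_retriever.py --name my_run --gpu 0 --batch_size 20 --max_steps 60000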
| ali6947/NeuralSemanticParser | exec_retriever.py | exec_retriever.py | py | 7,408 | python | en | code | 0 | github-code | 50 |
23388235853 | import requests
from bs4 import BeautifulSoup
from weather.models import Weather
from pprint import pprint
# Crawl Naver weather for each region
def forecast():
    cities = ['서울특별시',
              '인천광역시',
              '부산광역시',
              '대구광역시',
              '광주광역시',
              '대전광역시',
              '울산광역시',
              '세종특별자치시',
              '경기도',
              '강원도',
              '충청북도',
              '충청남도',
              '전라북도',
              '전라남도',
              '경상북도',
              '경상남도',
              '제주']
for city in cities:
url = f"https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query={city}날씨"
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"}
        res = requests.get(url, headers=headers)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')
week = soup.find("div", attrs={"class":"list_box _weekly_weather"})
week = week.find_all("div", attrs={"class":"day_data"})
for day in week:
            day_date = day.find("span", attrs={"class":"date"}).get_text()  # date
            day_temperature_lowest = day.find("span", attrs={"class":"lowest"}).get_text()  # lowest temperature
            day_temperature_lowest = day_temperature_lowest.replace("최저기온", "").replace("°", "")
            day_temperature_highest = day.find("span", attrs={"class":"highest"}).get_text()  # highest temperature
            day_temperature_highest = day_temperature_highest.replace("최고기온", "").replace("°", "")
            day_blind = day.find("span", attrs={"class":"blind"}).get_text()  # weather condition
            day_temperature = int(int(day_temperature_highest) + int(day_temperature_lowest)) / 2  # average temperature
weather = Weather()
weather.city = city
weather.day_date = day_date
weather.day_temperature_lowest = day_temperature_lowest
weather.day_temperature_highest = day_temperature_highest
weather.day_temperature = day_temperature
weather.day_blind = day_blind
weather.save()
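# forecast() is typically invoked on a schedule (an assumption), e.g. from a
# Django management command or a cron-driven task, to refresh Weather rows.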
| devjunseok/off_the_outfit_backend | weather/crawling.py | crawling.py | py | 2,358 | python | en | code | 1 | github-code | 50 |
4644130279 | import json
import faiss
import numpy as np
from sklearn import preprocessing
from config.constant import EMBEDDING_DIMENSION
def load_db(db_path, use_gpu = False):
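    """Load a JSON feature DB mapping id -> list of embeddings into a FAISS index.

    Embeddings are L2-normalised before insertion, so the inner-product index
    effectively ranks by cosine similarity. Returns (list_len, list_id, index):
    per-id embedding counts, the ids in insertion order, and the populated index.
    """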
with open(db_path, 'r') as f:
db = json.load(f)
first_time = True
list_feature = []
list_id = []
list_len = []
for k,v in db.items():
list_id.append(k)
list_len.append(len(v))
if first_time:
d = np.array(v[0]).shape[1]
index = faiss.IndexFlatIP(d)
if use_gpu:
device = faiss.StandardGpuResources() # use a single GPU
index = faiss.index_cpu_to_gpu(device, 0, index)
first_time = False
for feature in v:
list_feature.append(np.array(feature).astype('float32').reshape(1,EMBEDDING_DIMENSION))
list_feature = np.concatenate(list_feature , axis=0)
list_feature_new = preprocessing.normalize(list_feature, norm='l2')
index.add(list_feature_new)
return list_len, list_id, index | BarryZM/Dialog_generate_tool | FaceRecognition/utils/load_faiss.py | load_faiss.py | py | 1,026 | python | en | code | 0 | github-code | 50 |
32637069210 |
import csv
import math
from django.core.management.base import BaseCommand
from django.core.exceptions import ValidationError
from hs_core.hydroshare import convert_file_size_to_unit
from theme.models import UserQuota
from hs_core.hydroshare.resource import get_quota_usage_from_irods
class Command(BaseCommand):
help = "Output potential quota inconsistencies between iRODS and Django for all users in HydroShare"
def add_arguments(self, parser):
parser.add_argument('output_file_name_with_path', help='output file name with path')
def handle(self, *args, **options):
quota_report_list = []
for uq in UserQuota.objects.filter(
user__is_active=True).filter(user__is_superuser=False):
used_value = 0.0
try:
used_value = get_quota_usage_from_irods(uq.user.username)
except ValidationError:
pass
used_value = convert_file_size_to_unit(used_value, "gb")
if not math.isclose(used_value, uq.used_value, abs_tol=0.1):
# report inconsistency
report_dict = {
'user': uq.user.username,
'django': uq.used_value,
'irods': used_value}
quota_report_list.append(report_dict)
                print('quota inconsistency: {} reported in django vs {} reported in iRODS for user {}'.format(
                    uq.used_value, used_value, uq.user.username), flush=True)
if quota_report_list:
with open(options['output_file_name_with_path'], 'w') as csvfile:
w = csv.writer(csvfile)
                fields = [
                    'User',
                    'Quota reported in Django',
                    'Quota reported in iRODS'
                ]
w.writerow(fields)
for q in quota_report_list:
values = [
q['user'],
q['django'],
q['irods']
]
w.writerow([str(v) for v in values])
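# Usage sketch (command name follows the module name):
#   python manage.py report_quota_inconsistency /tmp/quota_report.csv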
| hydroshare/hydroshare | theme/management/commands/report_quota_inconsistency.py | report_quota_inconsistency.py | py | 2,117 | python | en | code | 171 | github-code | 50 |
8122948448 | import os
from os.path import join
from datetime import timedelta
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from keycloak_oidc.default_settings import *
import urllib.parse
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = os.environ.get('DJANGO_DEBUG') == 'True'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party apps
'rest_framework', # utilities for rest apis
'rest_framework.authtoken', # TODO: remove once all user management is done using Grip
'django_filters', # for filtering rest endpoints
'drf_yasg', # for generating real Swagger/OpenAPI 2.0 specifications
'constance',
'constance.backends.database', # for dynamic configurations in admin
'mozilla_django_oidc', # for authentication
'webpack_loader',
'multiselectfield',
'keycloak_oidc',
'web.core',
'web.documents',
'web.timeline',
'web.users',
'web.organizations',
'web.roles',
'web.cases',
'web.profiles',
'web.forms',
'web.feedback',
)
SOURCE_COMMIT = os.environ.get('COMMIT_HASH')
BRANCH_NAME = os.environ.get('BRANCH_NAME')
# https://docs.djangoproject.com/en/2.0/topics/http/middleware/
MIDDLEWARE = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'mozilla_django_oidc.middleware.SessionRefresh',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = os.environ.get('DJANGO_ROOT_URLCONF', 'web.urls')
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'local')
WSGI_APPLICATION = 'wsgi.application'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
SENDGRID_KEY = os.environ.get("SENDGRID_KEY")
ADMINS = (
('admin', 'maurice@mgui.nl'),
)
AUTH_USER_MODEL = 'users.User'
# Database
DEFAULT_DATABASE_NAME = 'default'
if os.environ.get('DATABASE_NAME'):
DATABASES = {
DEFAULT_DATABASE_NAME: {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DATABASE_NAME'),
'USER': os.environ.get('DATABASE_USER'),
'PASSWORD': os.environ.get('DATABASE_PASSWORD'),
'HOST': os.environ.get('DATABASE_HOST', 'database'),
'PORT': '5432',
}
}
else:
DATABASES = {
DEFAULT_DATABASE_NAME: {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# General
APPEND_SLASH = True
TIME_ZONE = 'Europe/Amsterdam'
LANGUAGE_CODE = 'nl-nl'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = os.environ.get('DJANGO_STATIC_URL', '/static/')
STATIC_ROOT = os.path.normpath(join(os.path.dirname(BASE_DIR), 'static'))
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.normpath(join(os.path.dirname(BASE_DIR), 'media'))
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'web.core.context_processors.app_settings',
],
# 'loaders': [
# ('django.template.loaders.cached.Loader', [
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# ]),
# ],
},
},
]
# Password Validation
# https://docs.djangoproject.com/en/2.0/topics/auth/passwords/#module-django.contrib.auth.password_validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Django Rest Framework
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 10)),
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication'
)
}
# Mail (overrides the SMTP backend configured earlier; presumably for local development)
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# CORS and allowed hosts
ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '*').split(',')
CORS_ORIGIN_WHITELIST = os.environ.get('CORS_ORIGIN_WHITELIST', '').split(',')
CORS_ORIGIN_ALLOW_ALL = False
AUTH_GROUPNAME_PROCESS = 'proces'
LOGIN_URL = '/#login'
SWAGGER_SETTINGS = {
'LOGIN_URL': '/admin/login/',
'LOGOUT_URL': '/admin/logout/'
}
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_ALLOW_DATA_ACCESS_KEY = 'ALLOW_DATA_ACCESS'
CONSTANCE_BRK_AUTHENTICATION_TOKEN_KEY = 'BRK_AUTHENTICATION_TOKEN'
CONSTANCE_BRK_AUTHENTICATION_TOKEN_EXPIRY_KEY = 'BRK_AUTHENTICATION_TOKEN_EXPIRY'
CONSTANCE_HOMEPAGE_INTRO_KEY = 'HOMEPAGE_INTRO'
CONSTANCE_FEEDBACK_RECIPIENT_LIST_KEY = 'FEEDBACK_RECIPIENT_LIST'
CONSTANCE_CONFIG = {
CONSTANCE_ALLOW_DATA_ACCESS_KEY: (True, 'Allow data to be accesible through the API'),
CONSTANCE_BRK_AUTHENTICATION_TOKEN_KEY: ('', 'Authentication token for accessing BRK API'),
CONSTANCE_BRK_AUTHENTICATION_TOKEN_EXPIRY_KEY: ('', 'Expiry date for BRK API token'),
CONSTANCE_HOMEPAGE_INTRO_KEY: ('', 'Homepage introduction html'),
CONSTANCE_FEEDBACK_RECIPIENT_LIST_KEY: ('', 'Feedback ontvangers lijst(kommagescheiden)'),
}
# AUTHENTICATION_BACKENDS = [
# 'keycloak_oidc.auth.OIDCAuthenticationBackend',
# ]
OIDC_RP_CLIENT_ID = os.environ.get('IAM_CLIENT_ID')
OIDC_RP_CLIENT_SECRET = os.environ.get('IAM_CLIENT_SECRET')
# OIDC_OP_LOGOUT_URL_METHOD = 'api.users.utils.oidc_op_logout'
# OIDC_USERNAME_ALGO = 'api.users.utils.generate_username'
# OIDC_RP_SIGN_ALGO = 'RS256'
# OIDC_RP_SCOPES = 'openid'
# OIDC_USE_NONCE = False
if os.environ.get("IAM_URL"):
IAM_URL = os.environ.get('IAM_URL', 'https://iam.amsterdam.nl/auth/realms/datapunt-acc/protocol/openid-connect/')
OIDC_OP_AUTHORIZATION_ENDPOINT = urllib.parse.urljoin(IAM_URL, 'auth')
OIDC_OP_TOKEN_ENDPOINT = urllib.parse.urljoin(IAM_URL, 'token')
OIDC_OP_USER_ENDPOINT = urllib.parse.urljoin(IAM_URL, 'userinfo')
OIDC_OP_JWKS_ENDPOINT = urllib.parse.urljoin(IAM_URL, 'certs')
OIDC_OP_LOGOUT_ENDPOINT = urllib.parse.urljoin(IAM_URL, 'logout')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG'
},
},
'loggers': {
'mozilla_django_oidc': {
'handlers': ['console'],
'level': 'DEBUG'
},
}
}
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map'],
'LOADER_CLASS': 'webpack_loader.loader.WebpackLoader',
}
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(hours=4),
# We don't refresh tokens yet, so we set refresh lifetime to zero
'REFRESH_TOKEN_LIFETIME': timedelta(seconds=0),
}
ACCESS_LOG_EXEMPTIONS = (
'/state/health',
)
# BRK Access request settings
BRK_ACCESS_CLIENT_ID = os.getenv('BRK_ACCESS_CLIENT_ID')
BRK_ACCESS_CLIENT_SECRET = os.getenv('BRK_ACCESS_CLIENT_SECRET')
BRK_ACCESS_URL = os.getenv('BRK_ACCESS_URL')
BRK_API_OBJECT_EXPAND_URL = 'https://acc.api.data.amsterdam.nl/brk/object-expand/'
BAG_API_SEARCH_URL = 'https://api.data.amsterdam.nl/atlas/search/adres/'
# swift storage
if os.environ.get("SWIFT_AUTH_URL"):
SWIFT_BASE_URL = 'https://%s.%s' % (os.environ.get("SWIFT_PROJECT_ID"), os.environ.get("SWIFT_EXTERNAL_DOMAIN"))
SWIFT_AUTH_URL = os.environ.get("SWIFT_AUTH_URL")
SWIFT_USERNAME = os.environ.get("SWIFT_USER")
SWIFT_PASSWORD = os.environ.get("SWIFT_PASSWORD")
SWIFT_TENANT_ID = os.environ.get("SWIFT_TENANT")
SWIFT_TEMP_URL_KEY = os.environ.get("SWIFT_TEMP_URL_KEY")
SWIFT_TEMP_URL_DURATION = os.environ.get("SWIFT_TEMP_URL_DURATION", 30)
SWIFT_USE_TEMP_URLS = os.environ.get("SWIFT_USE_TEMP_URLS", 'True') == 'True'
SWIFT_CONTAINER_NAME = os.environ.get("SWIFT_CONTAINER_NAME", 'media')
DEFAULT_FILE_STORAGE = 'web.core.storage.SwiftStorage'
THUMBNAIL_DEFAULT_STORAGE = 'web.core.storage.SwiftStorage'
    # Note: this replaces the env-derived CORS_ORIGIN_WHITELIST defined above.
    CORS_ORIGIN_WHITELIST = [SWIFT_BASE_URL, ]
| tickHub/omslagroute | app/settings/settings.py | settings.py | py | 10,140 | python | en | code | 0 | github-code | 50 |
21351830448 | '''
VectorTracker v1.0 by Jorrit Schulte
Add this to menu.py:
nuke.load('VectorTracker.py')
nuke.menu("Nodes").addCommand('user/VectorTracker', "nuke.createNode('VectorTracker.gizmo')")
'''
def allScriptNodes():
#collect all nodes in the root node graph
nodes = nuke.allNodes()
groups = [node for node in nodes if node.Class() == 'Group' ]
#first collect groups inside groups
for group in groups:
for i in group.nodes():
if i.Class() == 'Group':
groups.append(i)
#then append nodes inside groups to the nodes list
for group in groups:
for node in group.nodes():
nodes.append(node)
#return all nodes on all levels
return nodes
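# The J_VTT_* functions below are intended to be wired to PyScript knobs on the
# VectorTracker gizmo (an assumption based on the nuke.thisNode() calls).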
def J_VTT_Track(first, last, pb = True):
node = nuke.thisNode()
vectors = node.input(1)
#bool to check track direction
forward = last > first
#get number of frames
totalFrames = int(max(first, last) - min(first, last))
#check vector type
if vectors is not None:
layers = list( set([channel.split('.')[0] for channel in vectors.channels()]) )
else:
layers = []
if 'smartvector' in layers:
if forward:
u = 'smartvector.fn1vp0_u'
v = 'smartvector.fn1vp0_v'
else:
u = 'smartvector.fp1vp0_u'
v = 'smartvector.fp1vp0_v'
run = True
elif 'forward' in layers and forward:
u = 'forward.u'
v = 'forward.v'
run = True
elif 'backward' in layers and not forward:
u = 'backward.u'
v = 'backward.v'
run = True
else:
run = False
#check if there are vectors
if run:
#collect existing trackers
trackers = [i.name() for i in node.allKnobs() if i.Class() == 'XY_Knob' and 'tracker' in i.name()]
#remove inactive trackers
trackers = [i for i in trackers if node['enable_' + i].value()]
#lists where tracker position data get stored
xpos = []
ypos = []
xsize = []
ysize = []
for tracker in trackers:
#Append initial positions
trackerVal = node[tracker].valueAt(first)
xpos.append(trackerVal[0])
ypos.append(trackerVal[1])
#append sample area sizes
xsize.append(node['sampleArea_'+tracker].value(0))
ysize.append(node['sampleArea_'+tracker].value(1))
#make tracker knobs keyable
node[tracker].setAnimated()
#set frame list
if forward:
rangeList = range(first, last+1)
else:
rangeList = reversed(range(last, first+1))
        #set up progress window
if pb:
task = nuke.ProgressTask("VectorTracker")
count = 0
#cycle through frames
for frame in rangeList:
#stop tracking if process is cancelled
if pb:
if task.isCancelled():
break
#update process window
if frame != last and pb:
count += 1
task.setMessage("sampling frame " + str(frame) + ' (frame ' + str(count) +' of ' + str(totalFrames) + ')')
task.setProgress(count*100/totalFrames)
#execute for each tracker
for i, tracker in enumerate(trackers):
#get current position from list
curx = xpos[i]
cury = ypos[i]
#set keyframe
node[tracker].animations()[0].setKey(frame, curx)
node[tracker].animations()[1].setKey(frame, cury)
if frame != last:
#sample vectors
x = vectors.sample(u, curx+.5, cury+.5, xsize[i],ysize[i], frame)
y = vectors.sample(v, curx+.5, cury+.5, xsize[i],ysize[i], frame)
#set new position
xpos[i] = curx + x
ypos[i] = cury + y
#jump to frame
nuke.frame(frame)
if pb:
del task
#message when there is no vector data
else:
nuke.message('No vectors found!')
def J_VTT_AddTracker():
node = nuke.thisNode()
trackerCount = int(node['count'].value() + 1)
node['count'].setValue(trackerCount)
name = 'tracker' + str(trackerCount)
#enable knob
enableKnob = nuke.nuke.Boolean_Knob('enable_' + name, '')
enableKnob.setValue(True)
enableKnob.setFlag(nuke.STARTLINE)
enableKnob.setTooltip('Enable this tracker for tracking')
node.addKnob( enableKnob )
#position knob
posKnob = nuke.nuke.XY_Knob(name, name)
posKnob.clearFlag(nuke.STARTLINE)
node.addKnob( posKnob )
#area size knob
areaKnob = nuke.nuke.WH_Knob('sampleArea_' + name, 'area')
areaKnob.setValue(1)
areaKnob.setRange(1,50)
areaKnob.clearFlag(nuke.STARTLINE)
areaKnob.clearFlag(0x00000004) #clear LOG_SLIDER flag
areaKnob.setFlag(0x00000010) #set FORCE_RANGE flag
areaKnob.setTooltip('Size of the area that will get sampled in the vector channels')
node.addKnob( areaKnob )
#remove knob
removeKnob = nuke.PyScript_Knob('remove_' + name, '@ColorMult', "node = nuke.thisNode()\nname = '" + name + "'\n\nknobs = ['', 'sampleArea_', 'enable_', 'remove_' ]\n\nfor i in knobs:\n try:\n node.removeKnob(node.knobs()[i + name])\n except:\n pass")
removeKnob.setTooltip('Remove this tracker')
node.addKnob( removeKnob )
def J_VTT_Export():
node = nuke.thisNode()
#deselect nodes
an = allScriptNodes()
selnodes = [i for i in an if i['selected'].value()]
for snode in selnodes:
snode['selected'].setValue(False)
#select VectorTracker node
node['selected'].setValue(True)
#find group name if node is in group
fnn = node.fullName().split('.')
groupName = 'root.'
for i in range(len(fnn)-1):
groupName += fnn[i] + '.'
#get parent
if groupName != 'root.':
parent = nuke.toNode(groupName[:-1])
else:
parent = nuke.root()
#create node in parent root
with parent:
#create tracker node
trackerNode = nuke.nodes.Tracker4()
trackerNode['xpos'].setValue(node['xpos'].value()+100)
trackerNode['ypos'].setValue(node['ypos'].value()+50)
trackerNode.showControlPanel()
#define tracks container knob
container = trackerNode['tracks']
#collect existing trackers
trackers = [i.name() for i in node.allKnobs() if i.Class() == 'XY_Knob' and 'tracker' in i.name()]
#copy keyframes for each tracker
for i, tracker in enumerate(trackers):
#add tracker to container
trackerNode['add_track'].execute()
#remove auto made keys
for inx in [0,2,3,4,5,9]:
container.removeKey(i*31 + inx)
container.setValue(False, i*31 + 6)
#define some stuff
trackerKnob = node[tracker].value()
tracker = node[tracker]
#set keys for x and y curves
for curve in [0,1]:
animcheck = tracker.animation(0+curve) is not None
#set value if there is no animation
if not animcheck:
container.setValue(trackerKnob[0+curve], i*31 + 2+curve)
#if there are keyframes on original tracker knob
if animcheck:
#set x y knobs animated
container.setAnimated(i*31 + 2+curve)
#find frame numbers of first and last keyframe
keys = tracker.animation(0+curve).keys()
first = int(keys[0].x)
last = int(keys[len(keys)-1].x)
#set value at each frame
framerange = range(first, last+1)
for frame in framerange:
#check if current frame is tracked before setting key
if tracker.isKeyAt(frame):
pos = tracker.getValueAt(frame)
#set key
container.setValueAt(pos[0+curve],frame,i*31+2+curve)
#remove key at frame 0 if not used
if 0 not in framerange:
container.removeKeyAt(0,i*31 + 2+curve)
#reselect nodes
trackerNode['selected'].setValue(False)
for snode in selnodes:
snode['selected'].setValue(True)
#deselect VectorTracker node
if node not in selnodes:
node['selected'].setValue(False)
| CreativeLyons/NukeSurvivalToolkit_publicRelease | NukeSurvivalToolkit/python/NST_VectorTracker.py | NST_VectorTracker.py | py | 8,679 | python | en | code | 181 | github-code | 50 |
14620274998 | from youtrack_reporter.app.settings import load_app_settings
from youtrack_reporter.app.message_queue.state import MQAppState
from youtrack_reporter.app.database.instance import db_init
from youtrack_reporter.app.youtrack import YouTrackAsyncAPI
#Remove, only for testing
from youtrack_reporter.app.database.orm import ORMConfig
from youtrack_reporter.app.database.errors import DatabaseError
import logging
import asyncio
import coloredlogs
import configparser
async def main():
fmt = "%(asctime)s %(levelname)-8s %(name)-15s %(message)s"
logging.basicConfig(format=fmt, level=logging.DEBUG)
logger = logging.getLogger("main")
coloredlogs.install(level='DEBUG', logger=logger)
settings = load_app_settings()
state: MQAppState = MQAppState()
state.settings = settings
logger.info("Configuring database...")
state.db = await db_init(settings)
logger.info("Configuring database... OK")
state.youtrack_api = YouTrackAsyncAPI()
data = configparser.ConfigParser()
data.read('local/example.ini')
keys = [i for i in data['CONFIG']]
vals = [data['CONFIG'][i] for i in data['CONFIG']]
conf = ORMConfig(**(dict(zip(keys, vals))))
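    # Illustrative mapping (hypothetical ini contents): a [CONFIG] line such as
    # `name = demo` becomes the keyword argument ORMConfig(name='demo')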
try:
conf = await state.db.configs.insert(conf)
except DatabaseError as e:
logger.warning(f"This config already exists!")
try:
conf = await state.db.configs.get("0")
except DatabaseError as e:
logger.warning(f"Config with id 0 not found")
try:
prev, new = await state.db.configs.update(conf)
logger.debug(f"prev = {prev.dict()}")
logger.debug(f"new = {new.dict()}")
except DatabaseError as e:
logger.warning(f"update failed!")
try:
conf = await state.db.configs.get("0")
logger.debug(f"conf = {conf.dict()}")
except DatabaseError as e:
logger.warning(f"This config already exists!")
try:
await state.db.issues.insert("1", "0-0")
except DatabaseError as e:
logger.warning(f"This issue already exists!")
logger.info("Closing database...")
await state.db.close()
logger.info("Closing database... OK")
logger.info("Closing aiohttp client...")
await state.youtrack_api.__aexit__()
logger.info("Closing aiohttp client... OK")
asyncio.run(main()) | Bondifuzz/youtrack-reporter | local/tests/db_entities.py | db_entities.py | py | 2,533 | python | en | code | 0 | github-code | 50 |
39455613842 | import mysql.connector
connection = mysql.connector.connect(host='localhost',
database='sys',
user='root',
password='1234')
q = '''create table itemmast
(
ITNO decimal(4),
NAME varchar(20),
QOH decimal(5),
CLASS char(1),
UOM char(4),
ROL decimal(5),
ROQ decimal(5),
RATE decimal(8,2)
);
'''
r = '''
create table ittran(
itno decimal(4) references itemmast(itno),
trantype char(1) check (trantype in ('I','R')),
trandate date,
qty decimal(5)
);
'''
if connection.is_connected():
    cursor = connection.cursor()
    cursor.execute(q)  # create the parent table first
    cursor.execute(r)  # then the child table that references itemmast
    connection.commit()
    # DDL statements return no result set, so there is nothing to fetch
    print("Tables created")
27211207283 | import torch.nn as nn
class resnet50_Decoder(nn.Module):
"""
CenterNet_neck
"""
def __init__(self, inplanes, bn_momentum=0.1):
super(resnet50_Decoder, self).__init__()
self.bn_momentum = bn_momentum
self.inplanes = inplanes
self.deconv_with_bias = False
# 16,16,2048 -> 32,32,256 -> 64,64,128 -> 128,128,64
        # Upsampling is done with ConvTranspose2d;
        # each stage doubles the feature map's width and height.
self.deconv_layers = self._make_deconv_layer(
num_layers=3,
num_filters=[256, 128, 64],
num_kernels=[4, 4, 4],
)
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
layers = []
for i in range(num_layers):
kernel = num_kernels[i]
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=1,
output_padding=0,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=self.bn_momentum))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
return self.deconv_layers(x)
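
# Minimal shape check -- an illustrative sketch, assuming torch is installed
# and this module is run directly:
if __name__ == "__main__":
    import torch

    decoder = resnet50_Decoder(inplanes=2048)
    out = decoder(torch.randn(1, 2048, 16, 16))
    # three stride-2 deconvolutions: 16x16 -> 32x32 -> 64x64 -> 128x128
    print(out.shape)  # expected: torch.Size([1, 64, 128, 128])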
| zranguai/CenterNet-pytorch | models/neck.py | neck.py | py | 1,449 | python | en | code | 0 | github-code | 50 |
1952701848 | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess, pickle
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
from scipy.stats import binom
# Default params
# inp_dir = _config.OUT_PLACE + 'ag5_combin_be_adjust/'
inp_dir = _config.OUT_PLACE + 'ag5a3_adjust_inprofile_batch_effects/'
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
import _data
nts = list('ACGT')
nt_to_idx = {nts[s]: s for s in range(len(nts))}
treat_control_df = pd.read_csv(_config.DATA_DIR + 'treatment_control_design.csv', index_col = 0)
exp_design = pd.read_csv(_config.DATA_DIR + 'master_exp_design.csv', index_col = 0)
exp_nms = exp_design['Name']
editors = exp_design['Editor']
exp_nm_to_editor = {exp_nm: editor for exp_nm, editor in zip(exp_nms, editors)}
profile_design = pd.read_csv(_config.DATA_DIR + 'editor_profiles.csv')
def get_editor_mutations(editor_nm):
row = profile_design[profile_design['Editor name'] == editor_nm].iloc[0]
muts_dict = defaultdict(list)
mut_cols = [col for col in profile_design if col != 'Editor name']
for col in mut_cols:
if type(row[col]) != str:
continue
[ref_nt, obs_nt] = col.replace(' to ', ' ').split()
[start_pos, end_pos] = row[col].replace('(', '').replace(')', '').replace(',', '').split()
start_pos, end_pos = int(start_pos), int(end_pos)
for pos in range(start_pos, end_pos + 1):
mut_col_nm = '%s%s' % (ref_nt, pos)
muts_dict[mut_col_nm].append(obs_nt)
return muts_dict
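# Illustrative expansion (hypothetical profile row): a 'C to T' column holding
# '(4, 8)' yields muts_dict entries {'C4': ['T'], 'C5': ['T'], ..., 'C8': ['T']}.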
def profile_subset(exp_nm, start_idx, end_idx):
editor_nm = exp_nm_to_editor[exp_nm]
muts_dict = get_editor_mutations(editor_nm)
editor_mut_set = set(muts_dict.keys())
'''
Identify batch effects from position-wise analysis and remove them from combinatorial df
'''
lib_design, seq_col = _data.get_lib_design(exp_nm)
lib_design = lib_design.iloc[start_idx : end_idx + 1]
nms = lib_design['Name (unique)']
data = pickle.load(open(inp_dir + '%s.pkl' % (exp_nm), 'rb'))
new_data = dict()
nms_shared = [nm for nm in nms if nm in data]
timer = util.Timer(total = len(nms_shared))
for target_nm in nms_shared:
d = data[target_nm]
d = d[d['Count'] > 0]
if 'index' in d.columns:
d = d[[col for col in d.columns if col != 'index']]
# Subset to mutation columns
_mut_cols = [col for col in d.columns if col in editor_mut_set]
d = d[_mut_cols + ['Count']]
for idx, row in d.iterrows():
for col in _mut_cols:
ref_nt = col[0]
obs_nt = row[col]
if obs_nt != ref_nt and obs_nt not in muts_dict[col]:
row[col] = '.'
# Eliminate rows that only contain .
crit = d.apply(
lambda row: sum([row[c] == '.' for c in row.index]) != len(row.index) - 1,
axis = 'columns'
)
d = d[crit]
d = d.reset_index(drop = True)
new_data[target_nm] = d
timer.update()
with open(out_dir + '%s_%s_%s.pkl' % (exp_nm, start_idx, end_idx), 'wb') as f:
pickle.dump(new_data, f)
return
##
# qsub
##
def gen_qsubs():
# Generate qsub shell scripts and commands for easy parallelization
print('Generating qsub scripts...')
qsubs_dir = _config.QSUBS_DIR + NAME + '/'
util.ensure_dir_exists(qsubs_dir)
qsub_commands = []
# Generate qsubs only for unfinished jobs
num_scripts = 0
for idx, row in treat_control_df.iterrows():
exp_nm = row['Treatment']
lib_nm = _data.get_lib_nm(exp_nm)
if 'Cas9' in exp_nm:
continue
if lib_nm == 'LibA':
num_targets = 2000
num_targets_per_split = 200
elif lib_nm == 'CtoGA':
num_targets = 4000
num_targets_per_split = 500
else:
num_targets = 12000
num_targets_per_split = 2000
try:
mb_file_size = os.path.getsize(inp_dir + '%s.pkl' % (exp_nm)) / 1e6
except FileNotFoundError:
mb_file_size = 0
ram_gb = 2
if mb_file_size > 200:
ram_gb = 4
if mb_file_size > 400:
ram_gb = 8
if mb_file_size > 1000:
ram_gb = 16
for start_idx in range(0, num_targets, num_targets_per_split):
end_idx = start_idx + num_targets_per_split - 1
out_pkl_fn = out_dir + '%s_%s_%s.pkl' % (exp_nm, start_idx, end_idx)
if os.path.exists(out_pkl_fn):
if os.path.getsize(out_pkl_fn) > 0:
continue
command = 'python %s.py %s %s %s' % (NAME, exp_nm, start_idx, end_idx)
script_id = NAME.split('_')[0]
# Write shell scripts
sh_fn = qsubs_dir + 'q_%s_%s_%s.sh' % (script_id, exp_nm, start_idx)
with open(sh_fn, 'w') as f:
f.write('#!/bin/bash\n%s\n' % (command))
num_scripts += 1
# Write qsub commands
qsub_commands.append('qsub -V -P regevlab -l h_rt=16:00:00,h_vmem=%sG -wd %s %s &' % (ram_gb, _config.SRC_DIR, sh_fn))
# Save commands
commands_fn = qsubs_dir + '_commands.sh'
with open(commands_fn, 'w') as f:
f.write('\n'.join(qsub_commands))
subprocess.check_output('chmod +x %s' % (commands_fn), shell = True)
print('Wrote %s shell scripts to %s' % (num_scripts, qsubs_dir))
return
##
# Main
##
@util.time_dec
def main(exp_nm = '', start_idx = '', end_idx = ''):
print(NAME)
# Function calls
profile_subset(exp_nm, int(start_idx), int(end_idx))
return
if __name__ == '__main__':
if len(sys.argv) > 1:
main(exp_nm = sys.argv[1], start_idx = sys.argv[2], end_idx = sys.argv[3])
else:
gen_qsubs()
| maxwshen/lib-analysis | ag5a4_profile_subset.py | ag5a4_profile_subset.py | py | 5,731 | python | en | code | 2 | github-code | 50 |
22477861761 | import pygame
from . import settings
from .vector import Vector
import math
class Ray:
def __init__(self, x, y):
self.position = Vector(x, y)
self.direction = Vector(1, 0)
def look_at(self, x, y):
self.direction.x = x - self.position.x
self.direction.y = y - self.position.y
self.direction.normalize()
def set_angle(self, angle):
self.direction.x = math.cos(angle)
self.direction.y = math.sin(angle)
def cast(self, boundary):
x1 = boundary.a.x
y1 = boundary.a.y
x2 = boundary.b.x
y2 = boundary.b.y
x3 = self.position.x
y3 = self.position.y
x4 = self.position.x + self.direction.x
y4 = self.position.y + self.direction.y
denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denom == 0:
return None
t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denom
u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / denom
if t > 0 and t < 1 and u > 0:
point = Vector(x1 + t * (x2 - x1), y1 + t * (y2 - y1))
return point
else:
return None
def draw(self, surface, point=None):
if point:
pygame.draw.line(
surface,
settings.RAY_COLOR,
(self.position.x, self.position.y),
(point.x, point.y),
)
else:
screen_vec = Vector(settings.DISPLAY[0], settings.DISPLAY[1])
pygame.draw.line(
surface,
settings.RAY_COLOR,
(self.position.x, self.position.y),
(
self.position.x + self.direction.x * len(screen_vec),
self.position.y + self.direction.y * len(screen_vec),
),
)
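
# Illustrative usage -- a sketch assuming a `boundary` object that exposes its
# two endpoints as Vectors named `a` and `b` (the attributes cast() reads):
#   ray = Ray(100, 100)
#   ray.look_at(200, 150)
#   hit = ray.cast(boundary)  # Vector at the intersection point, or None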
| SnkSynthesis/pyraycaster | pyraycaster/ray.py | ray.py | py | 1,862 | python | en | code | 1 | github-code | 50 |
from flask import Blueprint, request, current_app
from flask_mail import Mail, Message

email = Blueprint('email', __name__)

@email.route('/email', methods=['POST'])
def send_email():
    to_you = request.form['recipient']
    from_me = request.form['sender']
    the_message = request.form['message']
    # recipients must be a list, even for a single address
    msg = Message(the_message, sender=from_me, recipients=[to_you])
    msg.body = the_message
    msg.html = the_message
    # send through a Mail instance bound to the current app;
    # Mail is a class, so it cannot send on its own
    Mail(current_app).send(msg)
    return 'Email sent!'
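
# Illustrative request (assumes the blueprint is registered on an app with
# Flask-Mail configured; host and port are hypothetical):
#   curl -X POST -d "recipient=a@b.c" -d "sender=me@b.c" -d "message=hi" \
#        http://localhost:5000/email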
13829213288 | from django.http import JsonResponse
from django.shortcuts import render, reverse, redirect
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User
from .models import (
PortfolioUser, UserSkill, PortfolioUserSocialMediaLink, Review, NewClient, PortfolioUserAddress, ClientProject
)
from .forms import ContactUs
from portfolio_cum_blog import settings
class GlobalResponse:
"""
Returns global response object
"""
def __init__(self):
self.response_data = {
'response': '',
'responseMessage': '',
'responseMessageInfo': ''
}
def get_response_obj(self):
return self.response_data
class GlobalUser:
"""
Returns global user object
"""
def __init__(self, user_model):
self.pk = settings.DEFAULT_USER
self.user_model = user_model
self.user_obj = None
def get_user_obj(self):
self.user_obj = self.user_model.objects.get(pk=self.pk)
return self.user_obj
def get_global_response(request):
"""
Returns global response object
"""
global_response_obj = GlobalResponse()
response_data = global_response_obj.get_response_obj()
return response_data
def get_global_user(request):
"""
Returns global user object
"""
global_user_obj = GlobalUser(User)
obj_user = global_user_obj.get_user_obj()
return obj_user
def get_user_skills(user_id):
"""
Returns all the skills mapped to a user
"""
obj_user_skills = UserSkill.objects.filter(user__id=user_id)
skills = {}
for each_item in obj_user_skills:
rating = int(each_item.get_rating_out_of_five_display())
if rating == 5:
progress_bar_color = 1
elif rating == 4:
progress_bar_color = 7
elif rating == 3:
progress_bar_color = 4
elif rating == 2:
progress_bar_color = 3
else:
progress_bar_color = 2
if each_item.skill_category.get_category_name_display() in skills:
skills[each_item.skill_category.get_category_name_display()].append(
{
each_item.skill.category_item_name: {
'id': each_item.skill.id,
'summary': each_item.summary,
'description': each_item.description,
'logo': each_item.skill.category_item_image.url,
'total_experience_in_year': each_item.get_total_experience_in_year_display(),
'rating_out_of_five': rating * 20,
'progress_bar_color': progress_bar_color
}
}
)
else:
skills[each_item.skill_category.get_category_name_display()] = [
{
each_item.skill.category_item_name: {
'id': each_item.skill.id,
'summary': each_item.summary,
'description': each_item.description,
'logo': each_item.skill.category_item_image.url,
'total_experience_in_year': each_item.get_total_experience_in_year_display(),
'rating_out_of_five': rating * 20,
'progress_bar_color': progress_bar_color
}
}
]
return skills
def get_customer_reviews():
obj_customer_reviews = Review.objects.all()
customer_reviews = []
for each_review in obj_customer_reviews:
customer_review = {
'reviewer': each_review.reviewer_name,
'rating': [i for i in range(int(each_review.get_reviewer_rating_display()))],
'description': each_review.review_description
}
customer_reviews.append(customer_review)
return customer_reviews
def get_social_media_links(portfolio_user_id):
try:
obj_social_media_links = PortfolioUserSocialMediaLink.objects.get(user_id=portfolio_user_id)
except Exception as err:
obj_social_media_links = None
return obj_social_media_links
def embed_social_media_links_to_context(context, obj_social_media_links):
if obj_social_media_links is not None:
context['github'] = obj_social_media_links.github
context['linkedin'] = obj_social_media_links.linkedin
context['tweeter'] = obj_social_media_links.tweeter
context['instagram'] = obj_social_media_links.instagram
return context
def index(request):
template = 'portfolio/index.html'
try:
if request.user.is_authenticated:
obj_user = User.objects.get(pk=request.user.id)
obj_portfolio_user = PortfolioUser.objects.get(pk=obj_user.user_portfolio.id)
else:
obj_user = get_global_user(request)
obj_portfolio_user = PortfolioUser.objects.get(pk=obj_user.user_portfolio.id)
customer_reviews = get_customer_reviews()
skills = get_user_skills(obj_user.id)
heading = obj_portfolio_user.heading
headline = obj_portfolio_user.headline
about = obj_portfolio_user.about
profile_photo = obj_portfolio_user.profile_photo.url
obj_social_media_links = get_social_media_links(obj_portfolio_user.id)
context = {
'page_title': 'Home',
'heading': heading,
'headline': headline,
'profile_photo': profile_photo,
'about': about,
'skills': skills,
'customer_reviews': customer_reviews
}
context = embed_social_media_links_to_context(context, obj_social_media_links)
except Exception as err:
context = {
'page_title': 'Home',
'exception': err.__str__()
}
return render(request, template, context)
def user_signup(request):
"""
Registers the user with the system
"""
response_data = get_global_response(request)
try:
if request.method == 'POST':
first_name = request.POST['firstName']
last_name = request.POST['lastName']
user_email = request.POST['userMail']
user_mobile = request.POST['userMobile']
user_password = request.POST['password']
if first_name and last_name and user_email and user_mobile and user_password:
user = User.objects.create_user(
username=user_email,
email=user_email,
password=user_password
)
user.first_name = first_name
user.last_name = last_name
user.save()
obj_portfolio_user = PortfolioUser(
user=user,
mobile=user_mobile,
)
obj_portfolio_user.save()
user = authenticate(username=user_email, password=user_password)
if user is not None:
login(request, user)
response_data['response'] = 'success'
response_data['responseMessage'] = 'You have been registered successfully'
response_data['responseMessageInfo'] = 'Redirecting to the dashboard'
else:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Registration Failed'
response_data['responseMessageInfo'] = f'Invalid HTTP method {request.method}'
else:
response_data['response'] = 'warning'
response_data['responseMessage'] = 'Registration Failed'
response_data['responseMessageInfo'] = 'Invalid input/please check if each and every mandatory details' \
'were provided or not!'
except Exception as err:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Registration Failed'
response_data['responseMessageInfo'] = str(err)
return JsonResponse(response_data)
def user_login(request):
"""
Lets the user login to the application
"""
response_data = get_global_response(request)
try:
if request.method == 'POST':
user_email = request.POST['userMailSignIn']
user_password = request.POST['passwordSignIn']
if user_email and user_password:
user = authenticate(username=user_email, password=user_password)
if user is not None:
login(request, user)
response_data['response'] = 'success'
response_data['responseMessage'] = 'Login successful'
response_data['responseMessageInfo'] = 'Redirecting to the dashboard'
else:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Login failed'
response_data['responseMessageInfo'] = 'Invalid user'
else:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Login failed'
response_data['responseMessageInfo'] = f'Invalid HTTP method {request.method}'
except Exception as err:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Login failed'
response_data['responseMessageInfo'] = str(err)
return JsonResponse(response_data)
def user_logout(request):
"""
Lets the logged-in user logout of the application
"""
logout(request)
return redirect(reverse('index'))
def tech_details(request, tech_id):
"""
Loads the technology details page
"""
template = 'portfolio/tech_details.html'
context = {
"page_title": "Technology details",
"banner_heading_pref": "Technology",
'page_details': {}
}
try:
if request.method == "GET":
if request.user.is_authenticated:
user_id = request.user.id
obj_user = User.objects.get(pk=user_id)
else:
obj_user = get_global_user(request)
obj_tech_details = UserSkill.objects.get(user__id=obj_user.id, skill__id=tech_id)
obj_social_media_links = get_social_media_links(obj_user.user_portfolio.id)
context = embed_social_media_links_to_context(context, obj_social_media_links)
breadcrumb_mid_item_cat_list = obj_tech_details.skill_category.get_category_name_display().split()
if len(breadcrumb_mid_item_cat_list) > 1:
context['page_details']['heading'] = f'{obj_tech_details.skill.category_item_name} ' \
f'{breadcrumb_mid_item_cat_list[0]} ' \
f'{breadcrumb_mid_item_cat_list[1]}'
else:
context['page_details']['heading'] = f'{obj_tech_details.skill.category_item_name} ' \
f'{obj_tech_details.skill_category.get_category_name_display()}'
context['page_details']['breadcrumb_mid_item'] = obj_tech_details.skill.category_item_name
context['page_details']['summary'] = obj_tech_details.summary
context['page_details']['description'] = obj_tech_details.description
context['page_details']['rating'] = [i for i in range(int(obj_tech_details.rating_out_of_five))]
context['page_details']['total_experience'] = obj_tech_details.get_total_experience_in_year_display()
context['page_details']['last_used'] = obj_tech_details.last_used
context['page_details']['image'] = obj_tech_details.skill.category_item_image.url
else:
context['error'] = f'Invalid request method: {request.method}!'
except Exception as err:
context['page_details'] = None
context['exception'] = err.__str__()
return render(request, template, context)
def new_client_feed(request):
"""
Stores the email address of the client who shown interest by submitting their email address
"""
response_data = get_global_response(request)
try:
if request.method == 'POST':
client_email = request.POST['new_client_email']
obj_new_client_email = NewClient(client_email=client_email)
obj_new_client_email.save()
response_data['response'] = 'success'
response_data['responseMessage'] = 'We got that'
response_data['responseMessageInfo'] = 'Thanks for showing interest in availing our services'
else:
response_data['response'] = 'error'
response_data['responseMessage'] = 'Something went wrong'
response_data['responseMessageInfo'] = 'Invalid HTTP request'
    except Exception as err:
        response_data['response'] = 'error'
        response_data['responseMessage'] = err.__str__()
        response_data['responseMessageInfo'] = 'Please try after sometime'
return JsonResponse(response_data)
def contact_us(request):
"""
Loads contact us page
"""
template = 'portfolio/contact_us.html'
context = {
'page_title': 'Contact us'
}
try:
if request.method == 'POST':
form = ContactUs(request.POST, request.FILES)
if form.is_valid():
form.save()
context['response'] = 'success'
else:
context['response'] = 'error'
else:
form = ContactUs()
if request.user.is_authenticated:
obj_user = User.objects.get(pk=request.user.id)
else:
obj_user = get_global_user(request)
obj_user_address = PortfolioUserAddress.objects.get(user_id=obj_user.user_portfolio.id)
obj_social_media_links = get_social_media_links(obj_user.user_portfolio.id)
context = embed_social_media_links_to_context(context, obj_social_media_links)
context['country'] = obj_user_address.country
context['state'] = obj_user_address.state
context['city'] = obj_user_address.city
context['mobile'] = obj_user.user_portfolio.mobile
context['email'] = obj_user.email
context['work_days'] = obj_user.user_portfolio.get_work_days_display()
context['work_shift'] = obj_user.user_portfolio.get_work_shift_display()
context['form'] = form
except Exception as err:
context['exception'] = err.__str__()
return render(request, template, context)
def about_me(request):
"""
Renders the about page
"""
template = 'portfolio/about-me.html'
context = {
"page_title": "About me",
}
try:
if request.method == "GET":
if request.user.is_authenticated:
user_id = request.user.id
obj_user = User.objects.get(pk=user_id)
else:
obj_user = get_global_user(request)
context['about'] = obj_user.user_portfolio.about
context['profile_photo'] = obj_user.user_portfolio.profile_photo.url
obj_customer_reviews = Review.objects.all()
customer_reviews = []
for each_review in obj_customer_reviews:
customer_review = {
'reviewer': each_review.reviewer_name,
'rating': [i for i in range(int(each_review.get_reviewer_rating_display()))],
'description': each_review.review_description
}
customer_reviews.append(customer_review)
obj_social_media_links = get_social_media_links(obj_user.user_portfolio.id)
context = embed_social_media_links_to_context(context, obj_social_media_links)
context['customer_reviews'] = customer_reviews
else:
context['error'] = 'Invalid HTTP method detected'
except Exception as err:
context['exception'] = err.__str__()
return render(request, template, context)
def user_portfolio(request):
"""
Loads the portfolio page
"""
template = 'portfolio/portfolio.html'
context = {
'page_title': 'User Portfolio'
}
try:
if request.method == 'GET':
if request.user.is_authenticated:
obj_user = User.objects.get(pk=request.user.id)
else:
obj_user = get_global_user(request)
obj_social_media_links = get_social_media_links(obj_user.user_portfolio.id)
skills = get_user_skills(obj_user.id)
context['heading'] = obj_user.user_portfolio.heading
context['headline'] = obj_user.user_portfolio.headline
context['skills'] = skills
context = embed_social_media_links_to_context(context, obj_social_media_links)
else:
context['error'] = 'Invalid HTTP method detected'
except Exception as err:
context['exception'] = err.__str__()
return render(request, template, context)
def user_profile_details(request, user_id):
"""
Loads user profile details page
"""
template = 'portfolio/profile_details.html'
context = {
'page_title': 'Profile details'
}
try:
if request.method == 'GET':
obj_portfolio_user = PortfolioUser.objects.get(user_id=user_id)
obj_social_media_links = get_social_media_links(obj_portfolio_user.id)
skills = get_user_skills(user_id)
obj_client_projects = ClientProject.objects.filter(user_id=obj_portfolio_user.id)
client_project_list = []
for each_client_project in obj_client_projects:
tools_and_tech_list = []
obj_tools_and_tech = each_client_project.tools_and_technologies_used.all()
for each_tool_and_tech in obj_tools_and_tech:
tools_and_tech_list.append(each_tool_and_tech.skill.category_item_name)
client_project_list.append(
{
'project_title': each_client_project.project_title,
'client_name': each_client_project.client_name,
'project_url': each_client_project.project_url,
'tools_and_technologies_used': tools_and_tech_list,
'project_description': each_client_project.project_description
}
)
context['client_project_list'] = client_project_list
context['role'] = obj_portfolio_user.role
context['profile_short_description'] = obj_portfolio_user.profile_short_description
context['mobile'] = obj_portfolio_user.mobile
context['email'] = obj_portfolio_user.user.email
context['about'] = obj_portfolio_user.about
context['profile_photo'] = obj_portfolio_user.profile_photo.url
context['user_first_name'] = obj_portfolio_user.user.first_name
context['skills'] = skills
context = embed_social_media_links_to_context(context, obj_social_media_links)
else:
context['error'] = 'Invalid HTTP method detected'
except Exception as err:
context['exception'] = err.__str__()
return render(request, template, context)
| gautamw3/portfolio_cum_blog | portfolio/views.py | views.py | py | 19,382 | python | en | code | 0 | github-code | 50 |
40134368210 | import FWCore.ParameterSet.Config as cms
process = cms.Process("QcdHighPtDQM")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.dqmSaver.workflow = cms.untracked.string('/Physics/QCDPhysics/Jets')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_1_0_pre4/RelValQCD_Pt_50_80/GEN-SIM-RECO/STARTUP_30X_v1/0002/E827B5EF-FE16-DE11-8C9F-00304867C136.root',
'/store/relval/CMSSW_3_1_0_pre4/RelValQCD_Pt_50_80/GEN-SIM-RECO/STARTUP_30X_v1/0002/9C49DDC4-5C18-DE11-96DB-001A92811736.root'
)
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
process.QcdHighPtDQM = DQMEDAnalyzer('QcdHighPtDQM',
jetTag = cms.untracked.InputTag("sisCone5CaloJets"),
metTag1 = cms.untracked.InputTag("met"),
metTag2 = cms.untracked.InputTag("metHO"),
metTag3 = cms.untracked.InputTag("metNoHF"),
metTag4 = cms.untracked.InputTag("metNoHFHO")
)
process.p = cms.Path(process.QcdHighPtDQM+process.dqmSaver)
| cms-sw/cmssw | DQM/Physics/test/qcdHighPtDQM_cfg.py | qcdHighPtDQM_cfg.py | py | 1,378 | python | en | code | 985 | github-code | 50 |
43404396725 | import cv2 as cv
import numpy as np
start = '7/blue.png'
end = '7/red.png'
img_start = cv.imread(start)
img_end = cv.imread(end)
ran = 10
for x in range(1,ran):
scale = x / float(ran)
    # linear blend between the two frames: start + (end - start) * scale
    cha = (img_end.astype(np.int32) - img_start.astype(np.int32))
    img_inter = (img_start + cha * scale).astype(np.uint8)
path = '{}/img_{}_{}_inter_{}.png'.format(start.split('/')[0],start.split('/')[-1].split('.')[0],end.split('/')[-1].split('.')[0],scale)
cv.imwrite(path, img_inter)
print('img_inter is saved to', path)
| youyuge34/PI-REC | scripts/color_inter.py | color_inter.py | py | 490 | python | en | code | 2,006 | github-code | 50 |
# Program that uses calendar date functions based on the CPU's current settings
# When executed, the code prints the day, month, and year to the console
from datetime import date
def trabalhando_com_data():
data_atual = date.today()
data_atual_str = data_atual.strftime('%d/%m/%Y')
print(type(data_atual))
print(data_atual_str)
print(type(data_atual_str))
if __name__ == '__main__':
    #working with date()
trabalhando_com_data()
| riangomesz/Pyhton-exercises-3 | funcao_data.py | funcao_data.py | py | 498 | python | pt | code | 2 | github-code | 50 |
26378079390 | #https://leetcode.com/problems/partition-array-into-three-parts-with-equal-sum/submissions/
class Solution:
def canThreePartsEqualSum(self, arr: List[int]) -> bool:
if sum(arr)%3!=0:
return False
avg=int(sum(arr)/3)
part, cnt = 0,0
for num in arr:
part+=num
if part==avg:
part=0
cnt+=1
return cnt>=3
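
# Worked example: arr = [3,3,6,5,-2,2,5,1,-9,4] sums to 18, so each part must
# sum to 6; the running sum reaches 6 three times, so the method returns True.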
| 724thomas/CodingChallenge_Python | LeetCode/1013PartitionArrayIntoThreePartsWithEqualSum.py | 1013PartitionArrayIntoThreePartsWithEqualSum.py | py | 421 | python | en | code | 0 | github-code | 50 |
29780157398 | from PyQt5 import QtWidgets
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QApplication, QTableView, QMainWindow
import sys
from TelefonRehberi import Ui_MainWindow
class TelApp(QtWidgets.QMainWindow):
def __init__(self):
super(TelApp, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.btnEkle.clicked.connect(self.ekle)
    def ekle(self):
        sender = self.sender().text()
        if sender == "Ekle":
            name = self.ui.txtAd.text()
            surname = self.ui.txtSoyad.text()
            tel_no = self.ui.txtTelNo.text()
            self.ui.tableView = QTableView(self)
            # keep the model in its own variable; assigning it to
            # tableView.model would shadow QTableView.model()
            model = QStandardItemModel()
            rows = [
                [name, surname, tel_no]
            ]
            for row in rows:
                model.appendRow(
                    [QStandardItem(str(val)) for val in row])
            self.ui.tableView.setModel(model)
            self.setCentralWidget(self.ui.tableView)
def AppShow():
app = QtWidgets.QApplication(sys.argv)
win = TelApp()
win.show()
sys.exit(app.exec_())
AppShow() | celilcavus/CelilCavus.PYQT5.TelefonRehberi | Rehber.py | Rehber.py | py | 1,258 | python | en | code | 0 | github-code | 50 |
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Text to Speech credentials (Speech to Text is a separate service with its
# own key/url pair and cannot be used here)
url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com/instances/f817b9d7-38b4-491a-86b7-1a0ea5888912'
apikey = 'R4KLNIlE97YzWbcdoMRQRJd_yCZWkVndMOB7Vbm0eiMS'

authenticator = IAMAuthenticator(apikey)
tts = TextToSpeechV1(authenticator=authenticator)
tts.set_service_url(url)

# synthesize() generates audio from text; recognize() is a SpeechToTextV1 method
with open('voice.mp3', 'wb') as audio_file:
    res = tts.synthesize('Hello world', accept='audio/mp3', voice='en-US_AllisonV3Voice').get_result()
    audio_file.write(res.content)

# read the file as one string -- synthesize() expects text, not a list of lines
with open('Hello.txt', 'r') as f:
    text = f.read()

with open('./voice.mp3', 'wb') as audio_file:
    res = tts.synthesize(text, accept='audio/mp3', voice='en-US_AllisonV3Voice').get_result()
    audio_file.write(res.content)
| munira4x/TextTospeech | convert.py | convert.py | py | 1,019 | python | en | code | 0 | github-code | 50 |
19930582404 | from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.urls import reverse
from django.shortcuts import get_object_or_404, render, redirect
from .models import food, Meal, Entry
from django.db.models import Q
from accounts.views import keep_session_active
import requests
import datetime
# Create your views here.
def check_meal_owner(request, meal_id):
owner = False
for i in request.session.get('meal_set'):
if i == int(meal_id):
owner = True
return owner
def index(request):
food_list = food.objects.order_by('calories')[:15]
output = ', '.join([q.name for q in food_list])
keep_session_active(request)
return render(request, 'food/index.html', {'food_list': food_list})
# return render(request, 'food/index.html')
def detail(request, name):
queryset = food.objects.filter(name__startswith=name)
this_food = get_object_or_404(queryset)
meal_set = request.session.get('meal_set', [])
for i in range(len(meal_set)):
meal_set[i] = Meal.objects.get(id=meal_set[i])
keep_session_active(request)
return render(request, 'food/details.html', {'food': this_food, 'meal_set': meal_set})
def compare(request):
# try:
# this_food1 = food.objects.get(name=request.GET.get(
# 'q'))
# this_food2 = food.objects.get(name=request.GET.get(
# 'q2'))
# except food.DoesNotExist:
# this_food1 = None
# this_food2 = None
if request.GET.get('q'):
food1x = request.GET.get('q')
list_food1 = searchFDA(food1x)
food1 = list_food1[0]
# print(food1)
else:
food1 = None
if request.GET.get('q2'):
food2x = request.GET.get('q2')
list_food2 = searchFDA(food2x)
food2 = list_food2[0]
else:
food2 = None
context = {'food1': food1, 'food2': food2}
return render(request, 'food/compare.html', context)
def compare_search(request):
keep_session_active(request)
return render(request, 'food/compare_search.html')
def search(request):
query = request.GET.get('q')
object_list = food.objects.filter(
Q(name__icontains=query) | Q(ingredients__icontains=query)
)
keep_session_active(request)
return render(request, 'food/search.html', {'foods': object_list})
def update_meal(request, meal_id):
if check_meal_owner(request, meal_id): #make sure the person making changes is in the session that owns the meal
if bool(request.POST.get('delete', False)): #Meal was designated to be deleted
Meal.objects.filter(id=meal_id).delete()
request.session['meal_set'].remove(meal_id)
request.session.modified=True
return HttpResponseRedirect(reverse('index'))
meal = Meal.objects.get(id=meal_id)
meal.name = request.POST.get('Mealname')
try:
meal.date_eaten = datetime.datetime.strptime(str(request.POST.get('Eatdate'))[:10], '%Y-%m-%d')
except ValueError:
meal.date_eaten = datetime.datetime.now()
for entry in meal.entry_set.all():
quantity = int(request.POST.get("e{}".format(entry.id)))
#print(entry)
#print(entry.id)
#print(quantity)
if quantity == 0:
Entry.objects.filter(id=entry.id).delete()
else:
entry.quantity = quantity
entry.save()
meal.save()
keep_session_active(request)
return HttpResponseRedirect(reverse('view_meal', args=(meal_id,)))
else:
#Yell at them for being hackers lol
print("blocked {}".format(request.session.get('meal_set')))
keep_session_active(request)
        raise Http404()  # Http404 is an exception, so it must be raised
def add_food_to_meal(request, food_id):
if request.POST.get("yes") == None:
keep_session_active(request)
return HttpResponseRedirect(reverse('detail', args=(food.objects.get(id=food_id).name,)))
meal_id = int(request.POST.get("yes"))
meal_set = request.session.get('meal_set', [])
f = food.objects.get(id=food_id)
if len(meal_set) == 0 or meal_id == 0:
this_meal = Meal.create("blank_meal")
this_meal.save()
#must create new entry for blank meal to be populated
entry = Entry.create(f, this_meal)
entry.save()
if len(meal_set) == 0:
request.session['meal_set'] = [this_meal.id]
else:
request.session['meal_set'].append(this_meal.id)
else:
if check_meal_owner(request, meal_id):
this_meal = Meal.objects.get(id=meal_id)
            # filter() returns a queryset, so take the single matching row (or None)
            entry = this_meal.entry_set.filter(food=f).first()
            if entry is None: #this food is not yet a part of the meal
                entry = Entry.create(f, this_meal)
                entry.save()
            else: #this food already has an entry in the meal
                entry.quantity += 1
                entry.save()
else:
#yell at them, for being hackers lol
print("blocked {}".format(request.session.get('meal_set')))
keep_session_active(request)
            raise Http404()  # Http404 is an exception, so it must be raised
request.session.modified = True
context = {'meal': this_meal}
keep_session_active(request)
return HttpResponseRedirect(reverse('view_meal', args=(this_meal.id,)))
def view_meal(request, meal_id):
keep_session_active(request)
return render(request, 'food/meal.html', {'meal': Meal.objects.get(id=meal_id)})
def search_fda(request):
keep_session_active(request)
return render(request, 'food/search_fda.html')
def search_fda_list(request):
# get request to search the fda db here
# print top 15 items (maybe do display pages?)
# show name and couple facts
# click on it will bring up new full details page
# have to copy over an html view
query = request.GET.get('q')
foodz = searchFDA(query)
#print(foodz)
context = {
'foodz': foodz,
'query': query
}
keep_session_active(request)
return render(request, 'food/search_fda_list.html', context)
def searchFDA(query):
url = 'https://api.nal.usda.gov/fdc/v1/foods/search?api_key=rpIgP2LfGafdahKmgf3PqLAbtj7fpdQZnzPMtsQg&query=' + query
response = requests.get(url)
foodData = response.json()
#print(foodData['foods'][0]['lowercaseDescription'])
foodStuffs = {}
# return first 15 best matching results
# abbreviated results
def isitinFoods(index, attribute):
if attribute in foodData['foods'][index]:
return foodData['foods'][index][attribute]
else:
return "None"
def isitinNutrients(index, jndex, attribute):
if attribute in foodData['foods'][index]['foodNutrients'][jndex]:
return foodData['foods'][index]['foodNutrients'][jndex][attribute]
else:
return "None"
def getNutrients(foodItem):
nutrients = {}
for i in range(len(foodData['foods'][foodItem]['foodNutrients'])):
# add nutrient properties to list of singles
nutrients.update(
{
i:
{
"nutrientName": isitinNutrients(foodItem, i, "nutrientName"),
"nutrientNumber": isitinNutrients(foodItem, i, "nutrientNumber"),
"units": isitinNutrients(foodItem, i, "unitName"),
"derivedFrom": isitinNutrients(foodItem, i, "derivationDescription"),
"value": isitinNutrients(foodItem, i, "value")
}
}
)
return nutrients
for i in range(15):
#print(foodData['foods'][i]['fdcId'])
foodStuffs.update(
{
i:
{
"fdcId": isitinFoods(i, 'fdcId'),
"name": isitinFoods(i, 'lowercaseDescription'),
"brandOwner": isitinFoods(i, "brandOwner"),
"ingredients": isitinFoods(i, "ingredients"),
#for every nutrient in foodItem
"nutrients": getNutrients(i)
}
}
)
    # return the abbreviated, cleaned-up list
return foodStuffs
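# Illustrative result shape (abbreviated; actual values come from the FDC API):
# {0: {'fdcId': ..., 'name': '...', 'brandOwner': '...', 'ingredients': '...',
#      'nutrients': {0: {'nutrientName': '...', 'value': ...}, ...}}, ...}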
# use specific fda id
def search_fda_details(request):
# returns foodz[i]
query = request.GET.get('q')
foodz = searchFDAbyID(query)
#print(foodz)
context = {
'foodz': foodz,
'query': query
}
keep_session_active(request)
return render(request, 'food/search_fda_details.html', context)
def searchFDAbyID(query):
# note food/ not foods/
url = 'https://api.nal.usda.gov/fdc/v1/food/' + query + '?api_key=rpIgP2LfGafdahKmgf3PqLAbtj7fpdQZnzPMtsQg'
response = requests.get(url)
foodData = response.json()
foodStuffs = {}
# double stack down for possible other listings
def isitinFoods(attribute):
if attribute in foodData:
return foodData[attribute]
else:
return "None"
def isitinNutrients(index, attribute):
if attribute in foodData['foodNutrients'][index]['nutrient']:
return foodData['foodNutrients'][index]['nutrient'][attribute]
else:
return "None"
def isitinDerived(index, attribute):
if "foodNutrientDerivation" in foodData['foodNutrients'][index]:
if attribute in foodData['foodNutrients'][index]["foodNutrientDerivation"]:
return foodData['foodNutrients'][index]["foodNutrientDerivation"][attribute]
else:
return "None"
else:
return "None"
def isitAmount(index, attribute):
if attribute in foodData['foodNutrients'][index]:
return foodData['foodNutrients'][index][attribute]
else:
return "None"
def getNutrients():
nutrients = {}
for i in range(len(foodData["foodNutrients"])):
# add nutrient properties to list of singles
nutrients.update(
{
i:
{
"nutrientName": isitinNutrients(i, "name"),
"nutrientNumber": isitinNutrients(i, "number"),
"amount": isitAmount(i, "amount"),
"units": isitinNutrients(i, "unitName"),
"derivedFrom": isitinDerived(i, "description")
}
}
)
return nutrients
foodStuffs.update(
{
'0' :
{
"fdcId": isitinFoods('fdcId'),
"name": str.lower(isitinFoods('description')),
"brandOwner": isitinFoods("brandOwner"),
"ingredients": isitinFoods("ingredients"),
#for every nutrient in foodItem
"nutrients": getNutrients()
},
'nutrientsLength' : len(foodData["foodNutrients"])
}
)
return foodStuffs
| GittyDawg/Health4Wellness | Health4Wellness/food/views.py | views.py | py | 11,078 | python | en | code | 2 | github-code | 50 |
33909565637 | from cdwa import Item, Image, PersonOrCorporateBody, PlaceOrLocation, GenericConcept, Subject
class HarvardItem(Item):
def __init__(self,rec,relevance):
# Imago Mundi administrative metadata
self.pid = u"-".join(['Harvard',str(rec['objectnumber']),"item"])
self.relevance = relevance
# 3.1. Title Text
self.titleText = rec['title']
# 21.2. Repository/Geographic Location
self.currentRepositoryGeographicLocation = rec['creditline'].split('/')[0]
# 26. RELATED VISUAL DOCUMENTATION
# [references to Object/Work]
# 26.1. Image References
# 26.1.1. Image to Work Relationship Type
self.imageReferences = {}
try:
self.imageReferences['primaryImage'] = rec['primaryimageurl']
        except KeyError:  # fall back when no primary image is present
self.imageReferences['primaryImage'] = rec['url']
| joemull/aby | final-proj-si-507/museums/harvard.py | harvard.py | py | 874 | python | en | code | 0 | github-code | 50 |
11399118396 | """
There is an unlimited supply of 500-, 100-, 50-, and 10-won coins to give as change.
When the change owed to the customer is N won, find the **minimum number** of coins to hand back.
Note: the amount N is always a multiple of 10.
"""
N = int(input()) # amount of change owed to the customer
cnt = 0 # number of coins to hand back
jandon = [500, 100, 50, 10]  # coin denominations, largest first
for i in jandon:
cnt += N // i
N = N % i
print(cnt) | song7351/algorithm_study | 1.이코테/1.greedy/3-1.py | 3-1.py | py | 504 | python | ko | code | 0 | github-code | 50 |
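# Worked example: N = 1260 -> 2*500 + 2*100 + 1*50 + 1*10 = 6 coins.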
38021908518 | """
*packageName :
* fileName : 전력망을 둘로 나누기 (Divide the power grid in two)
* author : qkrtkdwns3410
* date : 2022-09-15
* description :
* ===========================================================
* DATE AUTHOR NOTE
* -----------------------------------------------------------
* 2022-09-15 qkrtkdwns3410 initial creation
"""
from collections import deque
def bfs(v, n, grid):
cnt = 0
q = deque()
q.append(v)
visited = [0 for _ in range(n + 1)]
visited[v] = 1
while q:
v = q.popleft()
for w in grid[v]:
if not visited[w]:
visited[w] = 1
q.append(w)
cnt += 1
return cnt
def solution(n, wires):
answer = float('inf')
# 인접 리스트
grid = [[] for _ in range(n + 1)]
# 양방향 연결 그리드
for i in range(n - 1):
grid[wires[i][0]].append(wires[i][1])
grid[wires[i][1]].append(wires[i][0])
for i in range(n - 1):
grid[wires[i][0]].remove(wires[i][1])
grid[wires[i][1]].remove(wires[i][0])
cnt_node1 = bfs(wires[i][0], n, grid)
cnt_node2 = bfs(wires[i][1], n, grid)
answer = min(answer, abs(cnt_node1 - cnt_node2))
grid[wires[i][0]].append(wires[i][1])
grid[wires[i][1]].append(wires[i][0])
return answer
# solution(9, [[1, 3], [2, 3], [3, 4], [4, 5], [4, 6], [4, 7], [7, 8], [7, 9]])
# print(solution(4, [[1, 2], [2, 3], [3, 4]]))
solution(7, [[1, 2], [2, 7], [3, 7], [3, 4], [4, 5], [6, 7]])
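# For the call above, removing wire [3, 7] splits the 7 nodes into {3, 4, 5}
# and {1, 2, 6, 7}, so the minimum size difference is abs(3 - 4) = 1.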
| guqtls14/python-algorism-study | 박상준/프로그래머스/카카오/전력망을 둘로 나누기.py | 전력망을 둘로 나누기.py | py | 1,641 | python | en | code | 0 | github-code | 50 |
9971901238 | alpha = input()
alpha = alpha.upper()
alpha = list(alpha)
count={}
for i in alpha:
    try: count[i] += 1
    except KeyError: count[i] = 1
result = set()
for i in count:
if count[i] >= max(count.values()):
result.add(i)
if len(result) >= 2:
    print('?')
else:
    print(max(count, key=count.get))
# print(list(count.values()).count(2))
# among the values, count how many equal the largest value
# list(freq.values()).count(max(freq.values()))
# >>> counts how many times the largest value x occurs
# words = input().upper()
# unique_words = list(set(words))
# cnt_list = []
# for x in unique_words :
#     cnt = words.count(x) # 'aazzzz' gives 2, 4 and 'zzzzaa' also gives 2, 4
# cnt_list.append(cnt)
# if cnt_list.count(max(cnt_list)) > 1 :
# print('?')
# else :
# max_index = cnt_list.index(max(cnt_list))
# print(unique_words)
# print(cnt_list)
# print(max_index)
# print(unique_words[max_index])
# Yongjae's code
# n = input()
# a = list(n.upper())
# l = set(a)
# maxNum = 0
# q = False
# g = ''
# for i in l:
# c = a.count(i)
# if c > maxNum:
# g = i
# maxNum = c
# elif c == maxNum:
# q = True
# if q == True:
# print('?')
# else:
# print(g) | portals2/prectice_baekjoon | 0_bronze/b1#1157.py | b1#1157.py | py | 1,261 | python | ko | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.