blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
31095c919199aa953d40fd54e71b5b590297a967 | Python | DanielNeris/to-do-py | /courses/models.py | UTF-8 | 1,089 | 2.546875 | 3 | [] | no_license | from django.db import models
# Create your models here.
class Base(models.Model):
    """Abstract base model: audit timestamps plus an ``active`` flag."""
    created_at = models.DateTimeField(auto_now_add=True)  # set once, when the row is created
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save()
    active = models.BooleanField(default=True)  # presumably a soft-delete/visibility flag -- confirm with queryset usage
    class Meta:
        abstract = True  # no table for Base itself; fields are inherited by subclasses
class Course(Base):
    """A course identified by its title and a unique URL."""
    title = models.CharField(max_length=255)
    url = models.URLField(unique=True)  # at most one row per course URL
    class Meta:
        verbose_name = 'Course'
        verbose_name_plural = 'Courses'
    def __str__(self):
        # Human-readable representation used by the admin, shells, etc.
        return self.title
class Review(Base):
    """A rating and optional comment on a Course; one review per (email, course)."""
    course = models.ForeignKey(
        Course, related_name='reviews', on_delete=models.CASCADE)  # reviews are deleted with their course
    name = models.CharField(max_length=255)
    email = models.EmailField()
    comment = models.TextField(blank=True, default='')  # optional free text
    review = models.DecimalField(max_digits=2, decimal_places=1)  # numeric score; 2 digits / 1 decimal => max 9.9
    class Meta:
        verbose_name = 'Review'
        verbose_name_plural = 'Reviews'
        unique_together = ['email', 'course']  # DB-level: one review per email per course
    def __str__(self):
        return f'{self.name} review the course {self.course} with note {self.review}'
| true |
c52620e68d044dde23d866bf647df63ab6fb3333 | Python | CCOMJHC/asv_sim | /src/asv_sim/coastal_surveyor.py | UTF-8 | 1,125 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python3
# Roland Arsenault
# Center for Coastal and Ocean Mapping
# University of New Hampshire
# Copyright 2017, All rights reserved.
# Engine: 200 bHP -> *.5 for lossed * 745 to watts = 74500
# 2400 rpm: 200 bHP
# 2200 rpm: 185 bHP
# draft 5.5ft
# 40' (12.19m) long, 12' (3.66m) wide, and has a draft of 5.5' (1.8m).
# Tonnage: 16 GRT, 11 DWT
# Top Speed: 10 knots (5.14444444 m/s)
# Minimum speed for full roll stabilization: 5 knots
# Minimum survey speed: 2.5 knots
# Propulsion: 1 x Caterpillar 3116; 200HP Marine Diesel; 2.57:1 reduction
# prop pitch of 20 is just a random number for testing, mass is random as well
# Simulation parameters for the Coastal Surveyor vessel (see notes above).
coastal_surveyor = dict(
    max_rpm=2400,               # engine redline [rpm] (per notes: 200 bHP at 2400 rpm)
    max_power=74500,            # ~200 bHP * 0.5 losses * 745 W/HP (per notes above)
    idle_rpm=100,
    gearbox_ratio=0.389105058,  # 1 / 2.57 reduction (per notes above)
    prop_pitch=20,              # placeholder value (per original notes)
    max_rpm_change_rate=1000,
    max_speed=5.14444444,       # 10 knots expressed in m/s
    mass=5000,                  # placeholder value (per original notes)
    max_rudder_angle=30,
    rudder_distance=6,
    rudder_coefficient=0.25,
)
| true |
e03b3d39cbc088dc5e852b346c7d6f56a36ecc01 | Python | Jay07/Workshop4 | /areaRectangle.py | UTF-8 | 209 | 3.984375 | 4 | [] | no_license | def calcArea(width, height):
    # Area of a rectangle: simple product of the two sides; no validation.
    area = width * height
    return area
# Prompt for the two sides, compute the area, and show it truncated to int.
width = float(input("Enter width: "))
height = float(input("Enter height: "))
area = calcArea(width, height)
print("Area:", int(area)) | true |
0743309966aac246f5c2bdb6c61d97abe00ad876 | Python | saurabhban/beginner | /codes/ex41.py | UTF-8 | 384 | 3.4375 | 3 | [] | no_license | class Song(object):
    # Holds song lyrics and can print them out line by line.
    def __init__(self, lyrics):
        # lyrics: an iterable of lyric lines (callers below pass both a list
        # of strings and a bare string -- a bare string iterates per-character).
        self.lyrics = lyrics
    def sing_me_a_song(self):
        # Print each element of self.lyrics on its own line.
        for line in self.lyrics:
            print (line)
happy_bday = Song(["Happy birthday to youI don't want to get sued So I'll stop right there"])
# NOTE(review): a bare string is passed here, so sing_me_a_song() prints one
# CHARACTER per line (strings iterate per-character) -- probably meant a list.
bulls_on_parade = Song("They rally around tha familyWith pockets full of shells")
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song() | true |
381e03764d6f727f255c3b17f8a8011ee0774fd0 | Python | bigeast/ProjectEuler | /(#169)PowersOf2.py | UTF-8 | 297 | 3.15625 | 3 | [] | no_license | def findPows(x):
    # Return the list of powers of 2 STRICTLY below x: [1, 2, 4, ..., 2**k].
    p=[]
    n=2
    y=0
    while n**y<x:
        p.append(n**y)
        y+=1
    return p
def main(x):
    """Greedily decompose x into a sum of powers of 2 and print the result.

    Iterates the powers below x from largest to smallest, subtracting each
    while it still fits. Because findPows() uses a strict `<`, the largest
    power can repeat when x itself is a power of 2 (e.g. 4 -> [2, 2]).

    Prints the list of powers used and the remainder, and also returns
    (adds, remainder) so callers can inspect the result programmatically.
    """
    p = findPows(x)
    a = x
    adds = []
    for y in reversed(p):
        while a - y >= 0:
            adds.append(y)
            a -= y
    # Fixed: the original used a Python-2 print statement (`print adds, a`),
    # which is a SyntaxError under Python 3.
    print(adds, a)
    return adds, a
# Driver: decompose 10**25 into powers of 2 and print the result.
main(10**25)
| true |
4d6203c2a738feae16d4f4b4ce57c44b08544742 | Python | yannickbijl/RFSeq | /GUI_RFSeq_Input.py | UTF-8 | 1,901 | 3.140625 | 3 | [] | no_license | import wx
class GUI_RFSeq_Input(wx.Panel):
    """Input panel for RFSeq: a file picker, Quit/Next buttons, and help text."""
    def __init__(self, bb_parent):
        wx.Panel.__init__(self, bb_parent, style=wx.BORDER_SUNKEN)
        # Input parameters
        self.filename = wx.FilePickerCtrl(self, path="")
        # Buttons
        self.stop = wx.Button(self, label="Quit")
        self.next = wx.Button(self, label="Next")
        # Help text shown at the top of the panel
        self.explain = wx.StaticText(self, label=self._help_text())
        # Layout: help text on top, file picker in the middle, buttons below.
        root_sizer = wx.BoxSizer(wx.VERTICAL)
        root_sizer.Add(self.explain, 2, wx.EXPAND | wx.ALL)
        root_sizer.Add(self.filename, 1, wx.EXPAND | wx.ALL)
        button_row = wx.BoxSizer(wx.HORIZONTAL)
        button_row.Add(self.stop, 1, wx.EXPAND | wx.ALL)
        button_row.Add(self.next, 1, wx.EXPAND | wx.ALL)
        root_sizer.Add(button_row, 1, wx.EXPAND | wx.ALL)
        self.SetSizer(root_sizer)
    @staticmethod
    def _help_text():
        # Same wording as the original inline explain() helper.
        return ("Give a file with a single line containing the sequence." +
                " Only A, T, C, G are allowed. The program generates all" +
                " six reading frames. The first three on the forward " +
                "strand, and the last three on the complement strand. " +
                "Please note that these are reading frames, not open " +
                "reading frames.")
if __name__ == "__main__":
    # Manual test harness: show the input panel inside a bare frame.
    class Frame(wx.Frame):
        def __init__(self, s_parent, s_title="GUI_RFSeq_Input"):
            wx.Frame.__init__(self, s_parent, title=s_title, size=(200, 300))
            panel = wx.Panel(self)
            panel1 = GUI_RFSeq_Input(panel)
            box = wx.BoxSizer()
            box.Add(panel1, 1, wx.EXPAND | wx.ALL)
            panel.SetSizer(box)
            self.Centre()
            self.Show(True)
    app = wx.App(False)  # False: don't redirect stdout/stderr to a wx window
    Frame(None)
    app.MainLoop()
| true |
1dc9d9634ba640b062d2623dc087d83450370d13 | Python | meso2/HRRR_archive_download | /HRRR_archive.py | UTF-8 | 26,947 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
## Brian Blaylock
## June 26, 2020
"""
=========================
Download HRRR GRIB2 files
=========================
Can download HRRR files from the University of Utah HRRR archive on Pando
or from the NOMADS server.
reporthook
Prints download progress when downloading full files.
searchString_help
Prints examples for the `searchString` argument when there is an error.
download_HRRR_subset
Download parts of a HRRR file.
download_HRRR
Main function for downloading many HRRR files.
get_crs
Get cartopy projection object from xarray.Dataset
get_HRRR
Read HRRR data as an xarray Dataset with cfgrib engine.
"""
import os
import re
from datetime import datetime, timedelta
import numpy as np
import urllib.request # Used to download the file
import requests # Used to check if a URL exists
import warnings
import cartopy.crs as ccrs
import cfgrib
import xarray as xr
def reporthook(a, b, c):
    """
    Report download progress in megabytes (prints progress to screen).
    Parameters
    ----------
    a : Chunk number
    b : Maximum chunk size
    c : Total size of the download
    """
    # Percentage and MB figures are computed inline; the arithmetic is the
    # same expression as before, so the printed output is unchanged.
    print(f"\r Download Progress: {a * b / c * 100:.2f}% of {c / 1000000.:.1f} MB\r", end='')
def searchString_help(searchString):
    """Return a multi-line help message for a bad ``searchString`` value."""
    # Join the example table directly; no intermediate list variable needed.
    return '\n'.join((
        f"There is something wrong with [[ searchString='{searchString}' ]]",
        "\nHere are some examples you can use for `searchString`",
        " ================ ===============================================",
        " ``searchString`` Messages that will be downloaded",
        " ================ ===============================================",
        " ':TMP:2 m' Temperature at 2 m.",
        " ':TMP:' Temperature fields at all levels.",
        " ':500 mb:' All variables on the 500 mb level.",
        " ':APCP:' All accumulated precipitation fields.",
        " ':UGRD:10 m' U wind component at 10 meters.",
        " ':(U|V)GRD:' U and V wind component at all levels.",
        " ':.GRD:' (Same as above)",
        " ':(TMP|DPT):' Temperature and Dew Point for all levels .",
        " ':(TMP|DPT|RH):' TMP, DPT, and Relative Humidity for all levels.",
        " ':REFC:' Composite Reflectivity",
        " ':surface:' All variables at the surface.",
        " '((U|V)GRD:10 m|TMP:2 m|APCP)' 10-m wind, 2-m temp, and precip.",
        " ================ ===============================================",
        "\n If you need help with regular expression, search the web",
        " or look at this cheatsheet: https://www.petefreitag.com/cheatsheets/regex/",
        "PLEASE FIX THE `searchString`",
    ))
def download_HRRR_subset(url, searchString, SAVEDIR='./',
                         dryrun=False, verbose=True):
    """
    Download a subset of GRIB fields from a HRRR file via its .idx index.

    The remote GRIB2 file must have a sibling index file (``url + '.idx'``)
    that lists the starting byte of every GRIB message; matching messages
    are fetched individually with HTTP range requests (curl).

    Parameters
    ----------
    url : string
        The URL for the HRRR file you are trying to download, e.g.
        ``https://pando-rgw01.chpc.utah.edu/hrrr/sfc/20200624/hrrr.t01z.wrfsfcf17.grib2``.
        ``url + '.idx'`` must also exist on the server.
    searchString : str
        Regular expression matched against each line of the index file to
        select the GRIB messages to download. A few examples:

        ================ ===============================================
        ``searchString`` Messages that will be downloaded
        ================ ===============================================
        ':TMP:2 m'       Temperature at 2 m.
        ':TMP:'          Temperature fields at all levels.
        ':500 mb:'       All variables on the 500 mb level.
        ':APCP:'         All accumulated precipitation fields.
        ':UGRD:10 m'     U wind component at 10 meters.
        ':(U|V)GRD:'     U and V wind component at all levels.
        ':(TMP|DPT|RH):' TMP, DPT, and Relative Humidity for all levels.
        ':REFC:'         Composite Reflectivity
        ':surface:'      All variables at the surface.
        ================ ===============================================
    SAVEDIR : string
        Directory path to save the file, default is the current directory.
    dryrun : bool
        If True, do not actually download, but print out what the function
        would attempt to do.
    verbose : bool
        If True, print lots of details (default).

    Returns
    -------
    The path and name of the new file, or None when the index file is
    missing or nothing matched ``searchString``.
    """
    # Ping Pando first. This *might* prevent a "bad handshake" error.
    if 'pando' in url:
        try:
            requests.head('https://pando-rgw01.chpc.utah.edu/')
        except requests.exceptions.RequestException:
            # Best-effort wake-up only; a failure here is not fatal.
            print('bad handshake...am I able to on?')

    # Make SAVEDIR if path doesn't exist
    if not os.path.exists(SAVEDIR):
        os.makedirs(SAVEDIR)
        print(f'Created directory: {SAVEDIR}')

    # Request the .idx index that accompanies the GRIB2 file.
    idx = url + '.idx'
    r = requests.get(idx)

    # If there isn't an index, we get a 404 (or other) error.
    if not r.ok:
        print('❌ SORRY! Status Code:', r.status_code, r.reason)
        print(f'❌ It does not look like the index file exists: {idx}')
        # Fixed: bail out instead of falling through and trying to parse
        # the error page as if it were an index file.
        return None

    # Read the text lines of the request
    lines = r.text.split('\n')

    # Compile the user's search expression.
    try:
        expr = re.compile(searchString)
    except Exception as e:
        print('re.compile error:', e)
        raise Exception(searchString_help(searchString))

    # Store the byte ranges in a dictionary: {byte-range-as-string: line}
    byte_ranges = {}
    for n, line in enumerate(lines, start=1):
        # n is 1-based, so `lines[n]` is the line AFTER the current one;
        # the next message's start offset marks the end of this message.
        if expr.search(line):
            parts = line.split(':')
            rangestart = int(parts[1])
            if n + 1 < len(lines):
                parts = lines[n].split(':')
                rangeend = int(parts[1])
            else:
                # Last message: read to the end of the file.
                rangeend = ''
            byte_ranges[f'{rangestart}-{rangeend}'] = line

    if len(byte_ranges) == 0:
        # Loop didn't find the searchString in the index file.
        print(f'❌ WARNING: Sorry, I did not find [{searchString}] in the index file {idx}')
        print(searchString_help(searchString))
        return None

    # Name the output like `subset_20200624_hrrr.t01z.wrfsfcf17.grib2`,
    # taking the run date from the first matched index line.
    runDate = list(byte_ranges.items())[0][1].split(':')[2][2:-2]
    outFile = '_'.join(['subset', runDate, url.split('/')[-1]])
    outFile = os.path.join(SAVEDIR, outFile)

    for i, (byteRange, line) in enumerate(byte_ranges.items()):
        if i == 0:
            # First message overwrites any existing file...
            curl = f'curl -s --range {byteRange} {url} > {outFile}'
        else:
            # ...subsequent messages are appended.
            curl = f'curl -s --range {byteRange} {url} >> {outFile}'
        num, byte, date, var, level, forecast, _ = line.split(':')
        if dryrun:
            if verbose: print(f' 🐫 Dry Run: Found GRIB line [{num:>3}]: variable={var}, level={level}, forecast={forecast}')
        else:
            if verbose: print(f' Downloading GRIB line [{num:>3}]: variable={var}, level={level}, forecast={forecast}')
            os.system(curl)

    if dryrun:
        if verbose: print(f'🌵 Dry Run: Success! Searched for [{searchString}] and found [{len(byte_ranges)}] GRIB fields. Would save as {outFile}')
    else:
        if verbose: print(f'✅ Success! Searched for [{searchString}] and got [{len(byte_ranges)}] GRIB fields and saved as {outFile}')

    return outFile
def download_HRRR(DATES, searchString=None, fxx=range(0, 1), *,
                  model='hrrr', field='sfc',
                  SAVEDIR='./', dryrun=False, verbose=True):
    """
    Download full (or subset) HRRR grib2 files for a list of dates/forecasts.

    Files come from the University of Utah HRRR archive (Pando) or NOAA's
    NOMADS server. NOMADS only keeps today's and yesterday's runs, so the
    source is chosen automatically per requested datetime.

    Parameters
    ----------
    DATES : datetime or list of datetimes
        Model initialization time(s) you want to download.
    searchString : str
        Regular expression describing which GRIB fields to download (passed
        to ``download_HRRR_subset``; see its docstring for examples).
        None or ':' means download the full file.
    fxx : int or list of ints
        Forecast lead time(s). Default only grabs the analysis hour (f00);
        use e.g. ``fxx=range(0, 19)`` for all forecast hours.
    model : {'hrrr', 'hrrrak', 'hrrrX'}
        Model type ('alaska' is accepted as an alias for 'hrrrak').
        'hrrrX' (experimental) is only on Pando.
    field : {'prs', 'sfc', 'nat', 'subh'}
        Variable field set ('nat' and 'subh' are not available on Pando).
    SAVEDIR : str
        Directory path to save the downloaded HRRR files.
    dryrun : bool
        If True, only print what would be downloaded.
    verbose : bool
        If True, print lots of information (default).

    Returns
    -------
    (files, urls) : local file name(s) and the URL(s) they came from.
    Plain scalars when a single file was requested, numpy arrays otherwise.
    """
    #**************************************************************************
    ## Check function input
    #**************************************************************************
    # Force the `field` input string to be lower case.
    field = field.lower()

    # Ping Pando first. This *might* prevent a "bad handshake" error.
    try:
        requests.head('https://pando-rgw01.chpc.utah.edu/')
    except Exception as e:
        print(f'Ran into an error: {e}')
        print('bad handshake...am I able to on?')

    # Accept scalars: wrap a single datetime / forecast hour in a list.
    if not hasattr(DATES, '__len__'): DATES = np.array([DATES])
    if not hasattr(fxx, '__len__'): fxx = [fxx]

    assert all([i < datetime.utcnow() for i in DATES]), "🦨 Whoops! One or more of your DATES is in the future."

    ## Set the download SOURCE for each of the DATES
    ## ---------------------------------------------
    # NOMADS keeps only today's and yesterday's runs; older data comes
    # from the Pando archive.
    yesterday = datetime.utcnow() - timedelta(days=1)
    yesterday = datetime(yesterday.year, yesterday.month, yesterday.day)
    SOURCE = ['pando' if i < yesterday else 'nomads' for i in DATES]

    # The user may set `model='alaska'` as an alias for 'hrrrak'.
    if model.lower() == 'alaska': model = 'hrrrak'

    # Which model/field combinations each download source serves.
    available = {'pando': {'models': {}, 'fields': {}}, 'nomads': {'models': {}, 'fields': {}}}
    available['pando']['models'] = {'hrrr', 'hrrrak', 'hrrrX'}
    available['pando']['fields'] = {'sfc', 'prs'}
    available['nomads']['models'] = {'hrrr', 'hrrrak'}
    available['nomads']['fields'] = {'sfc', 'prs', 'nat', 'subh'}

    # Make SAVEDIR if path doesn't exist
    if not os.path.exists(SAVEDIR):
        os.makedirs(SAVEDIR)
        print(f'Created directory: {SAVEDIR}')

    #**************************************************************************
    # Build the URL path for every file we want
    #**************************************************************************
    # Pando:  https://pando-rgw01.chpc.utah.edu/hrrr/sfc/20200624/hrrr.t01z.wrfsfcf17.grib2
    # NOMADS: https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.20200624/conus/hrrr.t00z.wrfsfcf09.grib2
    URL_list = []
    for source, DATE in zip(SOURCE, DATES):
        if model not in available[source]['models']:
            warnings.warn(f"model='{model}' is not available from [{source}]. Only {available[source]['models']}")
        if field not in available[source]['fields']:
            warnings.warn(f"field='{field}' is not available from [{source}]. Only {available[source]['fields']}")
        if source == 'pando':
            base = f'https://pando-rgw01.chpc.utah.edu/{model}/{field}'
            URL_list += [f'{base}/{DATE:%Y%m%d}/{model}.t{DATE:%H}z.wrf{field}f{f:02d}.grib2' for f in fxx]
        elif source == 'nomads':
            base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod'
            if model == 'hrrr':
                URL_list += [f'{base}/hrrr.{DATE:%Y%m%d}/conus/hrrr.t{DATE:%H}z.wrf{field}f{f:02d}.grib2' for f in fxx]
            elif model == 'hrrrak':
                URL_list += [f'{base}/hrrr.{DATE:%Y%m%d}/alaska/hrrr.t{DATE:%H}z.wrf{field}f{f:02d}.ak.grib2' for f in fxx]

    #**************************************************************************
    # Check each URL exists; if so, download it into SAVEDIR.
    n = len(URL_list)
    if dryrun:
        print(f'🌵 Info: Dry Run {n} GRIB2 files\n')
    else:
        print(f'💡 Info: Downloading {n} GRIB2 files\n')

    # For keeping track of total time spent downloading data
    loop_time = timedelta()
    all_files = []

    for i, file_URL in enumerate(URL_list):
        timer = datetime.now()

        # Time keeping: *crude* estimate from mean time per completed file.
        mean_dt_per_loop = loop_time / (i + 1)
        est_rem_time = mean_dt_per_loop * (n - i)  # fixed: was (n-i+1), over-counting by one

        # Fixed: derive this file's source from its own URL. The old code
        # reused the stale `source` loop variable from the URL-building
        # loop, which always referred to the LAST requested date.
        source = 'pando' if 'pando' in file_URL else 'nomads'

        if not verbose:
            # Still show a little indicator of what is downloading.
            print(f"\r Download Progress: ({i+1}/{n}) files {file_URL} (Est. Time Remaining {str(est_rem_time):16})\r", end='')

        # We want to prepend the filename with the run date, YYYYMMDD
        if source == 'pando':
            outFile = '_'.join(file_URL.split('/')[-2:])
        else:
            outFile = file_URL.split('/')[-3][5:] + '_' + file_URL.split('/')[-1]
        outFile = os.path.join(SAVEDIR, outFile)

        # The URL must respond OK, and the Content-Length must be >1000000
        # bytes (a smaller file on the server is probably incomplete).
        head = requests.head(file_URL)
        check_exists = head.ok
        # Fixed: tolerate a missing Content-Length header instead of raising.
        check_content = int(head.headers.get('Content-Length', 0)) > 1000000

        if verbose: print(f"\nDownload Progress: ({i+1}/{n}) files {file_URL} (Est. Time Remaining {str(est_rem_time):16})")

        if check_exists and check_content:
            if searchString in [None, ':']:
                if dryrun:
                    if verbose: print(f'🌵 Dry Run Success! Would have downloaded {file_URL} as {outFile}')
                    all_files.append(None)
                else:
                    # Download the full file.
                    urllib.request.urlretrieve(file_URL, outFile, reporthook)
                    all_files.append(outFile)
                    if verbose: print(f'✅ Success! Downloaded {file_URL} as {outFile}')
            else:
                # Download a subset of the full file based on the searchString.
                if verbose: print(f"Download subset from [{source}]:")
                thisfile = download_HRRR_subset(file_URL,
                                                searchString,
                                                SAVEDIR=SAVEDIR,
                                                dryrun=dryrun,
                                                verbose=verbose)
                all_files.append(thisfile)
        else:
            # The URL request is bad. If status code == 404, the URL does not exist.
            print()
            print(f'❌ WARNING: Status code {head.status_code}: {head.reason}. Content-Length: {int(head.headers.get("Content-Length", 0)):,} bytes')
            print(f'❌ Could not download {head.url}')

        loop_time += datetime.now() - timer

    print(f"\nFinished 🍦 (Time spent downloading: {loop_time})")

    if len(all_files) == 1:
        return all_files[0], URL_list[0]  # return a string, not list
    return np.array(all_files), np.array(URL_list)  # return file names and URLs
def get_crs(ds):
    """
    Get the cartopy coordinate reference system from a cfgrib's xarray Dataset
    Parameters
    ----------
    ds : xarray.Dataset
        An xarray.Dataset from a GRIB2 file opened by the cfgrib engine.
    """
    # The projection is derived from the GRIB attributes of the Dataset's
    # first variable.
    attrs = ds[list(ds)[0]].attrs
    # Guard clause: only Lambert-conformal grids are supported here.
    if attrs['GRIB_gridType'] != 'lambert':
        warnings.warn('GRIB_gridType is not "lambert".')
        return None
    return ccrs.LambertConformal(
        globe=ccrs.Globe(ellipse='sphere'),
        central_latitude=attrs['GRIB_LaDInDegrees'],
        central_longitude=attrs['GRIB_LoVInDegrees'],
        standard_parallels=(attrs['GRIB_Latin1InDegrees'],
                            attrs['GRIB_Latin2InDegrees']))
def get_HRRR(DATE, searchString, *, fxx=0, DATE_is_valid_time=False,
             remove_grib2=True, add_crs=True, **download_kwargs):
    """
    Download HRRR data and return it as an xarray Dataset (or list of Datasets).

    Only one `DATE` and one `fxx` (forecast lead time) may be requested.

    Parameters
    ----------
    DATE : datetime
        A single datetime object.
    searchString : string
        A string representing a field or fields from the GRIB2 file.
        See the ``download_HRRR`` docstring for details and examples.
    fxx : int
        Forecast lead time. Default will get the analysis, F00.
    DATE_is_valid_time : bool
        False - (default) `DATE` is the model initialization datetime.
        True  - `DATE` is the model *valid* time; the initialization time
                is derived by subtracting `fxx` hours.
    remove_grib2 : bool
        True  - (default) Delete the GRIB2 file after reading it into a
                Dataset. Requires a deep copy into memory first.
        False - Keep the downloaded GRIB2 file on disk; the caller is
                responsible for removing it later.
    add_crs : bool
        True - (default) Append the cartopy coordinate reference system
        (crs) projection as an attribute to the Dataset and its variables.
    **download_kwargs :
        Any other keyword argument accepted by ``download_HRRR``
        {model, field, SAVEDIR, dryrun, verbose}.
    """
    # Snapshot the call arguments now (before DATE is shifted below); this
    # is attached to each returned Dataset's attrs for provenance.
    inputs = locals()

    assert not hasattr(DATE, '__len__'), "`DATE` must be a single datetime, not a list."
    assert not hasattr(fxx, '__len__'), "`fxx` must be a single integer, not a list."

    if DATE_is_valid_time:
        # Change DATE to the model run initialization DATE so that, with the
        # forecast lead time offset applied, the returned data is valid at
        # the DATE the user requested.
        DATE = DATE - timedelta(hours=fxx)

    # Download the GRIB2 file
    grib2file, url = download_HRRR(DATE, searchString, fxx=fxx, **download_kwargs)

    # Some extra backend kwargs for cfgrib
    backend_kwargs = {'indexpath':'',
                      'read_keys': ['parameterName', 'parameterUnits'],
                      'errors': 'raise'}
    # Use cfgrib.open_datasets, just in case there are multiple "hypercubes"
    # for what we requested.
    H = cfgrib.open_datasets(grib2file, backend_kwargs=backend_kwargs)

    # Create a cartopy projection object
    if add_crs:
        crs = get_crs(H[0])

    for ds in H:
        ds.attrs['get_HRRR inputs'] = inputs
        ds.attrs['url'] = url
        if add_crs:
            # Add the crs projection info as a Dataset attribute
            ds.attrs['crs'] = crs
            # ...and add attrs for each variable for ease of access.
            for var in list(ds):
                ds[var].attrs['crs'] = crs

    if remove_grib2:
        # Deep-copy the data into memory so the backing file can be deleted.
        H = [ds.copy(deep=True) for ds in H]
        os.remove(grib2file)

    if len(H) == 1:
        # Single hypercube: unwrap the list for convenience.
        H = H[0]
    else:
        warnings.warn('⚠ ALERT! Could not load grib2 data into a single xarray Dataset. You might consider refining your `searchString` if you are getting data you do not need.')

    return H
| true |
5a557aa082e388f3fcc54711f5d4652e40d5bfc6 | Python | Aasthaengg/IBMdataset | /Python_codes/p03853/s241500488.py | UTF-8 | 142 | 3 | 3 | [] | no_license | h,w = map(int,input().split())
list = []
for i in range(h):
list.append(input())
for i in range(h):
print(list[i])
print(list[i]) | true |
cadf18b58a4afd5a5f170050f7698ae192840763 | Python | xishuzhi/qd_sign_in | /qd_utils.py | UTF-8 | 17,603 | 3 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from urllib import request
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import os
import gzip
import json
# Build a character-substitution translation table (dict for str.translate).
def make_dict(s_in, s_out):
    """Map each character of s_in to the same-position character of s_out.

    Characters of the longer string beyond the length of the shorter one are
    ignored, which matches the original behaviour: the original "delete the
    extra input chars" branch was dead code (the loop index never reached
    len(s_out)), and str.maketrans(c, '') would raise ValueError anyway.

    Returns the {ordinal: ordinal} dict accepted by str.translate().
    """
    m = min(len(s_in), len(s_out))
    # For duplicated characters in s_in, the later position wins -- same as
    # the per-character dict.update() loop this replaces.
    return str.maketrans(s_in[:m], s_out[:m])
# Normalize a string for display, paths, or file names (full-width punctuation).
def replace_text(text):
    """Normalize text: swap ASCII digits and , . ! ? for their full-width
    counterparts, map / \\ * to underscores and newlines to spaces, then
    trim surrounding whitespace."""
    table = make_dict('1234567890,.!?/\\*?!\n', '1234567890,。!?___?! ')
    return text.translate(table).strip()
# Normalize a title for display only -- NOT safe for use as a path/file name.
def replace_title(text):
    """Normalize a title: convert ASCII letters, digits and , . ! ? to their
    full-width forms and newlines to spaces, then trim surrounding
    whitespace. Path-unsafe characters are NOT replaced here, so the result
    is not suitable for use as a file or directory name."""
    table = make_dict(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890,.!?!\n',
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890,。!?! ')
    return text.translate(table).strip()
# Sanitize a string for use as a file or directory name.
def replace_file_path(path):
    """Sanitize a string for use as a file/directory name by substituting
    characters that are unsafe on common filesystems, then trimming
    surrounding whitespace."""
    substitutions = (
        ('/', '-'),
        ('\\', '-'),
        ('*', '-'),
        ('?', '?'),
        ('!', '!'),
        ('\n', ''),
        # NOTE(review): the original replaced '' (the empty string) with a
        # space, which inserts a space between every character -- possibly
        # an invisible character lost in the source encoding. Kept verbatim.
        ('', ' '),
        ('|', '_'),
        (':', ':'),
    )
    for old, new in substitutions:
        path = path.replace(old, new)
    return path.strip()
# Scrape the limited-time-free book list; returns
# [{'name': <title>, 'url': 'https://book.qidian.com/info/<id>#Catalog', 'id': <book id>}, ...]
def get_limit_list():
    """Scrape https://f.qidian.com/ and return the limited-time-free books as
    [{'name': <title>, 'url': <catalog url>, 'id': <book id>}, ...]."""
    fp = request.urlopen("https://f.qidian.com/")
    html = fp.read()
    metaSoup = BeautifulSoup(html, "html.parser")
    # print(metaSoup)
    # The free-for-a-limited-time books live in the div with id 'limit-list'.
    limit_list = metaSoup.find('div', attrs={'id': 'limit-list'})
    # print(limit_list)
    book_info_list = limit_list.findAll('div', attrs={'class': 'book-mid-info'})
    book = []
    for i in book_info_list:
        # Each entry's <h4><a> carries the title text and the book id.
        id_link = i.h4.a['href']
        id = i.h4.a['data-bid']
        # print(id_link.split('/')[-1])
        data = {'name': i.h4.get_text(), 'url': 'https://book.qidian.com/info/' + id + "#Catalog", 'id': id}
        book.append(data)
    # print(book)
    return book
def get_limit_list_from_qidian():
    """Scrape https://www.qidian.com/free and return the free books as
    [{'name': <title>, 'url': <catalog url>, 'id': <book id>}, ...]."""
    fp = request.urlopen("https://www.qidian.com/free")
    html = fp.read()
    metaSoup = BeautifulSoup(html, "html.parser")
    # print(metaSoup)
    # Book titles are the <h4> elements inside the 'book-img-text' container.
    ulll = metaSoup.find('div', attrs={'class': 'book-img-text'})
    limit_list = ulll.find_all('h4')
    book = []
    for i in limit_list:
        id = i.a['data-bid']
        n = i.a.text
        data = {'name': n, 'url': 'https://book.qidian.com/info/' + id + "#Catalog", 'id': id}
        book.append(data)
    # print(book)
    return book
# Extract the title, author, and total chapter count from a book page's HTML;
# returns the tuple (name, author, chapter_count) as strings.
def get_book_info(text):
    """Parse a Qidian book page and return (title, author, chapter_count).

    All three values are strings; empty strings are returned when `text`
    is empty or the page cannot be parsed (e.g. the site served an error
    page instead of a book page).
    """
    # Fixed: the original fell through to `return name, actor, count` with
    # the variables never assigned when `text` was falsy.
    if not text:
        return '', '', ''
    name = actor = count = ''
    try:
        metaSoup = BeautifulSoup(text, "html.parser")
        # The title/author block of the page.
        book_info = metaSoup.find('div', attrs={'class': 'book-info'})
        if book_info is None:
            # No book info: probably an error page; surface its message.
            err = metaSoup.find('div', attrs={'class': 'error-text fl'})
            if err is not None:
                print(err.get_text())
            return "", "", ""
        # Title and author strings.
        name = book_info.h1.em.get_text()
        actor = book_info.h1.a.get_text()
        # Chapter count: strip the first and last two characters of the label
        # (assumes the site's '(N章)'-style wording -- TODO confirm).
        catalogCount = metaSoup.find('li', attrs={'class': 'j_catalog_block'}).i
        count = catalogCount.get_text()
        count = count[1:-2]
    except Exception as e:
        # Fixed: narrowed from a bare `except:` and report what went wrong
        # instead of swallowing everything silently.
        print("error:", e)
    return name, actor, count
# Look up a book by id; returns [{'name': <title>, 'url': 'https://book.qidian.com/info/<id>#Catalog'}]
def get_book_by_id(id):
url = 'https://book.qidian.com/info/%s' % id
html = get_html(url)
if not html == '404':
name, actor, count = get_book_info(html)
else:
name = 'None'
book = [{'name': name, 'url': url + "#Catalog"}]
return book
# Open a URL and return the page source as a UTF-8 string.
def get_html(url, count=0):
    """Fetch *url* and return its body decoded as UTF-8.

    Transparently handles gzip-compressed responses.  On failure the fetch
    is retried recursively (``count`` tracks the attempt number) and the
    sentinel string '404' is returned once count exceeds 5.
    """
    try:
        req = request.Request(url)
        req.add_header('Accept-encoding', 'gzip,deflate,sdch')
        req.add_header('User-Agent',
                       'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3033.0 Safari/537.36')
        raw = request.urlopen(req).read()
        try:
            # Server honoured Accept-encoding: body is gzip-compressed.
            return gzip.decompress(raw).decode("utf-8")
        except Exception:
            # Plain (uncompressed) response.
            return raw.decode("utf-8")
    except Exception as e:
        print('页面打开失败:[%s] error:%s' % (url, e))
        if (count > 5):
            return '404'
        return get_html(url, count + 1)
# Open the page in a real browser and return the rendered source.
def get_html_by_browser(url):
    """Render *url* in Chrome (via selenium) and return the page source.

    Used for pages that build their DOM with JavaScript; waits a fixed
    five seconds for the page to settle before reading it.
    """
    driver = webdriver.Chrome()
    driver.get(url)
    time.sleep(5)  # crude wait for JS-rendered content
    source = driver.page_source
    driver.quit()
    return source
# Extract chapter names and links from a book's catalog page.
def get_volume_list(url='', count=0):
    """Fetch a book's catalog page and extract its chapter list.

    Tries a plain HTTP fetch first (count == 0) and falls back to a real
    browser (count == 1) when the static page looks incomplete.

    Returns (chapters, volumes) where ``chapters`` is a flat list of
    {'name': ..., 'url': ...} dicts and ``volumes`` maps a volume index to
    its chapter dicts.  On failure both are empty.
    """
    metaSoup = None
    try:
        html = ''
        if count == 0:
            html = get_html(url)
        elif count == 1:
            html = get_html_by_browser(url)
        metaSoup = BeautifulSoup(html, "html.parser")
        # Expected total chapter count from the catalog header.
        catalogCount = metaSoup.find('li', attrs={'class': 'j_catalog_block'}).i
        v_count = catalogCount.get_text()[1:-2]
        volume_wrap = metaSoup.findAll('div', attrs={'class': 'volume-wrap'})
        v_list = []
        v_volume = {}
        for v_index, wrap in enumerate(volume_wrap):
            chapters = []
            for item in wrap.findAll('li'):
                entry = {'name': item.get_text(), 'url': 'http:' + item.a['href']}
                v_list.append(entry)
                chapters.append(entry)
            v_volume[v_index] = chapters
        # The original condition parsed as ``empty or (short and count==0)``
        # due to and/or precedence, which triggered a futile extra recursion
        # at count == 1 with an empty list; the browser fallback is only
        # meaningful from the static attempt.
        if (len(v_list) == 0 or len(v_list) < int(v_count)) and count == 0:
            return get_volume_list(url, count + 1)
        return v_list, v_volume
    except Exception as e:
        print('error url = %s (%s)' % (url, e))
        if count == 0:
            return get_volume_list(url, count + 1)
        print("write to file!")
        # Guard the debug dump: ``metaSoup`` (and its <title>) may not exist
        # if the failure happened before/while parsing.
        if metaSoup is not None and metaSoup.title is not None:
            with open(metaSoup.title.get_text() + '.log', 'wb') as fh:
                fh.write(metaSoup.prettify('utf-8'))
        # Was a bare ``return []`` -- normalize to the same two-value shape
        # as the success path so callers can always unpack the result.
        return [], {}
# Fetch one chapter's content; returns (title, plain text, xhtml document).
def get_volume(url):
    """Download one chapter page.

    Returns (title, plain_text, xhtml_document); all three are empty
    strings when the page cannot be parsed.
    """
    ht = get_html(url)
    src_text = """
    <?xml version="1.0" encoding="utf-8"?>
    <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
    "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
    <html xmlns="http://www.w3.org/1999/xhtml">
    <head>
    <title>%s</title>
    </head>
    <body>
    <h1>%s</h1>
    <div>
    %s
    </div>
    <div><br/></div>
    </body>
    </html>
    """
    # Initialise every result up front: the ``finally`` below always returns
    # these names, but in the original they were only bound inside the
    # ``try``, so an early parse failure raised NameError instead of
    # returning empty strings.
    tital = ''
    text = ''
    html = ''
    try:
        metaSoup = BeautifulSoup(ht, "html.parser")
        book_info = metaSoup.find('h3', attrs={'class': 'j_chapterName'})
        book_data = metaSoup.find('div', attrs={'class': 'read-content j_readContent'})
        volume_name = book_info.get_text()
        volume_data = book_data.get_text()
        text += volume_name
        text += (volume_data.replace('  ', '\n  '))
        text = replace_title(text)
        v_n = replace_title(volume_name)
        htm = book_data.prettify()
        htm = htm.replace('<p>\n', '<p>  ')
        html = src_text % (v_n, v_n, htm)
        tital = replace_file_path(v_n)
    except Exception as e:
        # Was a bare ``except`` printing a fixed message; keep the
        # best-effort semantics but report the cause.
        print("get_volume error: %s" % e)
    finally:
        # Deliberate best-effort contract: always hand back whatever was
        # produced.  NOTE: a return inside ``finally`` also suppresses any
        # in-flight exception -- kept to preserve the original behaviour.
        return tital, text, html
def path_win(path):
    """Convert *path* to Windows separators and drop a single trailing one.

    Bug fix: the original tested ``path[:-1] == '\\\\'`` (everything
    *except* the last character), so the trailing-separator strip almost
    never fired; the intended test is on the last character.
    """
    path = path.replace('/', '\\')
    if path and path[-1] == '\\':  # guard: path may be empty
        path = path[0:-1]
    return path
def path_linux(path):
    """Convert *path* to POSIX separators and drop a single trailing one.

    Bug fix: the original tested ``path[:-1] == '/'`` (everything *except*
    the last character), so the trailing-separator strip almost never
    fired; the intended test is on the last character.
    """
    path = path.replace('\\', '/')
    if path and path[-1] == '/':  # guard: path may be empty
        path = path[0:-1]
    return path
def path_format(path):
    """Normalize *path*'s separators for the current operating system."""
    if os.name == 'nt':
        return path_win(path)
    if os.name in ('Android', 'posix'):
        return path_linux(path)
    # Unknown platform: hand the path back untouched.
    return path
def getPath():
    """Best-effort base directory for this script's data files.

    Windows uses the current working directory; POSIX/Android use the
    script's own directory, with a hard-coded QPython fallback.
    """
    if os.name == 'nt':
        return os.getcwd()
    base = './'
    if os.name in ('Android', 'posix'):
        base = os.path.dirname(__file__)
    if base == './':
        # Fallback for the QPython environment on Android.
        base = '/storage/emulated/0/qpython/scripts3/projects3/qidian'
    return base
def save_file(path, data):
    """Write ``str(data)`` to *path* as UTF-8 text.

    Returns True on success, False (after logging) on any error.
    """
    try:
        path = path_format(path)
        with open(path, 'w', encoding='utf-8') as fh:
            fh.write(str(data))
        return True
    except Exception as e:
        print('error:file(%s):%s' % (path, e))
        return False
def open_file(path):
    """Read *path* as UTF-8 text.

    Returns the file contents, or '' (after logging) on any error.
    """
    try:
        path = path_format(path)
        with open(path, 'r', encoding='utf-8') as fh:
            return fh.read()
    except Exception as e:
        print('error:file(%s):%s' % (path, e))
        return ''
def save_gzip(path, data):
    """Gzip-compress ``str(data)`` (UTF-8) into *path*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        path = path_format(path)
        payload = str(data).encode('utf-8')
        with gzip.open(path, 'wb') as fh:
            fh.write(payload)
        return True
    except Exception as e:
        print('save_gzip error:file(%s):%s' % (path, e))
        return False
def open_gzip(path):
    """Read the gzip file at *path* and return its contents as UTF-8 text.

    Returns '' (after logging) on any error.  NOTE(review): unlike the
    other file helpers this one does not run *path* through
    ``path_format`` -- kept as-is to preserve behaviour; confirm whether
    that is intentional.
    """
    try:
        with gzip.open(path, 'rb') as f:
            # The original also had a bare ``f.close`` here -- an attribute
            # access with no effect; the ``with`` block already closes f.
            return f.read().decode('utf-8')
    except Exception as e:
        print('open_gzip error file:(%s);%s' % (path, e))
        return ''
# Fetch the book info / chapter-list JSON from qidian's mobile API.
def getBookInfoData(bookID):
    """Call qidian's mobile API and return the chapter-list JSON for a book."""
    url = 'http://4g.if.qidian.com/Atom.axd/Api/Book/GetChapterList?BookId=%s' % bookID
    req = request.Request(url)
    req.add_header('Accept-encoding', 'gzip')
    req.add_header('User-Agent', 'Mozilla/mobile QDReaderAndroid/6.6.6/269/qidian/000000000000000')
    payload = request.urlopen(req).read()
    # The API answers gzip-compressed when we advertise gzip support.
    return json.loads(gzip.decompress(payload).decode("utf-8"))
# Returns (flattened chapter list, raw JSON, free-limit flag).
# Each chapter entry: {'v_vip', 'v_cid', 'v_name', 'v_url', 'count'} where
# v_url is 'https://vipreader.qidian.com/chapter/<book_id>/<chapter_id>'.
def getBookVolumeInfoJson(bookID):
    """Fetch and flatten a book's chapter metadata.

    Returns (chapters, raw_json, is_free_limit); when the book does not
    exist, ``chapters`` is [] and ``is_free_limit`` is ''.
    """
    raw = getBookInfoData(bookID)
    if raw['Message'] != '成功':
        print('ID=%s的书籍不存在!' % bookID)
        return [], raw, ''
    data = raw['Data']
    chapters = []
    # ``count`` is the chapter's position in the full API list, including
    # entries that are skipped below for having a non-positive id.
    for index, chapter in enumerate(data['Chapters']):
        cid = chapter['c']
        if cid > 0:
            chapters.append({
                'v_name': chapter['n'],
                'v_cid': cid,
                'v_vip': chapter['v'],
                'v_url': 'https://vipreader.qidian.com/chapter/%s/%s' % (bookID, cid),
                'count': index,
            })
    return chapters, raw, data['IsFreeLimit']
# Merge text files into one.
def join_text(name, file_list):
    """Concatenate the files in *file_list* (plain or .gz) into *name*.

    Each source file's contents are followed by two newlines.  Entries
    that exist neither as-is nor with a '.gz' suffix are silently
    skipped, matching the original behaviour.
    """
    try:
        with open(name, 'w', encoding='utf-8') as out:
            for entry in file_list:
                part = path_format(str(entry))
                if os.path.exists(part):
                    with open(part, 'r', encoding='utf-8') as src:
                        out.write(src.read())
                    out.write('\n')
                    out.write('\n')
                elif os.path.exists(part + '.gz'):
                    with gzip.open(part + '.gz', 'rb') as src:
                        out.write(src.read().decode('utf-8'))
                    out.write('\n')
                    out.write('\n')
    except Exception as e:
        # The original formatted the open file object ``f`` here (and hit
        # NameError when opening the output file itself failed); report
        # the target file name instead.
        print('join_text_error : %s : %s' % (name, e))
def join_text_gz(name, file_list):
    """Concatenate the files in *file_list* (plain or .gz) into gzip file *name*.

    Each source file's contents get two trailing newlines.  Missing
    entries are silently skipped, matching the original behaviour.
    """
    try:
        with gzip.open(name, 'w') as out:
            for entry in file_list:
                part = path_format(str(entry))
                if os.path.exists(part):
                    with open(part, 'r', encoding='utf-8') as src:
                        out.write((src.read() + '\n\n').encode('utf-8'))
                elif os.path.exists(part + '.gz'):
                    # The original called ``a.close`` without parens here
                    # (a no-op); the ``with`` blocks handle closing.
                    with gzip.open(part + '.gz', 'rb') as src:
                        out.write((src.read().decode('utf-8') + '\n\n').encode('utf-8'))
    except Exception as e:
        # Was formatted with the (possibly unbound) file object ``f``;
        # report the target file name instead.
        print('join_text_error : %s : %s' % (name, e))
# Fetch a chapter's content JSON in the mobile-client format (free chapters).
def getTextData(bookID, ChepterID):
    """Fetch one chapter's content JSON from qidian's mobile API.

    Returns the decoded JSON dict, or '' when the API reports failure.
    """
    url = 'http://4g.if.qidian.com/Atom.axd/Api/Book/GetContent?BookId=%s&ChapterId=%s' % (bookID, ChepterID)
    req = request.Request(url)
    req.add_header('Accept-encoding', 'gzip')
    req.add_header('User-Agent', 'Mozilla QDReaderAndroid/6.2.0/232/qidian/000000000000000')
    body = gzip.decompress(request.urlopen(req).read()).decode("utf-8")
    result = json.loads(body)
    if result['Message'] == '失败':
        print("error:%s" % url)
        return ''
    return result
# Entry point: intentionally a no-op; the commented lines below are manual
# smoke tests the author ran against the helpers above.
if __name__ == "__main__":
    pass
    # # timestamp conversion
    # ts = 1529035341000
    # if len(str(ts)) == 13:
    #     ts /= 1000
    # timeArray = time.localtime(ts)
    # print(timeArray)
    # dt = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
    # print(dt)
    # print(get_limit_list_from_qidian())
    # # test: get book name and catalog chapters by id
    # print(get_book_by_id(1005188549))
    # # test: get book names and catalog chapters from the free-chapters page
    # print(get_limit_list())
    # print(getBookInfoData(1005188549))
    # # test: open the catalog in a browser and get name/author/chapter count
    # print(get_book_info(get_html_by_browser('http://book.qidian.com/info/3600493#Catalog')))
    # get a book's chapters and links
    # print(get_volume_list('http://book.qidian.com/info/3600493#Catalog'))
    # tital, text, html= get_volume('http://read.qidian.com/chapter/mXVR4wuK70o1/EMQ5k8jKRMwex0RJOkJclQ2')
    # print(text)
    # save_file('t.txt',text)
    # save_file('t.txt.xhtml', html)
# abcdefghijklmnopqrstuvwxyz
# ABCDEFGHIJKLMNOPQRSTUVWXYZ
#
# abcdefghijklmnopqrstuvwxyz
# ABCDEFGHIJKLMNOPQRSTUVWXYZ
| true |
8fa87d082963712c6e9803a442dadbb5c6a50d32 | Python | mattbellis/stanford-foothill-research-project-2011-dark-matter-and-gpus | /Foothill_research_project_2011/code_for_CPU_conference/plotting_scripts/calc_bin_edges_for_log_binning.py | UTF-8 | 274 | 2.859375 | 3 | [] | no_license | import numpy as np
# Build a C-style initializer string of logarithmically spaced histogram bin
# edges spanning 1e-3 .. 1e2, with an extra 0.0 lower edge and 100.0 upper
# edge, and count the total number of edges (npts).
# NOTE: this file is Python 2 (print statements below).
output = "{0.0000,"
npts = 1
for i in range(-3,2):
    # Six edges per decade; the last edge of each decade is skipped below
    # because it duplicates the first edge of the next decade.
    x = np.logspace(i, i+1, 6)
    npts += len(x)-1
    # NOTE(review): this inner loop shadows the outer index ``i``; harmless
    # here because the outer ``i`` is not reused after, but fragile.
    for i,n in enumerate(x):
        if i<len(x)-1:
            output += "%f," % (n)
output += "%f}" % (100.00)
npts += 1
print npts
print output
| true |
80bb51357faaad7444b8a4cc9cd35aa25fc59aef | Python | jsdiesel/comp-1531-temp | /integer.py | UTF-8 | 171 | 3.53125 | 4 | [] | no_license | integers = [1, 2, 3, 4, 5]
# NOTE: the initialiser ``integers = [1, 2, 3, 4, 5]`` sits at the end of
# the preceding metadata line in this dump.
integers.append(6)
# Sum the list two ways: an explicit accumulator loop, then the built-in
# sum(); both print 21.
counter = 0
for i in integers:
    counter = counter + i
print (counter)
print(sum(integers))
| true |
468dd68b875c2a774810880814984dc30181ac55 | Python | WmHHooper/aima-python | /submissions/Everett/mySearches.py | UTF-8 | 7,995 | 3.09375 | 3 | [
"MIT"
] | permissive |
import search
import numpy as np
from math import(cos, pi)
# A sample map problem
from utils import is_in
# Undirected graph of towns in Madison County, TN; edge weights are travel
# costs between towns (commented-out entries are leftovers from an earlier
# Sumner County map).
madison_map = search.UndirectedGraph(dict(
    # Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
    # Cottontown=dict(Portland=18),
    #Fairfield=dict(Mitchellville=21, Portland=17),
    #Mitchellville=dict(Portland=7, Fairfield=21),
    Jackson=dict(Humboldt=27),
    Humboldt=dict(Jackson=27, ThreeWay=8),
    ThreeWay=dict(Humboldt=8, Medon=34),
    Medon=dict(Jackson=17, Humboldt=43,ThreeWay=34),
    SpringCreek=dict(ThreeWay=18, Medon=34, Humboldt=29)
))
# Coordinates for map. May not be entirely accurate but as close as possible
# (x, y) pairs per town; consumed by Map4.h's straight-line heuristic below.
madison_map.locations = (dict(
    Jackson=(485, 512),
    Humboldt=(482, 482),
    ThreeWay=(474, 474),
    Medon=(495, 501),
    SpringCreek=(474, 464)))
# Two graph-search problems over the Madison map; ``label``/``description``
# are read by the course's test harness when reporting results.
madison_puzzle = search.GraphProblem('Jackson', 'ThreeWay', madison_map)
madison_puzzle1 = search.GraphProblem('SpringCreek', 'Jackson', madison_map)
madison_puzzle.label = 'Madison'
madison_puzzle.description = '''
An abbreviated map of Madison County, TN.
This map is unique, to the best of my knowledge.
'''
madison_puzzle1.label = 'Madison1'
madison_puzzle1.description = '''
An abbreviated map of Madison County, TN.
This map is unique, to the best of my knowledge.
'''
# The classic AIMA Romania map with cities abbreviated to single letters
# (per the cited Russell & Norvig figure).  Note 'G' appears only as a
# neighbour of B; UndirectedGraph links it both ways.
romania_map = search.UndirectedGraph(dict(
    A=dict(Z=75,S=140,T=118),
    Z=dict(O=71,A=75),
    S=dict(O=151,R=80,F=99),
    T=dict(A=118,L=111),
    O=dict(Z=71,S=151),
    L=dict(T=111,M=70),
    M=dict(L=70,D=75),
    D=dict(M=75,C=120),
    R=dict(S=80,C=146,P=97),
    C=dict(R=146,P=138,D=120),
    F=dict(S=99,B=211),
    P=dict(R=97,C=138,B=101),
    B=dict(G=90,P=101,F=211),
    ))
# A (Arad) to B (Bucharest), the textbook example problem.
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russall & Norvig, 3rd Ed., p. 68.
'''
# 0s Represent Walls
# 1s Represent Path
# 9 Represents Start
# 8 Represents Exit
# Visual reference only: the actual search runs on Labyrinth_path below;
# this array appears otherwise unused in this file.
Labyrinth2 = np.array([[9, 1, 1, 1, 1, 1],
                       [0, 1, 0, 1, 1, 1],
                       [0, 1, 0, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1],
                       [0, 0, 0, 1, 0, 1],
                       [0, 0, 0, 1, 1, 1],
                       [8, 1, 1, 1, 1, 1]])
# Above is the visual representation of the below labyrinth. It has multiple paths to traverse but only
# one entrance and one exit. The costs right now may seem a bit random due to time constraint
# but, if I return back to this project for SURS, I'll try to make it more reasonable.
# Also, instead of using a 2D array to create a 2D maze. I decided to use a dictionary of dictionaries to make
# maze/labyrinth of points that connect to each other. So really its more of a path maze rather than a traditional maze.
# Adjacency dict: node -> {neighbour: step cost}, consumed by Maze2.
# NOTE(review): a few links are asymmetric (e.g. O->P costs 12 but P->O
# costs 20) -- confirm whether that is intentional.
Labyrinth_path = (dict(
    Start=dict(B=2),
    B=dict(C=2, Start=2),
    C=dict(D=5, Q=20, B=2),
    D=dict(E=8, C=5),
    E=dict(F=14, D=8),
    F=dict(G=13, E=14),
    G=dict(H=24, F=13),
    H=dict(I=34, G=24),
    I=dict(J=78, H=34),
    J=dict(AW=54, I=78),
    AW=dict(K=56, AC=21, J=54),
    K=dict(L=87, AW=56),
    L=dict(M=6, K=87),
    M=dict(N=43, L=6),
    N=dict(O=64, M=43),
    O=dict(W=80, P=12, N=64),
    P=dict(Q=12, O=20),
    Q=dict(C=20, U=20, R=45, P=12),
    R=dict(T=62, Q=45),
    T=dict(AJ=32, R=62),
    U=dict(V=96, Q=20),
    V=dict(W=52, AF=20, U=96),
    AF=dict(AE=51, V=20),
    AE=dict(AD=46, AF=51),
    AD=dict(AG=12, AE=46),
    AG=dict(AH=52, AM=46, AD=12),
    AH=dict(AI=21, AG=52),
    AI=dict(AJ=21, AH=21),
    AJ=dict(T=32, AI=21),
    AM=dict(Finish=65, AG=46),
    W=dict(V=52, X=23, O=80),
    X=dict(Y=56, W=23),
    Y=dict(Z=12, X=56),
    Z=dict(AB=21, Y=12),
    AB=dict(AC=12, Z=21),
    AC=dict(AB=12, AW=21),
    Finish=dict(AM=65),
    ))
# Visual reference for maze_path below (9 = start, 8 = exit, 0 = wall).
Maze = np.array([[0,0,9,0,0],
                 [1,1,1,1,1],
                 [1,0,1,0,1],
                 [1,0,0,0,1],
                 [1,1,8,0,0]])
# Above is a visual representation of the below maze. Unlike the labyrinth, it has only one solution
# to get from the start to finish.
# NOTE: every step cost is 0, so Maze2.path_cost accumulates nothing here
# and cost-based searches will compare paths by heuristic/depth only.
maze_path = (dict(
    Start=dict(A=0),
    A=dict(Start=0, C=0, B=0, F=0),
    B=dict(A=0, G=0),
    C=dict(A=0, D=0),
    D=dict(M=0, C=0),
    M=dict(L=0, D=0),
    L=dict(K=0, M=0),
    K=dict(L=0, Finish=0),
    Finish=dict(K=0),
    G=dict(B=0, H=0),
    H=dict(G=0, J=0),
    J=dict(I=0, H=0),
    I=dict(J=0),
    F=dict(A=0, O=0),
    O=dict(F=0, N=0),
    N=dict(O=0, Q=0),
    Q=dict(N=0,P=0),
    P=dict(Q=0),
    ))
# A trivial Problem definition
class LightSwitch(search.Problem):
    """Minimal two-state problem: flip a switch until it reads 'on'."""

    def actions(self, state):
        # Both flips are always available, regardless of state.
        return ['up', 'down']

    def result(self, state, action):
        # 'up' turns the light on; anything else turns it off.
        return 'on' if action == 'up' else 'off'

    def goal_test(self, state):
        return state == 'on'

    def h(self, node):
        # Admissible heuristic: 0 at the goal, otherwise one flip away.
        return 0 if self.goal_test(node.state) else 1
# This problem definition solves any size maze or labyrinth given enough
# memory; labyrinths take longer and give richer outputs because they have
# many alternative paths.
class Maze2(search.Problem):
    """Search problem over an adjacency-dict maze.

    ``maze`` maps each node to a dict of {neighbour: step_cost}; the
    actions available from a state are simply its neighbours.
    """

    def __init__(self, initial, goal, maze):
        self.maze = maze
        self.initial = initial
        self.goal = goal

    def actions(self, state):
        # The legal moves are exactly the current node's neighbours.
        return self.maze[state].keys()

    def result(self, state, action):
        # Moving toward a neighbour lands exactly on that neighbour.
        return action

    def goal_test(self, state):
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        # Accumulate the edge weight from state1 to state2.
        return c + self.maze[state1][state2]

    def h(self, node):
        # Trivial heuristic: 0 at the goal, 1 everywhere else.
        return 0 if self.goal_test(node.state) else 1
# Problem definition for maps with coordinates.  Kept separate from Maze2
# (rather than merged) because of the extra ``location`` attribute.
from grid import distance


class Map4(search.Problem):
    """Like Maze2, but each node has (x, y) coordinates, enabling a
    straight-line-distance heuristic."""

    def __init__(self, initial, goal, map2, location):
        self.map2 = map2
        self.location = location
        self.initial = initial
        self.goal = goal

    def actions(self, state):
        # The legal moves are exactly the current node's neighbours.
        return self.map2[state].keys()

    def result(self, state, action):
        return action

    def goal_test(self, state):
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        # Accumulate the edge weight from state1 to state2.
        return c + self.map2[state1][state2]

    def h(self, node):
        # Straight-line distance from the node's coordinates to the goal's.
        return distance(self.location[node.state], self.location[self.goal])
# Instantiate the maze/labyrinth problems defined above.
maze_puzzle2 = Maze2('Start', 'Finish', maze_path)
maze_puzzle2.label = 'Maze'
Labyrinth_puzzle = Maze2('Start','Finish', Labyrinth_path)
Labyrinth_puzzle.label = 'Labyrinth'
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
# Puzzle using coordinates (straight-line heuristic via Map4).
madison_puzzle4 = Map4('SpringCreek','Jackson', madison_map.dict, madison_map.locations)
madison_puzzle4.label = 'Madison1 w/ Coordinates'
madison_puzzle4.description = 'Coordinates'
# The list of problems the course harness will run; commented entries are
# deliberately excluded.
mySearches = [
    madison_puzzle,
    # romania_puzzle,
    # switch_puzzle,
    madison_puzzle1,
    madison_puzzle4,
    maze_puzzle2,
    Labyrinth_puzzle
]
# Unfinished custom search method, left commented out (never completed).
#def The_Shining(problem):
#    node = search.Node(problem.initial)
#    count = 0
#    while not problem.goal_test(node.state):
#        for child in node.expand(problem):
#            count += 1
#            bob8=child
#            currentnode = child.expand(problem)
#            if count == 50:
#                return currentnode.state
        #if problem.goal_test(child.state):
           # return child
# Custom search methods for the harness; empty since The_Shining above was
# never finished.
mySearchMethods = [
    #The_Shining(maze_puzzle2)
]
2dce03ac1b8aa8accf5831ed6fca7012dca3f34d | Python | suyanzhou626/UNet-Zoo | /torchlayers.py | UTF-8 | 3,105 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | """Custom layers with activation and norm for code readability"""
import torch
import torch.nn as nn
import revtorch as rv
class Conv2D(nn.Module):
    """A Conv2d followed by normalization and activation (order configurable).

    NOTE(review): the ``padding`` argument is accepted but immediately
    overwritten from ``kernel_size`` (1 for 3x3 kernels, 0 otherwise) --
    kept as-is to preserve existing behaviour; confirm whether any caller
    relies on passing a custom padding.
    """

    def __init__(self, input_dim, output_dim, kernel_size=3, stride=1, padding=1, activation=torch.nn.ReLU, norm=torch.nn.BatchNorm2d,
                 norm_before_activation=True):
        super(Conv2D, self).__init__()
        # Padding is derived from the kernel size, shadowing the parameter.
        padding = 1 if kernel_size == 3 else 0
        ops = [nn.Conv2d(input_dim, output_dim, kernel_size=kernel_size, stride=stride, padding=padding)]
        post = [norm(num_features=output_dim, eps=1e-3, momentum=0.01), activation()]
        if not norm_before_activation:
            # Activation first, normalization second.
            post.reverse()
        ops.extend(post)
        self.convolution = nn.Sequential(*ops)

    def forward(self, x):
        return self.convolution(x)
class Conv2DSequence(nn.Module):
    """A chain of ``depth`` Conv2D blocks with shared settings.

    The first block maps input_dim -> output_dim; every subsequent block
    keeps output_dim.  (``norm_before_activation`` is accepted but, as in
    the original, not forwarded to the Conv2D blocks.)
    """

    def __init__(self, input_dim, output_dim, kernel=3, depth=2, activation=torch.nn.ReLU, norm=torch.nn.BatchNorm2d, norm_before_activation=True):
        super(Conv2DSequence, self).__init__()
        assert depth >= 1
        pad = 1 if kernel == 3 else 0
        dims = [input_dim] + [output_dim] * depth
        stages = [
            Conv2D(dims[k], dims[k + 1], kernel_size=kernel, padding=pad, activation=activation, norm=norm)
            for k in range(depth)
        ]
        self.convolution = nn.Sequential(*stages)

    def forward(self, x):
        return self.convolution(x)
class ReversibleSequence(nn.Module):
    """A revtorch reversible sequence of convolution blocks.

    An initial 1x1 Conv2D adapts the channel count whenever input_dim
    differs from output_dim; each reversible block splits the channels in
    half for its F and G functions (which must map an input to an output
    of the same shape).
    """

    def __init__(self, input_dim, output_dim, reversible_depth=3, kernel=3):
        super(ReversibleSequence, self).__init__()
        if input_dim != output_dim:
            self.inital_conv = Conv2D(input_dim, output_dim, kernel_size=1)
        else:
            self.inital_conv = nn.Identity()
        half = output_dim // 2
        # Each reversible block holds two shape-preserving conv functions.
        blocks = [
            rv.ReversibleBlock(
                nn.Sequential(Conv2D(half, half, kernel_size=kernel, padding=1)),
                nn.Sequential(Conv2D(half, half, kernel_size=kernel, padding=1)),
            )
            for _ in range(reversible_depth)
        ]
        # Pack all reversible blocks into a single reversible sequence.
        self.sequence = rv.ReversibleSequence(nn.ModuleList(blocks))

    def forward(self, x):
        x = self.inital_conv(x)
        return self.sequence(x)
| true |
2f23f6f4cebc3d00006ff743072195229120376a | Python | JeroenMerks/BAPGC | /pdf_generator.py | UTF-8 | 14,505 | 3.515625 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Melissa van Wieringen
# s1079422
# Python PDF generator
# Last changes: 2 februari 2016
# Imports
import pickle
import re
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import mm
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak, \
Image
# Function that unpacks a pickled object from disk and returns it.
def unpack_pickle(pickle_object_location):
    """Load and return the Python object pickled at *pickle_object_location*.

    Uses a context manager so the file handle is closed even on error;
    the original opened the file and never closed it.
    """
    with open(pickle_object_location, "rb") as pickle_file_object:
        return pickle.load(pickle_file_object)
# Generator class used to create PDF files with ReportLab.
# NOTE: this module is Python 2 (print statements below).
class generator:
    # Init: set up the output document, the (empty) story list and the
    # paragraph styles used for the three header levels.
    def __init__(self):
        # Create a new document (fixed output name BAPGC.pdf).
        self.doc = SimpleDocTemplate("BAPGC.pdf", pagesize=letter,
                                     rightMargin=72, leftMargin=72,
                                     topMargin=72, bottomMargin=18)
        # Create a new story list (ReportLab flowables to render).
        self.story = []
        # Create a new StyleSheet to register custom styles on.
        self.styles = getSampleStyleSheet()
        self.styles.add(ParagraphStyle(name='Header1', fontSize=18, leading=24,
                                       spaceAfter=10))
        self.styles.add(ParagraphStyle(name='Header2', fontSize=16, leading=24,
                                       spaceAfter=10))
        self.styles.add(ParagraphStyle(name='Header3', fontSize=14, leading=24,
                                       spaceAfter=10))
    # add_pagenumber: receives the canvas and the document, and draws a
    # page number at the bottom of the page (used as onLaterPages callback).
    def add_pagenumber(self, canvas, doc):
        # Get the current page number.
        pagina_nummer = canvas.getPageNumber()
        text = "Pagina %s" % pagina_nummer
        # Draw the number at the bottom right of the page.
        canvas.drawRightString(200 * mm, 20 * mm, text)
    # generate_page: creates one page from a chapter list.  It recognizes
    # the following keywords inside the list entries:
    # __SPACER_y_xxx  where y is the width and xxx the height
    #   (y is always a single digit!)
    # __PICTURE_x_y__zz  where x is the height, y the width
    #   and zz the url (in the current dir!)
    #   Note: always put two underscores (_) between the width and the url!
    # __HEADER_y_xx  where y is 1, 2 or 3
    #   (1 = large header; 2 = medium header; 3 = small header)
    #   and xx is the header text.
    def generate_page(self, chapter):
        # Check that chapter actually has content and is a list.
        if len(chapter) <= 0 or type(chapter) != list:
            print "FOUTMELDING: Je geeft een lege pagina aan generate_page."
            return False
        # Append the content to the story; keyword entries are converted to
        # their flowable and skipped from normal paragraph rendering.
        for paragraph in chapter:
            if "__SPACER" in paragraph:
                # Add a spacer: width from position 9, height from 11 on.
                self.story.append(
                    Spacer(float(paragraph[9]), int(paragraph[11:])))
                # Make sure this parameter itself is not printed.
                continue
            elif "__HEADER" in paragraph:
                if int(paragraph[9]) in [1, 2, 3]:
                    temp_var = "Header" + paragraph[9]
                    # Render this line with the matching header style.
                    self.story.append(
                        Paragraph(paragraph[11:], self.styles[temp_var]))
                else:
                    print "FOUTMELDING: Je hebt geen 1, 2 of 3 op positie 9 " \
                          "van " + paragraph + " staan!"
                    return False
                # Make sure this parameter itself is not printed.
                continue
            elif "__PICTURE" in paragraph:
                # Everything after the '__PICTURE_' prefix.
                x = paragraph[10:]
                # Split on single underscores: width is first, height second.
                splitted = x.split("_")
                width = float(splitted[0])
                height = float(splitted[1])
                # Split on double underscores to recover the image name.
                name = re.compile("__").split(paragraph)
                name = name[-1]
                # Create a new image object with the given url,
                # width and height (in millimetres).
                im = Image(name, width * mm, height * mm)
                self.story.append(im)
                # Make sure this parameter itself is not printed.
                continue
            self.story.append(Paragraph(paragraph, self.styles["Normal"]))
        # After every page (chapter) a PageBreak starts a new page.
        self.story.append(PageBreak())
    # generate_pdf: builds the PDF from the accumulated story.
    def generate_pdf(self):
        # Build the pdf with the story.
        self.doc.build(self.story, onLaterPages=self.add_pagenumber)
# main: builds the PDF with the help of the generator class.
def main(pickle_file_location):
    """Unpack the pipeline's pickled results list and render the report.

    ``results`` is indexed positionally (authors, student number, pathway
    id, images, motif, gene list, ...); see the commented-out example
    below for the expected layout.
    NOTE(review): generate_page returns False on error, but that return
    value is ignored here -- rendering simply continues.
    """
    results = unpack_pickle(pickle_file_location)
    # results = [
    #     'Melissa van Wieringen, Jeroen Merks, Koen van Diemen, Rick de Graaf en Christian van der Niet',
    #     's1072614', 'hsa04915', 'data/results/met_pathway_picture/hsa04915.png',
    #     'MA0481.1.pfm',
    #     ['LOC105376066', 'DAPK3', 'SMARCA4', 'VMAC', 'DNASE2', 'MINK1',
    #      'C17orf53', 'ARMC6', 'LOC101928572', 'COL26A1', 'DAND5', 'ZSWIM4',
    #      'FTL', 'PRR12', 'MIR3188', 'FAHD1', 'KIF1C', 'ARRDC1', 'RNF207',
    #      'PUS1', 'VPS37D', 'VARS', 'SCAMP4', 'KRI1', 'GIPR', 'TALDO1', 'CARNS1',
    #      'RPL23AP5', 'LOC105376712', 'DCAF15', 'SLC12A4', 'CCDC151', 'PDLIM7',
    #      'ADAT3', 'RASA4CP', 'MIR4516', 'CENPM', 'LOC100419925', 'ZNF414',
    #      'BTBD2', 'TJP1P', 'BCKDK', 'RPS15P9', 'LOC101928543', 'PKN1', 'ZNF487',
    #      'LTC4S', 'ATAD3B', 'MAP2K2', 'SELV', 'ZNF101', 'LOC100507373', 'CDC34',
    #      'RPL32P34', 'DGCR8', 'MIR5090', 'MIR1281', 'LOC100129352', 'KLF1',
    #      'CORO1A', 'FBN3', 'EP300', 'MIR1199', 'MIR6798', 'C19orf43', 'PPP1R26',
    #      'LOC100419924', 'PLP2', 'SPG7', 'APOBEC3D', 'SPIRE2', 'APBA3',
    #      'CATSPERD', 'LOC105370690', 'TSSK6', 'C16orf90', 'LOC105372266',
    #      'APC2', 'NDUFA13', 'ZDHHC12', 'ATP1A3', 'GALR3', 'MAPK8IP2', 'CARM1',
    #      'GMIP'], 'SMARCA4', 0.0,
    #     'data/results/proteins/protein_charge_distribution.png',
    #     'data/results/phylo_tree/phylogenetic_tree_paralogs.png',
    #     'data/results/intron_exon_scatter_plot/intron_exon_scatter_plot.png']
    ########################################
    # THE PAGES THAT GO INTO THE PDF BELOW #
    ########################################
    front_page = [
        "__HEADER_1_Onderzoek coregulatie genen buiten metabolische route:",
        "__HEADER_2_" + results[2], "__PICTURE_150_150__" + results[3],
        "KEGG representatie van de metabole route " + results[2],
        "__SPACER_1_100", "Auteurs: " + results[0],
        "Studentnummer: " + results[1], "Datum: Februari 2016"]
    second_page = ["__HEADER_1_Inleiding",
                   "Dit is een automatisch gegenereerd rapport van een "
                   "analyse op coregulatie van genen buiten de volgende "
                   "metabolische route: " + results[2] + ".",
                   "De conclusie en discussie van de resultaten zijn mogelijk "
                   "later met de hand toegevoegd.", "__SPACER_1_10",
                   "Het doel van de pipeline is het vinden van genen buiten de"
                   " pathway die door eenzelfde transcriptiefactor worden "
                   "gereguleerd.",
                   "Aan de hand van 5 onderzoeksvragen worden deze "
                   "medegereguleerde genen verder onderzocht.",
                   "Achtergrondinformatie over de gekozen metabolische route", ]
    third_page = ["__HEADER_1_Materialen en methoden",
                  "114 Motifs van transcriptiefactoren gerelateerd aan de "
                  "mens, zoals bekend bij de laatste JASPAR CORE datebase ("
                  "minus motifs MA0528.1 en MA0050.2 vanwege overrepresentatie "
                  "in eerder gedane tests), ",
                  "zijn met een p-value van 0.0001 gescant over álle "
                  "promotorregionen (6000 basenparen vóór de start van het "
                  "gen en 500 ná) van alle genen die volgens KEGG gerelateert "
                  "zijn aan de opgegeven metabole route.",
                  "Er is ervoor gekozen om de beste transcriptiefactor te "
                  "selecteren om over de promotorregionen van de rest van het "
                  "genoom te scannen. Dit om het onderzoek duidelijk af te "
                  "kaderen en er met minder ruis in de resultaten betere "
                  "conclusies te trekken zijn op mogelijke corelaties tussen "
                  "de pathway en gevonden genen waar motifs van de "
                  "transcriptiefactoren sterk op hitten.",
                  "Het criterium voor het bepalen van de beste coregulerende "
                  "transcriptiefactor is een zo groot mogelijke overlap van "
                  "hits over alle promotorregionen van de pathway.",
                  "Indien meerdere transcriptiefactoren een coverage van 100%"
                  " bleken te hebben, werd de transcriptiefactor met "
                  "cumulatief het hoogste aantal hits genomen.",
                  "__SPACER_1_10",
                  "De resultaten van de scan op het hele genoom zijn "
                  "vervolgens stringent geselecteerd, zodat een handje vol "
                  "genen over zouden blijven. Hier is voor gekozen zodat "
                  "vervolgonderzoek kan worden gedaan op basis van resultaten "
                  "die sterk uit de analyse naar voren zijn gekomen en het "
                  "waarschijnlijk waard zijn om met de hand verder te "
                  "onderzoeken.", "__SPACER_1_10",
                  "Bij een p-value van 0.0001 en een minimum aantal hits "
                  "van 100 van de transcriptiefactor in kwestie, kwam dit "
                  "neer op " + str(len(results[5])) + " promotors."]
    fourth_page = ["__HEADER_1_Resultaten",
                   "De volgende motif van transcriptiefactor " + results[
                       4] + " had de hoogste coverage over de "
                            "promotorregionen van de pathway.",
                   "De gensymbolen van de volgende genen hadden minimaal 70 "
                   "hits van deze transcriptiefactor op de promotorregio: " + str(
                       results[5]), "__SPACER_1_10",
                   "Van de 50 beste medegereguleerde genen is een local "
                   "multiple sequence alignment gemaakt met behulp van "
                   "ClustalW 2.1, gebruikmakende van de standaard parameters.",
                   "Alle genen korter dan gen: " + results[
                       6] + " vormden een geconserveerde regio van" + str(
                       results[7]) + "% ten opzichte van dat gen.",
                   "__SPACER_1_10",
                   "De 10 beste medegereguleerde genen zijn getransleert naar "
                   "hun respectievelijke eiwit met de SeqIO.translate("
                   "cds=True) functie van BioPython 1.66.",
                   "Niet van elk gen dat in de pipeline was onderzocht was de"
                   " CDS (CoDing Sequence) bekend. Indien dit het geval was "
                   "dan werd het eerstvolgende "
                   "(minder hoor scorende) gen uit de gesorteerde lijst van "
                   "hoog scorende genen geselecteerd.", "__SPACER_1_10",
                   "Eiwitten bestaan uit aminozuren die elk of hydrofiel, "
                   "hydrofoob of neutraal zijn.",
                   "Van de 10 eiwitten is een staafdiagram, te zien in Fig. "
                   "1, gemaakt waar de verhoudingen van de hydrofobiciteit "
                   "van de aminozuren uit af te lezen is.",
                   "__PICTURE_120_80__" + results[8],
                   "Fig. 1, staafdiagram van de verhoudingen van de "
                   "hydrofobiciteit van de aminozuren van de 10 eiwitten.",
                   "__SPACER_1_10",
                   "Voor elk van de 10 beste medegereguleerde genen zijn de 4 "
                   "meest verwante paraloge genen bepaald.",
                   "Deze paraloge genen zijn vervolgens in een fylogenetische "
                   "boom, te zien in Fig. 2, uiteengezet.",
                   "__PICTURE_80_80__" + results[9],
                   "Fig. 2, de phylogenetische boom van de 4 meest verwante "
                   "paraloge genen per gen van de 10 beste genen, gemaakt met "
                   "ClustalW.", "__SPACER_1_10",
                   "Van de 20 beste medegereguleerde genen zijn de cumulatieve"
                   " intron- en exonlengte in een scatterplot, te zien in "
                   "Fig. 3, uiteen gezet.",
                   "Ook van deze genen was het niet altijd bekend waar de"
                   " exonen (en dus ook intronen) zich bevonden. Deze zijn "
                   "uit de scatterplot weggelaten.", "__SPACER_1_10",
                   "__PICTURE_150_120__" + results[10],
                   "Fig. 3, een scatterplot van de de cumulatieve intron- en "
                   "exonlengtes van de 20 beste genen."]
    fifth_page = ["__HEADER_1_Conclusie & Discussie"]
    sixth_page = ["__HEADER_1_Referenties"]
    print "Generating PDF of results..."
    # Create a new generator.
    pdf_maker = generator()
    # Generate all the pages, one by one.
    # At the end the PDF is created from all accumulated pages.
    try:
        pdf_maker.generate_page(front_page)
        pdf_maker.generate_page(second_page)
        pdf_maker.generate_page(third_page)
        pdf_maker.generate_page(fourth_page)
        pdf_maker.generate_page(fifth_page)
        pdf_maker.generate_page(sixth_page)
        pdf_maker.generate_pdf()
    except IOError:
        print "FOUTMELDING: Je hebt ergens geen juiste bestandsnaam meegegeven."
    except AttributeError:
        print "FOUTMELDING: Attribuut error."
    except ValueError:
        print "FOUTMELDING PDF generator is gestopt door een fout."
    print "PDF generator is done working. Thank you for your cooperation and " \
          "come again!"
# main()
| true |
fa1883065e01db3447a7e0ca9295b68e46d8f7c0 | Python | Abdullahr29/Y1-TA-Scripts | /sinplotter.py | UTF-8 | 561 | 3.78125 | 4 | [] | no_license | import math
# Plot parameters: x advances one step per printed row; one full sine
# period spans ``period`` rows.
x = 0
period = 30.0
# NOTE(review): shadows the more precise math.pi with a rounded constant.
pi = 3.1415927
# Draw a 21-character horizontal rule above the plot.
for i in range(-10,11):
    print("-", end = "")
print("\n", end = "")
def printLine(y):
    """Print one 21-character plot row (no trailing newline).

    Columns run i = -10..10: '*' marks round(y * 10), '|' marks the
    vertical axis at column 0, and '.' fills everything else.  The '*'
    takes precedence over the axis character.
    """
    star_col = round(y * 10)
    for col in range(-10, 11):
        if col == star_col:
            mark = "*"
        elif col == 0:
            mark = "|"
        else:
            mark = "."
        print(mark, end="")
# Main loop: advance x forever, printing one row of sin(2*pi*x/period) per
# iteration.  NOTE: this loop never terminates; stop with Ctrl-C.
while True:
    y = math.sin((2*pi/period)*x)
    x = x + 1
    printLine(y)
    print("\n", end = "")
| true |
c93e15b7e75e1a5788aa4de28289dc37f393d2c5 | Python | vikasptl07/DataBricks | /Notebooks/Learning-Spark/Python/Chapter11/11-4 Distributed IoT Model Training.py | UTF-8 | 4,459 | 3.171875 | 3 | [] | no_license | # Databricks notebook source
# MAGIC
# MAGIC %md
# MAGIC ## Distributed IoT Model Training with the Pandas Function API
# MAGIC
# MAGIC This notebook demonstrates how to scale single node machine learning solutions with the pandas function API.
# COMMAND ----------
# MAGIC %md
# MAGIC Create dummy data with:
# MAGIC - `device_id`: 10 different devices
# MAGIC - `record_id`: 10k unique records
# MAGIC - `feature_1`: a feature for model training
# MAGIC - `feature_2`: a feature for model training
# MAGIC - `feature_3`: a feature for model training
# MAGIC - `label`: the variable we're trying to predict
# COMMAND ----------
import pyspark.sql.functions as f
# Synthetic IoT data: 1M records spread over 10 devices (device_id =
# id % 10), three random features, and a label that is the sum of the
# features plus uniform noise -- so a per-device regressor can fit it.
df = (spark.range(1000*1000)
  .select(f.col("id").alias("record_id"), (f.col("id")%10).alias("device_id"))
  .withColumn("feature_1", f.rand() * 1)
  .withColumn("feature_2", f.rand() * 2)
  .withColumn("feature_3", f.rand() * 3)
  .withColumn("label", (f.col("feature_1") + f.col("feature_2") + f.col("feature_3")) + f.rand())
)
display(df)
# COMMAND ----------
# MAGIC %md
# MAGIC Define the return schema
# COMMAND ----------
import pyspark.sql.types as t
# Schema of the one-row-per-device summary returned by train_model.
trainReturnSchema = t.StructType([
  t.StructField('device_id', t.IntegerType()), # unique device ID
  t.StructField('n_used', t.IntegerType()), # number of records used in training
  t.StructField('model_path', t.StringType()), # path to the model for a given device
  t.StructField('mse', t.FloatType()) # metric for model performance
])
# COMMAND ----------
# MAGIC %md
# MAGIC Define a function that takes all the data for a given device, train a model, saves it as a nested run, and returns a DataFrame with the above schema.
# MAGIC
# MAGIC We are using MLflow to track all of these models.
# COMMAND ----------
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
def train_model(df_pandas: pd.DataFrame) -> pd.DataFrame:
  '''
  Train one RandomForestRegressor on all records of a single device.

  Designed for groupby("device_id").applyInPandas: df_pandas holds one
  device's rows plus a constant 'run_id' column naming the parent
  MLflow run.  The fitted model and its training-set MSE are logged as
  a nested MLflow run, and a one-row DataFrame matching
  trainReturnSchema (device_id, n_used, model_path, mse) is returned.
  '''
  # Pull metadata
  device_id = df_pandas['device_id'].iloc[0]
  n_used = df_pandas.shape[0]
  run_id = df_pandas['run_id'].iloc[0] # Pulls run ID to do a nested run
  # Train the model
  X = df_pandas[['feature_1', 'feature_2', 'feature_3']]
  y = df_pandas['label']
  rf = RandomForestRegressor()
  rf.fit(X, y)
  # Evaluate the model
  predictions = rf.predict(X)
  mse = mean_squared_error(y, predictions) # Note we could add a train/test split
  # Resume the top-level training
  with mlflow.start_run(run_id=run_id):
    # Create a nested run for the specific device
    with mlflow.start_run(run_name=str(device_id), nested=True) as run:
      mlflow.sklearn.log_model(rf, str(device_id))
      mlflow.log_metric("mse", mse)
  # `run` stays bound after the with-block, so its run ID is still readable.
  artifact_uri = f"runs:/{run.info.run_id}/{device_id}"
  # Create a return pandas DataFrame that matches the schema above
  returnDF = pd.DataFrame([[device_id, n_used, artifact_uri, mse]],
                          columns=["device_id", "n_used", "model_path", "mse"])
  return returnDF
# COMMAND ----------
# MAGIC %md
# MAGIC Use applyInPandas to grouped data
# COMMAND ----------
# Open (and immediately close) the parent MLflow run; train_model later
# resumes it by ID so every device's model lands under one parent run.
with mlflow.start_run(run_name="Training session for all devices") as run:
    # Use run_id: run_uuid is a deprecated alias of the same value, and
    # train_model already reads run.info.run_id for the nested runs.
    run_id = run.info.run_id

# One grouped-pandas training call per device; yields the per-device
# summary rows described by trainReturnSchema.
modelDirectoriesDF = (df
    .withColumn("run_id", f.lit(run_id)) # Add run_id
    .groupby("device_id")
    .applyInPandas(train_model, schema=trainReturnSchema)
)

# Attach each device's model metadata (model_path, mse, ...) to its records.
combinedDF = (df
    .join(modelDirectoriesDF, on="device_id", how="left")
)
display(combinedDF)
# COMMAND ----------
# MAGIC %md
# MAGIC Define a function to apply the model. *This needs only one read from DBFS per device.*
# COMMAND ----------
# Schema of apply_model's per-record output.
# NOTE(review): record_id originates from spark.range (LongType);
# declaring IntegerType here presumably relies on applyInPandas
# coercion -- verify.
applyReturnSchema = t.StructType([
  t.StructField('record_id', t.IntegerType()),
  t.StructField('prediction', t.FloatType())
])
def apply_model(df_pandas: pd.DataFrame) -> pd.DataFrame:
  '''
  Score one device's records with that device's trained model.

  Intended for groupby("device_id").applyInPandas: every row in the
  group carries the same model_path, so the model is loaded once per
  group and applied to all of its rows.
  '''
  uri = df_pandas['model_path'].iloc[0]
  model = mlflow.sklearn.load_model(uri)
  features = df_pandas[['feature_1', 'feature_2', 'feature_3']]
  return pd.DataFrame({
    "record_id": df_pandas['record_id'],
    "prediction": model.predict(features)
  })
# Score every record: each device group loads its own model exactly once.
predictionDF = combinedDF.groupby("device_id").applyInPandas(apply_model, schema=applyReturnSchema)
display(predictionDF)
| true |
4c27587f9c92b783953c56ce699546efee645371 | Python | Haouach11/Oops | /assignment23.py | UTF-8 | 343 | 3.234375 | 3 | [] | no_license | class unique_subsets:
def sub_sets(self, set):
return self.sub_sets_Recur([], sorted(set))
def sub_sets_Recur(self, current, set):
if set:
return self.sub_sets_Recur(current, set[1:]) + self.sub_sets_Recur(current + [set[0]], set[1:])
return [current]
print(unique_subsets().sub_sets([4, 5, 6]))
| true |
4eaf40a386f8702c4378735c96f5d40bcce36af4 | Python | prudhvireddym/CS5590-Python | /Source/Python/ICP 3/Web Scraping.py | UTF-8 | 464 | 3.390625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib.request
import os
#imported libraries
# Wikipedia article to scrape.
url="https://en.wikipedia.org/wiki/Deep_learning"
# Fetch the page and parse it (urlopen returns a file-like response
# object, which BeautifulSoup reads directly).
source_code = urllib.request.urlopen(url)
plain_text = source_code
soup = BeautifulSoup(plain_text, "html.parser")
#Printing the page title
print(soup.title.string)
#finding the links in the page with 'a' tag and return the attribute 'href'
for link in soup.find_all('a'):
    print(link.get('href'))
| true |
f31213c38b23488dfc4e8f3ac5a59b8be34d0720 | Python | Mordeaux/CorpusGUI | /POS.py | UTF-8 | 1,223 | 2.671875 | 3 | [] | no_license | import random, re
from Corpus import *
#instead of p(label|word) (discriminative model) we will learn p(word, label) which is equal to p(label)p(word|label) (Generative Model)
class GenerativeModel:
    """Generative POS model: learn p(word, label) = p(label) * p(word|label).

    On construction it flattens the corpus's POS-annotated text into
    "word/TAG" tokens, one sentence per line, and splits the sentences
    ~90/10 into `self.training` / `self.testing` strings.
    """

    def __init__(self, corpus):
        # corpus must expose .worksSet, each work carrying annotations
        # under work.anno['pos'][0] as lists of (word, tag) pairs
        # -- TODO confirm against the Corpus module.
        self.corpus = corpus
        self.getSents()

    def getSents(self):
        """Build the training/testing sentence strings from the corpus."""
        # Collect every "word/TAG" token; accumulate in a list to avoid
        # quadratic string concatenation on large corpora.
        pieces = []
        for work in self.corpus.worksSet:
            for line in work.anno['pos'][0]:
                for word in line:
                    pieces.append(' ' + word[0] + '/' + word[1] + ' ')
        # Collapse all runs of whitespace to single spaces.
        lines = ' '.join(''.join(pieces).split())
        # Insert a newline after each sentence-final "./." token.
        # BUG FIX: re.sub's 4th positional argument is `count`, so the
        # original re.sub(..., re.U) silently capped replacements at 32
        # (the integer value of re.U); pass it as `flags` instead.
        lines = re.sub(r'(\./.? ?)+', r'./.\n', lines, flags=re.U)
        training = []
        testing = []
        for line in lines.split('\n'):
            #line = ' '.join(line.split())
            # ~10% of sentences go to the held-out test split.
            rand = random.randrange(0, 10)
            if rand == 9:
                testing.append(line + '\n')
            else:
                training.append(line + '\n')
        self.training = ''.join(training)
        self.testing = ''.join(testing)

    def emmissions(self):
        """Emission counts for p(word|label) -- unfinished stub.

        NOTE(review): `text` is not defined in this scope, so calling
        this raises NameError; left untouched pending the intended
        data source.
        """
        eDict = {}
        for word in text.split():
            pass
# Load the pickled English corpus; the context manager closes the file
# handle promptly (the original bare open() leaked it), then build the
# model.  NOTE(review): `pickle` is presumably supplied by the
# `from Corpus import *` star-import -- confirm.
with open('corpus.p', 'rb') as corpus_file:
    rgn = pickle.load(corpus_file)['English']
model = GenerativeModel(rgn)
| true |
71a41c6e3a41657f6db9e83ec10a8cdfa594c3f3 | Python | kaursim722/beginners_projects | /TicTacToe_draft1.py | UTF-8 | 1,863 | 4 | 4 | [] | no_license |
class Game:
    """Draft tic-tac-toe game: 1 = X (player 1), -1 = O (player 2), 0 = empty."""
    #row = 3
    #col = 3
    #game_board = [[0,0,0],[0,0,0],[0,0,0]]

    def __init__(self, r = 3, c= 3):
        """Create an r x c game.  The starting board is the draft's
        preset test pattern -- presumably placeholder data; call
        clear_board() to get an empty board."""
        self.row = r
        self.col = c
        self.u1 = ''
        self.u2 = ''
        self.game_board = [[1,0,-1],[0,1,0],[-1,1,0]]

    def set_name(self, user1, user2):
        """Record the two players' names."""
        self.u1 = user1
        self.u2 = user2

    def play_game(self):
        # BUG FIX: the draft omitted `self`, so instance calls raised
        # TypeError.  Still a stub.
        print("playing")

    def menu(self):
        """Show the mode menu, read one choice from stdin and dispatch."""
        print("1. Level 1: multi-player\n")
        print("2. Level 2: against computer\n")
        print("3. Level 3: against computer competitive\n")
        print("4. Exit\n\n")
        raw = input("Enter an option from above: ")
        # BUG FIX: input() always returns str, so the draft's
        # `type(option) != int` test rejected every entry and the later
        # `option > 4` comparison raised TypeError.  Convert explicitly.
        try:
            option = int(raw)
        except ValueError:
            print("Please enter an integer number\n")
            return
        if option > 4:
            print("Please pick an option from the given list\n")
        if option == 1:
            name = input("User1 name: ")
            name2 = input("User2 name: ")
            self.set_name(name, name2)
            print("Hi, "+name+" and "+name2, end= "\n")
        elif option == 2:
            print("Easy mode")
        elif option == 3:
            print("Hard mode")
        elif option == 4:
            print("Thank you for playing")

    def __str__(self):
        """Render the board as rows of |_X_|, |_O_| or |___| cells."""
        printer = ''
        for i in range(self.row):
            for j in range(self.col):
                printer += "|"
                if (self.game_board[i][j] == 0):
                    printer += "___"
                elif (self.game_board[i][j] == 1):
                    printer += "_X_"
                elif (self.game_board[i][j] == -1):
                    printer+= "_O_"
            printer += "|\n"
        return printer

    def clear_board(self):
        """Reset the board to all-empty cells.

        BUG FIX: the draft assigned a *global* game_board and built it
        with [[0]*c]*r, which aliases one row object r times so a single
        move would mark a whole column.
        """
        self.game_board = [[0] * self.col for _ in range(self.row)]

    def move(self, user, row, col):
        """Place user's mark at (row, col): +1 for player 1, -1 for player 2.

        BUG FIX: the draft tuple-indexed an undefined global
        (`game_board[row, col]`); use the instance board instead.
        """
        if user == self.u1:
            self.game_board[row][col] = 1
        elif user == self.u2:
            self.game_board[row][col] = -1
#def play():
# enter the whole game print
# check for tile filled or not
    # should be an infinite loop?
def main():
    # Build a game, run the interactive menu once, then show the board.
    tester = Game()
    tester.menu()
    print(tester)
    #tester.printboard()
main() | true |
f50f927109bd6d2ce72e28b4b317442b8eb3322e | Python | acspike/PlasmaControl | /PlasmaControl.py | UTF-8 | 5,596 | 2.515625 | 3 | [] | no_license | from Tkinter import *
import time
from serial import Serial
# Serial ports wired to the left and right plasma panels.
COM_LEFT = 'COM1'
COM_RIGHT = 'COM2'
# Protocol command strings per category, keyed by the label shown on
# the UI buttons.  (Dict literals replace the original dict([...]) calls.)
COMMANDS = {}
COMMANDS['Power'] = {'On': 'PON', 'Off': 'POF'}
COMMANDS['Source'] = {'Video': 'IIS:VID', 'PC VGA': 'IIS:PC1'}
COMMANDS['Mode'] = {'Normal': 'DAM:NORM', 'Zoom': 'DAM:ZOOM',
                    'Full': 'DAM:FULL', 'Justified': 'DAM:JUST', 'Auto': 'DAM:SELF'}
class FakePort(object):
    """In-memory stand-in for a serial.Serial link to a plasma panel.

    Commands arrive as STX+mode+ETX frames.  A recognized state change
    is acknowledged with STX + the mode's first three letters + ETX, an
    unknown mode with STX+'ER401'+ETX, and a command that matches the
    current state with silence.  Replies are buffered and handed out
    one character per read(), like a real serial port.
    """

    def __init__(self, *args, **kwargs):
        self.port = kwargs.get('port', 'NoName')
        self.timeout = kwargs.get('timeout', 1)
        # Every command string the fake panel understands.
        self.modes = {'PON', 'POF',
                      'IIS:VID', 'IIS:PC1',
                      'DAM:NORM', 'DAM:ZOOM', 'DAM:FULL',
                      'DAM:JUST', 'DAM:SELF'}
        # Current state per command family, keyed by its 2-char prefix.
        self.current = {'PO': 'POF', 'DA': 'DAM:FULL', 'II': 'IIS:PC1'}
        self.buf = ""

    def write(self, data):
        """Accept one framed command and queue the panel's reply."""
        print(self.port + ': ' + repr(data))
        mode = data[1:-1]          # strip the STX/ETX framing bytes
        if mode not in self.modes:
            self.buf += '\x02ER401\x03'
            return
        family = mode[:2]
        if mode == self.current[family]:
            return                 # already in this state: reply with silence
        self.current[family] = mode
        self.buf += '\x02' + mode[:3] + '\x03'

    def read(self, *args, **kwargs):
        """Return the next buffered reply byte, or '' after a timeout."""
        if not self.buf:
            time.sleep(self.timeout)
            return ''
        val, self.buf = self.buf[0], self.buf[1:]
        return val
# Port implementation handed to Panel.port_open: keep FakePort for
# development, switch the comments to drive real hardware via pyserial.
# For testing
PORT = FakePort
#PORT = Serial
class Panel(object):
    """Driver for one plasma display reachable through a serial port.

    Tracks the panel's last known Power/Source/Mode state, mirrors it
    into *status_var* (a Tk StringVar-like object with .set()), and
    speaks the STX/ETX-framed command protocol defined in COMMANDS.
    """

    def __init__(self, port_name, status_var):
        self.port_name = port_name
        self.status_var = status_var
        # Last state we believe the panel is in; also used to replay
        # Source/Mode after a power-on.
        self.status = {'Power':'On', 'Source':'PC VGA', 'Mode':'Full'}
        self.port = None      # opened lazily on the first send

    def port_open(self):
        """Open the serial port on first use; return True when usable."""
        if self.port:
            return True
        try:
            # Open Serial Port
            self.port = PORT(port=self.port_name, timeout=1)
            return True
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            self.status_var.set('Error Opening\n' + self.port_name)
            return False

    def update_status(self):
        """Mirror the cached status dict into the status display."""
        var = ''
        for x in ['Power','Source','Mode']:
            var += x + ': ' + self.status[x] + '\n'
        self.status_var.set(var)

    def send(self, cat, val):
        """Send one command; after Power On, replay Source and Mode."""
        self._send(cat, val)
        if cat=='Power' and val=='On':
            self._send('Source',self.status['Source'])
            self._send('Mode',self.status['Mode'])

    def _send(self, cat, val):
        """Write one framed command and interpret the panel's reply.

        A reply echoing the command's 3-letter prefix confirms the new
        state; an empty reply means the panel was already in that
        state; anything else is reported via status_var.
        """
        if self.port_open():
            self.port.write('\x02' + COMMANDS[cat][val] + '\x03')
            byte = ''
            # Renamed from `buffer`, which shadowed a (Py2) builtin.
            reply = ''
            while True:
                byte = self.port.read()
                reply += byte
                if byte in ('\x03',''):
                    break
            if reply == '\x02' + COMMANDS[cat][val][:3] + '\x03':
                self.status[cat] = val
                self.update_status()
            elif byte == '' and reply == '':
                #no reply if status already set
                pass
            else:
                var = 'Error Setting\n' + cat + ' to ' + val
                self.status_var.set(var)
class Dispatcher(object):
    """Fan a command out to the left and/or right Panel.

    *panel_selection* is a Tk-variable-like object whose .get() returns
    'left', 'right' or 'both'.
    """

    def __init__(self, left_panel, right_panel, panel_selection):
        self.left_panel = left_panel
        self.right_panel = right_panel
        self.panel_selection = panel_selection

    def send(self, cat, val):
        """Deliver (cat, val) to whichever panel(s) are selected."""
        selection = self.panel_selection.get()
        targets = []
        if selection in ('left', 'both'):
            targets.append(self.left_panel)
        if selection in ('right', 'both'):
            targets.append(self.right_panel)
        for panel in targets:
            panel.send(cat, val)

    def make_sender(self, cat, val):
        """Return a callback (e.g. for a Button) that sends the given
        command when invoked; extra call arguments are ignored."""
        def sender(*args, **kwargs):
            self.send(cat, val)
        return sender
def make_frame(master, cat, dispatcher):
    """Build a labelled frame with one button per value in COMMANDS[cat];
    each button sends its command through *dispatcher* when pressed."""
    box = LabelFrame(master, text=cat, padx = 5, pady = 5)
    for value in COMMANDS[cat]:
        button = Button(box, text=value, command=dispatcher.make_sender(cat, value))
        button.pack(side=LEFT)
    return box
# --- Build the Tk UI: status readouts, panel selector, command frames ---
root = Tk()
root.title('Plasma Control Console')
# Shared Tk variables: which panel(s) receive commands, plus the status
# text displayed for each side.
PANEL_SELECTION = StringVar(root, value='both')
STATUS_LEFT = StringVar(root)
STATUS_RIGHT = StringVar(root)
panel_left = Panel(COM_LEFT, STATUS_LEFT)
panel_right = Panel(COM_RIGHT, STATUS_RIGHT)
dispatcher = Dispatcher(panel_left, panel_right, PANEL_SELECTION)
Label(root, text="Plasma Panel Control Console").grid(row=0, column=0, columnspan=4)
Message(root, textvariable=STATUS_LEFT).grid(row=1, column=0, sticky=E+W+N+S)
# Radio buttons (rendered as toggle buttons) choosing the target panel.
frame = Frame(root, pady = 5)
Radiobutton(frame, text="<- Left Panel", variable=PANEL_SELECTION, value='left', indicatoron=False).pack(fill=BOTH,expand=True, anchor=W)
Radiobutton(frame, text="<- Both Panels ->", variable=PANEL_SELECTION, value='both', indicatoron=False).pack(fill=BOTH,expand=True, anchor=W)
Radiobutton(frame, text=" Right Panel ->", variable=PANEL_SELECTION, value='right', indicatoron=False).pack(fill=BOTH,expand=True, anchor=W)
frame.grid(row=1, column=1, columnspan=2)
Message(root, textvariable=STATUS_RIGHT).grid(row=1, column=3, sticky=E+W+N+S)
# One frame of buttons per command category.
make_frame(root, 'Power', dispatcher).grid(row=2, column=0, columnspan=2, sticky=W)
make_frame(root, 'Source', dispatcher).grid(row=2, column=2, columnspan=2,sticky=E)
make_frame(root, 'Mode', dispatcher).grid(row=3, column=0, columnspan=4)
# Let the outer columns absorb extra width.
root.grid_columnconfigure(0, weight=2, minsize=100)
root.grid_columnconfigure(1, weight=1, minsize=50)
root.grid_columnconfigure(2, weight=1, minsize=50)
root.grid_columnconfigure(3, weight=2, minsize=100)
mainloop()
| true |
e8c4da8d2f17782999209a10e9c6ce250b516c66 | Python | brucekchung/learn-python | /codewars/codewars_2.py | UTF-8 | 990 | 4.03125 | 4 | [] | no_license | #Write a function, persistence, that takes in a positive parameter num and returns its multiplicative persistence, which is the number of times you must multiply the digits in num until you reach a single digit.
def persistence(n, counter = 0):
    """Multiplicative persistence: how many times the digits of *n*
    must be multiplied together before a single digit remains.

    *counter* is the running count, kept for compatibility with the
    original recursive signature.
    """
    while len(str(n)) > 1:
        n = multiply_all(n)
        counter += 1
    return counter
def multiply_all(input):
    """Return the product of the decimal digits of *input*.

    The parameter name `input` shadows the builtin but is kept for
    interface compatibility.
    """
    total = 1
    for digit in str(input):
        # Direct int() conversion: the original's int(float(digit))
        # round-trip through float was redundant for single digits.
        total *= int(digit)
    return total
print('solution: ', persistence(999))
#codewars solutions:
import operator
def persistence_1(n):
    """Codewars reference solution (iterative, operator.mul)."""
    # BUG FIX: `reduce` is a builtin only in Python 2; functools.reduce
    # exists on both Python 2 and 3, so import it locally.
    from functools import reduce
    i = 0
    while n>=10:
        n=reduce(operator.mul,[int(x) for x in str(n)],1)
        i+=1
    return i
def persistence_2(n):
    """Codewars reference solution (explicit digit-list variant)."""
    # BUG FIX: `reduce` is a builtin only in Python 2; import it from
    # functools so the function also runs on Python 3.
    from functools import reduce
    nums = [int(x) for x in str(n)]
    sist = 0
    while len(nums) > 1:
        newNum = reduce(lambda x, y: x * y, nums)
        nums = [int(x) for x in str(newNum)]
        sist = sist + 1
    return sist
| true |
1c5e0f53c158df952f8671a90e5aa8a7ff0704e5 | Python | SRvSaha/Python_Automation_Scipts | /diagonal_difference.py | UTF-8 | 1,132 | 3.96875 | 4 | [] | no_license | ############################################################################
# @author : Ipshita2207 #
# Filename : diagonal_difference.py #
# Timestamp : 04-Oct-2019 (Friday) #
# Description : Given a square matrix, calculate the absolute
#                difference between the sums of its diagonals              #
############################################################################
import sys
'''
Input format:
The first line contains a single integer n, which is the number of
rows and columns in arr.
Each of the next n lines describes a row, arr[i], and consists of n space-separated
integers arr[i][j].
Output format:
Print the absolute difference between the sums of the matrix's two
diagonals as a single integer
'''
# Number of rows/columns of the square matrix, read from stdin.
n = int(input().strip())
sumLeft = 0   # primary diagonal (top-left -> bottom-right)
sumRight = 0  # secondary diagonal (top-right -> bottom-left)
for i in range(n):
    # Each row arrives as one line of space-separated integers.
    matrixRow = input().split()
    sumLeft = sumLeft + int(matrixRow[i])
    sumRight = sumRight + int(matrixRow[-(i + 1)])
diff = abs(sumLeft-sumRight)
print(diff)
| true |
e59bdd9e80b116f747e9d48e442e9d4f426a417c | Python | whpei93/KQ | /kq/window_utils.py | UTF-8 | 2,961 | 2.84375 | 3 | [] | no_license | import os
import cv2
def get_full_game_window(img='../tmp/tmp.png'):
    """
    Capture the game window from the screen and load it as grayscale.

    Runs the macOS `screencapture` tool over a fixed screen rectangle
    (-R 0,22,1174,852), writes the shot to *img*, then reads it back
    with cv2.imread(..., 0), i.e. as a grayscale image.

    :param img: path where the screenshot is written and read back
    :return: full game window, gray type (2-D array; None if the
        capture file could not be read)
    """
    os.system("screencapture -m -R 0,22,1174,852 {}".format(img))
    full_game_window = cv2.imread(img, 0)
    return full_game_window
def get_control_window(full_window):
    """Crop the control-panel region out of a full-window capture.

    :param full_window: 2-D image array (rows = y, cols = x)
    :return: view of the region y in [1370, 1650), x in [140, 1930)
    """
    top, left = 1370, 140
    bottom, right = 1650, 1930
    return full_window[top:bottom, left:right]
def get_board_window(full_window):
    """Crop the board region out of a full-window capture.

    :param full_window: 2-D image array (rows = y, cols = x)
    :return: view of the region y in [480, 590), x in [770, 1560)
    """
    y_range = slice(480, 590)
    x_range = slice(770, 1560)
    return full_window[y_range, x_range]
def get_pot_money_window(full_window):
    """Crop the pot-money readout region from a full-window capture.

    :param full_window: 2-D image array (rows = y, cols = x)
    :return: view of the region y in [435, 475), x in [1000, 1360)
    """
    return full_window[435:475, 1000:1360]
def get_player_window(full_window, position):
    """Crop the window of one of the six player seats.

    :param full_window: 2-D image array (rows = y, cols = x)
    :param position: seat index 0-5; any other value yields an empty crop
    :return: view of the seat's rectangle (empty array for unknown seats)
    """
    # (start_x, start_y, end_x, end_y) per seat.  Replaces the original
    # 30-line if/elif chain with a lookup table; seats 0/1 sit on the
    # bottom row, 3/4 on the top row, 2/5 at the left/right table edges.
    coords = {
        0: (1240, 880, 1750, 1290),
        1: (540, 880, 1060, 1290),
        2: (5, 530, 715, 780),
        3: (540, 70, 1060, 460),
        4: (1240, 70, 1750, 460),
        5: (1640, 530, 2340, 780),
    }
    # Unknown positions fall back to an all-zero rectangle, matching the
    # original chain's empty [0:0, 0:0] slice.
    start_x, start_y, end_x, end_y = coords.get(position, (0, 0, 0, 0))
    return full_window[start_y:end_y, start_x:end_x]
def get_call_money_window(full_window):
    """Crop the call-money readout region from a full-window capture.

    :param full_window: 2-D image array (rows = y, cols = x)
    :return: view of the region y in [1420, 1460), x in [1100, 1300)
    """
    return full_window[1420:1460, 1100:1300]
| true |
be13edb702a3426840a197f9bc9023fa3ec70db9 | Python | hansaimlim/thesis-works | /DrugTargetInteraction/data/Integrated/activities/ChEMBL.py | UTF-8 | 2,029 | 2.515625 | 3 | [] | no_license | import os
import sys
import pandas as pd
import numpy as np
from utils import pandas_df_continuous
def get_chembl_by_assay_type(assay_type='pKd',dataframe=True):
fpath='../../ChEMBL24/'
pic50=fpath+'ChEMBL24_pIC50.tsv'
pkd=fpath+'ChEMBL24_pKd.tsv'
pki=fpath+'ChEMBL24_pKi.tsv'
if (assay_type=='pIC50') or (assay_type=='pic50'):
infile=pic50
atype='pIC50'
elif (assay_type=='pKd') or (assay_type=='pkd'):
infile=pkd
atype='pKd'
elif (assay_type=='pKi') or (assay_type=='pki'):
infile=pki
atype='pKi'
else:
print("Error in parsing ChEMBL data. Choose a proper assay type (pIC50, pKd, or pKi)")
sys.exit()
data=[]
with open(infile,'r') as f:
for line in f:
line=line.strip().split('\t')
ikey=str(line[0])
uni=str(line[1])
rel=line[2]
val=float(line[3])
tup=(ikey,uni,atype,rel,val)
data.append(tup)
if dataframe:
data=pandas_df_continuous(data)
return data
def get_chembl_cyp450_by_assay_type(assay_type='pKd',dataframe=True):
fpath='../../CYP450/ChEMBL23/'
pic50=fpath+'CYP450_pIC50.tsv'
pkd=fpath+'CYP450_pKd.tsv'
pki=fpath+'CYP450_pKi.tsv'
if (assay_type=='pIC50') or (assay_type=='pic50'):
infile=pic50
atype='pIC50'
elif (assay_type=='pKd') or (assay_type=='pkd'):
infile=pkd
atype='pKd'
elif (assay_type=='pKi') or (assay_type=='pki'):
infile=pki
atype='pKi'
else:
print("Error in parsing ChEMBL CYP450 data. Choose a proper assay type (pIC50, pKd, or pKi)")
sys.exit()
data=[]
with open(infile,'r') as f:
next(f)
for line in f:
line=line.strip().split('\t')
ikey=str(line[0])
uni=str(line[1])
rel=line[2]
val=float(line[3])
tup=(ikey,uni,atype,rel,val)
data.append(tup)
if dataframe:
data=pandas_df_continuous(data)
return data
if __name__=='__main__':
pkd_data=get_chembl_by_assay_type(assay_type='pkd')
print(pkd_data)
pkd_data=get_chembl_cyp450_by_assay_type(assay_type='pic50')
print(pkd_data)
| true |
f46c254608e46d395fa52539d5ab7428066d8b50 | Python | xxx0624/Tools | /lda-based-tfidf/tf-idf.py | UTF-8 | 3,807 | 2.921875 | 3 | [] | no_license | #coding=utf-8
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
import jieba, sys, os
import jieba.analyse
import numpy as np
'''
get file content
'''
def get_file_content(file_path):
fopen = open(file_path, 'rb')
content = fopen.read()
return content.decode('utf-8', 'ignore')
'''
conver the corpus file to tf-idf array
para: the file's path
the file's content: 1. one line is one sentence
2. the sentence must be word segment
3. for example: "a is b and c ." and Chinese is the same
return: x_array just like [[1,2,3...],[1,2,3...]...]
x_name [word1,word2...wordN]
'''
def get_array(file_path):
print ('start get x array from corpus...')
vectorizer = CountVectorizer()
fopen = open(file_path, 'rb')
corpus = []
for line in open(file_path,'rb'):
line = fopen.readline()
line = line.strip()
corpus.append(line)
fopen.close()
x1 = vectorizer.fit_transform(corpus)
x_array = x1.toarray()
x_name = vectorizer.get_feature_names()
print ('ok...\n')
return x_array, x_name
'''
filter some not important words(the idf's value is small)
para: the tf-idf array( that is x_array)
'''
def filter_x_array(x_array, tf_idf_minin_value):
x_array = x_array.tolist()
print 'start filter x_array...'
#init
x_array_sum = []
col_array = 0
for one_array in x_array:
col_array = len(one_array)
for i in range(col_array):
x_array_sum.append(int(0))
break
#get the x_array's sum
#x_array_sum is one dimension
for one_array in x_array:
for index in range(len(one_array)):
x_array_sum[ index ] += int(one_array[index])
#start filter
myDict = {}
for i in range(len(x_array_sum)):
if x_array_sum[i] < tf_idf_minin_value:
myDict[i] = i
row_array = len(x_array)
print "row = "+str(row_array)+" ;col = "+str(col_array)
for i in range(row_array):
for j in range(col_array-1, -1 ,-1):
if j in myDict:
#print "j is ",j
#x_array[i][j] = 0
x_array[i].pop(j)
row_array = len(x_array)
col_array = 0
for one_array in x_array:
if col_array==0:
col_array = len(one_array)
else:
if col_array != len(one_array):
print "tf-idf array: cannot erase success(as for one col)!!!!!"
sys.exit('Oh, my god! ERROR!')
print "new_row = "+str(row_array)+" ;new_col = "+str(col_array)
print ('ok...\n')
return np.array(x_array)
'''
get tf-idf weight
para: x_array:[[1,2,3...],[1,2,3...]...]
return: [[0.1,0.2,0.3...],[0.2,0.1,...]...]
'''
def get_tf_idf(x_array):
print ('start get tf-idf array...')
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(x_array)
tfidf_array = tfidf.toarray()
print ('ok...\n')
return tfidf_array
'''
write something to local file
para: x is [[1,2,3...],[1,2,3...]...]
file_path: the dst file path
'''
def write_all_thing(x1, file_path):
print ('start write '+file_path+' into local file...')
fopenw = open(file_path,'w')
print ('the array size is '+str(len(x1)))
for x2 in x1:
cnt = 0
for x3 in x2:
if cnt==0 :
fopenw.write(str(x3))
else:
fopenw.write(' '+str(x3))
cnt += 1
fopenw.write('\n')
fopenw.close()
print ('ok...')
if __name__ == '__main__':
file_path = "localfile/wordallfilterhtmlcontent.txt"
result_file_path = 'localfile/x_array.txt'
tf_idf_minin_value = 5
if len(sys.argv) >= 4:
file_path = sys.argv[1]
result_file_path = sys.argv[2]
tf_idf_minin_value = sys.argv[3]
#get tf-idf array
x_array, x_name = get_array(file_path)
#delete the array's cols which are 0
x_array = filter_x_array(x_array, int(tf_idf_minin_value))
#get tf-idf weight array
tfidf_array = get_tf_idf(x_array)
#write some thing
write_all_thing(x_array, result_file_path)
#write_all_thing(tfidf_array, "localfile/tfidf_array.txt")
else:
print '[ERROR] check the file & value'
print('\nfinish...\n')
| true |
1a11d0cb7237b91e95f59e96eda2a0a83b5c88fc | Python | Shuhana808/NetSecurity | /src/cryptography_operations_module.py | UTF-8 | 6,138 | 2.796875 | 3 | [] | no_license | import base64
import hashlib
import random
import datetime
from cryptography.fernet import Fernet
def gcd(a, b):
while a != 0:
a, b = b % a, a
return b
def miillerTest(d, n):
# Pick a random number in [2..n-2]
# Corner cases make sure that n > 4
a = 2 + random.randint(1, n - 4);
# Compute a^d % n
x = power(a, d, n);
if (x == 1 or x == n - 1):
return True;
# Keep squaring x while one
# of the following doesn't
# happen
# (i) d does not reach n-1
# (ii) (x^2) % n is not 1
# (iii) (x^2) % n is not n-1
while (d != n - 1):
x = (x * x) % n;
d *= 2;
if (x == 1):
return False;
if (x == n - 1):
return True;
def isPrime(n):
k=4
# Corner cases
if (n <= 1 or n == 4):
return False;
if (n <= 3):
return True;
# Find r such that n =
# 2^d * r + 1 for some r >= 1
d = n - 1;
while (d % 2 == 0):
d //= 2;
# Iterate given nber of 'k' times
for i in range(k):
if (miillerTest(d, n) == False):
return False;
return True;
def power(x, y, p):
res = 1 # Initialize result
# Update x if it is more
# than or equal to p
x = x % p
while (y > 0):
# If y is odd, multiply
# x with result
if ((y & 1) == 1):
res = (res * x) % p
# y must be even now
y = y >> 1 # y = y/2
x = (x * x) % p
return res
def modInverse(a, m) :
a = a % m;
for x in range(1, m) :
if ((a * x) % m == 1) :
return x
return 1
def findModInverse(a, m):
if gcd(a, m) != 1:
return None
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
def generateLargePrime(keysize=1024):
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if isPrime(num):
return num
def generateKeyPair(keySize):
#print('Generating p prime...')
p = generateLargePrime(keySize)
#print('Generating g such that 1<=g<p and g is relatively prime to p...')
while True:
g = random.randrange(1, p)
if gcd(g, p) == 1:
break
#print('Generating private key x such that 1<=x<=p-1')
x = random.randrange(1, p)
#print('Generating public key y')
y = power(g, x, p)
return (p, g, x, y)
def save_key_pair_components(p,g,x,y):
fp = open("key_component_p.txt", "w+")
fp.write(str(p))
fp.close()
fg = open("key_component_g.txt", "w+")
fg.write(str(g))
fg.close()
fx = open("key_component_x.txt", "w+")
fx.write(str(x))
fx.close()
fy = open("key_component_y.txt", "w+")
fy.write(str(y))
fy.close()
def load_key_pair_components():
fp = open("key_component_p.txt", "r+")
content = fp.read()
p = int(content)
fp.close()
fg = open("key_component_g.txt", "r+")
content = fg.read()
g = int(content)
fg.close()
fx = open("key_component_x.txt", "r+")
content = fx.read()
x = int(content)
fx.close()
fy = open("key_component_y.txt", "r+")
content = fy.read()
y = int(content)
fy.close()
return p, g, x, y
def get_secret_parameter(p):
while True:
k = random.randrange(1, p-1)
if gcd(k, (p - 1)) == 1:
break
return k
def generate_secret_key(message,k):
value = message + str(k)
h = hashlib.sha256(value.encode('ascii')).digest()
return base64.urlsafe_b64encode(h).decode('ascii')
def symmetric_encryption(plainText, key):
plainTextBytes = plainText.encode('utf-8')
f = Fernet(key)
cipherTextBytes = f.encrypt(plainTextBytes)
cipherText = cipherTextBytes.decode("utf-8")
return cipherText
def symmetric_decryption(cipherText, key):
cipherTextBytes = cipherText.encode('utf-8')
f = Fernet(key)
plainTextBytes = f.decrypt(cipherTextBytes)
plainText = plainTextBytes.decode('utf-8')
return plainText
def create_digitalSignature(message, g, k, x, p):
r = power(g, k, p)
m = int(hashlib.sha1(message.encode('utf-8')).hexdigest(),16)
val_1 = x*(r+m)
val_2 = k %(p-1)
s = val_1 - val_2
return r, s
def retrieve_secret_parameter(message, r, s, x, p):
m = int(hashlib.sha1(message.encode('utf-8')).hexdigest(), 16)
val_1 = x * (r + m)
val_2 = s % (p - 1)
k = val_1 - val_2
return k
if __name__ == '__main__':
# p1, g1, x1, y1 = generateKeyPair(2048)
# save_key_pair_components(p1, g1, x1, y1)
p, g, x, y = load_key_pair_components()
start_time1 = datetime.datetime.now()
k = get_secret_parameter(p)
elapsedTime1 = datetime.datetime.now()-start_time1
print('secret parameter selection: %d' %elapsedTime1.microseconds)
c_i1 = 'aserrytigtyufgh'
c_i2 = 'fhietbpnssnshuevbe'
start_time2 = datetime.datetime.now()
secret_key = generate_secret_key(c_i1, k)
elapsedTime2 = datetime.datetime.now() - start_time2
print('secret key generation: %d' % elapsedTime2.microseconds)
start_time3 = datetime.datetime.now()
t_i1 = symmetric_encryption(c_i2, secret_key)
elapsedTime3 = datetime.datetime.now() - start_time3
print('cookie data encryption: %d' % elapsedTime3.microseconds)
message = c_i1 + t_i1
start_time4 = datetime.datetime.now()
r,s = create_digitalSignature(message, g, k, x, p)
elapsedTime4 = datetime.datetime.now() - start_time4
print('digital signature creation: %d' % elapsedTime4.microseconds)
start_time5 = datetime.datetime.now()
k = retrieve_secret_parameter(message, r, s, x, p)
elapsedTime5 = datetime.datetime.now() - start_time5
print('secret parameter extraction: %d' % elapsedTime5.microseconds)
start_time6 = datetime.datetime.now()
c_i2_new = symmetric_decryption(t_i1, secret_key)
elapsedTime6 = datetime.datetime.now() - start_time6
print('cookie data decryption: %d' % elapsedTime6.microseconds) | true |
9fe670270c9ca7435797997856160634393f5647 | Python | ivygenta/scraping | /chatter-scraping.py | UTF-8 | 3,309 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import configparser
import dao
#--------readme-----------------------
#最初の実行時は携帯認証の画面が表示されるので手動で認証してやりなおす、二回目以降は認証不要のはず
#※何回も携帯認証すると認証ロックするみたいなので気を付ける
#iniファイルを書き換えてプロジェクトフォルダ直下におく
#--------readme-----------------------
#iniファイル読み込み
inifile = configparser.ConfigParser()
inifile.read('./config.ini', 'UTF-8')
#headless モード
#options = webdriver.ChromeOptions()
#options.add_argument('--headless')
#,options=options(有効にするときはこれをdriveroptionに追加)
#user-data-dir指定(chromeのuser情報格納場所を指定)
usoptions = webdriver.ChromeOptions()
usoptions.add_argument('--user-data-dir='+inifile.get('chrome', 'user-data-dir'))
#webdriver初期化
driver = webdriver.Chrome(executable_path=inifile.get('chrome', 'chromedriverpath'),options=usoptions)
driver.get('https://login.salesforce.com/?locale=jp')
#ログイン処理
def login():
username = driver.find_element_by_name("username")
password = driver.find_element_by_name("pw")
username.send_keys(inifile.get('user', 'username'))
password.send_keys(inifile.get('user', 'pw'))
username.submit()
#「表示件数を増やす」押下する処理
def morebottunpush():
delay = 10 # seconds
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.LINK_TEXT, '更新の表示件数を増やす »')))#ボタンが表示されるまで待つ
driver.find_element_by_link_text('更新の表示件数を増やす »').click()
#画面スクロールで読み込まれる情報を表示する処理
def scroll():
for i in range(100):
time.sleep(2)#読み込みを待つ
driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")#下までスクロールする
#特定のクラスに「ダウンロード中」の文字があれば繰り返す
#「ダウンロード中」:あったら続行、なければ終了
sc = driver.find_elements_by_xpath("//*[@class='cxshowmorefeeditemscontainer showmorefeeditemscontainer']")
for scc in sc:
b = "false"
if "ダウンロード中" in scc.text:
b = "true"
if "true" in b:
continue
else:
break
#さらに表示リンクを押しまくる処理
def morebottuns():
cxmore = driver.find_elements_by_xpath("//*[@class='cxmorelink']")
for more in cxmore:
more.click()
#特定の文字で投稿を検索する処理
def serch():
textelements = driver.find_elements_by_xpath("//*[@class='feeditemtext cxfeeditemtext']")
print(textelements)
commentlist = []
for ele in textelements:
if "お疲れ様です。" in ele.text:
commentlist.append(ele.text)
print(ele.parent)
print(ele.text)
print("------投稿区切り-------")
dao.insertsql(commentlist)
login()
morebottunpush()
scroll()
morebottuns()
serch()
driver.quit()
| true |
79fb86e2273cbc225a439241d8cb13db3d0fa4a2 | Python | sam1208318697/Leetcode | /Leetcode_env/2019/6_03/Sort_Array_By_Parity.py | UTF-8 | 642 | 3.953125 | 4 | [] | no_license | # 905. 按奇偶排序数组
# 给定一个非负整数数组 A,返回一个数组,在该数组中, A 的所有偶数元素之后跟着所有奇数元素。
# 你可以返回满足此条件的任何数组作为答案。
# 示例:
# 输入:[3,1,2,4]
# 输出:[2,4,3,1]
# 输出 [4,2,3,1],[2,4,1,3] 和 [4,2,1,3] 也会被接受。
class Solution:
def sortArrayByParity(self, A):
res = []
for i in range(len(A)):
if A[i]%2!=0:
res.append(A[i])
else:
res.insert(0,A[i])
return res
sol = Solution()
print(sol.sortArrayByParity([3,1,2,4]))
| true |
f7de73debc1d09c95fcc100b8c0e976752e8c717 | Python | ricott1/Term-in-Ale | /bestiary.py | UTF-8 | 2,234 | 2.8125 | 3 | [] | no_license | import character, ability, random, item
def loadVillain(name, level='auto'):
    """Load a villain definition from villains/<name>.bbb (Python 2 module).

    Each line of the file is a "KEY - value" pair; unknown or malformed
    lines are ignored. Returns a fully initialized character.Villain, or
    False if the file is missing or any step fails.

    NOTE(review): the outer bare `except` also swallows NameError when the
    file has no LEVEL line and level == 'auto' (exp is then unbound), so
    such data files silently yield False — confirm that is intended.
    """
    fileName = 'villains/%s.bbb'%name.lower().strip()
    try:
        with open(fileName,'r') as villainData:
            villain = character.Villain()
            fullData = villainData.read().splitlines()
            abilities = []
            # Explicit level overrides any LEVEL line in the file.
            if level != 'auto':
                exp = (level-1)**2*1000
            for data in fullData:
                try:
                    key, value = data.split(' - ', 1)[0].strip().upper(), data.split(' - ', 1)[1].strip()
                except:
                    key, value ='',''
                if key == 'TYPE':
                    villain.type = value[0].upper() + value[1:].lower()
                elif key == 'RANDOMNAMES':
                    villain.randomNames = [n.strip() for n in value.split(',')]
                elif key == 'TARGET':
                    villain.combatRules['target'] = value.lower()
                elif key == 'PRIORITY':
                    villain.combatRules['priority'] = value.lower()
                elif key == 'ALIGNMENT':
                    villain.combatRules['alignment'] = value.lower()
                elif key == 'LEVEL' and level == 'auto':
                    lv = int(value)
                    exp = (lv-1)**2*1000
                elif key == 'BTH':
                    villain.baseBTH = int(value)
                elif key == 'HB':
                    villain.baseHB = int(value)
                elif key == 'SKR':
                    villain.baseSKR = float(value)
                elif key == 'VOP':
                    villain.baseVOP = int(value)
                elif key == 'RTM':
                    villain.baseRTM = int(value)
                elif key == 'STA':
                    villain.baseSTA = int(value)
                elif key == 'ABILITY':
                    # "ABILITY - <name> <unlock level>"
                    ab = value.split()[0]
                    lv = value.split()[1]
                    abilities.append((ab,lv))
                elif key == 'BONUS':
                    villain.levelBonus.append(value)
                elif key == 'IMAGE':
                    pic = value.strip('Q')
                    villain.picture.append(pic)
                elif key == 'DESCRIPTION':
                    villain.description.append(value)
                elif key == 'INVENTORY':
                    obj = item.loadItem(value)
                    villain.addInventory(obj)
                elif key == 'HASPICTURE':
                    villain.hasPicture = True
            # Experience (and thus level) must be applied before abilities,
            # which are gated on villain.level below.
            villain.addExperience(exp)
            if villain.hasPicture:
                villain.initializePicture()
            for obj in villain.inventory:
                villain.equip(obj)
            for ab,lv in abilities:
                if villain.level >= int(lv):
                    villain.abilities[ab] = getattr(ability, ab)(villain)
            villain.restore()
            return villain
    except:
        return False
if __name__=='__main__':
    import sys
    # Python 2 print statement. NOTE(review): pickTarget is not called here,
    # so this prints the bound method object itself — confirm that is intended.
    print loadVillain(sys.argv[1]).pickTarget
| true |
95ea8a21d3ac44c7760179bc4ebf67f0c16e6a19 | Python | limitzero/python-nodejs-kickstart | /watcher.py | UTF-8 | 2,089 | 2.90625 | 3 | [] | no_license | """
module : watcher.py
description : Script to automatically watch a directory (via watchdog) for tests and run them via py.test
"""
import sys
import os.path
import subprocess
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class SpecificationsEventHandler(FileSystemEventHandler):
    """Runs the tests inside the specifications class when any specification file is modified.

    `paused` guards against re-entrant events fired while a test run is in
    progress; `banner` is a visual separator printed around each run.
    """
    def __init__(self):
        self.paused = False
        self.banner = "============================================================"
    def on_modified(self, event):
        super(SpecificationsEventHandler, self).on_modified(event)
        # NOTE(review): the triple-quoted string below is an expression
        # statement, not a docstring (it follows the super() call); it is
        # harmless but could be moved to the top of the method.
        """
        Description:
            Catches the file modified event from the watchdog package and
            creates the full path to the file for submission to the test engine
            of choice.
        Args:
            event: Contains the information for the file system event
                when modification has occurred
        """
        # file modified triggers directory modified as well...
        if event.is_directory:
            return
        if self.paused:
            return
        # Only react to spec files, and only when no run is already active.
        if event.src_path.endswith("_specs.py") and not self.paused:
            self.paused = True
            #filename = os.path.basename(event.src_path)
            directory = os.path.abspath(os.path.dirname(event.src_path))
            filename = os.path.basename(event.src_path)
            file = os.path.join(directory, filename)
            print(self.banner, end="\n")
            print("testing specifications found in file: {0}".format(file))
            print("")
            # if using pytest, uncomment the line below
            #subprocess.call(['py.test', '-v', file], shell=True)
            #using mamba as the test engine:
            subprocess.call(['mamba', file], shell=True)
            print(self.banner, end="\n")
            self.paused = False
        return
if __name__ == "__main__":
    # Watch the directory given on the command line (recursively) and run
    # tests whenever a *_specs.py file changes; Ctrl-C stops the observer.
    path = sys.argv[1]
    event_handler = SpecificationsEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| true |
3c5dcb570034d40a366a205c821f7780e7c52d26 | Python | arsturges/miscellaneous | /prisoner_puzzle/stats.py | UTF-8 | 2,842 | 3.359375 | 3 | [] | no_license | from prisoner_puzzle import monte_carlo
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def example_stats():
    """Run the prisoner-escape Monte Carlo simulation, print summary
    statistics, and show a histogram with the mean and 'median' marked.

    NOTE(review): the quantity labelled 'median' below is actually the
    midrange (min + range/2), not the statistical median — confirm whether
    np.median(results) was intended. The title string also contains the
    typo 'Historgram'.
    """
    number_of_experiments = 500
    number_of_prisoners = 50
    results = monte_carlo(number_of_experiments, number_of_prisoners)
    #Calculate some statistics:
    minimum = min(results)
    maximum = max(results)
    median = (maximum - minimum)/2.0 + minimum
    mean = np.mean(results)
    string = "The simulation with {} results took an average of {} days \
or {} months or {} years to finish."
    print(string.format(number_of_experiments, mean, mean/12, mean/365.0))
    #what's the spread?
    print "max:", maximum
    print "min:", minimum
    #where does the mean fall within that spread?
    print "mean:", mean
    print "median:", median
    print "distance from mean to median:", mean - median
    #how broad is the peak?
    standard_deviation = np.std(results)
    print "standard deviation:", standard_deviation
    #Plot a histogram
    histogram_data = plt.hist(results, bins=30)
    plt.vlines(
        median,
        0,
        max(histogram_data[0]),
        linestyles='dashed',
        lw = 4,
        label = "median",
        color='red')
    plt.vlines(
        mean,
        0,
        max(histogram_data[0]),
        linestyles='dashed',
        lw = 4,
        label = "mean",
        color='orange')
    plt.title("Prisoner Escape Riddle Historgram, n={}".format(number_of_experiments))
    plt.xlabel("Number of days to escape")
    plt.ylabel("Frequency")
    plt.legend()
    plt.show()
#How quickly does the mean converge as number_of_experiments increases?
# For each experiment count n in 1..50, run the simulation and record the
# mean escape time, then fit and plot a least-squares regression line.
x_values = [] # number_of_experiments
y_values = [] # average value at each number_of_experiments
number_of_prisoners = 50
for n in range(1,51):
    print n
    x_values.append(n)
    mean = np.mean(monte_carlo(n, number_of_prisoners))
    y_values.append(mean)
plt.scatter(x_values, y_values)
plt.title("Convergence of Mean Value, number_of_prisoners = {}".format(number_of_prisoners))
plt.xlabel("Number of experiments")
plt.ylabel("Days to prisoner release")
#plt.savefig("convergence.png")
def line(slope, intercept, x):
    # Evaluate the fitted line y = intercept + slope * x at x.
    a = intercept
    b = slope
    y = a + b * x
    return y
slope, intercept, r_value, p_value, std_err = stats.linregress(x_values,y_values)
x_line_points = np.arange(1,51,1)
y_line_points = []
for x in x_line_points:
    y_line_points.append(line(slope, intercept, x))
y_line_points = np.array(y_line_points)
y_values = np.array(y_values)
plt.plot(x_line_points, y_line_points)
# Vertical segments visualize each point's residual from the fit.
plt.vlines(x_values, y_line_points, y_values)
sum_of_distances = sum(abs(y_values - y_line_points))
plt.suptitle("Sum of distances: {}".format(sum_of_distances))
plt.show()
'''
#if we increase the number of prisoners, does the mean increase linearly?
for number_of_prisoners in range(1,51):
    print(
        "Prisoners:", number_of_prisoners,
        "Mean:", mean(monte_carlo(500,number_of_prisoners)))
'''
| true |
def mergesort(arr):
    """Return a new list with the elements of arr sorted ascending.

    Fixes: the original base case was len(arr) == 1, so an empty list fell
    into the split branch and recursed forever; the base case is now
    len(arr) <= 1.

    :param arr: list of comparable elements (not mutated)
    :return: new sorted list
    """
    if len(arr) <= 1:
        return list(arr)
    m = len(arr) // 2
    left = mergesort(arr[:m])
    right = mergesort(arr[m:])
    return merge(left, right)

def merge(b, c):
    """Merge two already-sorted lists into one sorted list.

    Uses index cursors instead of pop(0) (which is O(n) per element),
    so the merge is O(len(b) + len(c)). The merge is stable: on ties,
    elements of b come first.
    """
    d = []
    i = j = 0
    while i < len(b) and j < len(c):
        if b[i] <= c[j]:
            d.append(b[i])
            i += 1
        else:
            d.append(c[j])
            j += 1
    # At most one of these extends with anything.
    d.extend(b[i:])
    d.extend(c[j:])
    return d
# Smoke test: expected output [1, 2, 3, 4, 5, 6].
arr = [2,6,1,3,5,4]
d = mergesort(arr)
print(d)
| true |
4c949420f858d554659d924d52a30ebd8dcef506 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_158/1077.py | UTF-8 | 2,463 | 3.171875 | 3 | [] | no_license | # Google Code Jam 2015
def fillable(X, R, C):
    """Decide whether Gabriel can always tile an R x C grid using
    X-cell pieces chosen adversarially by Richard.

    Returns True when Gabriel always wins, False when Richard can force
    a loss. Only X in {1, 2, 3, 4} is handled; other divisible sizes
    fall off the end (returning None), matching the original contest
    solution's scope.
    """
    cells = R * C
    if X == 1:
        return True  # unit squares tile anything
    # The pieces must both fit and divide the board evenly.
    if X > cells or cells % X:
        return False
    if X == 2:
        # Dominoes fail exactly on odd-by-odd boards.
        return not (R % 2 and C % 2)
    if X == 3:
        if R < 3 and C < 3:
            return False
        if 1 in (R, C):
            return False  # Richard picks an L piece. Dammit Richard
        if 3 in (R, C):
            return True
        assert False  # No other 3-cases survive the divisibility check
    if X == 4:
        if 1 in (R, C):
            return False
        if R <= 3 and C <= 3:
            return False
        if sorted((R, C)) == [2, 4]:
            return False
        return True
def tests():
    """Sanity assertions for fillable() covering X in {1, 2, 3, 4}."""
    # 3s
    assert fillable(1, 1, 1)
    assert fillable(1, 1, 2)
    assert fillable(1, 1, 4)
    assert fillable(1, 2, 2)
    assert fillable(1, 2, 3)
    assert fillable(2, 4, 1)
    assert fillable(3, 2, 3)
    assert fillable(3, 3, 2)
    assert fillable(4, 3, 4)
    assert not fillable(4, 4, 1)
    assert not fillable(4, 4, 2)
    assert not fillable(4, 2, 4)
    assert fillable(4, 4, 3)
    assert fillable(4, 4, 4)
    assert not fillable(2, 1, 1)
    assert not fillable(2, 3, 3)
    assert not fillable(3, 1, 1)
    assert not fillable(3, 1, 3)
    assert not fillable(3, 2, 1)
    assert not fillable(3, 2, 2)
    assert not fillable(3, 3, 1)
    assert not fillable(3, 4, 2)
    assert not fillable(3, 4, 4)
    assert not fillable(4, 1, 3)
    assert not fillable(4, 2, 2)
    assert not fillable(4, 3, 1)
    assert not fillable(4, 3, 2)
    assert not fillable(4, 3, 2)
    assert not fillable(4, 4, 1)
    # First answer set
    assert fillable(2, 2, 2)
def genwinners(fname):
    """Read a Code Jam input file (header line skipped) and return the
    answer string, one "Case #k: GABRIEL|RICHARD" line per case.

    NOTE(review): the file handle from open() is never closed, and the
    bare `print l.split()` looks like leftover debug output.
    """
    s = ""
    lines = open(fname).readlines()[1:]
    casenum = 1
    for l in lines:
        l = l.strip()
        print l.split()
        (X, R, C) = [int(i) for i in l.split()]
        s += "Case #{}: {}\n".format(casenum, "GABRIEL" if fillable(X, R, C) else "RICHARD")
        casenum += 1
    print s
    return s
def test_e2e(fname, correctfname):
    """End-to-end check: generated answers must match the gold file exactly."""
    f = open(correctfname)
    myans = genwinners(fname)
    key = "".join(f.readlines())
    print key
    assert myans == key
if __name__ == "__main__":
    # Run unit tests, then the end-to-end fixture, then produce the real
    # contest answer file in the user's Downloads directory.
    tests()
    print "Tests passed."
    test_e2e("testin.txt", "testout.txt.gold")
    infilename = "D-small-attempt1.in"
    downloadsdirectory = "/Users/robertkarl/Downloads/"
    answer = genwinners(downloadsdirectory + infilename)
    outfile = open(downloadsdirectory + "ans.txt", 'w')
    outfile.write(answer)
    outfile.close()
| true |
ef415466d7026ea73a704767de2898d8df2ce81f | Python | jeffkinnison/pyrameter | /pyrameter/domains/constant.py | UTF-8 | 2,702 | 3.625 | 4 | [] | no_license | """Representation of a singleton hyperparameter domain.
Classes
-------
ConstantDomain
A singleton hyperparameter domain.
"""
from pyrameter.domains.base import Domain
class ConstantDomain(Domain):
    """A singleton hyperparameter domain: every draw yields one value.

    Parameters
    ----------
    name : str, optional
        The name of this hyperparameter domain.
    domain
        The single value in this domain.

    See Also
    --------
    `pyrameter.domains.base.Domain`
    """
    def __init__(self, *args, **kwargs):
        if not args:
            raise ValueError('No domain provided.')
        if len(args) == 1:
            # Only the value was given; let the base class pick defaults.
            super(ConstantDomain, self).__init__()
            self.domain = args[0]
        else:
            # args[0] is the domain name, args[1] the constant value.
            super(ConstantDomain, self).__init__(args[0])
            self.domain = args[1]

    @classmethod
    def from_json(cls, obj):
        """Rebuild a ConstantDomain from a dict produced by ``to_json``.

        Parameters
        ----------
        obj : dict
            JSON object created with ``to_json``.

        Returns
        -------
        domain : ConstantDomain
            The domain encoded in ``obj``.
        """
        restored = cls(obj['name'], obj['domain'])
        restored.id = obj['id']
        restored.current = obj['current']
        return restored

    def generate(self):
        """Return the domain's single value."""
        return self.domain

    def map_to_domain(self, idx, bound=True):
        """Return the value at ``idx``.

        A constant domain holds one value, so the index (and the
        ``bound`` flag) are ignored and the value is always returned.
        """
        return self.domain

    def to_index(self, value):
        """Return the index of ``value``; always 0 for a singleton domain."""
        return 0

    def to_json(self):
        """Serialize to a JSON-compatible dict, including the constant value."""
        serialized = super(ConstantDomain, self).to_json()
        serialized['domain'] = self.domain
        return serialized
| true |
d9b19c7f1f055327444683f1311afb36f3c10a74 | Python | ysachinj99/PythonFile | /def Reverse of No.py | UTF-8 | 198 | 3.9375 | 4 | [] | no_license | #Reverse
def Reverse(n):
    """Print and return the decimal digit reversal of a non-negative integer.

    Fixes: the original used `n = n/10`, which is float division in
    Python 3 and corrupts the digit extraction (the loop degenerates into
    float remainders); floor division `//` restores integer arithmetic.
    Returning q is new but backward-compatible (callers ignoring the
    return value are unaffected).
    """
    q = 0
    while (n > 0):
        r = n % 10          # take the lowest digit
        q = q * 10 + r      # append it to the reversed number
        n = n // 10         # drop the lowest digit (integer division)
    print("Reverse of no is", q)
    return q
# Read a number from the user and print its digit reversal.
n=int(input("Enter a NO:"))
Reverse(n)
| true |
01a00299d744e3a748dde3ec4829f7f78685d3e4 | Python | toooooodo/pytorch-simple-seq2seq | /prepare_data.py | UTF-8 | 4,799 | 2.84375 | 3 | [] | no_license | from __future__ import unicode_literals, print_function, division
from torch.utils.data import Dataset, DataLoader
from io import open
import unicodedata
import re
import random
import numpy as np
class Lang:
    """Vocabulary bookkeeping for one language.

    Maintains bidirectional token/index maps plus per-token counts.
    Indices 0, 1 and 2 are reserved for '<pad>', '<bos>' and '<eos>'.
    """
    def __init__(self, name):
        self.name = name
        # {index: token}
        self.index_to_token = {0: '<pad>', 1: '<bos>', 2: '<eos>'}
        # {token: index}
        self.token_to_index = {'<pad>': 0, '<bos>': 1, '<eos>': 2}
        # {token: number of times the token has been added}
        self.token_count = {}
        self.token_n = 3  # number of distinct tokens registered so far

    def add_sentence(self, sentence):
        """Register every whitespace-separated token of ``sentence``."""
        for tok in sentence.split(' '):
            self.add_token(tok)

    def add_token(self, token):
        """Register one token, assigning a fresh index on first sight."""
        if token not in self.token_to_index:
            fresh = self.token_n
            self.index_to_token[fresh] = token
            self.token_to_index[token] = fresh
            self.token_n = fresh + 1
            self.token_count[token] = 1
        else:
            self.token_count[token] += 1
class F2EDataSet(Dataset):
    """French-to-English sentence-pair dataset for seq2seq training.

    Reads ./data/eng-fra.txt at construction time, normalizes and filters
    the pairs, builds a Lang vocabulary per language, and stores padded
    index sequences (<eos> = 2, <pad> = 0) of fixed length max_length.
    """
    def __init__(self, max_length=10):
        super(F2EDataSet, self).__init__()
        self.max_length = max_length
        # English sentences must start with one of these prefixes to be kept.
        self.eng_prefixes = (
            "i am ", "i m ",
            "he is", "he s ",
            "she is", "she s ",
            "you are", "you re ",
            "we are", "we re ",
            "they are", "they re "
        )
        self.in_lang, self.out_lang, self.in_seq, self.out_seq, self.pairs = self.load_text()

    def __len__(self):
        return self.in_seq.shape[0]

    def __getitem__(self, item):
        """
        :param item: sample index
        :return: [French index sequence, English index sequence]
        """
        return [self.in_seq[item], self.out_seq[item]]

    def load_text(self):
        """Read, normalize, filter and index the sentence pairs.

        :return: (input Lang, output Lang, input index array,
                  output index array, list of filtered string pairs)
        """
        with open('./data/eng-fra.txt', 'r', encoding='utf-8') as f:
            pairs = f.readlines()
        # pair[0]: ['go .', 'va !'] English => French
        pairs = [[self.normalizeString(s) for s in pair.rstrip().split('\t')] for pair in pairs]
        # French => English
        pairs = [list(reversed(pair)) for pair in pairs]
        print(f'Read {len(pairs)} sentence pairs.')
        pairs = [pair for pair in pairs if self.filter_pair(pair)]
        print(f'Trimmed to {len(pairs)} sentence pairs.')
        in_language = Lang('French')
        out_language = Lang('English')
        for in_sentence, out_sentence in pairs:
            in_language.add_sentence(in_sentence)
            out_language.add_sentence(out_sentence)
        print(in_language.name, in_language.token_n)
        print(out_language.name, out_language.token_n)
        in_indices, out_indices = [], []
        for in_sentence, out_sentence in pairs:
            in_indices.append(self.convert_token_to_index(in_language, in_sentence))
            out_indices.append(self.convert_token_to_index(out_language, out_sentence))
        in_indices, out_indices = np.array(in_indices), np.array(out_indices)
        return in_language, out_language, in_indices, out_indices, pairs

    def unicodeToAscii(self, s):
        # Strip combining marks (accents) after NFD decomposition.
        return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')

    def normalizeString(self, s):
        # Lowercase, detach punctuation, and collapse everything else to spaces.
        s = self.unicodeToAscii(s.lower().strip())
        s = re.sub(r"([.!?])", r" \1", s)
        s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
        return s

    def filter_pair(self, pair):
        # Keep pairs short enough to pad and whose English side matches a prefix.
        return len(pair[0].split(' ')) < self.max_length and len(pair[1].split(' ')) < self.max_length and pair[
            1].startswith(self.eng_prefixes)

    def convert_token_to_index(self, lang, sentence):
        """Map a sentence to indices, append <eos> (2), pad with 0s to max_length."""
        indices = []
        for token in sentence.split(' '):
            indices.append(lang.token_to_index[token])
        # padding
        indices += [2] + [0] * (self.max_length - len(indices) - 1)
        return indices

    def random_sample(self, k=5):
        """Return k string pairs sampled with replacement."""
        return random.choices(self.pairs, k=k)

    def convert_index_to_token(self, lang, indices):
        """Inverse of convert_token_to_index (pad/eos markers included)."""
        tokens = []
        for index in indices:
            tokens.append(lang.index_to_token[index])
        return ' '.join(tokens)
if __name__ == '__main__':
    # Demo: build the dataset, sample a few pairs, and show their index forms.
    data_set = F2EDataSet()
    # print(data_set.random_sample())
    random_sentences = data_set.random_sample()
    sample_in_indices, sample_out_indices = [], []
    for in_sentence, out_sentence in random_sentences:
        sample_in_indices.append(data_set.convert_token_to_index(data_set.in_lang, in_sentence))
        sample_out_indices.append(data_set.convert_token_to_index(data_set.out_lang, out_sentence))
    print(random_sentences)
    print(sample_in_indices)
    print(sample_out_indices)
    # loader = DataLoader(data_set, batch_size=32, shuffle=True)
    # for batch_idx, (in_seq, out_seq) in enumerate(loader):
    #     print(in_seq[0].dtype)
    #     print(out_seq[0])
    #     break
| true |
7e04e69952d2221228c76a652174be4ce7f1f42f | Python | Busiky/Tasks | /Polygonal_numbers/all_solutions.py | UTF-8 | 460 | 3.0625 | 3 | [] | no_license | from solution import *
def solve(number):
    """Return all (n, angle) pairs whose polygonal number equals ``number``.

    Scans angle = 3.. number and, for each angle, n = 1 upward while the
    polygonal number stays <= number. Returns [(None, None)] when no
    representation exists (including number < 3).

    Improvement: create_polygon_number(n, angle) was evaluated twice per
    inner iteration; it is now computed once and reused.
    """
    result = []
    angle = 3
    while angle <= number:
        n = 1
        value = create_polygon_number(n, angle)
        while value <= number:
            if value == number:
                result.append((n, angle))
            n += 1
            value = create_polygon_number(n, angle)
        angle += 1
    if not result:
        result.append((None, None))
    return result
if __name__ == "__main__":
    # Print each (n, angle) solution for the entered number on its own line.
    print(*solve(int(input())), sep='\n')
| true |
328cd9e7a497c896017086c1d7c472229cc702a7 | Python | CivetWang/PHY407 | /Lab07_Q1.py | UTF-8 | 2,191 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 10:23:52 2018
@author: Civet
"""
"""
Use the program to simulate the circulation motion of object
about the ball-rod system which describe the space garbage system
"""
#import moduleds
import numpy as np
import pylab as plt
import time
t0 = time.clock()
# Physical constants of the ball-rod ("space garbage") system and the
# target accuracy for the adaptive step-size controller.
G = 1
M = 10
L = 2
delta = 1e-6
# Right-hand side of the ODE system; state vector r = [x', x, y', y].
def f(r):
    """Return the time derivative of the state [x', x, y', y].

    The acceleration is the gravitational pull of a rod of mass M and
    length L at the origin: a = -G*M*r_vec / (r^2 * sqrt(r^2 + L^2/4)).
    """
    xp, x, yp, y = r[0], r[1], r[2], r[3]
    r0 = np.sqrt(x**2 + y**2)
    denom = r0**2 * np.sqrt(r0**2 + L**2 / 4)
    ax = -G * M * x / denom
    ay = -G * M * y / denom
    return np.array([ax, xp, ay, yp], float)
def rho(r1, r2, h):
    """Ratio of the accuracy target (h * delta) to the estimated local
    error between the two-half-step solution r1 and the one-double-step
    solution r2 (positions only, scaled by 1/30 per the RK4 estimate).
    """
    err_x = 1 / 30. * (r1[1] - r2[1])
    err_y = 1 / 30. * (r1[3] - r2[3])
    return h * delta / np.sqrt(err_x**2 + err_y**2)
def Runge_Kutta(ri, h0):
    """Advance state ri by one step of size h0 using classical RK4."""
    k1 = h0 * f(ri)
    k2 = h0 * f(ri + 0.5 * k1)
    k3 = h0 * f(ri + 0.5 * k2)
    k4 = h0 * f(ri + k3)
    increment = (k1 + 2 * k2 + 2 * k3 + k4) / 6.
    return ri + increment
#Set timing step
t1 = 0
t2 = 10
h = 0.01
#prepare the timeline array
tpoints=[]
tpoints.append(t1)
xppoints=[]
xpoints=[]
yppoints=[]
ypoints=[]
#prepare the initial condition of the system
r= np.array ([0.0,1.0,1.0,0.0],float)
xppoints.append(r[0])
xpoints.append(r[1])
yppoints.append(r[2])
ypoints.append(r[3])
#Use Runge_Kutta method with adaptive step size to solve the system:
# compare two half-steps (r1) against one double step (r2) and shrink or
# grow the step according to rho.
# NOTE(review): rho(r1, r2, h) is evaluated three times per iteration and
# the final `h` update reuses r1/r2 computed before r was advanced with
# hp — confirm the stale estimate is intended.
i=0
while tpoints[i] <= t2:
    r1 = Runge_Kutta(r,h)
    r1 = Runge_Kutta(r1,h)
    r2 = Runge_Kutta(r,2*h)
    if rho(r1,r2,h) <1.0:
        hp = h*rho(r1,r2,h)**(1/4)
    else:
        hp=2*h
    r = Runge_Kutta(r,hp)
    tpoints.append(tpoints[i]+hp)
    i +=1
    h = h*rho(r1,r2,h)**(1/4)
    xppoints.append(r[0])
    xpoints.append(r[1])
    yppoints.append(r[2])
    ypoints.append(r[3])
#output the diagram: adaptive trajectory vs. the fixed-step result saved
# from Lab 6.
plt.figure(1)
plt.plot(xpoints,ypoints,'k.',label='Adapted')
x=np.loadtxt('Lab6Q1x.txt')
y=np.loadtxt('Lab6Q1y.txt')
plt.plot(x,y,label='non-adapted')
plt.xlabel('Position in X')
plt.ylabel('Position in Y')
plt.title('Space Garbage system by adapted stepsize')
plt.legend()
plt.show()
print (time.clock()-t0)
plt.figure(2)
dtpoints = np.array(tpoints[1:])-np.array(tpoints[:-1])
plt.plot(tpoints[:-1],dtpoints)
plt.xlabel('Time')
plt.ylabel('Adapted stepsize')
plt.title('Adapted stepsize as a function of time')
plt.show()
| true |
63a0a646f85e4dc72b7826db0c644548bd30ed58 | Python | clash402/caesar-cipher | /main.py | UTF-8 | 1,428 | 3.546875 | 4 | [
"MIT"
] | permissive | from art import logo
# PROPERTIES
# Lowercase alphabet repeated twice so shifted indices up to 25 past 'z'
# (and negative decode indices) still land on a valid letter.
alphabet = list("abcdefghijklmnopqrstuvwxyz" * 2)

# METHODS
def code(message_text, shift_num, direction_to_code):
    """Caesar-shift message_text by shift_num letters.

    direction_to_code selects 'encode' (shift forward) or 'decode'
    (shift backward); characters outside the alphabet pass through
    unchanged. Returns a formatted result string.
    """
    transformed = ""
    for symbol in message_text:
        if symbol not in alphabet:
            transformed += symbol
            continue
        if direction_to_code == "encode":
            idx = alphabet.index(symbol) + shift_num
        elif direction_to_code == "decode":
            idx = alphabet.index(symbol) - shift_num
        else:
            idx = 0
        transformed += alphabet[idx]
    return f"\nThe {direction_to_code}d text is: {transformed}"
# MAIN
# Interactive loop: ask for a direction, a message and a shift, print the
# transformed text, and repeat until the user declines.
print(logo)
app_is_in_progress = True
while app_is_in_progress:
    direction = input("\nType 'encode' to encrypt, type 'decode' to decrypt: ").lower()
    if direction == "encode" or direction == "decode":
        pass
    else:
        print(f"ERROR: {direction} is not a command.")
        continue
    text = input("Type your message: ").lower()
    # A Caesar shift repeats every 26 letters, so reduce modulo 26.
    # (The previous "% 25" mapped e.g. a shift of 26 to 1 instead of 0.)
    shift = int(input("Type the shift number: ")) % 26
    result = code(text, shift, direction)
    print(result)
    if input("Run again? (y/n) ").lower() != "y":
        print("Goodbye")
        app_is_in_progress = False
| true |
dd6831c3d2edfe75a691c0927892943fd74ed534 | Python | TFact-96/ProgTheorie | /algorithms/PullMove.py | UTF-8 | 5,763 | 3.171875 | 3 | [] | no_license | import numpy as np
import random
from classes.GridPoint import GridPoint
# Collect the free diagonal neighbours of (x, y, z). As a side effect,
# grid entries are created on demand for coordinates not seen before.
def check_diagonals(grid_object, x, y, z):
    """
    Check if diagonal coordinates from a point are not filled; returns all available moves.

    Side effect: any diagonal coordinate missing from grid_object.grid is
    inserted as an unfilled GridPoint (keys are the f-string form of the
    coordinate tuple, matching the rest of the grid code).

    :param grid_object: grid object holding the lattice and chain
    :param x: x coord
    :param y: y coord
    :param z: z coord
    :return: list of diagonal move offsets whose target cell is unfilled
    """
    # diagonal moves for chain pulling
    available_moves = []
    for move in grid_object.diagonal_moves:
        # cant overflow the grid
        if f"{x + move[0], y + move[1], z + move[2]}" not in grid_object.grid:
            grid_object.grid[f"{x + move[0], y + move[1], z + move[2]}"] = GridPoint(
                False, [x + move[0], y + move[1], z + move[2]]
            )
        # if its not filled
        if not grid_object.grid[f"{x + move[0], y + move[1], z + move[2]}"].filled:
            available_moves.append(move)
    return available_moves
def create_vectors(grid_object, node):
    """Return the coordinates of node i, the coordinates of its pull
    neighbour, and the vector from i to that neighbour.

    The neighbour is i+1 in the first half of the chain and i-1 in the
    second half, so pulls always fold toward the middle.

    :param grid_object: grid object holding the chain
    :param node: node being pulled
    :return: (list of i's coords, ndarray of neighbour coords, ndarray vector)
    """
    current_coords = [node.x, node.y, node.z]
    if node.n < (len(grid_object.protein) / 2):
        step = 1   # left half: fold using the next node
    else:
        step = -1  # right half: fold using the previous node
    neighbour = grid_object.grid_chain[int(node.n) + step]
    neighbour_coords = np.array(
        [neighbour[1][0], neighbour[1][1], neighbour[1][2]]
    )
    pull_vector = neighbour_coords - np.array(current_coords)
    return current_coords, neighbour_coords, pull_vector
def check_requirements(
    grid_object, available_moves, vector1, node_i_coords, node_i1_coords
):
    """Find every legal (L, C) placement for a pull move and pick one.

    A candidate is legal when both L (= node i + move) and C (= L -
    vector1) are unoccupied and L lies at unit distance from node i+1
    (or i-1). Returns (L, C, True) for a uniformly random legal
    candidate, or (0, 0, False) when none exists.

    :param grid_object: grid object (supplies occupancy via overlap)
    :param available_moves: candidate diagonal move offsets
    :param vector1: vector from node i to its pull neighbour
    :param node_i_coords: coordinates of node i
    :param node_i1_coords: coordinates of the pull neighbour
    :return: (L, C, found) tuple
    """
    candidates = []
    for move in available_moves:
        L = node_i_coords + np.array(move)
        C = L - vector1
        if (
            (not grid_object.overlap(L[0], L[1], L[2]))
            and (not grid_object.overlap(C[0], C[1], C[2]))
            and (np.linalg.norm(L - node_i1_coords) == 1.0)
        ):
            candidates.append([L, C])
    if not candidates:
        return 0, 0, False
    chosen = random.choice(candidates)
    return chosen[0], chosen[1], True
def move_residue_left(index, grid_object):
    """
    Move one residue in the first half of the chain: the node at `index`
    steps into the current position of the node two places ahead of it.

    :param index: chain index of the node to move
    :param grid_object: grid object (mutated via transfer_point)
    """
    residue_node_key = grid_object.grid_chain[index][0]
    residue_node = grid_object.grid[residue_node_key].nodes[0]
    residue_node_next_key = grid_object.grid_chain[index + 2][0]
    residue_node_next = grid_object.grid[residue_node_next_key].nodes[0]
    grid_object.transfer_point(
        residue_node, residue_node_next.x, residue_node_next.y, residue_node_next.z,
    )
def move_residue_right(grid_object, node):
    """
    Move the residues in the second half of the chain: walking from the
    chain's end down to node.n + 2, each node steps into the current
    position of the node two places before it.

    :param grid_object: grid object (mutated via transfer_point)
    :param node: the node being pulled (its index bounds the walk)
    """
    index_from_end = len(grid_object.protein)
    # residue of chain follows in footsteps
    while index_from_end > node.n + 2:
        index_from_end -= 1
        residue_node_key = grid_object.grid_chain[index_from_end][0]
        residue_node = grid_object.grid[residue_node_key].nodes[0]
        residue_node_next_key = grid_object.grid_chain[index_from_end - 2][0]
        residue_node_next = grid_object.grid[residue_node_next_key].nodes[0]
        grid_object.transfer_point(
            residue_node, residue_node_next.x, residue_node_next.y, residue_node_next.z,
        )
# pulling a node in the grid_object diagonally. Always towards the middle of the chain.
def pull_move(grid_object, node):
    """
    The main pull move: if a legal diagonal placement (L, C) exists for
    `node`, shift the preceding residues toward the middle, move the
    adjacent node to C, and finally move `node` itself to L. The order of
    the transfer_point calls matters — residues must vacate positions
    before their neighbours move into them.

    :param grid_object: grid object (mutated)
    :param node: node to pull
    """
    node_i_coords, node_i1_coords, vector1 = create_vectors(grid_object, node)
    available_moves = check_diagonals(
        grid_object, node_i_coords[0], node_i_coords[1], node_i_coords[2]
    )
    L, C, check = check_requirements(
        grid_object, available_moves, vector1, node_i_coords, node_i1_coords
    )
    if check:
        # For left side of chain folding towards the middle
        if node.n < (len(grid_object.protein) / 2):
            # residue of chain follows in footsteps
            for index in range(int(node.n - 1)):
                move_residue_left(index, grid_object)
            # Previous node moves to C
            previous_node_key = grid_object.grid_chain[int(node.n) - 1][0]
            previous_node = grid_object.grid[previous_node_key].nodes[0]
            grid_object.transfer_point(previous_node, C[0], C[1], C[2])
        # for right side of chain folding towards the middle
        else:
            move_residue_right(grid_object, node)
            # Previous node moves to C
            previous_node_key = grid_object.grid_chain[int(node.n) + 1][0]
            previous_node = grid_object.grid[previous_node_key].nodes[0]
            grid_object.transfer_point(previous_node, C[0], C[1], C[2])
        # node moves to L
        grid_object.transfer_point(node, L[0], L[1], L[2])
| true |
de566dc552a2b8b08ec1749b41da22c4c2473db0 | Python | ManuelaS/sklearn-lifelines | /sklearn_lifelines/estimators_wrappers.py | UTF-8 | 2,368 | 2.6875 | 3 | [] | no_license | from lifelines import AalenAdditiveFitter
from lifelines import CoxPHFitter
from sklearn.base import BaseEstimator
class CoxPHFitterModel(BaseEstimator):
    """scikit-learn estimator wrapper around lifelines' CoxPHFitter.

    fit() expects X to hold the covariates and y to be a frame containing
    the duration column (and, if configured, the event column); the two
    are merged into one frame before delegating to lifelines.
    """
    def __init__(self, duration_column=None, event_col=None, initial_beta=None, strata=None, alpha=0.95, tie_method='Efron', penalizer=0.0, **kwargs):
        # Parameters are stored verbatim (sklearn get_params/set_params contract).
        self.alpha = alpha
        self.tie_method = tie_method
        self.penalizer = penalizer
        self.duration_column = duration_column
        self.event_col = event_col
        self.initial_beta = initial_beta
        self.strata = strata

    def fit(self, X, y, **fit_params):
        """Fit a CoxPHFitter on X joined with the duration/event columns of y."""
        X_ = X.copy()
        X_[self.duration_column]=y[self.duration_column]
        if self.event_col is not None:
            X_[self.event_col] = y[self.event_col]
        est = CoxPHFitter(alpha=self.alpha, tie_method=self.tie_method, penalizer=self.penalizer)
        est.fit(X_, duration_col=self.duration_column, event_col=self.event_col, initial_beta=self.initial_beta, strata=self.strata, **fit_params)
        self.estimator = est
        return self

    def predict(self, X):
        # NOTE(review): this extracts a single scalar ([0].values[0]) from
        # predict_expectation — confirm behaviour when X has multiple rows.
        return self.estimator.predict_expectation(X)[0].values[0]
class AalenAdditiveFitterModel(BaseEstimator):
    """scikit-learn estimator wrapper around lifelines' AalenAdditiveFitter.

    Mirrors CoxPHFitterModel: fit() merges the duration/event columns of y
    into a copy of X and delegates to lifelines.
    """
    def __init__(self, duration_column=None, event_col=None, timeline=None, id_col=None, fit_intercept=True, alpha=0.95, coef_penalizer=0.5, smoothing_penalizer=0.0,**kwargs):
        # Parameters are stored verbatim (sklearn get_params/set_params contract).
        self.fit_intercept=fit_intercept
        self.alpha=alpha
        self.coef_penalizer=coef_penalizer
        self.smoothing_penalizer=smoothing_penalizer
        self.duration_column = duration_column
        self.event_col = event_col
        self.timeline = timeline
        self.id_col = id_col

    def fit(self, X, y, **fit_params):
        """Fit an AalenAdditiveFitter on X joined with the duration/event columns of y."""
        X_ = X.copy()
        X_[self.duration_column]=y[self.duration_column]
        if self.event_col is not None:
            X_[self.event_col] = y[self.event_col]
        est = AalenAdditiveFitter(fit_intercept=self.fit_intercept, alpha=self.alpha, coef_penalizer=self.coef_penalizer,
                                  smoothing_penalizer=self.smoothing_penalizer)
        est.fit(X_, duration_col=self.duration_column, event_col=self.event_col, timeline=self.timeline, id_col = self.id_col, **fit_params)
        self.estimator = est
        return self

    def predict(self, X):
        # NOTE(review): same single-scalar extraction as CoxPHFitterModel —
        # confirm behaviour when X has multiple rows.
        return self.estimator.predict_expectation(X)[0].values[0]
| true |
497cd22e80c0bbe0daaa973a7b9a1625b8656014 | Python | jglee087/AI-ImageCourse | /Keras/PracticeKeras/keras16_lstm1.py | UTF-8 | 2,337 | 3.21875 | 3 | [] | no_license | import numpy as np
from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM, Reshape
#1. Data: thirteen 3-step sequences; the target is the next value in
# each sequence.
x=array( [ [1,2,3], [2,3,4], [3,4,5], [4,5,6], [5,6,7], [6,7,8], [7,8,9], [8,9,10], \
    [9,10,11], [10,11,12], [20,30,40], [30,40,50], [40,50,60] ])
y=array( [4,5,6,7,8,9,10,11,12,13,50,60,70] )
# print(x.shape)
# print(y.shape)
# LSTM input must be 3-D: (samples, timesteps, features).
x = x.reshape(x.shape[0], x.shape[1], 1)
#2. Model definition (variant 2 is active; 1, 3 and 4 are kept for reference)
model=Sequential()
## 1
# model.add(LSTM(10, activation='elu', input_shape=(3,1), return_sequences = True))
# model.add(LSTM(8, activation='elu', input_shape=(3,1), return_sequences = False))
# model.add(Dense(5, activation='elu'))
# model.add(Dense(1))
## 2
model.add(LSTM(10, activation='relu', input_shape=(3,1), return_sequences = True))
model.add(LSTM(2, activation='elu', return_sequences = True))
model.add(LSTM(3, activation='tanh', return_sequences = True))
model.add(LSTM(5, activation='sigmoid', return_sequences = True))
model.add(LSTM(10, activation='exponential', return_sequences = False))
model.add(Dense(5, activation='elu'))
model.add(Dense(1))
## 3
# model.add(LSTM(10, activation='elu', input_shape=(3,1),return_sequences=True))
# #model.add(Reshape((1,10))) # (None,10) -> (None,10,1)
# model.add(LSTM(15, activation='elu')) #input_shape=(10,1)
# model.add(Dense(5, activation='elu'))
# model.add(Dense(1))
## 4
#model.add(LSTM(12, activation='elu', input_shape=(3,1),return_sequences=True))
# model.add(Dense(16, activation='elu'))
# model.add(LSTM(16, activation='elu',return_sequences=True))
# model.add(Dense(32, activation='elu'))
# model.add(LSTM(20, activation='elu',return_sequences=False))
# model.add(Dense(64, activation='elu'))
# model.add(Dense(1))
model.summary()
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=50, mode='auto')
#3. Training
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x, y, epochs=100, batch_size=1, verbose=2, \
    callbacks=[early_stopping])
#4. Evaluation
loss, mae = model.evaluate(x,y,batch_size=1)
print('\nLoss:',loss,',MAE: ',mae)
#5. Prediction on unseen sequences
x_input = array([[6.5,7.5,8.5],[50,60,70],[70,80,90], \
    [100,110,120] ]) #
x_input = x_input.reshape(4,3,1) # (4, 3, 1)
y_pred=model.predict(x_input,batch_size=1)
print(y_pred) | true |
4234de5ca87ed49dddb45c56bc483411fd3ccc14 | Python | johndpope/ECE551-homework | /homework7/code/ex6.py | UTF-8 | 2,498 | 3.0625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import ipdb
def fwd_bwd_filter(gamma, mu, x):
    """Apply a first-order recursive filter causally, then anti-causally.

    Forward sweep:  v[t] = mu*v[t-1] + gamma*x[t], with v[0] = x[0].
    Backward sweep: y[t-1] = mu*y[t] + gamma*v[t-1], with y[-1] = v[-1].
    Returns the backward-sweep output y as a float ndarray.
    """
    n = len(x)
    v = np.zeros(n)
    y = np.zeros(n)
    v[0] = x[0]  # boundary condition for the causal sweep
    for t in range(1, n):
        v[t] = mu * v[t - 1] + gamma * x[t]
    y[n - 1] = v[-1]  # boundary condition for the anti-causal sweep
    for t in reversed(range(1, n)):
        y[t - 1] = mu * y[t] + gamma * v[t - 1]
    return y
def interpolate(t, c, phi):
    """Evaluate s(t) = sum_n c[n] * phi(t - n) on the sample points t."""
    total = np.zeros(len(t))
    for shift, coeff in enumerate(c):
        total = total + coeff * phi(t - shift)
    return total
if __name__ == '__main__':
    # part b---------------------------------------------------------------------
    # Basis functions phi_0..phi_4 (box, hat, quadratic B-spline, raised
    # cosine, truncated cosine).
    phi = [
        lambda t: 1.0*(np.abs(t)<0.5), # phi_0
        lambda t: (1-np.abs(t)) * (np.abs(t)<1), # phi_1
        lambda t: 0.5*(1.5+t)**2 * ((t>=-1.5)*(t<-0.5)) \
                + (0.75-t**2) * ((t>=-0.5)*(t < 0.5)) \
                + 0.5*(1.5-t)**2 * ((t>=0.5)*(t<1.5)), # phi_2
        lambda t: 0.5*(1.0+np.cos(np.pi*t)) * (np.abs(t)<1.0), # phi_3
        lambda t: np.cos(np.pi*t) * (np.abs(t)<0.5) # phi_4
    ]
    mu = np.sqrt(8/(2*np.sqrt(2)+3))
    gamma = 2*np.sqrt(2)-3
    # NOTE(review): fwd_bwd_filter is defined as (gamma, mu, x) but called
    # here as (mu, gamma, x) — confirm the argument order is intended.
    filters = [
        lambda x: x,
        lambda x: x,
        lambda x: fwd_bwd_filter(mu, gamma, x),
        lambda x: x,
        lambda x: x,
    ]
    x = [6,7,5,6,9,2,4,3,6]
    N = 500
    t = np.linspace(0,10,N)
    for k in range(len(phi)):
        c = filters[k](x) # compute the coefficients
        s = interpolate(t, c, phi[k]) # compute the interpolating functions here
        plt.subplot(len(phi), 1, k+1)
        plt.plot(t, s)
        plt.plot(np.arange(9), x, 'rx')
        plt.ylabel('phi'+str(k))
    # part c---------------------------------------------------------------------
    N = 5 # number of points
    plt.figure() # open a figure
    plt.axis([0,1,0,1]) # ... and a axis
    plt.grid('on')
    points = np.array(plt.ginput(N)) # pick N points using mouse input
    plt.plot(points.T[0], points.T[1], 'rx') # plot them
    plt.close()
    # Interpolate the clicked points as a parametric curve (x(t), y(t)).
    t = np.linspace(0,N-1,500)
    plt.figure()
    for k in range(len(phi)):
        c0 = filters[k](points.T[0])
        c1 = filters[k](points.T[1])
        s0 = interpolate(t, c0, phi[k])
        s1 = interpolate(t, c1, phi[k])
        plt.plot(s0, s1, label='\phi'+str(k))
    plt.plot(points.T[0], points.T[1], 'o')
    plt.axis([0,1,0,1]); plt.grid('on'); plt.legend()
    plt.show()
| true |
270e8837055c583ef9934804d6f4134f0e95c1e4 | Python | AlexMGitHub/TheWholeEnchilada | /src/bokeh_server/eda/tabs/features_tab.py | UTF-8 | 6,440 | 3.109375 | 3 | [
"MIT"
] | permissive | """Return tab containing mutual information and PCA plots.
Plots:
- mi_plot: A horizontal bar plot of MI scores in descending order.
- pca_plot: A line graph of the cumulative sum of explained variance.
"""
# %% Imports
# Standard system imports
from pathlib import Path
import pickle
# Related third party imports
from bokeh.io import show
from bokeh.layouts import row
from bokeh.models import Panel
from bokeh.palettes import Category10, Category20, Turbo256
from bokeh.plotting import figure
import numpy as np
import pandas as pd
from sklearn.feature_selection import mutual_info_classif, \
mutual_info_regression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Local application/library specific imports
# %% Define tab
def feature_importance(data, metadata, numeric_cols):
    """Return plots describing importance of data features.

    Args:
        data: dict mapping column name -> list of row values.
        metadata: dict with at least 'target', 'dataset' and 'type'
            ('classification' or 'regression') keys.
        numeric_cols: names of the numeric columns in ``data``.

    Returns:
        tuple: (Panel for the 'Feature Importance' tab,
                list of the four features with the highest MI scores).
    """
    # -------------------------------------------------------------------------
    # Functions
    # -------------------------------------------------------------------------
    def make_mi_scores(X, y, ml_type):
        """Return mutual-information scores vs the target, sorted ascending.

        From Ryan Holbrook's feature engineering course on Kaggle.
        """
        # Integer-typed columns are treated as discrete features.
        disc_feats = X.dtypes == int
        if ml_type == 'classification':
            mi_scores = mutual_info_classif(X, y, discrete_features=disc_feats)
        elif ml_type == 'regression':
            mi_scores = mutual_info_regression(X, y,
                                               discrete_features=disc_feats)
        mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
        mi_scores = mi_scores.sort_values(ascending=True)
        return mi_scores

    def make_pca_components(features):
        """Standardize ``features``, fit PCA, return cumulative variance.

        BUGFIX: the original version took a parameter it never used and
        silently scaled the enclosing ``X`` instead; the parameter is now
        actually honoured.
        """
        # Scale data -- PCA is variance based, so features must be comparable
        scaler = StandardScaler()
        scaled_arr = scaler.fit_transform(features)
        scaled_df = pd.DataFrame(scaled_arr, columns=features.columns)
        # Create principal components
        pca = PCA()
        pca.fit(scaled_df)
        # Cumulative variance will begin at 0% for zero components
        components = list(range(pca.n_components_ + 1))  # +1 for zeroth comp
        evr = pca.explained_variance_ratio_  # Explained variance per component
        cum_var = np.cumsum(np.insert(evr, 0, 0))  # Insert zero variance
        return cum_var, components

    # -------------------------------------------------------------------------
    # Setup
    # -------------------------------------------------------------------------
    target = metadata['target']
    dataset = metadata['dataset']
    ml_type = metadata['type']
    data_df = pd.DataFrame.from_dict(data)
    X = data_df[numeric_cols]
    # For regression the target itself is numeric; exclude it from X.
    if ml_type == 'regression':
        X = X.drop(columns=[target])
    y = data_df[target]
    # Calculate mutual information
    mi_scores = make_mi_scores(X, y, ml_type)
    features = list(mi_scores.index)
    scores = list(mi_scores.values)
    # Calculate PCA components
    cum_var, components = make_pca_components(X)
    # Define plot colors. ROBUSTNESS: Bokeh's Category10 palette only has
    # keys 3..10, so clamp the lookup and slice for fewer than 3 features.
    if len(features) <= 10:
        colors = Category10[max(3, len(features))][:len(features)]
    elif len(features) <= 20:
        colors = Category20[len(features)]
    else:
        # Spread evenly over the 256-colour Turbo palette.
        color_idx = np.linspace(0, len(Turbo256), num=len(features),
                                endpoint=False, dtype=int)
        colors = [Turbo256[x] for x in color_idx]
    # Set layout constants
    MARGIN = 30
    PLOT_WIDTH = 600
    PLOT_HEIGHT = 600
    # -------------------------------------------------------------------------
    # Plots
    # -------------------------------------------------------------------------
    # Define Mutual Information plot (horizontal bars, ascending so the most
    # informative feature ends up on top).
    mi_plot = figure(y_range=features, background_fill_color="#DDDDDD",
                     output_backend="webgl", toolbar_location=None, tools="",
                     title='Mutual Information Scores for '
                           f'{dataset} dataset (target={target})',
                     width=PLOT_HEIGHT, height=PLOT_WIDTH,
                     margin=(0, MARGIN, 0, 0),
                     sizing_mode="scale_height")
    mi_plot.hbar(y=features, right=scores, height=0.8, color=colors)
    # Style MI plot
    mi_plot.grid.grid_line_dash = [6, 4]
    mi_plot.grid.grid_line_color = "white"
    mi_plot.axis.major_label_text_font_size = "1em"
    mi_plot.axis.major_label_text_font_style = "bold"
    mi_plot.axis.axis_label_text_font_size = "1em"
    mi_plot.axis.axis_label_text_font_style = "bold"
    mi_plot.title.text_font_size = "1em"
    mi_plot.title.text_font_style = "bold"
    mi_plot.xaxis.axis_label = "MI Score"
    # Define PCA plot (cumulative explained variance vs component count)
    pca_plot = figure(background_fill_color="#DDDDDD",
                      output_backend="webgl", toolbar_location=None, tools="",
                      title='PCA Cumulative Explained Variance Percentage for '
                            f'{dataset} dataset', width=PLOT_WIDTH,
                      height=PLOT_HEIGHT, sizing_mode="scale_height")
    pca_plot.line(x=components, y=cum_var, line_width=2)
    pca_plot.circle(x=components, y=cum_var, size=10)
    # Style PCA plot
    pca_plot.grid.grid_line_dash = [6, 4]
    pca_plot.grid.grid_line_color = "white"
    pca_plot.axis.major_label_text_font_size = "1em"
    pca_plot.axis.major_label_text_font_style = "bold"
    pca_plot.axis.axis_label_text_font_size = "1em"
    pca_plot.axis.axis_label_text_font_style = "bold"
    pca_plot.title.text_font_size = "1em"
    pca_plot.title.text_font_style = "bold"
    pca_plot.xaxis.axis_label = "Component Number"
    # -------------------------------------------------------------------------
    # Layout
    # -------------------------------------------------------------------------
    tab_layout = row(mi_plot, pca_plot, width=2*PLOT_WIDTH+MARGIN)
    tab = Panel(child=tab_layout, title='Feature Importance')
    return tab, features[-4:]  # Four most important features
if __name__ == '__main__':
    # Standalone debug entry point: load the pickled EDA payload and render
    # the Feature Importance tab in a browser.
    data_path = Path('src/bokeh_server/data/eda_data')
    with open(data_path, 'rb') as data_file:
        pickled_data = pickle.load(data_file)
    data = pickled_data['data']
    metadata = pickled_data['metadata']
    dataset = metadata['dataset']
    # Drop the synthetic primary-key column; it carries no signal.
    id_col = dataset + '_id'
    del data[id_col]
    table_cols = list(data.keys())
    numeric_cols = [x for x in table_cols if type(data[x][0]) in (float, int)]
    # BUGFIX: feature_importance returns (tab, top_features); the original
    # bound the whole tuple to ``tab`` and then crashed on ``tab.child``.
    tab, _ = feature_importance(data, metadata, numeric_cols)
    show(tab.child)
| true |
4d4fe6a80f4e6430c198724f9cb185c6c9f9505b | Python | sykoyoyo/RobotPy-Tutorial | /Part 1: The Basics/robot.py | UTF-8 | 2,718 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python3 # <---- This runs the code
'''
Start by importing your libraries. Refer to http://robotpy.readthedocs.io/projects/wpilib/en/latest/api.html
for the libraries and their API handles.
Wpilib - Base FRC package
'''
import wpilib
from wpilib import drive
class MyRobot(wpilib.IterativeRobot):
    """Minimal arcade-drive robot for the RobotPy tutorial.

    The Field Management System drives the lifecycle: every ``*Init``
    method runs once when a mode begins (no driving here -- use it for
    setup such as resetting encoders), while every ``*Periodic`` method
    is the loop that runs while that mode is active.
    """

    def robotInit(self):
        """First thing called on the robot: build motors, input, drivetrain.

        Python runs top to bottom, so everything must be created here
        before the periodic methods use it.
        """
        # Link CRIO PWM ports 0 and 1 to the physical speed controllers.
        self.left_motor = wpilib.Talon(0)
        self.right_motor = wpilib.Talon(1)
        # Driver input comes from the first Xbox controller.
        self.player_one = wpilib.XboxController(0)
        # Pair both motors into a differential (tank-style) drivetrain.
        self.robot_drive = wpilib.drive.DifferentialDrive(self.left_motor, self.right_motor)

    def disabledInit(self):
        """Called once when the robot is disabled; rarely needed."""
        pass

    def autonomousInit(self):
        """Runs once right before autonomousPeriodic; set up auto-only state."""
        pass

    def autonomousPeriodic(self):
        """Autonomous loop -- intentionally empty in this tutorial part."""
        pass

    def teleopInit(self):
        """Runs once right before teleopPeriodic, e.g. to reset state after auto."""
        pass

    def teleopPeriodic(self):
        """Teleop loop: arcade-drive from the controller sticks."""
        # arcadeDrive(forward/backward axis, rotation axis)
        self.robot_drive.arcadeDrive(self.player_one.getY(0), self.player_one.getX(0))
# Entry point: hand control of MyRobot over to the WPILib framework.
if __name__ == "__main__": #This is the end of the code. Don't mess with this part =)
    wpilib.run(MyRobot)
| true |
e801ba90e645a4eef0ec5e1b4024367521ceb749 | Python | ErrorInever/ResNetCarDetect | /run.py | UTF-8 | 1,904 | 2.546875 | 3 | [] | no_license | import argparse
import logging
import model
import time
from argparse import RawTextHelpFormatter
def _parse_bool(text):
    """Parse a CLI boolean value such as '0'/'1' or 'true'/'false'.

    argparse's ``type=bool`` is a trap: ``bool("0")`` is True because any
    non-empty string is truthy, so ``--cuda 0`` would have enabled CUDA.
    """
    if isinstance(text, bool):
        return text
    lowered = text.strip().lower()
    if lowered in ('1', 'true', 't', 'yes', 'y'):
        return True
    if lowered in ('0', 'false', 'f', 'no', 'n', ''):
        return False
    raise argparse.ArgumentTypeError("expected a boolean (0 or 1), got %r" % text)


def create_parser(argv=None):
    """Parse the command line and return the settings as a dict.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing a list makes the function usable from tests.

    Returns:
        dict with keys data_dir, cuda, epochs, lr, bs, workers, key.

    Raises:
        TypeError: if no data directory was supplied.
    """
    parser = argparse.ArgumentParser(prog='DenseNet', formatter_class=RawTextHelpFormatter)
    parser.add_argument("-d", "--dir", type=str, help="Root directory where the training data is stored.")
    # BUGFIX: was ``type=bool`` -- argparse would turn "--cuda 0" into True.
    parser.add_argument("-c", "--cuda", type=_parse_bool, help="Enable CUDA kernels (0 or 1) default False (0)", default=False)
    parser.add_argument("-e", "--epochs", type=int, help="Number of epochs.", default=1)
    parser.add_argument("-l", "--learning_rate", type=float, help="Number of step.", default=0.000025)
    parser.add_argument("-b", "--batch_size", type=int, help="Number of batch.", default=8)
    parser.add_argument("-w", "--workers", type=int, help="Number of train workers", default=8)
    parser.add_argument("-k", "--key", type=str, help="Api key of losswise", default='NA')
    args = parser.parse_args(argv)
    parser.print_help()
    time.sleep(0.5)  # give the user a moment to read the help text

    params = {
        "data_dir": args.dir,
        "cuda": args.cuda,
        "epochs": args.epochs,
        "lr": args.learning_rate,
        'bs': args.batch_size,
        "workers": args.workers,
        "key": args.key
    }

    if params["data_dir"] is None:
        raise TypeError('there is no data directory')

    return params
if __name__ == '__main__':
    # Log everything to densenet.log, overwriting any previous run.
    main_logger = logging.getLogger("main")
    main_logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler("densenet.log", mode='w')
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    main_logger.addHandler(file_handler)
    main_logger.info(' Program started')
    # Parse the command line, record the resulting settings, then train.
    params = create_parser()
    main_logger.info(' Arguments of cmd:{}'.format(params))
    model.DenseNet161.train_model(params)
| true |
4980201e65d2918c1e0c07e337736fd06ce76f88 | Python | MrSwayne/Reinforcement-Learning-for-Connect4 | /Algorithms/MCTS_UCT.py | UTF-8 | 2,758 | 2.53125 | 3 | [] | no_license | from Algorithms.MCTS import *
class MCTS_UCT(MCTS):
    """Monte-Carlo Tree Search using the UCT selection rule."""

    def __init__(self,memory = None, duration=None, depth=None, n=2000, e=1.414, g=0.9, l=1, a=0.005, debug=False):
        """Forward all tuning knobs to the MCTS base and set the reward range.

        ``e`` is the exploration constant (sqrt(2) by default); the other
        parameters' semantics live in the MCTS base class.
        """
        super().__init__(memory, duration, depth, n, e, g, l, a, debug)
        self.MAX_REWARD = 1
        self.MIN_REWARD = -1
    def get_name(self):
        """Return the algorithm's display name."""
        return "MCTS_UCT"
    def reward(self, node, state):
        """Reward for a terminal ``state`` as seen from ``node``.

        Returns 0 for non-terminal states, the midpoint for a draw.
        NOTE(review): a winner equal to ``node.player`` yields MIN_REWARD --
        presumably rewards are scored from the opponent's point of view
        (node.player being the player to move); confirm against MCTS base.
        """
        if not state.game_over:
            return 0
        check_win = state.winner
        if(int(check_win) == int(node.player)):
            return self.MIN_REWARD
        elif(int(check_win) < 0):
            # Negative winner codes (aborted/invalid games?) also score MIN.
            return self.MIN_REWARD
        elif(int(check_win) == 0):
            # Draw: halfway between min and max reward.
            return (self.MAX_REWARD + self.MIN_REWARD) / 2
        else:
            return self.MAX_REWARD
    def select_node(self):
        """Delegate node selection to the base implementation unchanged."""
        node = super().select_node()
        return node
    def child_policy(self, node):
        """Pick the move to actually play from ``node``'s children.

        In max-explore mode the least-visited children are preferred;
        otherwise the children with the best mean score. Ties are broken
        uniformly at random.
        """
        highest_val = float("-inf")
        best_children = []
        if self.max_explore:
            min_visits = float("inf")
            for child in node.children:
                if child.visit_count < min_visits:
                    best_children = []
                    min_visits = child.visit_count
                if child.visit_count <= min_visits:
                    best_children.append(child)
        else:
            for child in node.children:
                # NOTE(review): divides by visit_count -- assumes every
                # child has been visited at least once at play time.
                score = child.score / child.visit_count
                if score > highest_val:
                    best_children = []
                    highest_val = score
                if score >= highest_val:
                    best_children.append(child)
        return random.choice(best_children)
    def tree_value(self, node):
        """UCT value of ``node``: exploitation term plus exploration bonus.

        Unvisited nodes get +inf so each child is expanded at least once.
        NOTE(review): textbook UCT *multiplies* the exploration constant by
        the sqrt term (e * sqrt(2 ln N / n)); here it is *added* -- confirm
        the '+' is intentional before changing.
        """
        if node.visit_count == 0:
            return float("inf")
        else:
            return (node.score / node.visit_count) + (self.e + math.sqrt(2*math.log(node.parent.visit_count) / node.visit_count))
    def tree_policy(self, node):
        """Select the child with the highest UCT value (random tie-break)."""
        max_score = float('-inf')
        best_children = []
        for child in node.children:
            score = self.tree_value(child)
            if score > max_score:
                best_children = []
                max_score = score
            if score >= max_score:
                best_children.append(child)
        return random.choice(best_children)
    def backpropagate(self, node, reward, num_steps):
        """Propagate a simulation result up to the root.

        Win adds 1, draw adds 0.5, loss adds 0; the reward's sign flips at
        every level because players alternate. ``num_steps`` is accepted
        for interface compatibility but unused here.
        """
        while node is not None:
            if reward >= 1:
                node.score += 1
            elif reward == 0:
                node.score += 0.5
            else:
                node.score += 0
            reward *= -1
            node.visit_count += 1
            node = node.parent
| true |
f17d7b6373eaf3e33112cd313c687d3f898c1f28 | Python | jlcanela/python-db-sample | /src/sql-alchemy-sample.py | UTF-8 | 2,226 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()


class Application(Base):
    """An application that owns log entries (one-to-many with Log)."""
    __tablename__ = 'application'
    # Columns of the 'application' table; each is also a normal Python
    # instance attribute on the mapped object.
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
class Log(Base):
    """A single log message, linked to its owning Application."""
    __tablename__ = 'log'
    # Columns of the 'log' table; application_id is the foreign key and
    # 'application' the ORM relationship back to the owning row.
    id = Column(Integer, primary_key=True)
    message = Column(String(250))
    application_id = Column(Integer, ForeignKey('application.id'))
    application = relationship(Application)
# Create an engine pointing at the local MySQL 'logging' database.
# NOTE(review): root credentials are hard-coded in the URL -- fine for a
# sample, but move them to config/env vars for anything real.
engine = create_engine('mysql+mysqlconnector://root:MYSQL_ROOT_PASSWORD@localhost/logging')
# user="root",passwd="MYSQL_ROOT_PASSWORD", auth_plugin="mysql_native_password")
# Create all tables registered on Base. This is equivalent to "CREATE TABLE"
# statements in raw SQL.
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert an Application row
new_app = Application(name='sample-app')
session.add(new_app)
session.commit()
current_app = session.query(Application).first()
print("application name:", current_app.name)
# Insert a Log row attached to the application via the ORM relationship
new_log= Log(message='a log', application=new_app)
session.add(new_log)
session.commit()
logs = session.query(Log).all()
print(log.application.name, ":", log.message)
| true |
c053dcdb5aa108bd3d30768d84803d926b53dfcf | Python | JaydipModhwadia/PythonCode | /Names In Class.py | UTF-8 | 315 | 4.09375 | 4 | [] | no_license | # Names in the class
# Class roster demo: print the list, each entry, then mutate and re-print.
my_list = ["John", "Derek", "Samantha", "Yvonne"]
print(my_list)
for name in my_list:
    print(name)
my_list.append("Willy")
print(my_list)
my_list.sort()
print(my_list)
my_list.extend(["WillyNumba2", "WillyNumba3"])
print(my_list)
| true |
4898493667c49381b569df8cdc97b746cee93d12 | Python | Mozzie395236/Packages | /Moz_tools.py | UTF-8 | 10,713 | 2.6875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import itertools
from sklearn.decomposition import PCA
import math
from scipy.stats import skew
from sklearn.preprocessing import StandardScaler
from scipy.special import boxcox1p
from fancyimpute import KNN
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
class Plots():
    """Quick EDA plotting helpers around a pandas DataFrame.

    The last column of ``data`` is assumed to be the target ``y``.
    NOTE(review): the ``cat=0`` / ``num=0`` defaults are integer sentinels;
    passing an actual pandas Index makes ``cat == 0`` an elementwise
    comparison and ``if cat == 0`` raises ValueError. ``None`` sentinels
    would be safer -- confirm before changing call sites.
    """

    def __init__(self, data=None):
        """Cache the frame plus derived categorical/numeric column indexes."""
        if data is not None:
            self.df = data
            self.shape = data.shape
            self.cat = data.dtypes[data.dtypes == object].index
            self.num = data.dtypes[data.dtypes != object].index
            self.y = data.columns[-1]
    def distribution(self, df=None, cat=0, num=0):
        """Show a count plot per categorical and a density per numeric column."""
        if df is None:
            df = self.df
        if cat == 0:
            cat = self.cat
        if num == 0:
            num = self.num
        for i in range(len(cat)):
            sns.countplot(x=df[cat].columns[i], data=df)
            plt.show()
        for i in df[num].columns:
            df[i].plot(kind='density', title=i)
            plt.show()
    def unique_ratio(self, df=None, threshold=0):
        """Bar-plot each column's unique-value ratio above ``threshold``."""
        if df is None:
            df = self.df
        plt.figure(figsize=(20, 10))
        # ratio = number of distinct values / number of rows, per column
        df = df.apply(lambda x: x.unique().shape[0], axis=0) / df.shape[0]
        df[df > threshold].plot(kind='bar', title='unique ratio')
    def na_ratio(self, df=None, cols=None, rot=45, threshold=0):
        """Bar-plot missing-value counts for columns whose NA ratio > threshold."""
        if df is None:
            df = self.df
        if cols is None:
            cols = df.columns
        tmp0 = df.isna().sum() / df.shape[0]
        tmp1 = tmp0[tmp0 > threshold]
        df[cols][tmp1.index].isnull().sum().plot(kind='bar', rot=rot, title='number of missing values')
    def correlations_to_y(self, df=None, y=0, num=0, cat=0, threshold=0.6):
        """Plot correlations with the target: bars for numeric, boxplots for categorical."""
        if df is None:
            df = self.df
        if y == 0:
            y = self.y
        if num == 0:
            num = self.num
        if cat == 0:
            cat = self.cat
        tmp_ = []
        for i in num:
            tmp_ += [df[y].corr(df[i])]
        cor = pd.Series(tmp_, index=num)
        # only columns whose |corr| exceeds the threshold are shown
        cor[abs(cor) > threshold].plot(kind="barh")
        plt.show()
        for i in cat:
            data = pd.concat([df[y], df[i]], axis=1)
            sns.boxplot(x=i, y=y, data=data)
            plt.show()
    def correlation_scatter(self, df=None, columns=None):
        """Draw a seaborn pairplot of the given (default: numeric) columns.

        NOTE(review): seaborn renamed ``size=`` to ``height=``; newer
        versions reject ``size`` -- confirm the pinned seaborn version.
        """
        if df is None:
            df = self.df
        if columns is None:
            columns = self.num
        sns.set()
        sns.pairplot(df[columns], size=2.5)
        plt.show()
    def correlation_heat_map(self, df=None, threshold=0, method='pearson', show=True):
        """Heat-map pairwise correlations above ``threshold`` (upper triangle masked).

        NOTE(review): ``np.bool`` was removed in NumPy >= 1.24; this line
        needs plain ``bool`` on modern NumPy.
        """
        if df is None:
            df = self.df
        corr = df.corr(method=method)
        c = corr[abs(corr) > threshold]
        c = c[c != 1]  # drop the trivial self-correlations
        c.dropna(how='all', axis=1, inplace=True)
        c.dropna(how='all', inplace=True)
        mask = np.zeros_like(c, dtype=np.bool)
        mask[np.triu_indices_from(mask)] = True
        fig, ax = plt.subplots(figsize=(10, 10))
        colormap = sns.diverging_palette(220, 10, as_cmap=True)
        sns.heatmap(c, mask=mask, cmap=colormap, annot=show, fmt=".2f")
        plt.xticks(range(len(c.columns)), c.columns)
        plt.yticks(range(len(c.columns)), c.columns)
        plt.show()
    @staticmethod
    def plot_confusion_matrix(cm, classes,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """Render a confusion matrix ``cm`` with per-cell value labels.

        NOTE(review): the banner below always says "without normalization"
        even when ``normalize=True``; it also prints the raw matrix before
        normalizing -- probably copied from the sklearn example's bug.
        """
        print('Confusion matrix, without normalization')
        print(cm)
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
class DataCleaner():
    """Missing-value, outlier and skewness utilities for a DataFrame.

    All methods mutate and return the frame (``self.df`` by default).
    """

    def __init__(self, df=None):
        """Cache the frame plus categorical/numeric column indexes."""
        if df is not None:
            self.df = df
            self.cat = self.df.dtypes[self.df.dtypes == object].index
            self.num = self.df.dtypes[self.df.dtypes != object].index
    def fill_na(self, df=None, fill_zero=None, fill_mode=None, fill_knn=None, k=None, fill_value=None, value='None'):
        """Fill NAs per column group: zeros, column mode, KNN imputation,
        or a constant ``value`` (default the string 'None').

        Each ``fill_*`` argument is an iterable of column names; ``k`` is
        the neighbour count for the KNN imputer.
        """
        if df is None:
            df = self.df
        if fill_zero is not None:
            for i in fill_zero:
                df[[i]] = df[[i]].fillna(value=0)
        if fill_mode is not None:
            for i in fill_mode:
                df[[i]] = df[[i]].fillna(value=df[i].mode()[0])
        if fill_knn is not None:
            for i in fill_knn:
                # NOTE(review): imputing one column at a time gives KNN no
                # other features to learn from -- confirm that's intended.
                df[[i]] = KNN(k=k).fit_transform(df[[i]])
        if fill_value is not None:
            for i in fill_value:
                df[[i]] = df[[i]].fillna(value=value)
        return df
    def fill_outliers(self, df=None, cols=None, method='Standardize', replace=True):
        """Clip (``replace=True``) or drop values outside mean +/- 3*spread.

        ``method='Standardize'`` works on z-scores; ``'IQR'`` uses the
        inter-quartile range on raw values.
        NOTE(review): with ``replace=False`` the 'Standardize' branch
        filters ``tmp_`` shorter than the frame and then assigns it back to
        ``df[col]`` -- that looks like a latent length-mismatch bug.
        NOTE(review): ``tmp_[i]`` in the IQR branch is label-based Series
        indexing; it assumes a default RangeIndex.
        """
        if df is None:
            df = self.df
        if cols is None:
            cols = self.num
        if method == 'Standardize':
            for col in cols:
                scaler_ = StandardScaler()
                scaler_.fit(df[[col]])
                tmp_ = scaler_.transform(df[[col]])
                maxv = tmp_.mean() + 3 * tmp_.std()
                minv = tmp_.mean() - 3 * tmp_.std()
                if replace:
                    for i in range(len(tmp_)):
                        if tmp_[i] < minv:
                            tmp_[i] = minv
                        if tmp_[i] > maxv:
                            tmp_[i] = maxv
                else:
                    tmp_ = tmp_[tmp_ > minv][tmp_ < maxv]
                df[col] = scaler_.inverse_transform(tmp_)
        elif method == 'IQR':
            for col in cols:
                tmp_ = df[col]
                IQR = df[col].quantile(0.75) - df[col].quantile(0.25)
                m = df[col].mean()
                maxv = m+3*IQR
                minv = m-3*IQR
                if replace:
                    for i in range(len(tmp_)):
                        if tmp_[i] < minv:
                            tmp_[i] = minv
                        if tmp_[i] > maxv:
                            tmp_[i] = maxv
                else:
                    df[col] = tmp_[tmp_ > minv][tmp_ < maxv]
        return df
    def skewness(self, df=None, num=None, method='box-cox', lamd=0.16, threshold=0.75):
        """Transform columns whose skew exceeds ``threshold``.

        ``method='log1p'`` applies log(1+x); ``'box-cox'`` applies the
        one-parameter Box-Cox transform with lambda ``lamd``.
        """
        if df is None:
            df = self.df
        if num is None:
            num = self.num
        skewed_feats = df[num].apply(lambda x: skew(x.dropna()))
        skewed_feats = skewed_feats[skewed_feats > threshold]
        skewed_feats = skewed_feats.index
        if method == 'log1p':
            df[skewed_feats] = np.log1p(df[skewed_feats])
        if method == 'box-cox':
            df[skewed_feats] = boxcox1p(df[skewed_feats], lamd)
        return df
class FeatureEngineering():
    """Feature construction helpers.

    Methods operate on the given DataFrame (``self.df`` when none is
    passed) and mutate it in place as well as returning it.
    """

    def __init__(self, df=None):
        if df is not None:
            self.df = df

    def dimension_reduction_num(self, cols, new_name, df=None, drop=True, n=1):
        """Collapse numeric columns ``cols`` into ``n`` PCA component(s).

        Args:
            cols: columns to combine.
            new_name: name for the resulting component column.
            df: frame to operate on; defaults to ``self.df``.
            drop: drop the source columns afterwards.
            n: number of principal components to keep.

        Returns:
            The modified DataFrame.
        """
        if df is None:
            df = self.df
        pca = PCA(n_components=n)
        component = pca.fit_transform(df[cols])
        if drop:
            df.drop(columns=cols, inplace=True)
        df[new_name] = component
        return df

    def dimension_reduction_cat(self, cols, new_name, df=None, drop=True, factorize=True):
        """Merge categorical columns into one by multiplying their codes.

        When ``factorize`` is true each column is first replaced by its
        integer factor codes; the new column is the elementwise product.
        """
        if df is None:
            df = self.df
        if factorize:
            for col in cols:
                df[col] = pd.factorize(df[col])[0]
        combined = 1
        for col in cols:
            combined *= df[col]
        df[new_name] = combined
        if drop:
            df.drop(columns=cols, inplace=True)
        return df

    def extra_pow(self, cols, pow=3, df=None):
        """Add polynomial columns col**2 .. col**pow, named e.g. 'a2', 'a3'.

        BUGFIX/PERF: the original iterated ``df[col][i]`` with a positional
        counter -- a label lookup that breaks on non-RangeIndex frames and
        an O(n) Python loop. Vectorised arithmetic fixes both; astype(float)
        preserves the float results that ``math.pow`` produced.
        """
        if df is None:
            df = self.df
        for p in range(2, pow + 1):
            for col in cols:
                df[col + str(p)] = df[col].astype(float) ** p
        return df
def rmse_cv(model, train_x, train_y, cv=10):
    """Cross-validated RMSE of ``model`` on the training data (lower is better)."""
    neg_mse = cross_val_score(model, train_x, train_y,
                              scoring="neg_mean_squared_error", cv=cv)
    return np.sqrt(-neg_mse)
class FeatureSelection():
    """Linear-model based feature selection (Lasso/Ridge coefficients)."""

    def __init__(self, train_x=None, train_y=None, test_x=None):
        """Cache training features/target (and optionally test features)."""
        if train_x is not None:
            self.train_x = train_x
        if train_y is not None:
            self.train_y = train_y
        if test_x is not None:
            self.test_x = test_x
    def lassocv(self, alphas=[1, 0.1, 0.001, 0.0005], train_x=None, train_y=None, cv=10):
        """Fit LassoCV over ``alphas`` and return coefficients per feature.

        Also prints the cross-validated RMSE of the fitted model.
        (The mutable default ``alphas`` list is never mutated, so it is
        benign, but a tuple would be safer.)
        """
        if train_x is None:
            train_x = self.train_x
        if train_y is None:
            train_y = self.train_y
        model_lasso = LassoCV(alphas=alphas, cv=cv).fit(train_x, train_y)
        coef = pd.Series(model_lasso.coef_, index=train_x.columns)
        print('rmse score:', rmse_cv(model_lasso, train_x=train_x, train_y=train_y, cv=cv).mean())
        return coef
    def ridgecv(self, alphas=[0.05, 0.1, 0.3, 1, 3, 5, 10], train_x=None, train_y=None, cv=10):
        """Grid-search Ridge alphas by CV RMSE, plot the curve, return coefs.

        Refits a Ridge at the best alpha and returns its coefficients as a
        Series indexed by feature name.
        """
        if train_x is None:
            train_x = self.train_x
        if train_y is None:
            train_y = self.train_y
        cv_ridge = [rmse_cv(Ridge(alpha=alpha), train_x, train_y, cv=cv).mean()
                    for alpha in alphas]
        cv_ridge = pd.Series(cv_ridge, index=alphas)
        cv_ridge.plot(title="Validation - Ridge")
        plt.xlabel("alpha")
        plt.ylabel("rmse")
        plt.show()
        print('rmse score:', cv_ridge.min())
        # Refit at the alpha with the lowest cross-validated RMSE.
        r = Ridge(cv_ridge.idxmin())
        r.fit(train_x, train_y)
        coef = pd.Series(r.coef_, index=train_x.columns)
        return coef
if __name__ == '__main__':
    # Smoke test: wire a small random integer frame into the Plots helper.
    df = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), columns=['a', 'b', 'c', 'd', 'e'])
    tmp = Plots(df)
| true |
9d893e3f2ca10a27a1c3a7e9d76615842b3e4606 | Python | diabolical-ninja/Finsights | /margin_call_analysis/helpers.py | UTF-8 | 7,367 | 3.3125 | 3 | [
"MIT"
] | permissive | """
Title: Margin Loan LVR Analysis Helper Functions
Desc: A collection of helper functions used throughout the analysis
"""
import numpy as np
import pandas as pd
from string import digits
import math
from datetime import datetime
from yahoo_finance import get_yahoo_history
from alpha_vantage.timeseries import TimeSeries
def remove_numbers(string: str) -> str:
    """Strip all decimal digits from a string.

    Eg, a1b2c3 -> abc

    Args:
        string (str): String to clean

    Returns:
        str: Provided string without numbers
    """
    # One C-level pass with a deletion table. (The redundant function-local
    # ``from string import digits`` that shadowed the module-level import
    # has been removed.)
    return string.translate(str.maketrans("", "", digits))
def get_historical_data(key, time_slice, symbol):
    """Wrapper function to source historical EOD stock data

    Args:
        key (str): alphavantage api key
        time_slice (str): Aggregate level to fetch. Options are:
                            - daily
                            - weekly
                            - monthly
        symbol (str): Symbol used, including the exchange

    Returns:
        DataFrame: EOD dataframe

    NOTE(review): an unrecognised ``time_slice`` leaves ``df`` unbound and
    raises NameError below -- consider validating up front.
    """
    # Instantiate Session (pandas output, integer row index -> 'date' column)
    ts = TimeSeries(key=key, output_format="pandas", indexing_type="integer")
    # Retrieve Data (metadata from the API is intentionally discarded)
    if time_slice == "daily":
        df, metadata = ts.get_daily(symbol, outputsize="full")
    elif time_slice == "weekly":
        df, metadata = ts.get_weekly(symbol)
    elif time_slice == "monthly":
        df, metadata = ts.get_monthly(symbol)
    # Replace 0's with NA's because they're almost certainly false
    df.replace(0, np.nan, inplace=True)
    # Alpha Vantage prefixes headers like "1. open"; strip digits and ". "
    df.columns = [remove_numbers(x).replace(". ", "") for x in df.columns.tolist()]
    # Fix Date type (assumes the API returns a 'date' column -- TODO confirm)
    df["date"] = pd.to_datetime(df["date"])
    return df
def calc_lvr(df, initial_investment, initial_lvr):
    """For a given investment, calculate LVR at each time unit from price moves.

    Uses the close price for the calcs.

    Args:
        df (data.frame): EOD dataframe with 'date' and 'close' columns.
        initial_investment (float): Personal (equity) contribution only --
            the loan amount is derived from it below, so it must NOT
            already include the borrowed portion (the previous docstring
            incorrectly said "personal contribution + loan").
        initial_lvr (float): Decimal percentage of loan value ratio (lvr).

    Returns:
        data.frame: Timeseries data frame with price, investment value & LVR.
    """
    # Loan size such that loan / (equity + loan) == initial_lvr
    borrowed_investment = initial_investment / (1 - initial_lvr) * initial_lvr
    total_investment = initial_investment + borrowed_investment

    # Shares bought at the first non-zero close price (whole shares only)
    start_price = df[df.close > 0]["close"].iloc[0]
    initial_holdings = math.floor(total_investment / start_price)

    # Historical LVRs: the loan stays fixed while the holding value floats
    df_lvr = pd.DataFrame(
        {
            "date": df.date,
            "price": df.close,
            "value": df.close * initial_holdings,
            "lvr": borrowed_investment / (df.close * initial_holdings),
        }
    )
    return df_lvr
def calc_drawdown(df, price_col, window_size):
    """Append a 'market_drawdown' column (percent fall from the rolling peak).

    Reference: https://www.investopedia.com/terms/d/drawdown.asp

    Args:
        df (data.frame): Price history dataframe.
        price_col (str): Column name holding the price data.
        window_size (int): Look-back length; its unit follows the frame's
            aggregation level (days/weeks/months).

    Returns:
        data.frame: The same frame with 'market_drawdown' added.
    """
    # Rolling prior maximum the current price is compared against.
    rolling_peak = df[price_col].rolling(window=window_size, min_periods=1).max()
    # Percentage change relative to that peak, i.e. the drawdown.
    df["market_drawdown"] = (df[price_col] / rolling_peak - 1.0) * 100
    # A -100% drawdown only arises from a zero price; treat it as no data.
    df["market_drawdown"].replace(-100, 0, inplace=True)
    return df
def calc_margin_call_drop(current_lvr, base_lvr, buffer=0.0):
    """Percentage value drop that would trigger a margin call.

    Args:
        current_lvr (float): Current decimal LVR on the loan.
        base_lvr (float): Maximum LVR the lender allows.
        buffer (float, optional): Defaults to 0.0. Extra LVR headroom
            before the margin call actually triggers.

    Returns:
        float: Decimal representation of the % drop triggering a margin call.
    """
    headroom_ratio = current_lvr / (base_lvr + buffer)
    return (1 - headroom_ratio) * 100
def calc_max_safe_lvr(max_drawdown, base_lvr, buffer=0.0):
    """Maximum LVR that would have historically avoided a margin call.

    Args:
        max_drawdown (float): Worst observed drawdown, in percent; the
            sign is ignored (it is normalised to negative internally).
        base_lvr (float): Maximum LVR allowed.
        buffer (float, optional): Defaults to 0.0. LVR buffer before a
            margin call triggers.

    Returns:
        float: Historically safe max LVR (scaled by the percent drawdown
        input, matching the original formula).
    """
    drawdown_pct = -abs(max_drawdown)
    return (100 + drawdown_pct) * (base_lvr + buffer)
def create_margin_call_range_table(max_lvr, buffer=0.1, step_size=0.01):
    """Build a lookup table of the % drop triggering a margin call per LVR.

    Args:
        max_lvr (float): Decimal upper bound on the allowed LVR.
        buffer (float, optional): Defaults to 0.1. LVR buffer prior to a
            margin call.
        step_size (float, optional): Defaults to 0.01. Granularity of the
            LVR intervals.

    Returns:
        data.frame: Columns 'lvr' and 'mc_trigger' (the % drop that would
        trigger a margin call at that LVR).
    """
    # LVR grid from 0 up to (and including) max_lvr + buffer.
    levels = np.arange(0, max_lvr + buffer + step_size, step_size)
    triggers = [calc_margin_call_drop(level, max_lvr, buffer) for level in levels]
    return pd.DataFrame({"lvr": levels, "mc_trigger": triggers})
def margin_call_samples(symbol, time_slice, drawdown_window, lvr_lookup):
    """Helper function to wrap all steps for one symbol.

    Args:
        symbol (str): Ticker code, noting to append exchange if required
        time_slice (str): daily, weekly or monthly
        drawdown_window (int): Number of periods to look back. Note the
            units change based on the timeslice above
        lvr_lookup (data.frame): Lookup table with LVRs & their
            corresponding margin call trigger (mutated in place)

    Returns:
        data.frame: Historical EOD data
        data.frame: Lookup table with margin call frequency appended
        float: Max safe LVR that would have historically avoided a margin call
    """
    # Get historical price data (network call)
    df_eod = get_yahoo_history(
        symbol=symbol,
        start_date="2000-01-01",
        end_date=datetime.now().strftime("%Y-%m-%d"),
        frequency=time_slice,
    )
    df_eod["symbol"] = symbol

    # Calculate Drawdown
    df_eod = calc_drawdown(df_eod, price_col="Close", window_size=drawdown_window)

    # Count margin calls for each LVR row in the lookup table
    mc_counts = list()
    for row, col in lvr_lookup.iterrows():
        # Flag periods whose drawdown would have triggered a margin call
        margin_calls = df_eod["market_drawdown"].apply(
            lambda x: 1 if abs(x) > col["mc_trigger"] and x < 1 else 0
        )

        # BUGFIX: the old ``counts[1] if len(counts) > 1 else 0`` returned 0
        # whenever value_counts held a single bucket -- including the case
        # where EVERY period was a margin call. Series.get handles both.
        counts = margin_calls.value_counts()
        mc_counts.append(int(counts.get(1, 0)))

    lvr_lookup["{}_mc_count".format(symbol.replace(".", "_"))] = mc_counts

    # Calculate historically safe max LVR (note buffer absorbed into max lvr)
    max_historical_safe_lvr = calc_max_safe_lvr(
        df_eod.market_drawdown.min(), lvr_lookup.lvr.max()
    )

    return df_eod, lvr_lookup, max_historical_safe_lvr
| true |
a84392b46faafb3ec3c754454fe717a1af501192 | Python | allensarmiento/Secure-Chat-System | /server/tests/Scratch/test_AsymmetricEncryption.py | UTF-8 | 913 | 2.9375 | 3 | [] | no_license | from unittest import TestCase
from Crypto.PublicKey.RSA import RsaKey
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import base64
import secrets
class TestAsymmetricEncryption(TestCase):
    """Demonstrates an RSA (PKCS#1 OAEP) encrypt/decrypt round trip."""

    def setUp(self) -> None:
        """Generate a fresh 2048-bit RSA key pair before each test."""
        keypair = RSA.generate(2048)
        self.private_key: RsaKey = RSA.import_key(keypair.export_key("PEM"))
        self.public_key: RsaKey = RSA.import_key(keypair.publickey().export_key("PEM"))

    def test_demo1(self):
        """Encrypt with the public key; recover the plaintext with the private key."""
        msg = b'This is a test message'
        # Sender side: OAEP-encrypt, then base64-encode for transport.
        encryptor = PKCS1_OAEP.new(self.public_key)
        wire_payload = base64.b64encode(encryptor.encrypt(msg))
        # Receiver side: undo the base64 framing, then decrypt.
        decryptor = PKCS1_OAEP.new(self.private_key)
        message = decryptor.decrypt(base64.b64decode(wire_payload))
        self.assertEqual(msg, message)
| true |
c9c9adb764a0219c639421ceb04f3451fcc99307 | Python | i-ang/Dimsum | /dimsum.py | UTF-8 | 1,509 | 3.71875 | 4 | [] | no_license | import numpy as np
# This program calculates the Prices of your Dimsum
# based on the total price of the bill and
# how many dishes you have had!
#
# At the moment, it only takes 2 sizes.
# The next version will take 3 sizes.
def increment(price):
    """Return the given price increased by one."""
    return price + 1
def dec(price):
    """Return the given price decreased by one."""
    return price - 1
def price_divide(price, number):
    """Return the per-item price: ``price`` split evenly across ``number`` items."""
    return price / number
# Unused helper kept for a future 3-size version:
#def check_valid(s_price, m_price, l_price):
#    if (s_price <= m_price) and (m_price <= l_price):
#        return 1
#    else:
#        return 0
# Running totals for the small/medium price split.
sm_price = 0.0
me_price = 0.0
run = 1
s_num = float(input('how many small did u get?'))
print(s_num)
m_num = float(input('how many medium'))
print(m_num)
#l_num = int(input('how many large'))
#print(l_num)
total = float(input('how much was it?'))
#increment small price, until total price
#increment med price, decrement small. med=total-small
#keep going until small is lesser med
# NOTE(review): this loop always runs exactly once (it sets sm_price to
# total, making the condition false) -- it is really just initialisation.
while (sm_price < total):
    sm_price = total
    print('$',sm_price)
# Shift dollars from small to medium until a small item costs less (by
# floor comparison) than a medium one.
while (run == 1):
    sm_price = dec(sm_price) #decrement small price.
    sm_1 = price_divide(sm_price,s_num) #find individual price
    me_price = total - sm_price #make sure these are floats!!
    med_1 = price_divide(me_price,m_num)
    if(np.floor(sm_1) >= np.floor(med_1)):
        run = 1
        continue
    elif(np.floor(sm_1) < np.floor(med_1)):
        run = 0
        break
    # NOTE(review): unreachable -- both branches above continue or break,
    # so this per-iteration print never executes.
    print(sm_1,med_1)
| true |
1c9cfd482883087ff7a6818c0873687f950947c2 | Python | samadhan563/Python-Programs | /Chapter_07/01_Employee.py | UTF-8 | 1,536 | 3.78125 | 4 | [] | no_license | # Program for if else in python
'''
Author : Samadhan Gaikwad.
Software Developer
Location: Pune.
'''
# ---------------------------------------------------------------------------------------------
# Program
from Employee import Employee
# Single object of employee
# id = int(input())
# firstName = str(input())
# lastName = str(input())
# salary = float(input())
# emp= Employee(id, firstName, lastName, salary)
# print(emp.toString())
# array of object of employee
# Read `count` employee records from stdin, then echo every record back.
emp = []
count = int(input("Enter List size : "))
for emp_id in range(1, count+1):
    print("Enter details like : First Name, Last name, Salary")
    # range() already yields ints and input() already returns str, so the
    # original int(i)/str(input()) conversions were redundant; the trailing
    # `i += 1` was a no-op inside a Python for loop and has been removed.
    # `emp_id` also avoids shadowing the builtin `id`.
    firstName = input()
    lastName = input()
    salary = float(input())
    emp.append(Employee(emp_id, firstName, lastName, salary))
for e in emp:
    print(e.toString())
# ---------------------------------------------------------------------------------------------
# Output
'''
PS D:\E-DAC Data\Python> & C:/Users/samad/AppData/Local/Programs/Python/Python39/python.exe "d:/E-DAC Data/Python/Chapter_07/01_Employee.py"
Enter List size : 2
Enter details like : First Name, Last name, Salary
aaaaa
aaaaa
22222
Enter details like : First Name, Last name, Salary
sssss
sssss
33333
Emp Id : 1
Emp First Name : aaaaa
Emp Last Name : aaaaa
Emp Salary : 22222.0
Emp Id : 2
Emp First Name : sssss
Emp Last Name : sssss
Emp Salary : 33333.0
'''
| true |
d0cebb811d3a0d7005ab6802622ad3838bf03992 | Python | Sorune/BaekJoon | /Baekjoon8393.py | UTF-8 | 80 | 3.0625 | 3 | [] | no_license | N=int(input())
n=0
for i in range(1,N+1):
n+=i
if i==N:
print(n) | true |
5dd90b33f458516ba37d52282fb97ab76854fc8b | Python | DoubleRogue-LY/Chinese-Standard-Mahjong-AI | /action.py | UTF-8 | 7,715 | 2.59375 | 3 | [] | no_license | import json
from MahjongGB import MahjongFanCalculator
import numpy as np
import random
import function
from discard import DisCard
def Can_Hu(extra_card, data, is_ZIMO):
    """Decide whether the hand can declare a win (HU) on `extra_card`.

    Returns (True, "HU", "") when the hand scores at least 8 fan after
    deducting flower tiles, otherwise (False, "", "").  `is_ZIMO` marks a
    self-drawn win.
    """
    ## give extra_card(draw or others play), judge if can HU and return
    # if_Hu(bool), action, data
    play_ID, quan, pack, hand, hua = data["info"]
    # convert the meld pack from lists to tuples (the fan calculator
    # expects an immutable tuple-of-tuples)
    new_pack = []
    for item in pack:
        new_pack.append(tuple(item))
    new_pack = tuple(new_pack)
    try:
        new_hand = decode_card(hand)#transfer list[int] -> list[string]
        # NOTE(review): decode_card is neither defined nor imported in this
        # module — presumably supplied by the imported `function` module;
        # verify it is in scope at runtime.
        ans=MahjongFanCalculator(new_pack,new_hand,extra_card,hua,is_ZIMO,False,False,False,play_ID,quan)
        fan = 0
        for item in ans:
            fan+=item[0]
        fan = fan-hua
        if fan<8:# fewer than 8 fan: not a legal win, bail out via the handler
            raise Exception
    except Exception as err:
        # not HU (either the calculator rejected the hand or fan < 8)
        return False, "", ""
    else:
        action = "HU"
        data = ""
        return True, action, data
def Can_Gang(ID, extra_card, data):
    """Claim a GANG when the hand holds three copies of `extra_card`.

    On success the GANG meld is appended to the pack, all copies of the
    tile are stripped from the hand, and (True, "GANG", data) is returned;
    otherwise (False, None, None).
    """
    play_ID, _, _, hand, _ = data["info"]
    extra_card = str_to_num(extra_card)
    if hand.count(extra_card) == 3:
        # record the meld, tagging which seat supplied the fourth tile
        gang_meld = ["GANG", num_to_str(extra_card), (play_ID - ID + 4) % 4]
        data["info"][2].append(gang_meld)
        # drop every copy of the tile from the hand, in place
        hand[:] = [tile for tile in hand if tile != extra_card]
        data["info"][3] = hand
        return True, "GANG", data
    return False, None, None
def Can_BuGang(ID, extra_card, data):
    """Try to upgrade an existing PENG of `extra_card` into a GANG (BUGANG).

    Returns (True, "BUGANG <card>", data) when a matching PENG meld exists
    in the pack (the meld is promoted in place), otherwise
    (False, None, None).
    """
    melds = data["info"][2]
    for meld in melds:
        if meld[0] != "PENG" or meld[1] != extra_card:
            continue
        # promote the meld in place: the fourth tile joins the PENG
        meld[0] = "GANG"
        return True, "BUGANG " + extra_card, data
    return False, None, None
def Can_Peng(ID, extra_card, data):
    """Claim a PENG when the hand holds two copies of `extra_card`.

    On success the PENG meld is appended to the pack, both copies are
    stripped from the hand, a discard is chosen via DisCard, and
    (True, "PENG <discard>", data) is returned; otherwise
    (False, None, None).
    """
    play_ID, _, _, hand, _ = data["info"]
    extra_card = str_to_num(extra_card)
    if hand.count(extra_card) == 2:
        # record the meld, tagging which seat supplied the third tile
        peng_meld = ["PENG", num_to_str(extra_card), (play_ID - ID + 4) % 4]
        data["info"][2].append(peng_meld)
        # drop both copies of the tile from the hand, in place
        hand[:] = [tile for tile in hand if tile != extra_card]
        data["info"][3] = hand
        discard, data = DisCard(data)
        return True, "PENG " + discard, data
    return False, None, None
def Can_Chi(extra_card, data):
    """Try to claim a CHI (run) using `extra_card`.

    On success the CHI meld is appended to the pack, the two supporting
    tiles are removed from the hand, a discard is chosen via DisCard, and
    (True, "CHI <mid> <discard>", data) is returned; otherwise
    (False, None, None).
    """
    ## give extra_card(draw or others play), judge if can CHI and return
    # if_Chi(bool), action, data
    extra_card = str_to_num(extra_card)
    _, _, _, hand, _ = data["info"]
    hand.sort()
    if extra_card>=30:
        return False, None, None## winds/dragons (codes >= 30) cannot be CHI'd
    CHI = None
    ## decide whether a CHI is possible; the branch order below is fixed
    ## (and could be tuned)
    # NOTE(review): when extra_card-1 is in hand but neither extra_card-2 nor
    # extra_card+1 is, the (extra, +1, +2) run is never considered because of
    # the else below — confirm this is intentional.
    if extra_card-1 in hand:
        if extra_card-2 in hand:
            # run (extra-2, extra-1, extra): claimed tile is the highest (3)
            CHI = ["CHI", num_to_str(extra_card-1), 3]
            data["info"][2].append(CHI)
            data["info"][3].remove(extra_card-1)
            data["info"][3].remove(extra_card-2)
            discard, data = DisCard(data)
            return True, "CHI "+num_to_str(extra_card-1)+" "+discard, data
        elif extra_card+1 in hand:
            # run (extra-1, extra, extra+1): claimed tile is the middle (2)
            CHI = ["CHI", num_to_str(extra_card), 2]
            data["info"][2].append(CHI)
            data["info"][3].remove(extra_card-1)
            data["info"][3].remove(extra_card+1)
            discard, data = DisCard(data)
            return True, "CHI "+num_to_str(extra_card)+" "+discard, data
    else:
        if extra_card+1 in hand and extra_card+2 in hand:
            # run (extra, extra+1, extra+2): claimed tile is the lowest —
            # NOTE(review): position code 2 looks suspicious here (1 would
            # mark the lowest tile); verify against the protocol spec.
            CHI = ["CHI", num_to_str(extra_card+1), 2]
            data["info"][2].append(CHI)
            data["info"][3].remove(extra_card+2)
            data["info"][3].remove(extra_card+1)
            discard, data = DisCard(data)
            return True, "CHI "+num_to_str(extra_card+1)+" "+discard, data
    return False, None, None
def Action(curr_input, input_data):##for the newest request,my action
    """Decide this bot's response to the newest request line.

    `curr_input` is the space-separated request string; `input_data` carries
    the seen-tile tally ("card") and the game state ("info"/"pre_card"/
    "pre_require").  Returns an (action_string, updated_data) pair.
    """
    card = input_data["card"]
    play_ID = input_data["info"][0]
    curr_input = curr_input.split(" ")
    requests_ID = int(curr_input[0])
    if requests_ID==2:#if I draw a card
        other_ID = play_ID
        get_card = curr_input[1]
        card[str_to_num(get_card)]+=1# update the seen-tile tally
        if_HU, action, data = Can_Hu(get_card, input_data, is_ZIMO=True)#whether i can hu?
        if if_HU:
            return action, data
        if_Gang, action, data = Can_Gang(other_ID,get_card, input_data)#whether i can (concealed) gang
        if if_Gang:
            return action+" "+get_card, data
        if_BuGang, action, data = Can_BuGang(other_ID,get_card, input_data)#whether i can bugang
        if if_BuGang:
            return action, data
        discard, data = DisCard(input_data,get_card)#discard
        action = "PLAY "+discard
        return action, data
    other_ID = int(curr_input[1])
    #other requests
    if requests_ID==3 and other_ID!=play_ID:
        other_action = curr_input[2]
        if other_action=="BUHUA" or other_action=="DRAW":#other buhua or draw
            action = "PASS"
            data = input_data
            return action, data
        if (other_action=="PLAY" and other_ID!=play_ID) or other_action=="PENG" or other_action=="CHI":
            if other_action=="CHI":#other chi
                played_card = curr_input[4]
                CHI_card = curr_input[3]
                CHI_card = str_to_num(CHI_card)
                ## update the seen-tile tally with the whole claimed run
                card[CHI_card-1]+=1
                card[CHI_card]+=1
                card[CHI_card+1]+=1
            else:
                played_card = curr_input[3]
                if other_action=="PENG":#other peng
                    PENG_card = input_data["pre_card"]
                    card[str_to_num(PENG_card)]+=3
            #other play a card (including the discard after a chi/peng)
            input_data["pre_card"] = played_card
            if_HU, action, data = Can_Hu(played_card, input_data, is_ZIMO=False)
            if if_HU:
                return action, data
            if_Peng, action, data = Can_Peng(other_ID,played_card, input_data)
            if if_Peng:
                return action, data
            if_Gang, action, data = Can_Gang(other_ID,played_card, input_data)
            if if_Gang:
                return action, data
            if play_ID==(other_ID+1)%4:# only the next seat may CHI
                if_Chi, action, data = Can_Chi(played_card, input_data)
                if if_Chi:
                    return action, data
        if other_action=="GANG":#other gang
            if input_data["pre_require"]!="DRAW":
                GANG_card = input_data["pre_card"]
                card[str_to_num(GANG_card)]=4
            # NOTE(review): the branch below is unreachable — other_action is
            # already "GANG" here, so it can never equal "BUGANG"; the BUGANG
            # bookkeeping probably belongs one indentation level up.
            if other_action=="BUGANG":#other bugang
                BUGANG_card = curr_input[3]
                card[str_to_num(BUGANG_card)]=4
        input_data["pre_require"] = other_action
        action="PASS"
        data = input_data
        return action, data
return "PASS", input_data | true |
574d7351b2e95e08ab16bf3de566116f8f4b899c | Python | LeeBumSeok/team5 | /week6/20171665_assignment6.py | UTF-8 | 6,254 | 2.890625 | 3 | [] | no_license | import pickle
import sys
from PyQt5.QtWidgets import (QWidget, QPushButton,
QBoxLayout,QHBoxLayout, QVBoxLayout, QApplication, QLabel,
QComboBox, QTextEdit, QLineEdit)
from PyQt5.QtCore import Qt
class ScoreDB(QWidget):
    """PyQt5 window managing a small pickle-backed score database.

    Records are dicts with 'Name', 'Age' and 'Score' keys, persisted to
    `assignment6.dat`.  The buttons offer Add / Del / Find / Inc / Show
    operations over the in-memory list `self.scoredb`.
    """
    def __init__(self):
        super().__init__()
        self.initUI()
        self.dbfilename = 'assignment6.dat'
        self.scoredb = []
        self.readScoreDB()
        self.showScoreDB()
        self.msg=''
    def initUI(self):
        # Build the widgets, arrange the layouts and wire button signals.
        self.setGeometry(300, 300, 500, 250)
        self.setWindowTitle('Assignment6')
        name = QLabel('Name:')
        age = QLabel('Age:')
        score = QLabel('Score:')
        amount = QLabel('Amount:')
        key = QLabel('Key:')
        self.nameedit = QLineEdit()
        self.ageedit = QLineEdit()
        self.scoreedit = QLineEdit()
        self.amountedit = QLineEdit()
        self.keycombo = QComboBox(self)
        self.keycombo.addItem('Age')
        self.keycombo.addItem('Name')
        self.keycombo.addItem('Score')
        add = QPushButton('Add')
        delete = QPushButton('Del')
        find = QPushButton('Find')
        inc = QPushButton('Inc')
        show = QPushButton('Show')
        result = QLabel('Result:')
        self.resultedit = QTextEdit(self)
        #set Layout
        hbox = QHBoxLayout()
        hbox.addWidget(name)
        hbox.addWidget(self.nameedit)
        hbox.addWidget(age)
        hbox.addWidget(self.ageedit)
        hbox.addWidget(score)
        hbox.addWidget(self.scoreedit)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(amount)
        hbox2.addWidget(self.amountedit)
        hbox2.addWidget(key)
        hbox2.addWidget(self.keycombo)
        hbox3 = QHBoxLayout()
        hbox3.addStretch(1)
        hbox3.addWidget(add)
        hbox3.addWidget(delete)
        hbox3.addWidget(find)
        hbox3.addWidget(inc)
        hbox3.addWidget(show)
        hbox4 = QHBoxLayout()
        hbox4.addWidget(result)
        hbox5 = QHBoxLayout()
        hbox5.addWidget(self.resultedit)
        vbox = QVBoxLayout()
        vbox.addStretch(5)
        vbox.addLayout(hbox)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)
        vbox.addLayout(hbox4)
        vbox.addLayout(hbox5)
        self.setLayout(vbox)
        #event connect
        add.clicked.connect(self.Add)
        delete.clicked.connect(self.Delete)
        find.clicked.connect(self.Find)
        inc.clicked.connect(self.Inc)
        show.clicked.connect(self.Show)
        self.setGeometry(300,300,500,300)
        self.show()
    def closeEvent(self, event):
        # Persist the database when the window is closed.
        self.writeScoreDB()
    def readScoreDB(self):
        # Load self.scoredb from the pickle file and return it.
        # NOTE(review): on a successful load the early return skips the
        # "Open DB" message AND fH.close(); on a failed pickle.load the
        # method falls through and implicitly returns None, which
        # showScoreDB/Show then pass to sorted() — verify.
        try:
            fH = open(self.dbfilename, 'rb')
        except FileNotFoundError as e:
            self.scoredb = []
            return
        try:
            self.scoredb = pickle.load(fH)
            return self.scoredb
        except:
            print("Empty DB: ", self.dbfilename)
        else:
            print("Open DB: ", self.dbfilename)
        fH.close()
    # write the data into person db
    def writeScoreDB(self):
        fH = open(self.dbfilename, 'wb')
        #change Type to int
        for i in self.scoredb:
            i['Age'] = int(i['Age'])
            i['Score'] = int(i['Score'])
        pickle.dump(self.scoredb, fH)
        fH.close()
    def showScoreDB(self):
        # Render the whole database, sorted by name, into the result box.
        #get values of scoredb
        sss = self.readScoreDB()
        aaa=''
        #show scoredb in TextEdit by using String variable
        for i in sorted(sss, key=lambda person: person['Name']):
            for j in sorted(i):
                if j == 'Age' or j == 'Score':
                    i[j] = str(i[j])
                aaa += (j + '=' + i[j] + '\t')
            aaa += '\n'
        self.resultedit.setText(aaa)
    def Add(self):
        # Append one record built from the Name/Age/Score line edits.
        # get value from each LineEdit
        name = self.nameedit.text()
        age = self.ageedit.text()
        score = self.scoreedit.text()
        #set new Dictionary and append to Scoredb
        new = {'Age':age, 'Name':name, 'Score':score}
        self.scoredb.append(new)
        self.writeScoreDB()
        self.showScoreDB()
    def Delete(self):
        # Remove every record whose Name matches the Name line edit.
        #get value from LineEdit 'Name'
        new_scoredb = []
        delname = self.nameedit.text()
        #Compare with scoredb and append to new_Scoredb
        for i in self.scoredb:
            if i['Name'] != delname:
                new_scoredb.append(i)
        #modify scoredb
        self.scoredb = new_scoredb
        self.writeScoreDB()
        self.showScoreDB()
    def Find(self):
        # Show only the records whose Name matches the Name line edit.
        #get value from LineEdit 'Name'
        find_name = self.nameedit.text()
        bbb=''
        find_scoredb=[]
        #set find_List about same Name
        for i in self.scoredb:
            if i['Name'] == find_name:
                find_scoredb.append(i)
        #show find_List
        for p in find_scoredb:
            for z in p:
                if z == 'Age' or z == 'Score':
                    p[z] = str(p[z])
                bbb += (z + '=' + p[z] + '\t')
            bbb += '\n'
        self.resultedit.setText(bbb)
    def Inc(self):
        # Add the Amount line-edit value to every matching record's Score.
        #get value from LineEdit 'Name' and 'Amount'
        inc_name = self.nameedit.text()
        inc_amount = self.amountedit.text()
        #Change Type and add amount
        for i in self.scoredb:
            if i['Name'] == inc_name:
                i['Score'] = int(i['Score'])
                i['Score'] += int(inc_amount)
        #modify Scoredb value and show
        self.writeScoreDB()
        self.showScoreDB()
    def Show(self,text):
        # Render the database sorted by the combo-box key.
        # NOTE(review): `text` receives the clicked signal's checked-flag
        # and is immediately overwritten below — the parameter is unused.
        text = self.keycombo.currentText() #get value from combobox
        strdb = self.readScoreDB()
        msg=''
        # set Text to TextEditBox from Scoredb
        for i in sorted(strdb, key=lambda person: person[text]):
            for j in sorted(i):
                if j == 'Age' or j == 'Score':
                    i[j] = str(i[j])
                msg += (j + '=' + i[j] + '\t')
            msg += '\n'
        self.resultedit.setText(msg)
if __name__ == '__main__':
    # Launch the Qt event loop with a single ScoreDB window.
    app = QApplication(sys.argv)
    ex = ScoreDB()
    sys.exit(app.exec_())
| true |
05c5a237251370f5ce128c80780b9253a1a1b55f | Python | GreenPonik/GreenPonik_TSL2561 | /GreenPonik_TSL2561/GreenPonik_TSL2561.py | UTF-8 | 3,150 | 2.90625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
"""
####################################################################
####################################################################
####################### GreenPonik_TSL2561 #########################
####################### Read TSL2561 sensor ########################
#################### with Python3 through i2c ######################
####################################################################
####################################################################
"""
import time
import board
import busio
import adafruit_tsl2561
class GreenPonik_TSL2561:
    """Thin wrapper around the Adafruit TSL2561 luminosity-sensor driver."""
    def read_tsl2561(self):
        """
        @brief Read tsl 2561 sensor on raspberry pi i2c bus
        Get light spectre data

        Returns (lux, infrared, broadband) on success; on any failure the
        exception is printed and the method implicitly returns None.
        """
        try:
            # Create the I2C bus
            i2c = busio.I2C(board.SCL, board.SDA)
            # Create the TSL2561 instance, passing in the I2C bus
            tsl = adafruit_tsl2561.TSL2561(i2c)
            # Print chip info
            print("Chip ID = {}".format(tsl.chip_id))
            print("Enabled = {}".format(tsl.enabled))
            print("Gain = {}".format(tsl.gain))
            print("Integration time = {}".format(tsl.integration_time))
            # NOTE(review): several prints in this method are duplicated
            # back-to-back — presumably debugging leftovers; confirm before
            # removing.
            print("Configuring TSL2561...")
            print("Configuring TSL2561...")
            # Enable the light sensor
            tsl.enabled = True
            time.sleep(1)
            # Set gain 0=1x, 1=16x
            tsl.gain = 0
            # Set integration time (0=13.7ms, 1=101ms, 2=402ms, or 3=manual)
            tsl.integration_time = 1
            # print("Getting readings...")
            print("Getting readings....")
            # Get raw (luminosity) readings individually
            broadband = tsl.broadband
            infrared = tsl.infrared
            # Get raw (luminosity) readings using tuple unpacking
            # broadband, infrared = tsl.luminosity
            # Get computed lux value (tsl.lux can return None or a float)
            lux = tsl.lux
            # Print results
            # print("Enabled = {}".format(tsl.enabled))
            print("Enabled = {}".format(tsl.enabled))
            # print("Gain = {}".format(tsl.gain))
            print("Gain = {}".format(tsl.gain))
            # print("Integration time = {}".format(tsl.integration_time))
            print("Integration time = {}".format(tsl.integration_time))
            # print("Broadband = {}".format(broadband))
            print("Broadband = {}".format(broadband))
            # print("Infrared = {}".format(infrared))
            print("Infrared = {}".format(infrared))
            # if lux is not None:
            #     print("Lux = {}".format(lux))
            # else:
            #     print("Lux value is None. Possible \
            #     sensor underrange or overrange.")
            # Disble the light sensor (to save power)
            tsl.enabled = False
            print('read light data: ')
            print(lux)
            print(infrared)
            print(broadband)
            return lux, infrared, broadband
        except BaseException as e:
            # NOTE(review): BaseException also swallows KeyboardInterrupt and
            # SystemExit, and the method then returns None instead of the
            # 3-tuple — callers must handle that.
            print('An exception occurred: {}'.format(e))
| true |
89275c490efa262da76ca9c736b26716ad1a3be0 | Python | sunovivid/hiddenlayer | /CodingTestExamples/Basic_Algorithms/Heap/Heap 3.py | UTF-8 | 2,769 | 3 | 3 | [] | no_license | '''from collections import deque
def solution(jobs):
works={}
for job in jobs:
works.setdefault(job[0],[]).append(job[1])
timeline = deque(sorted(works.keys()))
ans=0
while timeline:
t1=timeline.popleft()
start=min(works[t1])
t2=t1+start
ans+=start
if len(works[t1])==1:
waiting=[]
else:
waiting=list(zip([start for _ in range(len(works[t1]-1))], works[t1][1:]))
while timeline and timeline[0] <= t2:
elt=timeline.popleft()
waiting += list(zip([elt for _ in range(len(works[elt]))], works[elt]))
while waiting:
next_tup=min(waiting,key=lambda tup:tup[1])
del waiting[waiting.index(next_tup)]
t2+=next_tup[1]
ans+=t2-next_tup[0]
while timeline and timeline[0] <= t2:
elt=timeline.popleft()
waiting += list(zip([elt for _ in range(len(works[elt]))], works[elt]))
return ans//len(jobs)'''
# attempt above failed on test cases 8 and 18
from collections import deque
import heapq
def solution(jobs):
    """Shortest-Job-First disk scheduler.

    `jobs` is a list of [request_time, duration] pairs; returns the mean
    turnaround time (floored to int).  Each job's duration is pushed onto a
    heap with its request time packed into the fractional part
    (request_time * 1e-4) so duration ties break on the earlier request.
    """
    pending = {}
    for begin, cost in jobs:
        heapq.heappush(pending.setdefault(begin, []), cost + begin * 10 ** (-4))
    arrivals = deque(sorted(pending))
    total = 0
    while arrivals:
        # Machine is idle: jump to the next request time and run its
        # shortest job immediately (zero wait).
        now = arrivals.popleft()
        first = int(heapq.heappop(pending[now]))
        clock = now + first
        total += first
        # Remaining jobs that arrived at `now` seed the ready heap.
        ready = pending[now] or []
        while arrivals and arrivals[0] <= clock:
            key = arrivals.popleft()
            for enc in pending[key]:
                heapq.heappush(ready, enc)
        while ready:
            enc = heapq.heappop(ready)
            clock += int(enc)
            # turnaround = finish time - request time (decoded from the
            # fractional part of the encoded value)
            total += clock - round((enc - int(enc)) * 10 ** 4)
            while arrivals and arrivals[0] <= clock:
                key = arrivals.popleft()
                for enc in pending[key]:
                    heapq.heappush(ready, enc)
    return total // len(jobs)
# Passed.  Tuples are said to sort element-wise (and heapq accepts them), so a
# tuple-based (duration, start) encoding would be a cleaner next attempt.
#print(solution([[0, 3], [1, 9], [2, 6]]))
#print(solution([[0,1],[1,2],[500,6]]))
#print(solution([[0, 3], [1, 9], [2, 6], [30, 3]]))
#print(solution([[24, 10], [18, 39], [34, 20], [37, 5], [47, 22], [20, 47], [15, 2], [15, 34], [35, 43], [26, 1]]))
print(solution([[24, 10], [18, 39], [34, 20], [37, 5], [47, 22], [20, 47], [15, 34], [15, 2], [35, 43], [26, 1]]))
#print(solution([[0, 9], [0, 4], [0, 5], [0, 7], [0, 3]])) | true |
13f54208fe32abafd0feb63206102d66a9ca5698 | Python | 981377660LMT/algorithm-study | /22_专题/日程安排-扫描线+差分/732. 我的日程安排表3.py | UTF-8 | 746 | 3.5625 | 4 | [] | no_license | from sortedcontainers import SortedDict
class MyCalendarThree:
    """Sweep-line booking counter (LeetCode 732).

    Each call to book() records the half-open interval [start, end) and
    returns the largest number of simultaneously active bookings seen so
    far across all prior calls.
    """
    def __init__(self):
        # key-sorted difference map: +1 at every start, -1 at every end
        self.diff = SortedDict()
    def book(self, start: int, end: int) -> int:
        """Record [start, end) and return the current maximum overlap k."""
        self.diff[start] = self.diff.get(start, 0) + 1
        self.diff[end] = self.diff.get(end, 0) - 1
        best = 0
        running = 0
        # values() iterates in ascending key order, so `running` is the
        # number of bookings active at each boundary point.
        for delta in self.diff.values():
            running += delta
            best = max(best, running)
        return best
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
| true |
4560d2cd1ea61e7ca134b0a355fdcbe53d6e994f | Python | rio1004666/ProblemSolving | /얼음채우기3(DFS).py | UTF-8 | 612 | 3.140625 | 3 | [] | no_license | def dfs(y, x):
    # Bounds guard (the index may already have gone negative before this
    # check runs).
    if y < 0 or x < 0 or y >= n or x >= m: # index can already be negative here
        return
    # Flood-fill: mark this empty cell (0) as filled (1) and spread to the
    # four orthogonal neighbours.
    if board[y][x] == 0:
        board[y][x] = 1
        dfs(y + 1, x)
        dfs(y - 1, x)
        dfs(y, x + 1)
        dfs(y, x - 1)
if __name__ == '__main__':
    # Read an n x m grid of 0/1 digits and count the connected regions of 0s
    # (4-neighbour flood fill via dfs above).
    n, m = map(int, input().split())
    board = [list(map(int, input().rstrip())) for _ in range(n)]
    result = 0
    for i in range(n):
        for j in range(m):
            if board[i][j] == 1:
                continue
            else:
                # unfilled cell: one new region; fill it so it is not recounted
                result += 1
                dfs(i, j)
    print(result)
| true |
e93fd6149ce72ee9409bafa171552c6e0ee510c8 | Python | eubinecto/examples | /distill/train_kd.py | UTF-8 | 2,194 | 2.78125 | 3 | [] | no_license | import torch
import torch.optim as optim
from KD_Lib.KD import VanillaKD
from torchvision import datasets, transforms
from KD_Lib.models.shallow import Shallow
def main():
    """Train a teacher MLP on MNIST, distill it into a smaller student with
    KD_Lib's VanillaKD, evaluate the student and report parameter counts."""
    # This part is where you define your datasets, dataloaders, models and optimizers
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "mnist_data",
            train=True,
            download=True,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            ),
        ),
        batch_size=32,
        shuffle=True,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "mnist_data",
            train=False,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            ),
        ),
        batch_size=32,
        shuffle=True,
    )
    # Two shallow models; the student's hidden layer is a quarter of the
    # teacher's (800 -> 200 units).  It would be interesting to see how much
    # accuracy the student gives up.
    teacher_model = Shallow(hidden_size=800)
    student_model = Shallow(hidden_size=200)
    # instantiate optimizers
    teacher_optimizer = optim.SGD(teacher_model.parameters(), 0.01)
    student_optimizer = optim.SGD(student_model.parameters(), 0.01)
    # Now, this is where KD_Lib comes into the picture
    distiller = VanillaKD(teacher_model=teacher_model,
                          student_model=student_model,
                          train_loader=train_loader,
                          val_loader=test_loader,
                          optimizer_teacher=teacher_optimizer,
                          optimizer_student=student_optimizer)
    # here are the code for distillation.
    distiller.train_teacher(epochs=5, plot_losses=True, save_model=True) # Train the teacher network
    distiller.train_student(epochs=5, plot_losses=True, save_model=True) # Train the student network
    distiller.evaluate(teacher=False) # Evaluate the student network
    distiller.get_parameters() # to compare the number of parameters.
if __name__ == '__main__':
    # CLI entry point.
    main()
| true |
76506466b755e7d2b78408c0583883c9a9c3fc70 | Python | GINK03/atcoder-solvers | /hitachi2020_b.py | UTF-8 | 240 | 2.71875 | 3 | [] | no_license | A,B,M=map(int,input().split())
# Prices of A's items and B's items (A, B and M were read on the previous
# line; A and B are presumably the list lengths — unused here).
ax=list(map(int,input().split()))
bx=list(map(int,input().split()))
# Baseline: cheapest A item plus cheapest B item with no coupon.
ans = min(ax) + min(bx)
# Each of the M coupons discounts the specific pair (x, y) by c.
for _ in range(M):
    x,y,c=map(int,input().split())
    x-=1;y-=1;# convert the 1-based coupon indices to 0-based
    ans = min(ans, ax[x]+bx[y]-c)
print(ans)
| true |
3b236b6c8a36248bb5d57f269fe8ae115f9c93c8 | Python | MeowLi233/cpsc5910-su20 | /lab3-shortest-path/venv/liuvacuum.py | UTF-8 | 8,836 | 2.890625 | 3 | [] | no_license | from random import random, Random, uniform, randint
from agentdefs import Environment
# Cell states used in the world grid.
ENV_DIRTY = "DIRT"
ENV_CLEAN = "CLEAN"
ENV_WALL = "WALL"
ENV_GOLD = "GOLD"
# Action tokens an agent program may return (see execute_action).
ACTION_FORWARD = "FORWARD"
ACTION_SUCK = "SUCK"
ACTION_TURN_LEFT = "LEFT"
ACTION_TURN_RIGHT = "RIGHT"
ACTION_NOP = "NOP"
ACTION_SENSE_GOLD = "SENSEGOLD"
ACTION_MINE_GOLD = "MINEGOLD"
ACTION_UNLOAD_GOLD = "UNLOADGOLD"
class Percept:
    """Container for one step's sensor readings.

    `attributes` is a dict of percept name -> value (see
    LIUVacuumEnvironment.percept for the keys produced).
    """
    def __init__(self, attributes):
        self.attributes = attributes
class LIUVacuumEnvironment(Environment):
    """
    Create a vacuum environment with the given width, height, world-gen element biases and PRF seed
    """
    def __init__(self, env_x=5, env_y=5, dirt_bias=0.1, wall_bias=0.0, world_seed=None):
        super().__init__()
        self.env_x = env_x
        self.env_y = env_y
        self.dirt_bias = dirt_bias
        self.wall_bias = wall_bias
        #FIXME: In the UI?
        self.gold_bias = 0.01
        self.world = None
        self.randomize_world(world_seed)
    """
    Add thing to environment
    """
    def add_thing(self, thing, location=None):
        # Facing x=1, y=0 (EAST)
        # Note, the facing determines the thing's "active" axis.
        # I.e. x=1 implies that the x-axis is "active" for this thing
        # This is useful for ACTION_FORWARD
        thing.facing = (1, 0)
        thing.performance = -1000.0
        super().add_thing(thing, location)
    """
    Generate a percept for an agent
    """
    def true_with_prob(self, p):
        # Bernoulli draw: True with probability p.
        return uniform(0, 1) <= p
    def isgold(self, p):
        return self.world[p[0]][p[1]] == ENV_GOLD
    def inbounds(self, pos):
        # Interior cells only — the outermost ring is always wall.
        x, y = pos
        return x > 0 and x < self.env_x - 1 and y > 0 and y < self.env_y - 1
    def adjacent_at_distance(self, agent, n):
        # Sampled neighbourhood offsets at distance n from the agent,
        # filtered to in-bounds cells (used by glitter_percept).
        r, c = agent.location
        if n == 0:
            l = [agent.location]
        elif n == 1:
            l = [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]
        elif n == 2:
            l = [(r+2, c), (r-2, c), (r, c+2), (r, c-2),
                 (r+1, c+1), (r+1, c-1),
                 (r-1, c+1), (r-1, c-1)]
        else:
            raise(Exception(f"Bad arg {n} to adjacent"))
        return [p for p in l if self.inbounds(p)]
    def glitter_percept(self, agent):
        # Noisy gold sensor: detection probability decays with the distance
        # of the nearest gold cell (0.95 / 0.75 / 0.50), with a 1% false
        # positive rate when no gold is near.
        if any(self.isgold(p) for p in self.adjacent_at_distance(agent, 0)):
            p = self.true_with_prob(0.95)
        elif any(self.isgold(p) for p in self.adjacent_at_distance(agent, 1)):
            p = self.true_with_prob(0.75)
        elif any(self.isgold(p) for p in self.adjacent_at_distance(agent, 2)):
            p = self.true_with_prob(0.50)
        else:
            p = self.true_with_prob(0.01)
        if p:
            print(f"Glitter at {agent.location}")
        else:
            print(f"No Glitter at {agent.location}")
        return p
    def percept(self, agent):
        # The glitter sense only fires when the agent explicitly sensed.
        return Percept({"home": agent.location[0] == 1 and agent.location[1] == 1,
                        "dirt": self.world[agent.location[0]][agent.location[1]] == ENV_DIRTY,
                        "glitter": self.glitter_percept(agent) if agent.last_action == ACTION_SENSE_GOLD else None,
                        "bump": agent.bump})
    """
    Process actions generated by agents in environment
    """
    def execute_action(self, agent, action):
        agent.bump = False
        if action == ACTION_FORWARD:
            new_location = (agent.location[0] + agent.facing[0], agent.location[1] + agent.facing[1])
            agent.bump = self.world[new_location[0]][new_location[1]] == ENV_WALL
            agent.location = agent.location if agent.bump else new_location
        elif action == ACTION_SUCK:
            self.world[agent.location[0]][agent.location[1]] = ENV_CLEAN
        elif action == ACTION_TURN_LEFT:
            """
            NORTH -> WEST  | ( 0, -1) -> (-1,  0)
            EAST  -> NORTH | ( 1,  0) -> ( 0, -1)
            SOUTH -> EAST  | ( 0,  1) -> ( 1,  0)
            WEST  -> SOUTH | (-1,  0) -> ( 0,  1)
            """
            agent.facing = (agent.facing[1], -agent.facing[0] if agent.facing[0] != 0 else agent.facing[0])
        elif action == ACTION_TURN_RIGHT:
            agent.facing = (-agent.facing[1] if agent.facing[1] != 0 else agent.facing[1], agent.facing[0])
        elif action == ACTION_SENSE_GOLD:
            pass
        elif action == ACTION_MINE_GOLD:
            # An agent can carry at most 2 units of gold.
            if self.world[agent.location[0]][agent.location[1]] == ENV_GOLD and agent.num_gold < 2:
                agent.num_gold += 1
                self.world[agent.location[0]][agent.location[1]] = ENV_CLEAN
        elif action == ACTION_UNLOAD_GOLD:
            # Gold only counts when unloaded at home (1, 1).
            if agent.location[0] == 1 and agent.location[1] == 1 and agent.num_gold > 0:
                agent.num_gold -= 1
                agent.add_gold_reward()
        elif action == ACTION_NOP:
            pass
        else:
            raise(Exception(f"Bad action {action}"))
    """
    Start position for a given Thing in the environment
    """
    def default_location(self, thing):
        return 1, 1
    """
    Random-generate an environment for the vacuum with an optional seed
    """
    def wallify(self, randfunc):
        # Solid border walls plus interior walls sampled at wall_bias
        # (never on the home cell (1, 1)).
        self.world = [
            [
                ENV_WALL if
                x == 0 or
                x == self.env_x - 1 or
                y == 0 or
                y == self.env_y - 1 or
                (randfunc() < self.wall_bias and not (x == 1 and y == 1))
                else ENV_CLEAN
                for y in range(self.env_y)
            ]
            for x in range(self.env_x)
        ]
    def dirtify(self, randfunc):
        for x in range(self.env_x-1):
            for y in range(self.env_y-1):
                if (self.world[x][y] != ENV_WALL):
                    if randfunc() < self.dirt_bias:
                        self.world[x][y] = ENV_DIRTY
    def quadrant_for(self, r, c):
        # Quadrant index 0..3 for a cell, split at the midpoint of env_x.
        m = int(self.env_x / 2)
        if r < m and c < m:
            return 0
        elif r < m and c >= m:
            return 1
        elif r >= m and c < m:
            return 2
        else:
            return 3
    def quadrant_positions(self, quadrant):
        m = int(self.env_x / 2)
        xrange = range(0, m) if quadrant < 2 else range(m, self.env_x)
        yrange = range(0,m) if quadrant == 0 or quadrant == 2 else range(m, self.env_y)
        return [(x, y) for x in xrange for y in yrange]
    def quadrant_sum(self, quadrant, state):
        # Count of cells in `quadrant` whose state equals `state`.
        return sum([1 if self.world_pos_state(p) == state else 0 for p in self.quadrant_positions(quadrant)])
    def world_pos_state(self, p):
        return self.world[p[0]][p[1]]
    def goldify(self, randfunc):
        # Per-quadrant random bias makes some quadrants gold-richer.
        quadrant_bias = [randint(1,3), randint(1,3), randint(1,3), randint(1,3)]
        for x in range(self.env_x-1):
            for y in range(self.env_y-1):
                if randfunc() * 1 / quadrant_bias[self.quadrant_for(x,y)] < self.gold_bias:
                    if self.world[x][y] == ENV_CLEAN:
                        self.world[x][y] = ENV_GOLD
    def randomize_world(self, seed=None):
        randfunc = random if seed is None else Random(seed).random
        self.wallify(randfunc)
        self.dirtify(randfunc)
        self.goldify(randfunc)
    # CAUTION!
    # The position argument is possibly breaking since it's just
    # the default place where Things are placed (see defn of default_location)
    # The heading argument is DEFINITELY breaking because "East" as a heading
    # is defined in the agent world model. But the internal representation of
    # East is something like (0,1) which is just too gross.
    def prep_agent(self, agent, recon_type):
        if recon_type == "Summary":
            recon = {'walls': self.env_positions(ENV_WALL),
                     'gold': [self.quadrant_sum(q, ENV_GOLD) for q in [0,1,2,3]],
                     'dirt': [self.quadrant_sum(q, ENV_DIRTY) for q in [0,1,2,3]]
                     }
        elif recon_type == "Full":
            recon = {'width': self.env_x,
                     'height': self.env_y,
                     'position': (1,1),
                     'heading': 'East',
                     'walls': self.env_positions(ENV_WALL),
                     'gold': self.env_positions(ENV_GOLD),
                     'dirt': self.env_positions(ENV_DIRTY)
                     }
        else:
            # NOTE(review): `recon` is unassigned on this branch, so the
            # f-string itself raises NameError — this should reference
            # recon_type instead.
            raise(Exception(f"Bad recon value {recon}"))
        agent.prep(recon)
    def env_positions(self, env):
        # NOTE: world is indexed [x][y]; positions are emitted as (r, c)
        # pairs read via world[c][r].
        return [(r,c) for r in range(self.env_y) for c in range(self.env_x) if self.world[c][r] == env]
| true |
30724aaaa3987a460fd5c77aaf949576312ddfe5 | Python | bettyzry/SubspaceSeparability | /EOSS.py | UTF-8 | 6,335 | 2.640625 | 3 | [] | no_license | import k_nearest
from feature_select import FS_SVM, LARS_lasso
from ODModel import IForest
import numpy as np
import pandas as pd
from evaluation import Mutiple_OD_jaccard, Mutiple_OD_precision
class EOSS:
    """Explainable Outlier Subspace Search: for each labelled outlier, find
    the feature subspace in which it is most separable from its neighbours.
    """
    # NOTE(review): ODModel / feature_select_Model defaults are instantiated
    # once at class-definition time and shared by every EOSS() created
    # without explicit arguments (classic mutable-default pitfall) — confirm
    # that sharing is acceptable for these model objects.
    def __init__(self, X, y, k, a, r=None, subspaces_size=5, ODModel=IForest.IForestOD(), feature_select_Model=LARS_lasso.LARS_lasso()):
        """
        :param X: data on all space, DataFrame
        :param k: k-nearest
        :param a: scale factor for the synthetic-outlier sampling radius
                  (see get_outlier)
        :param r: number of random extra inliers to sample (defaults to k)
        """
        self.k = k
        self.r = r or k # use r when provided, otherwise fall back to k
        self.a = a
        self.subspaces_size = subspaces_size
        self.X = X
        self.y = y
        self.ODModel = ODModel
        self.feature_select_Model = feature_select_Model
        return
    def evaluation(self, reason_true, reason):
        # Compare predicted root-cause features against ground truth.
        reason_pre = reason['reason'].values
        jaccard = Mutiple_OD_jaccard.avg_jaccard(reason_true, reason_pre)
        precision = Mutiple_OD_precision.avg_precision(reason_true, reason_pre)
        return jaccard, precision
    def get_expalinable_subspace(self):
        """
        :return: self.reason( 'explainable_subspace': index of outlier, 'reason', 'value')
        """
        outliers = np.where(self.y == 1)[0]
        reason = pd.DataFrame(outliers, columns=['outlier'])
        reason['explainable_subspace'] = ''
        reason['value'] = 0.1
        for ii, p in enumerate(outliers):
            explainable_subspace, score = self.get_single_explainable_subspace(p)
            # NOTE(review): chained indexing (reason['value'][ii] = ...) can
            # trigger pandas SettingWithCopy warnings — .loc would be safer.
            reason['explainable_subspace'][ii] = explainable_subspace
            reason['value'][ii] = score
            print(reason['value'][ii])
        print(reason['value'].values)
        return reason
    def get_single_explainable_subspace(self, p):
        # Use feature selection to propose candidate root-cause subspaces.
        subspaces = self.feature_select_Model.feature_select(self.X, self.y, self.subspaces_size)
        accuracies = np.zeros(len(subspaces))
        for ii, subspace in enumerate(subspaces):
            sub_X = self.X[subspace] # data restricted to this subspace
            kn = k_nearest.kNN(sub_X, self.k)
            # --------- sample points to build a binary-classification set --------- #
            Ip = self.get_inlier_index(kn, p) # indices of sampled inliers
            Ip_data_df = kn.X.iloc[Ip, :] # inlier rows
            Ip_data_df['label'] = 0
            Op_data_df = self.get_outlier(kn, p) # synthetic outlier rows
            Op_data_df['label'] = 1
            Tp_data_df = Ip_data_df.append(Op_data_df) # merge the two classes
            p_data_df = kn.X.iloc[[p], :] # the row for outlier p itself
            p_data_df['label'] = 1
            Tp_data_df = Tp_data_df.append(p_data_df) # merge the two classes
            label = Tp_data_df['label'].values
            Tp_data_df = Tp_data_df.drop('label', axis=1)
            # --------------------- outlier detection ---------------------- #
            scores = self.ODModel.detect(Tp_data_df, label) # run outlier detection
            accuracies[ii] = scores[-1] # score of point p (last row)
            kn.__del__() # release kn
        argsort = accuracies.argsort(axis=0) # indices sorted by score, ascending
        result = argsort[-1]
        explainable_subspace = subspaces[result]
        print(accuracies, accuracies[result])
        return explainable_subspace, accuracies[result]
    def get_inlier_index(self, kn, p):
        """
        :param kn: class k_nearest
        :param p: the loc of an outlier
        :return Ip: the sampled inlier set of p
        """
        datasize = len(kn.X)
        Rk = kn.get_k_nearest(p)
        # Draw r distinct random indices not already among the k-nearest.
        Q = [-1]*self.r
        i = 0
        while i < self.r:
            d = int(np.random.uniform(0, datasize))
            if d in Rk or d in Q:
                continue
            else:
                Q[i] = d
                i += 1
        Ip = np.concatenate([Rk, Q])
        return Ip
    def get_outlier(self, kn, p):
        """
        :param kn: class k_nearest
        :param p: the loc of an outlier
        :return Op: the sampled outlier set of p
        """
        columnsize = len(kn.X.columns)
        distances = kn.get_distances(p)
        d = max(distances)
        k_distance = distances[distances.argsort(axis=0)[self.k]]
        # sampling radius scales with a and the k-distance of p
        l = self.a * (1 / np.sqrt(d)) * k_distance
        mean = kn.X.iloc[p, :].values
        conv = np.ones([columnsize, columnsize]) * l # covariance matrix
        Op = np.random.multivariate_normal(mean=mean, cov=conv, size=self.k+self.r)
        Op = pd.DataFrame(Op, columns=kn.X.columns)
        return Op
def do_eoss(relative_df):
    """
    :param relative_df: ('trace_id', 'device_id', 'cluster_id', 'span_name', cols, 'label')
    :return: predict_df('trace_id', 'device_id', 'cluster_id', 'span_name', 'reason', 'value')
    """
    # Strip the trace metadata columns; the remaining columns are features.
    X = relative_df.drop(['trace_id', 'device_id', 'cluster_id', 'span_name', 'label'], axis=1)
    y = relative_df['label'].values
    # Fixed hyper-parameters for the EOSS run.
    k = 35
    a = 0.35
    eoss = EOSS(X, y, k, a, subspaces_size=1)
    reason = eoss.get_expalinable_subspace() # self.reason( 'outlier': index of outlier, 'reason')
    reason.to_csv('result/reason.csv')
    print(reason['value'].values)
    # Keep only the outlier rows and attach the single best feature per row.
    predict_df = relative_df[['trace_id', 'device_id', 'cluster_id', 'span_name', 'label']]
    predict_df = predict_df[predict_df.label == 1] # fail=1, api=0
    predict_df['reason'] = [i[0] for i in reason['explainable_subspace'].values]
    predict_df['value'] = reason['value'].values
    predict_df = predict_df.drop(['label'], axis=1)
    return predict_df
if __name__ == '__main__':
    # Smoke test on the cardio dataset: fabricate the trace metadata columns
    # do_eoss expects, then run the pipeline.
    df = pd.read_csv('data/cardio.csv')
    # NOTE(review): X is computed here but never used (do_eoss recomputes it).
    X = df.drop(columns=['label'], axis=0)
    y = df['label'].values
    # ('realtive' is a typo kept as-is to avoid touching behaviour)
    realtive_df = df.copy()
    realtive_df['trace_id'] = [i for i in range(len(realtive_df))]
    realtive_df['device_id'] = [i for i in range(len(realtive_df))]
    realtive_df['cluster_id'] = [i for i in range(len(realtive_df))]
    realtive_df['span_name'] = 'a'
    realtive_df.to_csv('result/relative_df.csv')
    predict_df = do_eoss(realtive_df)
predict_df.to_csv('result/result.csv') | true |
843c57755ac5e1a86b1d23559175d2e88cb639d1 | Python | rohe/IdPproxy | /test/debug_eppn.py | UTF-8 | 272 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | import urlparse
__author__ = 'rolandh'
def eppn_from_link(link):
    """Build an eduPersonPrincipalName-style id from a profile URL.

    e.g. 'http://www.facebook.com/johan.lundberg.908'
         -> 'johan.lundberg.908@www.facebook.com'
    """
    parsed = urlparse.urlparse(link)
    local_part = parsed.path[1:]  # drop the leading '/'
    return "{0}@{1}".format(local_part, parsed.netloc)
print eppn_from_link('http://www.facebook.com/johan.lundberg.908') | true |
6cabfd8f8085f97579af7ea4a448d764e985242f | Python | Design-Computing/Design-Computing.github.io | /marking_and_admin/admin/most_wanted.py | UTF-8 | 4,846 | 2.9375 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
"""Make a page of faces with names.
Run this and it will produce an HTML file with links to everyone's mugshots
"""
from __future__ import division
from __future__ import print_function
from importlib import import_module
from StringIO import StringIO
import os
import pandas as pd
import requests
import ruamel.yaml as yaml
LOCAL = os.path.dirname(os.path.realpath(__file__)) # the context of this file
CWD = os.getcwd() # The curent working directory
# print("LOCAL", LOCAL)
# print("CWD", CWD)
rootdir = '../code1161StudentRepos'
def the_head():
    """Return the shared HTML prelude (doctype, <head> and opening <body>).

    Kept in one place so every generated page uses the same stylesheet,
    script and title.
    """
    prelude = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Python Adventurers</title>
<link rel="stylesheet" href="admin/mugshot.css">
<script src="script.js"></script>
</head>
<body>
"""
    return prelude
def wrap_into_body(content):
    """Wrap *content* between the shared page header and the closing tags."""
    parts = [the_head(), content, "</body></html>"]
    return "".join(parts)
def card_template(details):
    """Render one student's "card" as an HTML snippet.

    Mutates *details* in place by injecting the 'raw' and 'gh' URL bases
    the template needs.  Expects keys: gitHubUsername, repo_name, name,
    studentNumber, unswEmail, realEmailFirstBit, realEmailOtherBit.
    The '^AT^' placeholder (used earlier to keep raw '@' out of the YAML)
    is swapped back to '@' after formatting.
    NOTE(review): the template opens a <dl> but never closes it — kept as-is.
    """
    details["raw"] = "https://raw.githubusercontent.com"
    details["gh"] = "https://github.com"
    return """
    <div class="person">
        <img src="{raw}/{gitHubUsername}/{repo_name}/master/mugshot.png">
        <h1>{name}</h1>
        <dl>
            <dt>name:</dt>
            <dd>{name}</dd>
            <dt>Student Number:</dt>
            <dd>{studentNumber}</dd>
            <dt>GitHub:</dt>
            <dd>
                <a href="{gh}/{gitHubUsername}/{repo_name}">{gitHubUsername}</a>
            </dd>
            <dt>UNSW Email:</dt>
            <dd>{unswEmail}</dd>
            <dt>realEmail:</dt>
            <dd>{realEmailFirstBit}{realEmailOtherBit}</dd>
    </div>""".format(**details).replace("^AT^", "@")
def getDFfromCSVURL(url, columnNames=False):
    """Get a csv of values from google docs.

    :param url: address of a CSV resource (a published Google Sheet here).
    :param columnNames: optional list of column names; when given, the
        sheet's own header row is consumed (header=0) and replaced.
    :return: pandas DataFrame of the parsed CSV.
    """
    r = requests.get(url)
    data = r.content  # raw bytes; Python 2's StringIO accepts them — TODO confirm encoding
    if columnNames:
        return pd.read_csv(StringIO(data), header=0, names=columnNames)
    else:
        return pd.read_csv(StringIO(data))
def df_of_students():
    """Get an updated copy of the spreadsheet.

    Downloads the published Google Sheet (CSV export) of student fork
    details; the column names below replace the sheet's header row.
    """
    # pull the forks list
    ss_of_details_url = ("https://docs.google.com/spreadsheets/d/"
                         "1qeOp6PZ48BFLlHaH3ZEil09MBNfQD0gztuCm2cEiyOo/"
                         "pub?gid=2144767463"
                         "&single=true&output=csv")
    return getDFfromCSVURL(ss_of_details_url, ["paste",
                                               "their_username",
                                               "repo_name",
                                               "check",
                                               "repo_url",
                                               "slack"])
def rip_out_dicts(d):
    """Flatten one level of nesting: for every value that is itself a dict,
    keep only that inner dict's first value.

    Used on pandas ``.to_dict()`` output, where each column maps a single
    row index to the actual cell value.

    Fix: the original used ``d.iterkeys()`` and ``row.keys()[0]``, which
    are Python 2-only and crash on Python 3; ``d.items()`` and
    ``next(iter(row))`` behave identically on both versions.
    """
    newD = {}
    for key, row in d.items():
        if type(row) is dict:
            first_key = next(iter(row))
            newD[key] = row[first_key]
        else:
            newD[key] = row
    return newD
def graft_fork_onto_aboutMe(forkDetails, about_me_details):
    """Merge a student's spreadsheet row (fork details) into their
    aboutMe.yml data, matched case-insensitively on GitHub username.

    Returns the merged, flattened dict, or None when the username has no
    usable row (the bare except below swallows any failure).
    """
    f = forkDetails
    a = dict(about_me_details)
    # print("XXXXXXXXXX", f, "\n", a)
    username = a["gitHubUsername"]

    def safe_lower(x):
        # str() guards against NaN/None cells before lowercasing.
        return str(x).upper().lower()

    pertinent_row = f[f["their_username"].apply(safe_lower) ==
                      safe_lower(username)]
    # print("pertinent_row", pertinent_row)
    try:
        pertinent_row = pertinent_row.to_dict()
        a.update(pertinent_row)  # update is in place
        return rip_out_dicts(a)
    except:  # NOTE(review): bare except hides real errors; narrow if possible
        pass  # print(username, pertinent_row)
def make_guess_who_board():
    """Generate code for each person.

    Walks every checked-out student repo, loads its aboutMe.yml, merges in
    the spreadsheet row and renders one HTML card per student.  Students
    whose data cannot be merged or rendered are silently skipped.
    """
    dirList = os.listdir(rootdir)
    student_fork_details = df_of_students()
    # Status, name_unsw, gitHubUsername, mediumUsername, OnMedium,
    # name, realEmailFirstBit, realEmailOtherBit, GHusername, stackoverflow,
    # studentNumber, unswEmail, slack, topic, nice_email, gh_has_fork,
    # repo_name
    # TODO: update this so that it only looks at the spreadsheet and not the
    # dirlist.
    body = ""
    for student_repo in dirList:
        path = os.path.join(rootdir, student_repo, "aboutMe.yml")
        details = open(path).read()  # NOTE(review): file handle is never closed
        details = details.replace("@", "^AT^")  # protect '@' from YAML parsing
        details = details.replace("é", "e")
        # NOTE(review): str.replace is literal, not regex — the pattern below
        # will practically never occur verbatim, so this line is a no-op.
        details = details.replace(":([^ /])", ": $1")
        details = yaml.load(details, yaml.RoundTripLoader)
        if details["mediumUsername"][0] != "@":
            details["mediumUsername"] = "@" + details["mediumUsername"]
        details["repo_name"] = student_repo
        details = graft_fork_onto_aboutMe(student_fork_details,
                                          details)
        # print(details)
        try:
            body += card_template(details)
        except:  # skips students whose merge returned None or lacks a key
            pass
    return wrap_into_body(body)
# Module entry point: render the whole board and write it next to the script.
target = open("guess_who_poster.html", 'w')
target.write(make_guess_who_board())
target.close()
| true |
d7bed0a12568f761fce6b8f7ea772bb4e9e83eb1 | Python | tskr1681/MastermindGame_rug | /app.py | UTF-8 | 1,502 | 2.71875 | 3 | [] | no_license | from flask import Flask, request, render_template, url_for
from datetime import datetime
import math
from game import Game
from strategy_analyser import StrategyAnalyser
app = Flask(__name__)
@app.route('/')
def homepage():
    """Landing page: current time, a random placeholder image and a link
    into the game view."""
    # NOTE(review): %l (hour, space-padded) is a platform-specific strftime code.
    the_time = datetime.now().strftime("%A, %d %b %Y %l:%M %p")
    return f"""
    <h1>Hello heroku</h1>
    <p>It is currently {the_time}.</p>
    <img src="http://loremflickr.com/600/400" />
    <p><h1> <a href="{ url_for('game_') }"> Play Game </a></h1></p>
    """
@app.route('/game', methods=['GET', 'POST'])
def game_():
    """Play one full Mastermind game server-side, run the strategy
    comparison simulation, and render everything into game.html."""
    game = Game()
    game.play()  # plays the whole game; results are read off the object below
    # Map pawn numbers to CSS colours used by the template.
    color_code = {
        1: 'yellow',
        2: 'rgb(102, 0, 204)',
        3: 'red',
        4: 'rgb(0,102,0)',
        5: 'rgb(255, 204, 255)',
        6: 'rgb(0, 0, 255)',
    }
    code = (game.codemaker.code)
    moves = (game.moves)
    feedbacks = (game.feedback)
    log = game.log.split('\n')
    # Benchmark the three codebreaker strategies (5-slot codes).
    analyser = StrategyAnalyser(5)
    analyser.run_simulation()
    games = (analyser.number_of_games)
    codes = (analyser.codes)
    sc1 = (analyser.mathematician_codebreaker_score)
    sc2 = (analyser.logician_codebreaker_score)
    sc3 = (analyser.random_codebreaker_score)
    return render_template('game.html', color=color_code, code=code, moves=moves,\
                           feedbacks=feedbacks, winner=game.winner, log=log,\
                           games=games, codes=codes, sc1=sc1, sc2=sc2, sc3=sc3 )
if __name__ == '__main__':
    # Development server only; debug/reloader must be disabled in production.
    app.run(debug=True, use_reloader=True)
| true |
54c24e1ebd30feba76df469ffe09ff5bfb07c925 | Python | aisen-x/git_learn | /第三周/验证二叉搜索树.py | UTF-8 | 390 | 3.078125 | 3 | [] | no_license | class Solution:
def isValidBST(self, root: TreeNode) -> bool:
# 左中右遍历 最后遍历排序为升序
res = []
def helper(root):
if not root:
return
helper(root.left)
res.append(root.val)
helper(root.right)
helper(root)
return res == sorted(res) and len(set(res)) == len(res)
| true |
294af8f3b182f2fc7d0df6e15a2eef07bfb90e2a | Python | surchs/cpac_netmat | /tools/phenoMatcher.py | UTF-8 | 1,200 | 2.96875 | 3 | [] | no_license | '''
Created on Jan 10, 2013
@author: surchs
script to make a new phenotypic file for just the subjects in a subject list
goal is to reduce N of subjects in phenofile for preprocessing
'''
import sys
def Main(phenoIn, subjectIn, phenoOut):
    """Write a reduced phenotypic file containing only the subjects listed
    in *subjectIn*.

    :param phenoIn: path to a CSV phenotypic file; the first line is a
        header, and the first comma-separated field of every following
        line is the subject id.
    :param subjectIn: path to a text file with one subject id per line.
    :param phenoOut: path the filtered phenotypic CSV is written to.
    """
    # Text mode ('rb'/'wb' in the original broke under Python 3, where
    # bytes cannot be split on a str separator) and context managers so
    # the input handles are always closed (they previously leaked).
    with open(subjectIn, 'r') as sIn:
        sLines = [line.strip() for line in sIn]
    print(sLines)
    pOutLines = []
    with open(phenoIn, 'r') as pIn:
        pOutLines.append(pIn.readline())  # always keep the header row
        for pLine in pIn:
            subId = pLine.strip().split(',')[0]
            if subId in sLines:
                # take it in
                pOutLines.append(pLine)
    # write it out
    with open(phenoOut, 'w') as pOut:
        pOut.writelines(pOutLines)
    print('Done')
if __name__ == '__main__':
    # CLI usage: phenoMatcher.py <pheno.csv> <subject_list.txt> <out.csv>
    phenoIn = sys.argv[1]
    subjectIn = sys.argv[2]
    phenoOut = sys.argv[3]
    Main(phenoIn, subjectIn, phenoOut)
    pass
| true |
def calculator(op, num1, num2):
    """Apply the arithmetic operation selected by *op* to the operands.

    op: 1=add, 2=subtract, 3=multiply, 4=divide (true division),
    5=power.  Any other op yields None, matching the original
    fall-through behaviour.
    """
    operations = {
        1: lambda a, b: a + b,
        2: lambda a, b: a - b,
        3: lambda a, b: a * b,
        4: lambda a, b: a / b,
        5: lambda a, b: a ** b,
    }
    chosen = operations.get(op)
    return chosen(num1, num2) if chosen is not None else None
# Interactive driver: pick an operation (German prompt), read two integers,
# print the result.  User-facing strings kept verbatim.
op = int(input(f'Welche Operation willst du durchführen?\n1. Addition\n2. Substration\n3. Multiplikation\n4. Division\n5. Exponent\nNR: '))
num1 = int(input('Erste Zahl: '))
num2 = int(input('Zweite Zahl: '))
print(calculator(op, num1, num2))
| true |
4a751929131d1da0f8d0ea3ab8528b296ec2a652 | Python | michaelmcmillan/LittList | /test/unit/http_client/test_http_client.py | UTF-8 | 1,196 | 3.109375 | 3 | [] | no_license | from unittest import TestCase
from unittest.mock import MagicMock
from http_client import HTTPClient, URL
class TestURL(TestCase):
    """Unit tests for http_client.URL: protocol/host/path splitting,
    query-string escaping and full-URL assembly."""

    def test_returns_http_if_http_is_protocol(self):
        url = URL('http://www.nb.no/services/search/v2/search')
        self.assertEqual(url.protocol, 'http://')

    def test_returns_https_if_https_is_protocol(self):
        url = URL('https://www.nb.no/services/search/v2/search')
        self.assertEqual(url.protocol, 'https://')

    def test_returns_www_dot_nb_dot_no_as_host(self):
        url = URL('https://www.nb.no/services/search/v2/search')
        self.assertEqual(url.host, 'www.nb.no')

    def test_returns_path(self):
        url = URL('https://www.nb.no/services/search/v2/search')
        self.assertEqual(url.path, '/services/search/v2/search')

    def test_it_escapes_query_parameters_in_url(self):
        # Both '=' and the non-ASCII 'ø' must be percent-encoded.
        url = URL('http://www.nb.no/services/search/v2/search', [('q', 'q=snømannen')])
        self.assertEqual(url.querystring, '?q=q%3Dsn%C3%B8mannen')

    def test_it_combines_all_url_components(self):
        url = URL('http://vg.no/article', [('id', 1)])
        self.assertEqual(str(url), 'http://vg.no/article?id=1')
| true |
c8cba108583ac625d402b309d40c3466efefe735 | Python | themis0888/CS408-Project | /SAD.py | UTF-8 | 4,335 | 2.875 | 3 | [] | no_license | from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
def find_in_image(template, target):
    """Locate *template* inside *target* using a sub-sampled sum of
    absolute differences (SAD) scan over RGB pixels.

    :return: (bestX, bestY, category, bestSAD) where category is derived
        from the template's file name.
    """
    im1 = Image.open(template)
    im2 = Image.open(target)
    im1_mat = im1.load()
    im2_mat = im2.load()
    width1, height1 = im1.size
    width2, height2 = im2.size
    bestSAD = 1000000000  # effectively +infinity sentinel
    bestX = -1
    bestY = -1
    # Coarse scan: candidate positions every 15px; within a candidate the
    # template is sampled every 2nd column / 3rd row to keep this fast.
    for x in range(0, width2 - width1, 15):
        for y in range(0, height2 - height1, 15):
            SAD = 0.0
            for i in range(0, width1, 2):
                for j in range(0, height1, 3):
                    SAD += abs(abs(im2_mat[x + i, y + j][0] - im1_mat[i, j][0]) +
                               abs(im2_mat[x + i, y + j][1] - im1_mat[i, j][1]) +
                               abs(im2_mat[x + i, y + j][2] - im1_mat[i, j][2]))
            if bestSAD > SAD:
                bestX = x
                bestY = y
                bestSAD = SAD
    # Derive the class label from the template's file name.
    # NOTE(review): if none of the substrings matches, `category` is never
    # bound and the return below raises NameError.
    if "cup" in template:
        category = "cup"
    elif "glasscase" in template:
        category = "glasscase"
    elif "greenbar" in template:
        category = "greenbar"
    elif "pencilcase" in template:
        category = "pencilcase"
    elif "rice" in template:
        category = "rice"
    elif "scissors" in template:
        category = "scissors"
    elif "shave" in template:
        category = "shave"
    elif "snack" in template:
        category = "snack"
    elif "socks" in template:
        category = "socks"
    elif "spaghetti" in template:
        category = "spaghetti"
    elif "tape" in template:
        category = "tape"
    return (bestX, bestY, category, bestSAD)
def draw_bounding_box(target_path, positions_category, width, height):
    """Draw a width x height blue box plus a red category label at every
    matched position, then save the annotated image as 'result.jpg'."""
    target = Image.open(target_path)
    target_mat = target.load()
    # font = ImageFont.truetype("sans-serif.ttf", 16)
    for pos_cate in positions_category:
        bestX = pos_cate[0]
        bestY = pos_cate[1]
        category = pos_cate[2]
        # Box outline drawn pixel by pixel: top/bottom edges, then sides.
        for i in range(width):
            target_mat[bestX + i, bestY + height] = (32, 20, 255)
            target_mat[bestX + i, bestY] = (32, 20, 255)
        for j in range(height):
            target_mat[bestX, bestY + j] = (32, 20, 255)
            target_mat[bestX + width, bestY + j] = (32, 20, 255)
        # Write Text
        draw = ImageDraw.Draw(target)
        # font = ImageFont.truetype(<font-file>, <font-size>)
        # draw.text((x, y),"Sample Text",(r,g,b))
        # print(category)
        draw.text((bestX + 2, bestY + height + 2), category, (255, 0, 0))
    target.save("result.jpg")  # NOTE(review): output path is hard-coded
if __name__ == "__main__":
    # Match each class template against one test frame, draw the results
    # and print the sorted SAD scores.
    positions_category = []
    classes = ["cup", "glasscase", "greenbar", "pencilcase", "rice", "scissors", "shave", "snack", "socks", "spaghetti", "tape"]
    # NOTE(review): range(10) skips the last class ("tape") listed above.
    for i in range(10):
        template_name = "templates_small/" + classes[i] + ".jpg"
        # target_name = "test_images/KakaoTalk_Video_20171002_1733_58_402 16.jpg"
        target_name = "test_images/KakaoTalk_Video_20171002_1735_21_045 20.jpg"
        positions_category.append(find_in_image(template_name, target_name))
    draw_bounding_box(target_name, positions_category, 20, 36)
    print(positions_category)
    SADs = []
    for e in positions_category:
        SADs.append(e[3])
    SADs.sort()
    print(SADs)
"""
im1 = Image.open("templates_small/greenbar.jpg")
im2 = Image.open("test_images/KakaoTalk_Video_20171002_1733_58_402 16.jpg")
im1_mat = im1.load()
im2_mat = im2.load()
width1, height1 = im1.size
width2, height2 = im2.size
bestSAD = 1000000000
bestX = -1
bestY = -1
#print((width2-width1)*(height2-height1)/25*width1*height1)
for x in range(0,width2-width1,15):
for y in range(0,height2-height1,15):
SAD = 0.0
for i in range(0,width1,2):
for j in range(0,height1,3):
SAD += abs(abs(im2_mat[x+i,y+j][0] - im1_mat[i,j][0])+
abs(im2_mat[x+i,y+j][1] - im1_mat[i,j][1])+
abs(im2_mat[x+i,y+j][2] - im1_mat[i,j][2]))
if bestSAD > SAD:
bestX = x
bestY = y
bestSAD = SAD
im3 = Image.new(im2.mode, im2.size)
im3 = im2.copy()
im3_mat = im3.load()
# Draw a output picture with bounding box
for i in range(width1):
im3_mat[bestX+i,bestY+height1] = (32, 20, 255)
im3_mat[bestX+i,bestY] = (32, 20, 255)
for j in range(height1):
im3_mat[bestX,bestY+j] = (32, 20, 255)
im3_mat[bestX+width1,bestY+j] = (32, 20, 255)
im3.save("result.jpg")
"""
| true |
cc3840156f2b4f7c69784ae39b21bcc405170ea9 | Python | hurttttr/MyPythonCode | /网络爬虫/dangdang/dangdang/spiders/myspider2.py | UTF-8 | 954 | 2.578125 | 3 | [] | no_license | import scrapy
from dangdang.items import DangdangItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class Myspider2Spider(CrawlSpider):
    """Crawl dangdang.com category listing pages (following the pgN
    pagination links) and yield one DangdangItem per product."""
    name = 'myspider2'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/pg1-cid4005627.html']

    rules = (
        # Follow every paginated listing page and parse each one.
        Rule(LinkExtractor(allow=r'pg\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # One <li> per product in the listing grid.
        lst = response.xpath('//ul[@class="bigimg cloth_shoplist"]/li')
        for i in lst:
            item = DangdangItem()
            item['name'] = i.xpath('./p[@class="name"]/a/@title').extract()[0]
            item['price'] = i.xpath('./p[@class="price"]/span/text()').extract()[0][1:]  # drop currency symbol
            item['link'] = i.xpath('./p[@class="link"]/a/@href').extract()[0]
            # Strip the Chinese "comments" suffix so only the count remains.
            item['comment'] = i.xpath('./p[@class="star"]/a/text()').extract()[0].replace('条评论', '')
            yield item
| true |
8875103ae20b8817f92ad8acfcdcfeed15dff846 | Python | erx00/modified-lsb | /lsb.py | UTF-8 | 1,912 | 3.125 | 3 | [] | no_license | from utils import str_to_binary, binary_to_str
from utils import int_to_binary, binary_to_int
def encode_lsb(image, message):
    """
    Converts the input message into binary and encodes it
    into the input image using the least significant bit
    algorithm.

    A '<EOS>' marker is appended so the decoder knows where the message
    stops.  If the message does not fit into the image, the remainder is
    silently dropped (the function returns as soon as pixels run out).

    :param image: (ndarray) cover image (supports grayscale
                  and RGB); modified in place.
    :param message: (str) message
    :return: (ndarray) stego image
    """
    message += '<EOS>'
    bits = str_to_binary(message)  # helper from utils — bit-sequence type assumed, TODO confirm
    nbits = len(bits)
    # Normalise grayscale to a 1-channel 3-D array so one loop handles both.
    if len(image.shape) == 2:
        image = image.reshape((image.shape[0], image.shape[1], 1))
    nrows, ncols, nchannels = image.shape
    for i in range(nrows):
        for j in range(ncols):
            for c in range(nchannels):
                # Linear index of this channel value in row-major order.
                pos = ncols * nchannels * i + nchannels * j + c
                if pos < nbits:
                    b = int_to_binary(int(image[i, j, c]))
                    b.set(bits[pos], -1)  # overwrite the least significant bit
                    image[i, j, c] = binary_to_int(b)
                else:
                    return image
    return image
def decode_lsb(image):
    """
    Decodes message from input image using the least
    significant bit algorithm.

    Reads one bit per channel value, reassembles 8-bit chunks into
    characters, and stops as soon as the trailing '<EOS>' marker appears.

    :param image: (ndarray) stego image (supports grayscale
                  and RGB)
    :return: (str) message
    """
    if len(image.shape) == 2:
        image = image.reshape((image.shape[0], image.shape[1], 1))
    nrows, ncols, nchannels = image.shape
    bits, message = [], ""
    for i in range(nrows):
        for j in range(ncols):
            for c in range(nchannels):
                bits.append(int_to_binary(int(image[i, j, c]))[-1])
                pos = ncols * nchannels * i + nchannels * j + c
                if pos % 8 == 7:  # a full byte collected -> one character
                    message += binary_to_str(bits)
                    bits = []
                if message[-5:] == '<EOS>':
                    return message[:-5]
    # No marker found: return whatever was decoded.
    return message
| true |
27e1a9a7f58cef79ad9b33ccd6dfa06ca9b9a60f | Python | testdummies/iRacer | /iRacer.py | UTF-8 | 9,347 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python
#https://nebelprog.wordpress.com/2013/09/02/create-a-simple-game-menu-with-pygame-pt-4-connecting-it-to-functions/
import sys
import pygame
import os
import modules.configuration.active_settings as st
from shutil import copyfile
import modules.bluetooth_controls.connection as bt
import modules.interface.keyboard_input as key
import webbrowser
pygame.init()

# RGB colour constants used throughout the menu rendering.
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
class MenuItem(pygame.font.Font):
    """A single selectable line of menu text that knows its own screen
    position and can hit-test the mouse cursor.

    NOTE(review): the tuple parameters ``(pos_x, pos_y)`` / ``(posx, posy)``
    are Python 2-only syntax.
    """
    def __init__(self, text, font=None, font_size=30,
                 font_color=WHITE, (pos_x, pos_y)=(0, 0)):
        pygame.font.Font.__init__(self, font, font_size)
        self.text = text
        self.font_size = font_size
        self.font_color = font_color
        # Pre-rendered surface for the current colour/style.
        self.label = self.render(self.text, 1, self.font_color)
        self.width = self.label.get_rect().width
        self.height = self.label.get_rect().height
        self.dimensions = (self.width, self.height)
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.position = pos_x, pos_y

    def is_mouse_selection(self, (posx, posy)):
        # True when the cursor lies inside this item's bounding box.
        if (posx >= self.pos_x and posx <= self.pos_x + self.width) and \
           (posy >= self.pos_y and posy <= self.pos_y + self.height):
            return True
        return False

    def set_position(self, x, y):
        self.position = (x, y)
        self.pos_x = x
        self.pos_y = y

    def set_font_color(self, rgb_tuple):
        # Re-render the label so the new colour takes effect immediately.
        self.font_color = rgb_tuple
        self.label = self.render(self.text, 1, self.font_color)
class GameMenu():
    """A vertically-centred text menu: lays out MenuItems, highlights the
    current selection (keyboard or mouse) and dispatches the callback from
    *funcs* keyed by the item's text when it is activated."""
    def __init__(self, screen, items, funcs, bg_color=BLACK, font=None, font_size=30,
                 font_color=WHITE):
        self.screen = screen
        self.scr_width = self.screen.get_rect().width
        self.scr_height = self.screen.get_rect().height
        self.bg_color = bg_color
        self.clock = pygame.time.Clock()
        self.funcs = funcs
        self.items = []
        for index, item in enumerate(items):
            menu_item = MenuItem(item, font, font_size, font_color)
            # t_h: total height of text block
            t_h = len(items) * menu_item.height
            pos_x = (self.scr_width / 2) - (menu_item.width / 2)
            # This line includes a bug fix by Ariel (Thanks!)
            # Please check the comments section of pt. 2 for an explanation
            pos_y = (self.scr_height/2) - (t_h/2) + ((index*2) + index * menu_item.height)
            menu_item.set_position(pos_x, pos_y)
            self.items.append(menu_item)
        self.mouse_is_visible = True
        self.cur_item = None  # index of the keyboard-selected item, or None

    def set_mouse_visibility(self):
        if self.mouse_is_visible:
            pygame.mouse.set_visible(True)
        else:
            pygame.mouse.set_visible(False)

    def set_keyboard_selection(self, key):
        """
        Marks the MenuItem chosen via up and down keys.
        """
        for item in self.items:
            # Return all to neutral
            item.set_italic(False)
            item.set_font_color(WHITE)

        if self.cur_item is None:
            self.cur_item = 0
        else:
            # Find the chosen item (arrow keys wrap around the ends).
            if key == pygame.K_UP and \
                    self.cur_item > 0:
                self.cur_item -= 1
            elif key == pygame.K_UP and \
                    self.cur_item == 0:
                self.cur_item = len(self.items) - 1
            elif key == pygame.K_DOWN and \
                    self.cur_item < len(self.items) - 1:
                self.cur_item += 1
            elif key == pygame.K_DOWN and \
                    self.cur_item == len(self.items) - 1:
                self.cur_item = 0

        self.items[self.cur_item].set_italic(True)
        self.items[self.cur_item].set_font_color(RED)

        # Finally check if Enter or Space is pressed
        if key == pygame.K_SPACE or key == pygame.K_RETURN:
            text = self.items[self.cur_item].text
            self.funcs[text]()

    def set_mouse_selection(self, item, mpos):
        """Marks the MenuItem the mouse cursor hovers on."""
        if item.is_mouse_selection(mpos):
            item.set_font_color(RED)
            item.set_italic(True)
        else:
            item.set_font_color(WHITE)
            item.set_italic(False)

    def run(self):
        # Event/draw loop; returns only when the window is closed.
        mainloop = True
        while mainloop:
            # Limit frame speed to 50 FPS
            self.clock.tick(50)
            mpos = pygame.mouse.get_pos()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    mainloop = False
                if event.type == pygame.KEYDOWN:
                    self.mouse_is_visible = False
                    self.set_keyboard_selection(event.key)
                if event.type == pygame.MOUSEBUTTONDOWN:
                    for item in self.items:
                        if item.is_mouse_selection(mpos):
                            self.funcs[item.text]()
            # Any mouse movement re-enables mouse-driven selection.
            if pygame.mouse.get_rel() != (0, 0):
                self.mouse_is_visible = True
                self.cur_item = None
            self.set_mouse_visibility()
            # Redraw the background
            self.screen.fill(self.bg_color)
            for item in self.items:
                if self.mouse_is_visible:
                    self.set_mouse_selection(item, mpos)
                self.screen.blit(item.label, item.position)
            pygame.display.flip()
if __name__ == "__main__":
    # Menu callbacks.  Each screen builds a GameMenu whose item labels map
    # to these functions; navigating "Back" re-enters main_menu, so menus
    # call each other recursively (NOTE(review): the call stack grows with
    # every navigation and never unwinds).
    def main_menu():
        menu_items = ('Start', 'Settings', 'Help', 'Quit')
        funcs = {'Start': start,
                 'Settings': settings,
                 'Help': help_page,
                 'Quit': quit_app}
        pygame.display.set_caption('iRacer: Main Menu')
        # gm = GameMenu(screen,funcs.keys(), funcs) # gives wrong order from function keys
        gm = GameMenu(screen, menu_items, funcs)
        gm.run()

    def help_page():
        print ("display help")
        menu_items = ('Open Help Page', 'Back')
        funcs = {'Open Help Page': load_help_page,
                 'Back': main_menu
                 }
        pygame.display.set_caption('iRacer: Start')
        # gm = GameMenu(screen,funcs.keys(), funcs) # gives wrong order from function keys
        gm = GameMenu(screen, menu_items, funcs)
        gm.run()

    def load_help_page():
        # Open the local help.html (next to this script) in the browser.
        url = "file://" + os.path.dirname(os.path.realpath(__file__)) + "/help.html"
        print (url)
        webbrowser.open(url, new=2)

    def start():
        # Menu contents depend on the current bluetooth connection state.
        if connected:
            menu_items = ('Disconnect', 'Control', 'Back')
        else:
            menu_items = ('Connect', 'Back')
        funcs = {'Connect': connect,
                 'Disconnect': disconnect,
                 'Control': control,
                 'Back': main_menu}
        pygame.display.set_caption('iRacer: Start')
        # gm = GameMenu(screen,funcs.keys(), funcs) # gives wrong order from function keys
        gm = GameMenu(screen, menu_items, funcs)
        gm.run()

    # Mutable application state shared by the callbacks below.
    connected = False
    transmission = "AUTOMATIC"

    def connect():
        global connected
        try:
            st.load_config()
            print ("config up")
            bt.initialise_bluetooth_settings()
            print ("settings up")
            bt.connect_bluetooth()
            print ("connecting")
            connected = True
        except:  # NOTE(review): bare except hides the actual failure reason
            connected = False
        start()

    def disconnect():
        global connected
        if connected:
            try:
                bt.disconnect_bluetooth()
                connected = False
            except:  # keep the "connected" state if teardown failed
                connected = True
        else:
            pass
        start()

    def control():
        # Hand keyboard input through to the car while connected.
        if connected:
            key.check_active_keys()
        else:
            pass

    def quit_app():
        # Best-effort bluetooth teardown before exiting.
        try:
            bt.disconnect_bluetooth()
        except:
            pass
        sys.exit()

    def settings():
        menu_items = ('Change Settings', 'Restore Default Settings', 'Back')
        funcs = {'Change Settings': change_settings,
                 'Restore Default Settings': restore_def_settings,
                 'Back': main_menu}
        pygame.display.set_caption('iRacer: Settings')
        gm = GameMenu(screen, menu_items, funcs)
        gm.run()

    def change_settings():
        #Instead of opening settings figure out how to do following...get settings menu from QT - and start it from different file
        #before doing anything
        # The current transmission value doubles as the menu label.
        menu_items = (transmission, 'Back')
        funcs = {transmission: set_transmission,
                 'Back': main_menu}
        pygame.display.set_caption('iRacer: Settings')
        gm = GameMenu(screen, menu_items, funcs)
        gm.run()

    def set_transmission():
        # Toggle AUTOMATIC <-> MANUAL, persist it, and redraw the menu.
        global transmission
        if transmission == "AUTOMATIC":
            transmission = "MANUAL"
        else:
            transmission = "AUTOMATIC"
        st.update_config_field('DRIVE_SETTINGS', 'Transmission', transmission)
        change_settings()

    def restore_def_settings():
        print ("restoring default settings")
        copyfile('modules/configuration/config_default.ini', 'modules/configuration/config.ini')

    # Creating the screen
    screen = pygame.display.set_mode((350, 240), 0, 32)
    main_menu()
| true |
56131bbdd308f9a06edb2fbfab06c86b6cedcaad | Python | Arcensoth/pymcutil | /tests/util/__init__.py | UTF-8 | 1,357 | 2.828125 | 3 | [
"MIT"
] | permissive | import unittest
from pymcutil import util
class UtilTestCase(unittest.TestCase):
    """Tests for pymcutil.util's small helpers: default/defaults, first,
    require and get_logger."""

    def test_default_with_value(self):
        self.assertEqual(util.default('hello'), 'hello')

    def test_default_with_value_and_default(self):
        self.assertEqual(util.default('hello', 'world'), 'hello')

    def test_default_with_value_none_and_default(self):
        self.assertEqual(util.default(None, 'world'), 'world')

    def test_default_with_value_none_and_default_none(self):
        self.assertEqual(util.default(None, None), None)

    def test_defaults(self):
        # Explicit entries in the dict win over keyword fallbacks.
        self.assertEqual(
            util.defaults(dict(a='alpha', b='beta'), b='bravo', c='charlie'),
            dict(a='alpha', b='beta', c='charlie'))

    def test_first_immediate(self):
        self.assertEqual(util.first('a', 'b', 'c'), 'a')

    def test_first_eventually(self):
        self.assertEqual(util.first(None, None, None, 'a', 'b', 'c'), 'a')

    def test_first_none(self):
        self.assertEqual(util.first(None, None, None), None)

    def test_require(self):
        self.assertEqual(util.require(123, 'number'), 123)

    def test_require_error(self):
        with self.assertRaises(ValueError):
            util.require(None, 'number')

    def test_get_logger(self):
        log = util.get_logger([], 'mylist')
        self.assertEqual(log.name, 'list:mylist')
| true |
# Tuple literal without parentheses — an alternate way of declaring a
# fixed sequence.  Renamed from `list`, which shadowed the built-in type.
values = 0, 2, 1, 1, 2, 4, 3, 9, 6

print(1 in values)            # True
print(7 in values)            # False
print(9 - 4 in values)        # membership test of 5 -> False
print(9 - 4 not in values)    # True
print([values[2]] + [values[1]])  # [1, 2]
# done
c07eed7512d12cebbfee300220bf2d04f5508f79 | Python | hheimo/Deep-Q-Learning-CartPole | /main.py | UTF-8 | 4,067 | 3.09375 | 3 | [] | no_license | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import optimizers
from keras.callbacks import Callback, TensorBoard, ModelCheckpoint
from collections import deque
import random
import tensorflow as tf
import gym
import A2C
print("Program start:")
##Hyperparameters
episodes = 5000 #number of games
#cb = TensorBoard()
class DQNAgent:
    """Deep Q-Network agent for CartPole: a two-hidden-layer MLP maps the
    4-dim state to per-action Q-values, trained by experience replay with
    an epsilon-greedy policy."""

    # Create agent
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        # Previous experiences (replay buffer; oldest entries dropped first)
        self.memory = deque(maxlen=2000)
        self.gamma = 0.95    # discount rate
        self.epsilon = 1.0   # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.model = self._build_model()

    def _customLoss(self, target, pred):
        # NOTE(review): dead code — always returns 0 and is never wired
        # into model.compile below.
        return 0

    # Build NN
    def _build_model(self):
        model = Sequential()
        # 1st layer with input size of 4 and 24 nodes
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        # 2nd layer with 24 nodes
        model.add(Dense(24, activation='relu'))
        # Output layer with 2 nodes for actions (left, right)
        model.add(Dense(self.action_size, activation='linear'))
        model.summary()
        # Compile model
        model.compile(loss='mse', optimizer=optimizers.Adam(self.learning_rate),
                      metrics=['accuracy'])
        return model

    # Save previous experiences
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        # Explore
        if np.random.rand() <= self.epsilon:
            # return random action
            return random.randrange(self.action_size)
        # Compute action probabilities
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns highest action

    # Method that trains the neural net with experiences in the memory
    def replay(self, batch_size):
        # Fetch random memories
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                # Bellman target: r + gamma * max_a' Q(s', a')
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # Decay exploration after each replay pass.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
if __name__ == '__main__':
    # Main training loop.  NOTE(review): the DQNAgent above is unused here —
    # the commented-out lines show its usage; the active path trains the
    # A2C agent online every step instead.
    # init gym
    env = gym.make('CartPole-v0')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    # Deep reinforcement
    # agent = DQNAgent(state_size, action_size)
    # A2C agent
    agent2 = A2C.A2CAgent(state_size, action_size)
    # Iterate game
    for e in range(episodes):
        # reset state
        state = env.reset()
        state = np.reshape(state, [1, 4])
        for time_t in range(500):
            # Rendering
            env.render()
            # decide action
            # action = agent.act(state)
            action = agent2.get_action(state)
            # advance to next state
            next_state, reward, done, _ = env.step(action)
            next_state = np.reshape(next_state, [1, 4])
            # Save state, action, reward and done
            # agent.remember(state, action, reward, next_state, done)
            # A2C
            agent2.train_model(state, action, reward, next_state, done)
            # Next state to current state
            state = next_state
            # when game ends
            if done:
                print("episode: {}/{}, score: {}"
                      .format(e, episodes, time_t))
                break
        # train agent
        # if len(agent.memory) > 32:
        #     agent.replay(32)
| true |
6d27261a6d4ce2bfca15d30055a7c52c0cfcd50e | Python | Shibaken2017/Practice2 | /RecursiveMacroEconomics/continuous_markov_chain_2.py | UTF-8 | 644 | 2.6875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm,gaussian_kde
from quantecon import LAE
# Simulate k sample paths of X_t = theta*|X_{t-1}| + d*Z_t from several
# initial conditions and show per-time-step boxplots for each.
n = 20    # time steps per series
k = 5000  # sample paths per initial condition
J = 6     # number of initial conditions / subplots
theta = 0.9
d = np.sqrt(1 - theta ** 2)
delta = theta / d  # NOTE(review): computed but never used below
fig, axes = plt.subplots(J, 1, figsize=(10, 4 * J))
initial_condition = np.linspace(8, 0, J)
X = np.empty((k, n))
for j in range(J):
    axes[j].set_ylim(-4, 8)
    title = "time series from t" + str(initial_condition[j])
    axes[j].set_title(title)
    Z = np.random.randn(k, n)
    X[:, 0] = initial_condition[j]
    for t in range(1, n):
        X[:, t] = theta * np.abs(X[:, t - 1]) + d * Z[:, t]
    axes[j].boxplot(X)
plt.show()
a99a12b5fc934095e9b96c736142c4e6dca4fcf4 | Python | mcherkassky/listen | /youtube/youtube_tools.py | UTF-8 | 1,942 | 2.515625 | 3 | [] | no_license | __author__ = 'mcherkassky'
import re
from gdata.youtube import service
# Authenticated YouTube (GData) client shared by the functions below.
# NOTE(review): plaintext account credentials are committed in source —
# these should move to environment variables/config and be rotated.
client = service.YouTubeService()
client.email = 'mcherkassky@gmail.com'
client.password = 'mAbel1127'
client.source = 'my-example-app'
client.ProgrammaticLogin()
#get video feed from search query
def getVideoFeed(search_text):
    """Search YouTube and return up to five results as plain dicts.

    Each dict carries title/img/view_count/author/youtube_url/artist/
    duration/description; values are decoded defensively since feed
    entries may contain malformed bytes (Python 2 ``unicode`` with
    errors='replace').
    """
    query = service.YouTubeVideoQuery()
    query.vq = search_text
    query.orderby = 'relevance'
    query.max_results = 5
    query.racy = 'include'  # query.racy
    feed = client.YouTubeQuery(query)
    feed_out = []
    for entry in feed.entry:
        title = entry.title.text
        img = entry.media.thumbnail[0].url
        # The entry id ends with ".../<video-id>"; keep just the id part.
        youtube_url = re.findall('[^/]+$', entry.id.text)[0]
        try:
            view_count = entry.statistics.view_count
        except:  # statistics may be absent on some entries
            view_count = ""
        author = entry.author[0].name.text
        duration = entry.media.duration.seconds
        try:
            description = entry.media.description.text[0:50] + '...'
        except:  # description may be missing/None
            description = "No description available"
        feed_out.append({'title': unicode(title, errors='replace'),
                         'img': unicode(img, errors='replace'),
                         'view_count': unicode(view_count, errors='replace'),
                         'author': unicode(author, errors='replace'),
                         'youtube_url': unicode(youtube_url, errors='replace'),
                         'artist': 'YouTube',
                         'duration': unicode(duration, errors='replace'),
                         'description': unicode(description, errors='replace')})
    return feed_out
#get actual youtube objects from feed
def getVideoObjects(search_text):
    """Run the same 5-result relevance search as getVideoFeed but return
    the raw GData entry objects instead of plain dicts."""
    query = service.YouTubeVideoQuery()
    query.vq = search_text
    query.orderby = 'relevance'
    query.max_results = 5
    query.racy = 'include'  # query.racy
    feed = client.YouTubeQuery(query)
    out = [entry for entry in feed.entry]
    return out
72811e16608de0e25c7da6b6e71f6c965f1c51c5 | Python | Ahuge/yativ | /src/yativ/scale.py | UTF-8 | 1,244 | 2.9375 | 3 | [
"MIT"
] | permissive | import numpy
def scale(input, float_factor):
    """Scale a 2-D array down by block averaging.

    factor == 1 returns the array untouched; factor > 1 averages each
    factor x factor window (truncated to int, clipped at the edges);
    factor < 1 would be an upscale, which is not implemented.
    """
    def downsize(factor):
        shrunk = []
        total_rows = len(input)
        total_cols = len(input[0])
        for row_start in range(0, total_rows, factor):
            out_row = []
            for col_start in range(0, total_cols, factor):
                # Gather every in-bounds cell of this factor x factor window.
                window = []
                for r in range(row_start, min(row_start + factor, total_rows)):
                    source_row = input[r]
                    for c in range(col_start, min(col_start + factor, len(source_row))):
                        window.append(source_row[c])
                if window:
                    out_row.append(sum(window) / len(window))
            shrunk.append(out_row)
        return numpy.asarray(shrunk)

    def upscale(factor):
        raise NotImplementedError("Upscaling is not yet implemented!")

    if float_factor == 1:
        return input
    elif float_factor > 1:
        return downsize(int(float_factor))
    else:
        return upscale(float_factor)
| true |
57ab77861bc307735c91536443aad82b1445e15c | Python | jose31canizar/MUVR | /convert.py | UTF-8 | 700 | 2.6875 | 3 | [] | no_license | import json
# import utm
with open('nodes.json', 'r+') as f, open('stop_times.json', "r+") as s:
data = json.load(f)
stops = json.load(s)
for d in data["DocumentElement"]:
d['arrivalTime'] = [x['arrival_time'] for x in stops if x['stop_id'] == d['Node']]
# latlon = utm.to_latlon(float(d['PosxNode'].replace(',', '.')), float(d['PosyNode'].replace(',', '.str(')), 30, 'T') # <--- add `id` value.)
# d['latitude'] = str(latlon[0])
# d['longitude'] = str(latlon[1])
# print(json.dumps(d, indent=1))
f.seek(0) # <--- should reset file position to the beginning.
json.dump(data, f, indent=4)
f.truncate() # remove remaining part | true |
57a9135e79bba15e564b7dd20dabf3ec4481a24b | Python | Newcomer03/Basic-Programs | /Python/Day 5/A5Q1.py | UTF-8 | 1,331 | 4.125 | 4 | [] | no_license | s = input("Enter a String:\n")
v_c = 0 #vowel counter
c_c = 0 #consonant counter
u_c = 0 #uppercase characters counter
l_c = 0 #lowercase characters counter
d_c = 0 #digit counter
space_c = 0 #space counter
s_c = 0 #special character counter
for i in s:
if i.isalpha(): #checks for alphabet
if i in "aeiouAEIOU": #checks for vowels
v_c += 1
else: #if not vowel then consonant
c_c += 1
if i.isupper(): #checks for uppercase characters
u_c += 1
else: #if not uppercase then it is lowercase
l_c += 1
elif i.isdigit(): #if not alphabet then checks for digit
d_c += 1
else: #if not alphanumeric then enters this block
if i == " ":
space_c += 1 #checks for spaces
else:
s_c += 1 #checks for special characters
print("No. of Vowels : ",v_c)
print("No. of Consonants : ",c_c)
print("No. of Uppercase Characters : ",u_c)
print("No. of Lowercase Characters : ",l_c)
print("No. of Digit : ",d_c)
print("No. of Spaces : ",space_c)
print("No. of Special Characters : ",s_c)
| true |
7e8dc57e660ecef41e462d3550c063eb8edd086a | Python | demul/image_segmentation_project | /FCN_MSCOCO/data_load.py | UTF-8 | 8,203 | 2.640625 | 3 | [] | no_license | import os
import numpy as np
from imageio import imread
import cv2
from cv2 import resize
from matplotlib.pyplot import imshow, hist, show, figure
import util
class ImgLoader :
def __init__(self, dataset='coco'):
if dataset == 'pascal' :
pascal_root = 'C:\VOC2012'
seg_folder = 'ImageSets/Segmentation' # Annotation 위치 (txt)
origin_img = 'JPEGImages' # 원본 데이터 위치 (jpg)
class_img = 'SegmentationClass' # ground truth 위치 (png)
self.seg_image_annotation = os.path.join(pascal_root, seg_folder)
self.seg_image_origin = os.path.join(pascal_root, origin_img)
self.seg_image_class = os.path.join(pascal_root, class_img)
else:
coco_root = 'C:\coco'
seg_folder = 'SimpleAnnotation' # Annotation 위치 (txt)
origin_img = 'images' # 원본 데이터 위치 (jpg)
class_img = 'SegmentationClass' # ground truth 위치 (png)
self.seg_image_annotation = os.path.join(coco_root, seg_folder)
self.seg_image_origin = os.path.join(coco_root, origin_img)
self.seg_image_class = os.path.join(coco_root, class_img)
self.origin_path = None
self.class_path = None
self.class_color_list = None
def load_name_list(self, train_or_val):
with open(self.seg_image_annotation + '/' + train_or_val + '.txt', 'r') as f:
lines = f.readlines()
# 파일명 양 끝의 공백과 특수문자 제거
# list 단위로 한꺼번에 실행하기 위해 다음 함수 사용.
## map(리스트에 함수 적용하여 매핑)
## lambda(임시함수의 주소 반환)
data_lines = map(lambda x: x.strip(), lines)
path2origin = map(lambda x: os.path.join(self.seg_image_origin, x) + '.jpg', data_lines)
data_lines = map(lambda x: x.strip(), lines)
path2class = map(lambda x: os.path.join(self.seg_image_class, x) + '.png', data_lines)
# map -> list 변환 후 리턴
origin_img_list = list(path2origin)
class_img_list = list(path2class)
return origin_img_list, class_img_list
def load_img_list(self, file_path, batch_size, batch_count):
img_list = []
for i in range(batch_count, batch_count+batch_size):
file = file_path[i]
img = imread(file)
img_list.append(img)
return img_list
def load_random_img_list(self, batch_size):
input_img_list = []
label_img_list = []
idx = np.arange(0, len(self.origin_path))
np.random.shuffle(idx)
idx = idx[:batch_size]
for i in idx:
input_file = self.origin_path[i]
label_file = self.class_path[i]
input_img = imread(input_file)
label_img = imread(label_file)
input_img_list.append(input_img)
label_img_list.append(label_img)
return input_img_list, label_img_list
def calculate_size(self,img_list):
h_list = []
w_list =[]
for img in img_list :
h, w, _ = img.shape
h_list.append(h)
w_list.append(w)
h_max = max(h_list)
h_min = min(h_list)
w_max = max(w_list)
w_min = min(w_list)
return h_max, h_min, w_max, w_min
def make_batch_resize(self, img_list, height, width, interpolation=1):
if interpolation == 0:
interpolation = cv2.INTER_NEAREST
elif interpolation == 1:
interpolation = cv2.INTER_LINEAR
else:
print('NOT ALLOWED INTERPOLATION METHOD')
exit()
batch=np.empty((
len(img_list),
height,
width,
3
), dtype=np.float32)
for idx, img in enumerate(img_list) :
if (len(img.shape)<3) : ###MS-COCO에는 흑백도 섞여있다; ㅡㅡ
img=np.tile(img, (3,1,1)) ###그냥 3 채널로 복사해버려서 가짜 흑백 이미지를 만들자. 새 차원을 추가하면서 복제할려면 이런식으로 해야 한다.
img=np.transpose(img, (1,2,0)) ###맨 앞차원이 늘어나게 되므로 맨 앞차원을 맨 뒷차원으로 전치시켜줘야한다.
batch[idx] = resize(img[:, :, :3], (height, width, 3), interpolation=interpolation) *255 #png파일이라 R,G,B,alpha의 4차원 데이터이므로 alpha차원을 제거
################################################################################################
####### class image의 경우 픽셀값이 소수가 되는 것을 방지하기 위해 NN으로 보간해야 한다!########
################################################################################################
# skimage.transform.resize(img, output_size, order)
# order=0: Nearest - neighbor
# order=1: Bi - linear(default)
# order=2: Bi - quadratic
# order=3: Bi - cubic
# order=4: Bi - quartic
# order=5: Bi - quintic
return batch
def make_label_batch(self, img_batch,):
newbatch = np.empty((
img_batch.shape[0],
img_batch.shape[1],
img_batch.shape[2],
len(self.class_color_list)+1 #배경의 차원 하나를 추가해 줄 것이므로!
), dtype=np.float32)
for i in range (img_batch.shape[0]):
label_fg = np.zeros([img_batch.shape[1], img_batch.shape[2]], dtype=np.bool)
class_img = img_batch[i, :, :, :].astype(np.uint8)
for j, color in enumerate(self.class_color_list):
label = np.all(class_img == color, axis=2)
label_fg |= label
newbatch[i, :, :, j+1] = label.astype(np.float32)
label_bg = ~label_fg
newbatch[i, :, :, 0] = label_bg.astype(np.float32)
return newbatch
def run(self, train_or_val) :
self.origin_path, self.class_path = self.load_name_list(train_or_val)
self.class_color_list = util.make_dict_from_colormap()
#hmax, hmin, wmax, wmin=calculate_size(class_img_list)
if not os.path.isfile('colormap.txt'):
print('There is no Color Map. Making Color Map.')
self.make_colormap()
def nextbatch(self, batch_size, itr, stochastic = False):
if (stochastic) :
origin_img_list, class_img_list = self.load_random_img_list(batch_size)
else:
origin_img_list = self.load_img_list(self.origin_path, batch_size, itr)
class_img_list = self.load_img_list(self.class_path, batch_size, itr)
##########################각종 Agumentation 기법을 여기넣으면 좋을듯###############################
input_batch = self.make_batch_resize(origin_img_list, 320, 320, 1)
class_batch = self.make_batch_resize(class_img_list, 320, 320, 0)
class_label_batch = self.make_label_batch(class_batch)
##################################################################################################
return input_batch, class_label_batch
def nextbatch_for_inference(self, batch_size, itr):
origin_img_list = self.load_img_list(self.origin_path, batch_size, itr)
class_img_list = self.load_img_list(self.class_path, batch_size, itr)
##########################각종 Agumentation 기법을 여기넣으면 좋을듯###############################
input_batch = self.make_batch_resize(origin_img_list, 320, 320, 1)
class_batch = self.make_batch_resize(class_img_list, 320, 320, 0)
##################################################################################################
return input_batch, class_batch
def make_colormap(self):
label_img_list = []
for file in self.class_path :
label_img = imread(file)
label_img_list.append(label_img)
class_batch = self.make_batch_resize(label_img_list, 320, 320, 0)
util.make_colormap_from_label(class_batch) | true |
c0a8f727f48c8d083e886f38f502d47ea3b5dbf9 | Python | hcmMichaelTu/python | /lesson08/turtle_draw.py | UTF-8 | 335 | 3.359375 | 3 | [] | no_license | import turtle as t
def forward(deg):
t.setheading(deg)
t.forward(d)
d = 20
t.shape("turtle")
t.speed("slowest")
t.onkey(lambda: forward(180), "Left")
t.onkey(lambda: forward(0), "Right")
t.onkey(lambda: forward(90), "Up")
t.onkey(lambda: forward(270), "Down")
t.onkey(t.bye, "Escape")
t.listen()
t.mainloop()
| true |
b29116cd143017b204ec2380b911c34c9697bd18 | Python | dkcira/sortingalgos | /insertion_sort.py | UTF-8 | 484 | 3.6875 | 4 | [] | no_license | from audit import audit
@audit
def insertion_sort(array):
""" insertion sort """
for i in range(len(array)):
j = i
# push as much to the left as possible
while j > 0 and array[j] < array[j-1]:
array[j], array[j-1] = array[j-1], array[j]
j -= 1
if __name__ == '__main__':
import random
array = [random.randint(-20, 60) for i in range(32)]
print(' array', array)
insertion_sort(array)
print('sorted', array) | true |
9e7b1b5473563ae930c20e51fe1a54dd5319c495 | Python | metafridays/SkillFactory-QA-Public | /Task15_6_5.py | UTF-8 | 180 | 3.0625 | 3 | [] | no_license | a = [['Егор', 15, 165, 44], ['Лена', 20, 160, 45], ['Витя', 32, 180, 77], ['Полина', 28, 175, 65]]
a_sort = sorted(a, key=lambda x: x[0])
print(a)
print(a_sort)
| true |
dda2844ef6cc40a155a908abb69c4f4fb87743e5 | Python | delta-plus/python-networking-examples | /webClient.py | UTF-8 | 513 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python3
import sys
import socket
usage = '''
Simple Web Client
-----------------
Usage: ./webclient.py [URL] [port]
If no port is specified, default is 80.
'''
if (len(sys.argv) == 1):
print(usage)
exit()
if (len(sys.argv) == 2):
host = sys.argv[1]
port = 80
else:
host = sys.argv[1]
port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.sendall(b'GET / HTTP/1.1\r\nHost: ' + host.encode() + b'\r\n\r\n')
print(s.recv(1024).decode())
| true |
7d4099af0a205810dada6d68554a4fa02f377f95 | Python | wkwkgg/atcoder | /abc/problems080/079/c.py | UTF-8 | 332 | 2.65625 | 3 | [] | no_license | ABCD = input()
S = list(ABCD)
op_cnt = len(S) - 1
ans = None
for i in range(2**op_cnt):
op = ["+"] * op_cnt
for j in range(op_cnt):
if i >> j & 1:
op[op_cnt - j - 1] = "-"
expr = S[0] + "".join(o+s for s,o in zip(S[1:], op))
if eval(expr) == 7:
ans = expr + "=7"
break
print(ans) | true |
02dd7138d0536331a07b0a7b8e7f785f276c1dd8 | Python | Dadajon/machine-learning-a-z | /01-data-preprocessing/categorical-data.py | UTF-8 | 960 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 23:27:03 2019
@author: dadajonjurakuziev
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing the dataset
dataset = pd.read_csv('data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Taking care of missing data
from sklearn.impute import SimpleImputer
missing_values = SimpleImputer(missing_values=np.nan, strategy='mean', verbose=0)
missing_values = missing_values.fit(X[:, 1:3])
X[:, 1:3] = missing_values.transform(X[:, 1:3])
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.preprocessing import OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
| true |
287d4a87bcb8b86dfe4073601ad41aa40b79e22b | Python | sapagat/numpy_specs | /spec/item_selection_and_manipulation/sort_spec.py | UTF-8 | 938 | 3.3125 | 3 | [] | no_license | from mamba import *
from expects import *
import numpy as np
from ..matchers import equal_np_array
with description('Item selection and manipulation'):
with description('working with "sort"'):
with it('sorts an array in-place'):
x = np.array([40, 20, 10, 30])
x.sort()
expect(x).to(equal_np_array([10, 20, 30, 40]))
with it('sorts by the last axis by default'):
x = np.array([
[40, 20],
[10, 30]
])
x.sort()
expect(x).to(equal_np_array([
[20, 40],
[10, 30]
]))
with it('allows to specify the axis to sort by'):
x = np.array([
[40, 20],
[10, 30]
])
x.sort(axis=0)
expect(x).to(equal_np_array([
[10, 20],
[40, 30]
]))
| true |
85a44b9437cf0ddffd606e381ae6e471063f29c2 | Python | blockrepublictech/py-demux-eos-runner | /runner.py | UTF-8 | 2,440 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | from demuxeos import Demux
# Callback start block function (OPTIONAL)
# Starts this block and stores primitive details in django database
# Actual: Start a database transaction and store block info, then return
def start_block(block): #block=None makes it an optional parameter **kwargs accepts whatever arguments given
print("Block "+ str(block['block_num']) +" started!")
# Callback action functions
# Stores the action associated with transaction ID
# Actual: Add information about particular action to DB
def action(action, block, transaction): #(action, **kwargs) would ignore the block and transaction params by the user
#print("action=", action)
#print("block=", block)
#print("transaction=", transaction)
print("Action for account=eosio.unregd, name=add")
def action1(action, block, transaction):
print("Action for (None, None)")
def action2(action, block, transaction):
print("Action2 for account=eosio.unregd, name=add")
def action3(action, block, transaction):
print("Action3 for random account and name")
def effect_action(action, **kwargs):
print("Effect function")
# Callback commit block function (OPTIONAL)
# Commit block when entire process is
# Actual: Commit the FB transaction
def commit_block(block):
print ("Block " + str(block['block_num']) + " commited.")
d = Demux(start_block_fn=start_block, commit_block_fn=commit_block)
# Tells demux there are callback functions and registers them
d.register_action(action, "eosio.unregd", "add")
d.register_action(action2, "eosio.unregd", "add")
d.register_action(action1)
d.register_action(action3, "randomAccount", "randomName")
d.register_action(effect_action, is_effect=True)
# Input block number to be stored
#block_num = input('Enter a block number: ')
# Iterates through the transactions and actions of the block number
#d.process_block(block_num)
#d.process_block(block_num, include_effects=True)
#print("action_dict=", d.action_dict)
#print("head_block=", d._head_block)
# Input a start and end block for multi-block processing
start_block = int(input('Enter start block number: '))
end_block = int(input('Enter end block number: '))
#end_block = None
# Input a start and end block for multi-block processing
#d.process_blocks(start_block)
d.process_blocks(start_block, end_block, include_effects=True) # only effects
#d.process_blocks(start_block, end_block) # only updates
#print('action_dict=', demux.action_dict)
| true |
6e4484f9d9bde2448579d6391383e9e2a81a7370 | Python | beingveera/whole-python | /python/python-base/python/sma.py | UTF-8 | 715 | 3.671875 | 4 | [] | no_license | import re
import random as rn
strs=input("Enter your String : ")
a=re.compile("[a-z]")
x=a.findall(strs)
print("Total Lower Case Charactor in String : {} ".format(len(x)))
print("List of Lower Case Charactor is Sting : {} ".format(x))
b=re.compile("[A-Z]")
y=b.findall(strs)
print("Total Upper Charactor in String : {} ".format(len(y)))
print("List of Upper Case Charactor is Sting : {} ".format(y))
c=re.compile("\d+")
z=c.findall(strs)
print("Total Number in String : {} ".format(len(z)))
print("List of Numbers is Sting : {} ".format(z))
d=re.compile("\W+")
w=d.findall(strs)
print("Total Spacial Charactor in String : {} ".format(len(w)))
print("List of Special Charactor is Sting : {} ".format(w))
| true |
4da30c2427c4dc30c3cae1bae0e30a2d29877f23 | Python | zkzk123zk/Classic_model_examples | /2019_MobileNet-v3_IdentificationAndDetection/GUI_view/get_elimination_strategy.py | UTF-8 | 3,832 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding:utf-8 -*-
def elimination_strategy(name):
print(name)
if name == '老鼠':
str = '1、查鼠情:查清密度、分布等。' \
'2、制定综合防治方案:捕杀、药杀、防鼠、环境治理等。' \
'3、确保人、畜安全,防止环境污染。(1)投饵在鼠道上或出没场所,儿童、禽畜和宠物无法取食。' \
'(2)记录投放点,鼠药投放点要有明显的标示灭鼠后收回残饵和死鼠。' \
'(3)不用常见的食品做诱饵,以免误食。' \
'(4) 毒饵消耗完毕要及时补充毒饵。' \
'(5)投放、回收时戴手套,操作后彻底洗手。' \
'4、定期检查,巩固防治效果。'
elif name == '蟑螂':
str = '1、经常保持隐蔽场所的清洁。寻找虫源可在夜晚当蟑螂四出活动时突然开灯,观察何处有蟑螂,又往何处递窜,找其隐蔽场所后再消灭之。' \
'2、对臆蔽场所可用科士威用在蟑螂经常往来的场所。' \
'3、对所有缝隙应用纸筋、石灰(500克纸筋加50千克石灰,再加水调成)涂料,嵌堵缝隙。' \
'4、如仓库、厂房或室内蟑螂过多,危害严重时,可用硫黄或磷化铝熏蒸灭除'
elif name == '苍蝇':
str = '苍蝇防治有很多方法,比如物理防治,即保持室内的清洁卫生,并且安装纱门纱窗,防止苍蝇进入室内,在室内可以放置粘蝇纸引诱苍蝇觅食,' \
'达到灭杀作用,必要时选用有机磷类药物进行喷杀。'
elif name == '鼠妇':
str = '主要采用化学防制(滞留喷洒、设置隔离带)和环境治理相结合的综合防制方法,在短期内提高杀灭率,将鼠妇的种群密度减少到最低程度,使之不易繁殖起来,造成危害。' \
'1)在外围绿化孳生地撒投双硫磷,杀灭土壤及绿化内的幼虫,此项处理基本上可以长期有效控制鼠妇孳生,避免长期用药处理,造成对生态环境的破坏。' \
'2)鼠妇发现的活动区域用药物做滞留喷洒,如墙基缝隙、地面等区域,此项处理是针对性鼠妇较多区域的处理。' \
'3)在室内常闭安全门口,布放粘虫纸,用于粘捕入侵室内成虫,此项处理是有效防止成虫入侵室内造成危害,减少成虫入侵室内的风险。' \
'4)定期对鼠妇孳生区域进行检查,如有发现成虫活动,马上采取上述方法进行处理,在每年鼠妇孳生的季节,提前做好鼠妇预防工作,做到提前控制,阻止鼠妇在贵方范围内孳生,减少虫害带来的风险。'
elif name == '蚂蚁':
str = '1、物理防治:由于家庭蚂蚁的巢穴不容易或不便于确定,物理方法防治室内蚂蚁比较困难,只能限于杀灭可以看到的工蚁等,不能断根。一般可用开水烫等方法。' \
'2、化学防治:家庭蚂蚁的化学防治,主要依赖于毒饵诱杀。将适口性好、驱避作用小的毒饵投放在室内各种缝隙中或蚁道(蚂蚁的取食线路)上,' \
'利用蚂蚁喜欢搬食的习性,将毒饵搬入巢中,毒杀蚁后、蚁王,达到消灭全巢的目的。使用化学方法时,首先应使用毒饵诱杀全巢蚂蚁,' \
'切记不要一开始就在室内全面施用气雾杀虫剂或喷雾剂,以免造成种群扩散而加重蚁害。'
else:
pass
return str
| true |
9daa1ca7987e301deb1db775ecd554b7d51ca747 | Python | Guy-Pelc/NAND11 | /JackAnalyzer.py | UTF-8 | 1,123 | 3.234375 | 3 | [] | no_license | import JackTokenizer as Tk
import CompilationEngine as CmpE
from sys import argv
import os
"""
The analyzer program operates on a given source, where source is either a file name of the form
Xxx.jack or a directory name containing one or more such files.
For each source Xxx.jack, the analyzer goes through the following logic:
1. Create a JackTokenizer from the Xxx.jack input file.
2. Create an output file called Xxx.xml and prepare it for writing;
3. Use the CompilationEngine to compile the input JackTokenizer into the output file.
"""
def analyzer(file_path):
input_file_path = file_path
output_file_path = file_path[:-5] + ".xml"
output_vm_path = file_path[:-5] + ".vm"
tk = Tk.JackTokenizer(input_file_path)
with open(output_file_path, 'w') as f:
compiler = CmpE.CompilationEngine(tk, f, output_vm_path)
compiler.compile_class()
return
assert len(argv) == 2, 'path argument expected'
if argv[1][-5:] != '.jack':
dirs = os.listdir(argv[1])
for file in dirs:
if file[-5:] == '.jack':
analyzer(argv[1] + '/' + file)
else:
analyzer(argv[1])
| true |
5624e2b2eeb68a12528db73c265f1432ee88ea66 | Python | soyoungkwon/car_accident_UK | /accident_UK.py | UTF-8 | 4,185 | 3.328125 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
# Step: General cleaning of the data
# 1. Identify the areas with the most accidents.
# 2. Most dangerous roads
# 3. Accidents across different seasons
# 4. Most dangerous days
# 5. Most important causes of the accidents
# 6. Create a predictive model to evaluate the probability of car accidents
# 7. Create dashboard
# import libraries
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
import folium
# from arcgis.gis import GIS
# file name & path name
dir_curr = os.getcwd()
dir_car = os.listdir(dir_curr)[2]
accident_files = ['accidents_2005_to_2007.csv','accidents_2009_to_2011.csv', 'accidents_2012_to_2014.csv']
# load csv data
car_list = []
for file in accident_files:
file_fullname = dir_car + '/' + file
car_one = pd.read_csv(file_fullname, index_col=None)
car_list.append(car_one)
car_total = pd.concat(car_list, axis=0, ignore_index=True)
car_total.to_csv('accidents_2005_to_2014.csv')
# 1. Identify the areas with the most accidents.
# visualize accidents in the map
def map_overlay(car_total):
car_total.plot(kind='scatter', x='Longitude', y='Latitude', c = 'Urban_or_Rural_Area', s=3)#, cmap = plt.get_cmap("jet"))
map_hooray = folium.Map(location=[51.5074, 0.1278], zoom_start = 10)
map_overlay(car_total)
# # Urban vs Rural area
# ==== MUST SOLVE =====#
# 2. Most dangerous roads
def plot_roads(car_total):
plt.hist(car_total['Road_Type'])
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light')
# plt.show()
plot_roads(car_total)
# 3. Accidents across different seasons
a = pd.to_datetime(car_total['Date'], dayfirst = True)
car_total['Month'] = a.dt.strftime('%m').astype(str)
car_total['Year'] = a.dt.strftime('%Y').astype(str)
# === Month ===== #
def plot_by_month(car_total):
# n_month = 12
car_month = car_total['Month'].value_counts().sort_index()
n_month = len(car_month)
plt.bar(np.arange(n_month), car_month)
plt.xticks(np.arange(n_month),car_month.index)
plt.show()
plot_by_month(car_total)
def plot_by_year(car_total):
car_year = car_total['Year'].value_counts().sort_index()
n_year = len(car_year)
plt.plot(np.arange(n_year), car_year, '.--')
plt.xticks(np.arange(n_year),car_year.index)#year_list)
plt.show()
#==== Year ======#
plot_by_year(car_total)
# 4. Most dangerous days
# day of week
def bar_dayofweek(car_total):
car_dayofweek = car_total['Day_of_Week'].value_counts().sort_index()
DayNames = ['Sun','Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
ax = car_dayofweek.plot.bar(x='Day_of_Week', color='gray')
ax.set_xticklabels(DayNames, rotation=0)
# plt.show()
bar_dayofweek(car_total)
# 5. Most important causes of the accidents
# plot by road surface
def pie_chart_road_surf(car_total):
car_conds = (car_total['Road_Surface_Conditions'].value_counts())
# car_conds.plot(kind='pie', y='Road_Surface_Conditions', autopct='%1.1f%%',fontsize=10)
car_conds.plot(kind='bar')
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light')
plt.show()
pie_chart_road_surf(car_total)
# plot by weather
def pie_chart_weather(car_total):
car_weather = car_total['Weather_Conditions'].value_counts()
car_weather.plot(kind='bar', y='Weather_Conditions')#, autopct='%1.1f%%', fontsize=10)
# plt.show()
pie_chart_weather(car_total)
def plot_road_conds(car_total):
road_conds = car_total['Road_Surface_Conditions'].value_counts().index
# n_road_conds = len(road_conds)
car_road_conds = car_total['Road_Surface_Conditions'].value_counts()
car_road_conds.plot(kind='bar', y='Road_Surface_Conditions')
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light')
plot_road_conds(car_total)
# 6. Create a predictive model to evaluate the probability of car accidents
# accident by each hour
def car_time(car_total):
n_hours = 24
car_time = np.zeros(n_hours)
for time in range(n_hours):
car_time[time]
pd_hour = pd.to_datetime(car_total['Time'], format = '%H:%M').dt.hour
car_time = pd_hour.value_counts().sort_index()
# car_time.plot(kind='line')
plt.plot(car_time, '.--')
| true |
250c198f1f8f4da9829a27f05d9648eb55e1068a | Python | huytran321/Applied-Algorithmics-340-Project-6-Bank | /binarysearchtree.py | UTF-8 | 6,617 | 3.71875 | 4 | [] | no_license | class Node:
def __init__(self, key, value = None):
self.__key = key
self.__value = value
self.__leftChild = None
self.__rightChild = None
def getLeftChild(self):
return self.__leftChild
def getRightChild(self):
return self.__rightChild
def setLeftChild(self, theNode):
self.__leftChild = theNode
def setRightChild(self, theNode):
self.__rightChild = theNode
def getKey(self):
return self.__key
def getValue(self):
return self.__value
def setValue(self, value):
self.__value = value
def isLeaf(self):
return self.getLeftChild() == None and self.getRightChild == None
def __str__(self):
return str(self.__key) + " " + str(self.__value)
def __repr__(self):
return str(self.__key) + " " + str(self.__value)
class BinarySearchTree:
def __init__(self):
self.__size = 0
self.__root = None
def size(self):
return self.__size
def isEmpty(self):
return self.__size == 0
def get(self, key):
if self.__root == None:
return None
if self.__root.getKey() == key:
return self.__root.getValue()
currentNode = Node(8)
currentNode = self.__root
while currentNode != None and currentNode.getKey() != key:
if currentNode.getKey() > key:
currentNode = currentNode.getLeftChild()
else:
currentNode = currentNode.getRightChild()
if currentNode == None:
return None
else:
return currentNode.getValue()
def __getitem__(self, key):
return self.get(key)
def insert(self, key, value):
if self.__root == None:
self.__root = Node(key, value)
self.__size = 1
return
currentNode = self.__root
while currentNode != None:
if currentNode.getKey() == key:
currentNode.setValue(value)
return
elif currentNode.getKey() > key:
if currentNode.getLeftChild() == None:
newNode = Node(key, value)
currentNode.setLeftChild(newNode)
self.__size += 1
return
else:
currentNode = currentNode.getLeftChild()
else:
if currentNode.getRightChild() == None:
newNode = Node(key, value)
currentNode.setRightChild(newNode)
self.__size += 1
return
else:
currentNode = currentNode.getRightChild()
def __setitem__(self, key, value):
self.insert(key, value)
def inOrderTraversal(self, func):
theNode = self.__root
self.inOrderTraversalRec(self.__root, func)
def inOrderTraversalRec(self, theNode, func):
if theNode != None:
self.inOrderTraversalRec(theNode.getLeftChild(), func)
func(theNode.getKey(), theNode.getValue())
self.inOrderTraversalRec(theNode.getRightChild(), func)
def remove(self, key):
if self.__root == None:
return False
# This block of code deals with the root node being removed
if self.__root.getKey() == key:
if self.__root.isLeaf():
self.__root == None
elif self.__root.getRightChild() == None:
self.__root = self.__root.getLeftChild()
elif self.__root.getLeftChild() == None:
self.__root = self.__getRightChild()
else:
replaceNode = self.__getAndRemoveRightSmall(self.__root)
self.__root.setKey(replaceNode.getKey())
self.__root.setValue(replaceNode.getValue())
self.__size -= 1
return True
#Want currentNode pointer to point to partent of node to remove
currentNode = self.__root
while currentNode != None:
if currentNode.getLeftChild() and currentNode.getLeftChild().getKey() == key:
foundNode = currentNode.getLeftChild()
if foundNode.isLeaf():
currentNode.setLeftChild(None)
elif foundNode.getLeftChild() == None:
currentNode.setLeftChild(foundNode.getRightChild())
elif foundNode.getRightChild() == None:
currentNode.setLeftChild(foundNode.getLeftChild())
else:
replaceNode = self.__getAndRemoveRightSmall(foundNode)
foundNode.setKey(replaceNode.getKey())
foundNode.setValue(replaceNode.getValue())
self.__size -= 1
break
elif currentNode.getRightChild() and currentNode.getRightChild().getKey() == key:
foundNode = currentNode.getRightChild()
if foundNode.isLeaf():
currentNode.setRightChild(None)
elif foundNode.getLeftChild() == None:
currentNode.setRightChild(foundNode.getLeftChild())
elif foundNode.getRightChild() == None:
currentNode.setRightChild(foundNode.getRightChild())
else:
replaceNode = self.__getAndRemoveRightSmall(foundNode)
foundNode.setKey(replaceNode.getKey())
foundNode.setValue(replaceNode.getValue())
self.__size -= 1
break
else:
if currentNode.getKey() > key:
currentNode = currentNode.getLeftChild()
else:
currentNode = currentNode.getRightChild()
return False
def getAndRemoveRightSmall(self):
pass
def contains(self, key):
if self.__root == None:
return False
elif self.__root.getKey() == key:
return True
currentNode = self.__root
while currentNode != None and currentNode.getKey() != key:
if currentNode.getKey() > key:
currentNode = currentNode.getLeftChild()
else:
currentNode = currentNode.getRightChild()
return currentNode != None
def searchTree(self, key):
return self.contains(self.__root, key)
| true |
16ed453242ca2925389aced957efb689dbe79784 | Python | ShepherdCode/Soars2021 | /SimTools/template_program_moseman_copy.py | UTF-8 | 1,923 | 3.453125 | 3 | [
"MIT"
] | permissive | import os, sys, traceback, argparse
class Template_Class():
'''How to write a class.'''
def __init__(self,debug=False):
'''How to write a constructor.'''
self.debug=debug
def show_off(self):
'''How to access instance variables.'''
print("TemplateClass")
if self.debug:
print("\tIn debug mode.")
def write_file(self,filename,lines=10):
'''How to set parameters with defaults.'''
if self.debug:
print("\tWriting %d lines to file: %s."%
(lines,filename))
with open(filename, 'w') as outfa:
for line in range(0,lines):
outfa.write("2 ^ " + str(line) + " = " + str(2**line) + "\n")
print("\tDon't forget to delete the file!")
def no_op(self):
'''How to write a method that does nothing.'''
pass
def args_parse():
'''How to parse command-line arguments.'''
global args
parser = argparse.ArgumentParser(
description='Bare bones Python program.')
parser.add_argument(
'numlines',
help='output file size (10)',
type=int)
parser.add_argument(
'outfile',
help='output filename (fasta)',
type=str)
parser.add_argument(
'--debug',
help='Print traceback after exception.',
action='store_true')
args = parser.parse_args()
if __name__ == "__main__":
'''How to start a program from the command line.'''
try:
args_parse()
numlines=args.numlines
outfile=args.outfile
debug=args.debug
tmp = Template_Class(debug)
tmp.show_off()
tmp.write_file(outfile,numlines)
tmp.no_op()
except Exception:
print()
if args.debug:
print(traceback.format_exc())
else:
print("There was an error.")
print("Run with --debug for traceback.")
| true |