| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
import numpy as np
def reconstruct_contour(mode, tp, rec_type='EFA', first_mode=0, last_mode=2):
N_modes = len(mode.locoL.values)
# timepoint -= cell->contour[cellnumber][cell->contourlength[cellnumber]].t*cell->locoefa[cellnumber][1].tau/(2.*M_PI)
if mode.r[1]<0.:
tp = -tp
x = np.zeros(len(tp))
y = np.zeros(len(tp))
if rec_type=='EFA':
if first_mode == 0:
x += mode.alpha[0]
y += mode.gamma[0]
for p in range(np.max([1,first_mode]), np.min([last_mode,N_modes+1])):
x += mode.alpha[p] * np.cos(2.*np.pi*p*tp) + mode.beta[p] * np.sin(2.*np.pi*p*tp)
y += mode.gamma[p] * np.cos(2.*np.pi*p*tp) + mode.delta[p] * np.sin(2.*np.pi*p*tp)
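# The EFA branch above implements the elliptic Fourier reconstruction
#   x(t) = alpha_0 + sum_p [ alpha_p*cos(2*pi*p*t) + beta_p*sin(2*pi*p*t) ]
#   y(t) = gamma_0 + sum_p [ gamma_p*cos(2*pi*p*t) + delta_p*sin(2*pi*p*t) ]
# Minimal usage sketch (illustrative only; assumes `mode` holds alpha/beta/gamma/delta
# coefficient arrays from a LOCO-EFA style decomposition and that the full function
# returns x, y):
#   tp = np.linspace(0., 1., 200)
#   x, y = reconstruct_contour(mode, tp, rec_type='EFA', first_mode=0, last_mode=10)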
"""
COAsT add on with shoothill api wrapper
Created on 2021-11-04
@author: jelt
This package augments the COAsT package, acting as a wrapper for the Shoothill
API. This does require a key to be set up. It is assumed that the key is
privately stored in
config_keys.py
The Shoothill API aggregates data across the country for a variety of instruments but,
requiring a key, is trickier to set up than the EA API.
To discover the stationId for a particular measurement site, identify the site via
https://www.gaugemap.co.uk/#!Map and then check the integer id in the URL or on its
Twitter page.
E.g. Liverpool (Gladstone Dock) stationId="13482", which is read by default.
Conda environment:
coast + requests,
(E.g. workshop_env w/ requests)
### Build python environment:
## Create an environment with coast installed
yes | conda env remove --name workshop_env
yes | conda create --name workshop_env python=3.8
conda activate workshop_env
yes | conda install -c bodc coast=1.2.7
# enforce the GSW package number (something fishy with the build process bumped up this version number)
yes | conda install -c conda-forge gsw=3.3.1
# install cartopy, not part of coast package
yes | conda install -c conda-forge cartopy=0.20.1
## install request for shoothill server requests
conda install requests
Example usage:
from shoothill_api.shoothill_api import GAUGE
liv = GAUGE()
liv.dataset = liv.read_shoothill_to_xarray(ndays=5)
liv.plot_timeseries()
To do:
* logging doesn't work
"""
import coast
import datetime
import numpy as np
import xarray as xr
import scipy
import logging
logging.basicConfig(filename='shoothill2.log', filemode='w+')
logging.getLogger().setLevel(logging.INFO)
#%% ################################################################################
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
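# Minimal usage sketch (illustrative only): boxcar-smooth a noisy series with a
# 9-sample moving average.
#   t = np.linspace(0., 2.*np.pi, 200)
#   noisy = np.sin(t) + 0.3*np.random.randn(200)
#   smoothed = smooth(noisy, box_pts=9)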
#%% ################################################################################
class GAUGE(coast.Tidegauge):
""" Inherit from COAsT. Add new methods """
def __init__(self, ndays: int=5, startday: datetime=None, endday: datetime=None, station_id="7708"):
try:
import config_keys # Load secret keys
except ImportError:
logging.info('Need a Shoothill API Key. Use e.g. create_shoothill_key() having obtained a public key')
#self.SessionHeaderId=config_keys.SHOOTHILL_KEY #'4b6...snip...a5ea'
self.ndays=ndays
self.startday=startday
self.endday=endday
self.station_id=station_id # Shoothill id
self.dataset = None
#self.dataset = self.read_shoothill_to_xarray(station_id="13482") # Liverpool
pass
def get_mean_crossing_time_as_xarray(self, date_start=None, date_end=None):
"""
Get the height (constant) and times of crossing the mean height as xarray
"""
pass
def get_HW_to_xarray(self, date_start=None, date_end=None):
""" Extract actual HW value and time as an xarray """
pass
def find_nearby_high_and_low_water(self, var_str, target_times:xr.DataArray=None, winsize:int=2, method='comp', extrema:str="both"):
"""
WORK IN PROGRESS
Finds high and low water for a given variable, in close proximity to an
input xarray of times.
Returns in a new Tidegauge object with similar data format to
a TIDETABLE, and same size as target_times.
winsize: +/- hours search radius
target_times: xr.DataArray of target times to search around (e.g. harmonic predictions)
var_str: root of var name for new variable.
extrema (str): "both". extract max and min (default)
: "max". Extract only max
: "min". Extract only min
"""
#x = self.dataset.time
#y = self.dataset[var_str]
nt = len(target_times)
if extrema == "min":
time_min = np.zeros(nt)
values_min = np.zeros(nt)
for i in range(nt):
HLW = self.get_tide_table_times( time_guess=target_times[i].values, measure_var=var_str, method='window', winsize=winsize )
logging.debug(f"{i}: {coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)}")
time_min[i], values_min[i] = coast.stats_util.find_maxima(HLW.time.values, -HLW.values, method=method)
new_dataset = xr.Dataset()
new_dataset.attrs = self.dataset.attrs
new_dataset[var_str + "_lows"] = (var_str + "_lows", -values_min.data)
new_dataset["time_lows"] = ("time_lows", time_min.data)
elif extrema == "max":
time_max = np.zeros(nt)
values_max = np.zeros(nt)
for i in range(nt):
HLW = self.get_tide_table_times( time_guess=target_times[i].values, measure_var=var_str, method='window', winsize=winsize )
logging.debug(f"{i}: {coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)}")
time_max[i], values_max[i] = coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)
new_dataset = xr.Dataset()
new_dataset.attrs = self.dataset.attrs
new_dataset[var_str + "_highs"] = (var_str + "_highs", values_max.data)
new_dataset["time_highs"] = ("time_highs", time_max.data)
elif extrema == "both":
time_max = np.zeros(nt)
values_max = np.zeros(nt)
time_min = np.zeros(nt)
values_min = np.zeros(nt)
for i in range(nt):
HLW = self.get_tide_table_times( time_guess=target_times[i].values, measure_var=var_str, method='window', winsize=winsize )
logging.debug(f"{i}: {coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)}")
time_max[i], values_max[i] = coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)
HLW = self.get_tide_table_times( time_guess=target_times[i].values, measure_var=var_str, method='window', winsize=winsize )
logging.debug(f"{i}: {coast.stats_util.find_maxima(HLW.time.values, HLW.values, method=method)}")
time_min[i], values_min[i] = coast.stats_util.find_maxima(HLW.time.values, -HLW.values, method=method)
new_dataset = xr.Dataset()
new_dataset.attrs = self.dataset.attrs
new_dataset[var_str + "_highs"] = (var_str + "_highs", values_max.data)
new_dataset["time_highs"] = ("time_highs", time_max.data)
new_dataset[var_str + "_lows"] = (var_str + "_lows", -values_min.data)
new_dataset["time_lows"] = ("time_lows", time_min.data)
else:
print("Not expecting that extrema case")
pass
#print(time_max)
#print(values_max)
new_object = coast.Tidegauge()
new_object.dataset = new_dataset
return new_object
def find_flood_and_ebb_water(self, var_str, method="comp", **kwargs):
"""
Finds the time and values for the inflection points (between high and
low water) for a given variable. These correspond to the max flood and
ebb points.
Returns in a new TIDEGAUGE object with similar data format to
a TIDETABLE.
Apply rolling smoother to iron out kinks - only interested in the
steepest, near linear, part of the timeseries.
The derivative is then taken (2nd-order accurate central difference).
Maxima/minima of the derivative are then found and returned.
Methods:
'comp' :: Find inflection by comparison with neighbouring values.
Uses scipy.signal.find_peaks. **kwargs passed to this routine
will be passed to scipy.signal.find_peaks.
'cubic':: Find the inflections using the roots of cubic spline.
Uses scipy.interpolate.InterpolatedUnivariateSpline
and scipy.signal.argrelmax. **kwargs are not activated.
NOTE: Currently only the 'comp' and 'cubic' methods implemented. Future
methods include linear interpolation or refinements.
See also:
coast.Tidegauge.find_high_and_low_water()
Example:
import coast
liv= xr.open_mfdataset("archive_shoothill/liv_2021.nc")
obs_time = np.datetime64('2021-11-01')
winsize = 6
win = GAUGE()
win.dataset = liv.sel( time=slice(obs_time - np.timedelta64(winsize, "h"), obs_time + np.timedelta64(winsize, "h")) )
y = win.dataset.sea_level.compute()
x = win.dataset.time.compute()
f = y.differentiate("time")
time_max, values_max = coast.stats_util.find_maxima(x, f, method="comp")
interp = y.interp(time=time_max)
plt.plot( win.dataset.time, win.dataset.sea_level); plt.plot(interp.time, interp,'+'); plt.show()
"""
y = self.dataset[var_str].rolling(time=3, center=True).mean() # Rolling smoothing. Note we are only interested in the steep bit when it is near linear.
f = y.differentiate("time")
x = self.dataset.time
if(0):
# Convert x to float64 (assuming f is/similar to np.float64)
if type(x.values[0]) == np.datetime64: # convert to decimal sec since 1970
x_float = ((x.values - np.datetime64("1970-01-01T00:00:00")) / np.timedelta64(1, "s")).astype("float64")
# x_float = x.values.astype('float64')
f_float = f.values.astype("float64")
flag_dt64 = True
else:
x_float = x.values.astype("float64")
f_float = f.values.astype("float64")
flag_dt64 = False
if type(f.values[0]) != np.float64:
print("find_maxima(): type(f)=", type(f))
print("I was expecting a np.float64")
## Fit cubic spline
#f_smooth = scipy.interpolate.InterpolatedUnivariateSpline(x_float, f_float, k=5)
#x = np.linspace(0,2*np.pi,100)
#y = np.sin(x) + np.random.random(100) * 0.8
#plot(x, y,'o')
#plot(x, smooth(y,3), 'r-', lw=2)
#plot(x, smooth(y,19), 'g-', lw=2)
#f_smooth = smooth(f_float,5)
#f_smooth = smooth(y,5)
## FROM STATS_UTIL.PY
# Convert back to datetime64 if appropriate (to nearest second)
if flag_dt64:
N = len(x_float)
x_out = [
np.datetime64("1970-01-01T00:00:00") + np.timedelta64(int(np.round(x_float[i])), "s")
for i in range(N)
]  # (assumed completion: convert float seconds since the epoch back to datetime64, to the nearest second)
# coding: utf-8
# ## <u> go_chandra - Python </u>
#
# The following code is a script adapted from Gladstone's *go_chandra* IDL script.
#
# The code takes the corrected file from *sso_freeze* (hardwired by user), performs a coordinate transformation on the X-ray emission to wrap the PSF around Jupiter and plots the emission of the poles.
# In[1]:
#Purpose: New public Python pipeline used to produce polar plots of Jupiter's X-ray emission over the full observation and/or over defined time
# intervals. If using plots produced by this pipeline, please cite Weigt et al. (in prep.) where the pipeline is discussed in some
# detail
#Category:
#Authors: <NAME> (<EMAIL>), adapted from Randy Gladstone's 'gochandra' IDL script
"""All the relevant packages are imported for code below"""
import go_chandra_analysis_tools as gca_tools # import the defined functions to analyse Chandra data and perform coordinate transformations
import custom_cmap as make_me_colors # import custom color map script
import label_maker as make_me_labels # import script to label multiple subplots
import numpy as np
import pandas as pd
import scipy
from scipy import interpolate
from astropy.io import ascii
from astropy.io import fits as pyfits
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import colors
import matplotlib.gridspec as gridspec
import os
from datetime import *
"""Setup the font used for plotting"""
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['xtick.labelsize']=14
matplotlib.rcParams['ytick.labelsize']=14
matplotlib.rcParams['agg.path.chunksize'] = 1000000
# AU to meter conversion - useful later on (probably a function built in already)
AU_2_m = 1.49598E+11
AU_2_km = 1.49598E+8
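# (Illustrative alternative, not used here: the same conversion is available from
#  astropy.units, e.g. `from astropy import units as u; (1 * u.AU).to(u.km).value`
#  gives ~1.49598e8 km; plain constants are kept for clarity.)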
# ### Reading in Chandra Event file, extracting all the relevant info and defining assumptions used in analysis
#
# User is prompted to enter the file path of the corrected event file. The script finds the file from the selected folder and reads in all the relevant headers. The assumptions used for the mapping are also defined here.
# In[2]:
# User prompted to enter the file path of the corrected file
print('')
folder_path = input('Enter file path of event file to be analysed (post correction): ')
print('')
cor_evt_location = []
# Script then searches through the folder looking for the filename corresponding to the corrected file
# for file in os.listdir(str(folder_path)):
# if file.startswith("hrcf") and file.endswith("pytest_evt2.fits"):
# cor_evt_location.append(os.path.join(str(folder_path), file))
for file in os.listdir(str(folder_path)):
if file.endswith("pytest_evt2.fits"):
cor_evt_location.append(os.path.join(str(folder_path), file))
detector = os.path.basename(cor_evt_location[0])[0:4]
# File is then read in with relevant header information extracted:
hdulist = pyfits.open(cor_evt_location[0], dtype=float)
matplotlib.rcParams['agg.path.chunksize'] = 10000
img_events=hdulist['EVENTS'].data # the data of the event file
img_head = hdulist[1].header # the header information of the event file
#img_data = hdulist[1].data
bigtime = img_events['time'] # time
bigxarr = img_events['X'] # x position of photons
bigyarr = img_events['Y'] # y position of photons
bigchannel = img_events['pha'] # pha channel the photons were found in
obs_id = img_head['OBS_ID'] # observation id of the event
tstart = img_head['TSTART'] # the start and...
tend = img_head['TSTOP'] #... end time of the observation
# The date of the observation is read in...
datestart = img_head['DATE-OBS']
evt_date = pd.to_datetime(datestart) #... and converted to datetime format to allow the relevant information to be read...
evt_hour = evt_date.hour
evt_doy = evt_date.strftime('%j')
evt_mins = evt_date.minute
evt_secs = evt_date.second
evt_DOYFRAC = gca_tools.doy_frac(float(evt_doy), float(evt_hour), float(evt_mins), float(evt_secs)) #... used to calculate the fractional Day of
# Year (DOY) of the observation
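# (Assumption about the gca_tools helper: doy_frac presumably returns
#  doy + (hours + minutes/60 + seconds/3600)/24, i.e. a fractional day-of-year,
#  so doy_frac(32, 12, 0, 0) would give 32.5 for noon on day 32.)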
ra_centre, ra_centre_rad = img_head['RA_NOM'], np.deg2rad(img_head['RA_NOM']) # the RA of Jupiter at the centre of the chip is read in as...
dec_centre, dec_centre_rad = img_head['DEC_NOM'], np.deg2rad(img_head['DEC_NOM']) #... well as Jupiter's DEC
j_rotrate = np.rad2deg(1.758533641E-4) # Jupiter's rotation rate, converted to deg/s (period ~9.93 h)
#sat_rotrate = np.rad2deg(1.637884058E-4) # Saturn's rotation period
hdulist.close()
# Assumptions used for mapping:
if detector == 'acis':
scale = 0.4920
fwhm = 0.8 # FWHM of the HRC-I point spread function (PSF) - in units of arcsec
psfsize = 25 # size of PSF used - in units of arcsec
alt = 400 # altitude where X-ray emission is assumed to occur in Jupiter's ionosphere - in units of km
else:
scale = 0.13175 # scale used when observing Jupiter using Chandra - in units of arcsec/pixel
fwhm = 0.8 # FWHM of the HRC-I point spread function (PSF) - in units of arcsec
psfsize = 25 # size of PSF used - in units of arcsec
alt = 400 # altitude where X-ray emission is assumed to occur in Jupiter's ionosphere - in units of km
# ### Reading in Jupiter Horizon's file
#
# Algorithm uses the start and end date from the observation to generate an ephemeris file (from the JPL Horizons server) to use for analysis. The ephemeris file used takes CXO as the observer
# In[3]:
"""Brad's horizons code to extract the ephemeris file"""
from astropy.time import Time #convert between different time coordinates
from astropy.time import TimeDelta #add/subtract time intervals
#-*- coding: utf-8 -*-
from astroquery.jplhorizons import Horizons #automatically download ephemeris
#Need to do this to fix astroquery bug, otherwise it won't find the ephemeris data
from astroquery.jplhorizons import conf
conf.horizons_server = 'https://ssd.jpl.nasa.gov/horizons_batch.cgi'
# The start and end times are taken from the event file header and converted for the Horizons query.
tstart_eph=Time(tstart, format='cxcsec')
tstop_eph=Time(tend, format='cxcsec')
eph_tstart = Time(tstart_eph, out_subfmt='date_hm')
dt = TimeDelta(0.125, format='jd')
eph_tstop = Time(tstop_eph + dt, out_subfmt='date_hm')
# Below sets the parameters of which observer the ephemeris file is generated from. For example, '500' = centre of the Earth, '500@-151' = CXO
obj = Horizons(id=599,location='500@-151',epochs={'start':eph_tstart.iso, 'stop':eph_tstop.iso, 'step':'1m'}, id_type='majorbody')
eph_jup = obj.ephemerides()
# Extracts relevant information needed from the ephemeris file
cml_spline_jup = scipy.interpolate.UnivariateSpline(eph_jup['datetime_jd'], eph_jup['PDObsLon'],k=1)
lt_jup = eph_jup['lighttime']
sub_obs_lon_jup = eph_jup['PDObsLon']
sub_obs_lat_jup = eph_jup['PDObsLat']
eph_dates = pd.to_datetime(eph_jup['datetime_str'])
eph_dates = pd.DatetimeIndex(eph_dates)
eph_doy = np.array(eph_dates.strftime('%j')).astype(int)
eph_hours = eph_dates.hour
eph_minutes = eph_dates.minute
eph_seconds = eph_dates.second
eph_DOYFRAC_jup = gca_tools.doy_frac(eph_doy, eph_hours, eph_minutes, eph_seconds) # DOY fraction from ephemeris data
jup_time = (eph_DOYFRAC_jup - evt_DOYFRAC)*86400.0 + tstart # local time of Jupiter
# ### Select Region for analysis
#
# Plots the photons (x,y) position on a grid of defined size in arcseconds (defaulted at [-50,50] in both x and y). Jupiter is centred on the HRC instrument. The photon information from the defined region is then extracted and saved for analysis.
# In[4]:
# converting the x and y coordinates from the event file into arcseconds
# Aimpoint of observations -> HRC: (16384.5, 16384.5), ACIS: (4096.5, 4096.5)
if detector == 'acis':
bigxarr_region = (bigxarr - 4096.5)*scale
bigyarr_region = (bigyarr - 4096.5)*scale
xlimits, ylimits = [-30,30], [-30,30]
else:
bigxarr_region = (bigxarr - 16384.5)*scale
bigyarr_region = (bigyarr - 16384.5)*scale
xlimits, ylimits = [-50,50], [-50,50]
# define the x, y, and pha channel limits (0-90 is default here)
cha_min = 0
cha_max = 90 # default 90
# the photon data is stored in a pandas dataframe
evt_df = pd.DataFrame({'time': bigtime, 'x': bigxarr, 'y': bigyarr, 'pha': bigchannel})
# defines the region the photons will be selected from
indx = gca_tools.select_region(xlimits[0], xlimits[1],ylimits[0], ylimits[1],bigxarr_region,bigyarr_region,bigchannel,cha_min,cha_max)
# find the x and y position of the photons
x_ph = bigxarr_region[indx]
y_ph = bigyarr_region[indx]
# plots the selected region (sanity check: Jupiter should be in the centre)
fig, axes=plt.subplots(figsize=(7,7))
axes = plt.gca()
plt.plot(x_ph,y_ph, 'o', markersize=0.5,linestyle='None',color='blue')
plt.title('Selected Region (ObsID %s)' % obs_id)
plt.xlim(xlimits)
plt.ylim(ylimits)
print('')
print('')
print('Once you are happy with the selected region, close the figure window to continue analysis')
print('')
print('')
plt.show()
# saves the selected region as a text file
np.savetxt(str(folder_path) + r"\%s_selected_region.txt" % obs_id, np.c_[x_ph, y_ph, bigtime[indx], bigchannel[indx]])
# ## Implementing the time interval within the data (if necessary)
#
# User is prompted whether or not they would like to separate the data into intervals of dt, where dt is in minutes. The user selects yes (y) or no (n). If yes, the user is then prompted for their value of dt in minutes.
# In[27]:
# user prompted if they want to split the observation into equal time intervals...
print('')
time_int_decision = input("Would you like the data split into time intervals? [y/n] : ")
# if 'y', run the below code
if time_int_decision == 'y':
delta_mins = float(input("Time interval to be used in analysis (in minutes): ")) # define interval in minutes
print('')
ph_data = ascii.read(str(folder_path) + r"\%s_selected_region.txt" % obs_id) # read in the selected region data and...
ph_time = ph_data['col3'] #... define the time column
# the photon times are turned into an array and converted to datetime format
np_times = np.array(ph_time)
timeincxo = Time(np_times, format='cxcsec')#, in_subfmt='date_hm')
chandra_evt_time = timeincxo.datetime #- datetime.timedelta(minutes=40)
# from the start end end time of the photons detected, the time interval of dt minutes is created...
obs_start = chandra_evt_time[0]
obs_end = chandra_evt_time[-1]
time_interval = [dt.strftime('%Y-%m-%dT%H:%M:%S') for dt in
gca_tools.datetime_range(obs_start,obs_end,timedelta(minutes=delta_mins))]
time_interval_isot = Time(time_interval, format='isot')
time_interval_cxo = time_interval_isot.cxcsec
time_int_plot = Time(time_interval_isot, format='iso', out_subfmt='date_hm')
#...and is converted into CXO seconds and a format usable for plotting
# if 'n', carry on as normal
else:
ph_data = ascii.read(str(folder_path) + r"\%s_selected_region.txt" % obs_id) # read in the selected region data and...
ph_time = ph_data['col3'] #... define the time column
# photon times are turned into an array and converted to datetime format
np_times = np.array(ph_time)
timeincxo = Time(np_times, format='cxcsec')#, in_subfmt='date_hm')
chandra_evt_time = timeincxo.iso
# Chandra time then converted to a plotable format
chandra_evt_time = Time(chandra_evt_time, format='iso', out_subfmt='date_hm')
plot_time = Time.to_datetime(chandra_evt_time)
print('')
print('The full observation will be analysed')
# ## Performing the coord transformation on the photons within the selected region
#
# The coordinate transformation is either performed on the full observation or over each defined time interval.
# In[28]:
cxo_ints = []
sup_props_list = []
sup_time_props_list = []
sup_lat_list = []
sup_lon_list = []
lonj_max = []
latj_max = []
sup_psf_max = []
ph_tevts = []
ph_xevts = []
ph_yevts = []
ph_chavts = []
emiss_evts = []
ph_cmlevts = []
psfmax =[]
# if the data are split into intervals of dt...
if time_int_decision == 'y':
for m in range(len(time_interval_cxo)-1):
interval = (time_interval_cxo[m], time_interval_cxo[m+1]) #...define the time interval between interval m and m+1
cxo_ints.append(interval)
# read in the data from the selected region
data = ascii.read(str(folder_path) + r"\%s_selected_region.txt" % obs_id)
# find the data within the specified time interval
int_indx = np.where((data['col3'] >= time_interval_cxo[m]) & (data['col3'] <= time_interval_cxo[m+1]))[0]
data_evts = data[int_indx]
# assign the parameters to variables
tevents = data_evts['col3']
xevents = data_evts['col1']
yevents = data_evts['col2']
chaevents = data_evts['col4']
# define the local time and central meridian latitude (CML) during the observation
jup_time = (eph_DOYFRAC_jup - evt_DOYFRAC)*86400.0 + tstart
jup_cml_0 = float(sub_obs_lon_jup[0]) + j_rotrate * (jup_time - jup_time[0])
interpfunc_cml = interpolate.interp1d(jup_time, jup_cml_0)
jup_cml = interpfunc_cml(tevents)
jup_cml = np.deg2rad(jup_cml % 360)
interpfunc_dist = interpolate.interp1d(jup_time, eph_jup['delta'].astype(float)*AU_2_km)
jup_dist = interpfunc_dist(tevents)
dist = sum(jup_dist)/len(jup_dist)
kmtoarc = np.rad2deg(1.0/dist)*3.6E3 # convert from km to arc
kmtopixels = kmtoarc/scale # convert from km to pixels using defined scale
rad_eq_0 = 71492.0 # jupiter radius of equator in km
rad_pole_0 = 66854.0 # jupiter radius of poles in km
ecc = np.sqrt(1.0-(rad_pole_0/rad_eq_0)**2) # oblateness of Jupiter
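# (worked value: np.sqrt(1.0 - (66854.0/71492.0)**2) ~= 0.354; strictly this is the first
#  eccentricity of the spheroid, whereas the geometric flattening (rad_eq_0 - rad_pole_0)/rad_eq_0 ~= 0.065)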
rad_eq = rad_eq_0 * kmtopixels
rad_pole = rad_pole_0 * kmtopixels # convert both radii from km -> pixels
alt0 = alt * kmtopixels # altitude at which we think emission occurs - agreed in Southampton Nov 15th 2017
# find sublat of Jupiter during each Chandra time interval
interpfunc_sublat = interpolate.interp1d(jup_time, (sub_obs_lat_jup.astype(float)))
jup_sublat = interpfunc_sublat(tevents)
# define the planetocentric S3 coordinates of Jupiter
phi1 = np.deg2rad(sum(jup_sublat)/len(jup_sublat))
nn1 = rad_eq/np.sqrt(1.0 - (ecc*np.sin(phi1))**2)
p = dist/rad_eq
phig = phi1 - np.arcsin(nn1 * ecc**2 * np.sin(phi1)*np.cos(phi1)/p/rad_eq)
h = p * rad_eq *np.cos(phig)/np.cos(phi1) - nn1
interpfunc_nppa = interpolate.interp1d(jup_time, (eph_jup['NPole_ang'].astype(float)))
jup_nppa = interpfunc_nppa(tevents)
gamma = np.deg2rad(sum(jup_nppa)/len(jup_nppa))
omega = 0.0
Del = 1.0
#define latitude and longitude grid for entire surface
lat = np.zeros((int(360) // int(Del))*(int(180) // int(Del) + int(1)))
lng = np.zeros((int(360) // int(Del))*(int(180) // int(Del) + int(1)))
j = np.arange(int(180) // int(Del) + int(1)) * int(Del)
for i in range (int(0), int(360)):# // int(Del) - int(1)):
lat[j * int(360) // int(Del) + i] = (j* int(Del) - int(90))
lng[j * int(360) // int(Del) + i] = (i* int(Del) - int(0))
# perform coordinate transformation from planetocentric -> planetographic (taking into account the oblateness of Jupiter
# when defining the surface features)
coord_transfo = gca_tools.ltln2xy(alt=alt0, re0=rad_eq_0, rp0=rad_pole_0, r=rad_eq, e=ecc, h=h, phi1=phi1, phig=phig, lambda0=0.0, p=p, d=dist, gamma=gamma,
omega=omega, latc=np.deg2rad(lat), lon=np.deg2rad(lng))
# Assign the corrected transformed position of the X-ray emission
xt = coord_transfo[0]
yt = coord_transfo[1]
cosc = coord_transfo[2]
condition = coord_transfo[3]
count = coord_transfo[4]
# Find latitude and longitude of the surface features
laton = lat[condition] + 90
lngon = lng[condition]
# Define the limb of Jupiter, to ensure only auroral photons are selected for analysis
cosmu = gca_tools.findcosmu(rad_eq, rad_pole, phi1, np.deg2rad(lat), np.deg2rad(lng))
limb = np.where(abs(cosmu) < 0.05)
# This next step creates the parameters used to plot what is measured on Jupiter. In the code, I define this as "props" (properties)
# which has units of counts/m^2. "timeprops" has units of seconds
# Creating 2D array of the properties and time properties
props = np.zeros((int(360) // int(Del), int(180) // int(Del) + int(1)))
timeprops = np.zeros((int(360) // int(Del), int(180) // int(Del) + int(1)))
num = len(tevents)
# define a Gaussian PSF for the instrument
psfn = np.pi*(fwhm / (2.0 * np.sqrt(np.log(2.0))))**2
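# (normalisation check: the PSF used below is psf(d) = exp(-(d/a)**2) with
#  a = fwhm/(2*sqrt(ln 2)), so psf(fwhm/2) = 1/2 as required, and its 2-D integral
#  over the plane is pi*a**2 = psfn, so dividing by psfn makes each photon's PSF
#  integrate to unity)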
# create a grid for the position of the properties
latx = np.zeros(num)
lonx = np.zeros(num)
lonj_max = []
latj_max = []
sup_psf_max = []
ph_tevts = []
ph_xevts = []
ph_yevts = []
ph_chavts = []
emiss_evts = []
ph_cmlevts = []
psfmax =[]
# For the entire surface of Jupiter, find the PSF (i.e. how much flux) at each point in the longitude and latitude grid
for k in range(0,num-1):
# convert (x,y) position to pixels
xpi = (xevents[k]/scale)
ypi = (yevents[k]/scale)
if xpi**2. + ypi**2 < (30.0/scale)**2:
cmlpi = (np.rad2deg(jup_cml[k]))#.astype(int)
xtj = xt[condition]
ytj = yt[condition]
latj = (laton.astype(int)) % 180
lonj = ((lngon + cmlpi.astype(int) + 360.0).astype(int)) % 360
dd = np.sqrt((xpi-xtj)**2 + (ypi-ytj)**2) * scale
psfdd = np.exp(-(dd/ (fwhm / (2.0 * np.sqrt(np.log(2.0)))))**2) / psfn # define PSF of instrument
psf_max_cond = np.where(psfdd == max(psfdd))[0] # finds the max PSF over each point in the grid
count_mx = np.count_nonzero(psf_max_cond)
if count_mx != 1: # ignore points where there are 2 cases of the same max PSF
continue
else:
props[lonj,latj] = props[lonj,latj] + psfdd # assign the 2D PSF to each point in the grid
emiss = np.array(np.rad2deg(np.cos(cosc[condition[psf_max_cond]]))) # find the emission angle from each max PSF
# record the corresponding photon data at each peak in the grid...
emiss_evts.append(emiss)
ph_cmlevts.append(cmlpi)
ph_tevts.append(tevents[k])
ph_xevts.append(xevents[k])
ph_yevts.append(yevents[k])
ph_chavts.append(chaevents[k])
psfmax.append(psfdd[psf_max_cond])
latj_max.append(latj[psf_max_cond])
lonj_max.append(lonj[psf_max_cond])
#... and save it as a text file
np.savetxt(str(folder_path) + r"\%s_photonlist_timeint%s.txt" % (obs_id,m+1),
np.c_[ph_tevts, ph_xevts, ph_yevts, ph_chavts, latj_max, lonj_max, ph_cmlevts, emiss_evts, psfmax], delimiter=',', header="t(s),x(arcsec),y(arcsec),PHA,lat (deg), SIII_lon (deg),CML (deg),emiss (deg),Max PSF")
# record the fluxes and positions of the max PSFs
sup_props_list.append(props)
sup_lat_list.append(np.concatenate(latj_max, axis=0))
sup_lon_list.append(np.concatenate(lonj_max, axis=0))
# effectively, do the same thing for the exposure time
obs_start_times = tevents.min()
obs_end_times = tevents.max()
interval = obs_end_times - obs_start_times
#print(interval)
if interval > 1000.0:
step = interval/100.0
elif interval > 100.0:
step = interval/10.0
else:
step = interval/2.0
#print(step)
time_vals = np.arange(round(int(interval/step)))*step + step/2 + obs_start_times
interpfunc_time_cml = interpolate.interp1d(jup_time,jup_cml_0)
time_cml = interpfunc_time_cml(time_vals)
for j in range(0, len(time_vals)):
timeprops[((lngon + time_cml[j].astype(int))%360).astype(int),laton.astype(int)] = timeprops[((lngon + time_cml[j].astype(int))%360).astype(int),laton.astype(int)] + step
sup_time_props_list.append(timeprops)
print('Coordinate transformation completed for interval #%s'%(m+1))
# if 'n', perform the coordinate transformation for entire observation
else:
# read in data from photons in selected region and assign to variables
ph_data = ascii.read(str(folder_path)+ r"\%s_selected_region.txt" % obs_id)
tevents = ph_data['col3']
xevents = ph_data['col1']
yevents = ph_data['col2']
chaevents = ph_data['col4']
"""CODING THE SIII COORD TRANSFORMATION - works the same as above for the full observation"""
# define the local time and central meridian latitude (CML) during the observation
jup_time = (eph_DOYFRAC_jup - evt_DOYFRAC)*86400.0 + tstart
jup_cml_0 = float(eph_jup['PDObsLon'][0]) + j_rotrate * (jup_time - jup_time[0])
interpfunc_cml = interpolate.interp1d(jup_time, jup_cml_0)
jup_cml = interpfunc_cml(tevents)
jup_cml = np.deg2rad(jup_cml % 360)
# find the distance between Jupiter and Chandra throughout the observation, convert to km
interpfunc_dist = interpolate.interp1d(jup_time, (eph_jup['delta'].astype(float))*AU_2_km)
jup_dist = interpfunc_dist(tevents)
dist = sum(jup_dist)/len(jup_dist)
kmtoarc = np.rad2deg(1.0/dist)*3.6E3 # convert from km to arc
kmtopixels = kmtoarc/scale # convert from km to pixels using defined scale
rad_eq_0 = 71492.0 # radius of equator in km
rad_pole_0 = 66854.0 # radius of poles in km
ecc = np.sqrt(1.0-(rad_pole_0/rad_eq_0)**2) # oblateness of Jupiter
rad_eq = rad_eq_0 * kmtopixels
rad_pole = rad_pole_0 * kmtopixels # convert both radii from km -> pixels
alt0 = alt * kmtopixels # altitude at which we think emission occurs - agreed in Southampton Nov 15th 2017
# find sublat of Jupiter during each Chandra time interval
interpfunc_sublat = interpolate.interp1d(jup_time, (sub_obs_lat_jup.astype(float)))
jup_sublat = interpfunc_sublat(tevents)
# define the planetocentric S3 coordinates of Jupiter
phi1 = np.deg2rad(sum(jup_sublat)/len(jup_sublat))
nn1 = rad_eq/np.sqrt(1.0 - (ecc*np.sin(phi1))**2)
p = dist/rad_eq
phig = phi1 - np.arcsin(nn1 * ecc**2 * np.sin(phi1)*np.cos(phi1)/p/rad_eq)
h = p * rad_eq *np.cos(phig)/np.cos(phi1) - nn1
interpfunc_nppa = interpolate.interp1d(jup_time, (eph_jup['NPole_ang'].astype(float)))
jup_nppa = interpfunc_nppa(tevents)
gamma = np.deg2rad(sum(jup_nppa)/len(jup_nppa))
omega = 0.0
Del = 1.0
#define latitude and longitude grid for entire surface
lat = np.zeros((int(360) // int(Del))*(int(180) // int(Del) + int(1)))
lng = np.zeros((int(360) // int(Del))*(int(180) // int(Del) + int(1)))
j = np.arange(int(180) // int(Del) + int(1)) * int(Del)
for i in range (int(0), int(360)):# // int(Del) - int(1)):
lat[j * int(360) // int(Del) + i] = (j* int(Del) - int(90))
lng[j * int(360) // int(Del) + i] = (i* int(Del) - int(0))
# perform coordinate transformation from planetocentric -> planetographic (taking into account the oblateness of Jupiter
# when defining the surface features)
coord_transfo = gca_tools.ltln2xy(alt=alt0, re0=rad_eq_0, rp0=rad_pole_0, r=rad_eq, e=ecc, h=h, phi1=phi1, phig=phig, lambda0=0.0, p=p, d=dist, gamma=gamma, omega=omega, latc=np.deg2rad(lat), lon=np.deg2rad(lng))
import numpy as np
def qubit(complx_a, complx_b):  # complx_a, complx_b: the two complex amplitudes of the qubit
#checking validity: |a|^2 + |b|^2 must equal 1 (allow for floating-point rounding)
if np.isclose(np.absolute(complx_a)**2 + np.absolute(complx_b)**2, 1.0):
#dot product
ket=np.array([[complx_a], [complx_b]])
print(f"Valid Qubit\n{ket}\n")
#Transposing and conjugating
bra= ket.conjugate().transpose()
print(f"The transpose of the conjugate of {ket} is\n{bra}\n")
else:
print(f"Invalid Qubit\n{complx_a}\n{complx_b}\n")
def standard_basis(n):
#creating an empty list
s_matrix = []
for bin_num in range(0, 2 ** n):
#creating a list for every number from 0 to 2^n - 1
temp = []
for digit in range(n):
temp.insert(0, int((bin_num >> digit) % 2 == 1))
s_matrix.append(temp)
#converting list to array
s_matrix = np.array(s_matrix)
print(f"The no of Qubits are\n{s_matrix}\n")
def measure_multiple(s_matrix):
conjugate_s_matrix=s_matrix.conjugate().transpose()
#dot product
m_multiple = np.dot(s_matrix, conjugate_s_matrix)
return m_multiple
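# Example usage sketch (assumed wiring of the three helpers above):
#   qubit(1, 0)                     # the |0> state: valid, prints its ket and bra
#   basis = standard_basis(2)       # rows are the binary labels 00, 01, 10, 11
#   gram = measure_multiple(basis)  # matrix of inner products between those rows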
import os.path
import random
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.dataset_utils import letterbox_image
# Random number generation, used for random data augmentation
def rand(a=0, b=1):
return np.random.rand() * (b - a) + a
# collate_fn argument for DataLoader: concatenates the np-array images and labels within a batch
# with batchsize=64, images has shape (192, 3, 224, 224)
def dataset_collate(batch):
images = []
labels = []
for img, label in batch:
images.append(img)
labels.append(label)
images1 = np.array(images)[:, 0, :, :, :]
images2 = np.array(images)[:, 1, :, :, :]
images3 = np.array(images)[:, 2, :, :, :]
images = np.concatenate([images1, images2, images3], 0)
labels1 = np.array(labels)[:, 0]
labels2 = np.array(labels)[:, 1]
labels3 = np.array(labels)[:, 2]
labels = np.concatenate([labels1, labels2, labels3], 0)
return images, labels
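# Sketch of how this collate function is typically wired up (illustrative; the
# DataLoader arguments below are assumptions, not taken from this repo):
#   from torch.utils.data import DataLoader
#   loader = DataLoader(train_dataset, batch_size=64, shuffle=True, collate_fn=dataset_collate)
#   images, labels = next(iter(loader))   # images: (3*64, 3, 224, 224), labels: (3*64,)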
class DogFaceDataset(Dataset):
# input_shape (H, W, C) (224, 224, 3)
def __init__(self, input_shape, dataset_path, num_train, num_classes):
super(DogFaceDataset, self).__init__()
self.dataset_path = dataset_path
self.image_height = input_shape[0]
self.image_width = input_shape[1]
self.channel = input_shape[2]
self.paths = []
self.labels = []
self.num_train = num_train
self.num_classes = num_classes
self.load_dataset()
def __len__(self):
return self.num_train
# Read cls_train.txt to obtain the image paths and labels
def load_dataset(self):
for path in self.dataset_path:
# in cls_train.txt, the class id comes before the ';' and the image path after it
path_split = path.split(";")
self.paths.append(path_split[1].split()[0])
self.labels.append(int(path_split[0]))
self.paths = np.array(self.paths, dtype=object)
self.labels = np.array(self.labels)
# Given a randomly chosen image path, preprocess and augment the image: scaling, flipping, rotation and colour adjustment
def get_random_data(self, image, input_shape, jitter=0.1, hue=.05, sat=1.3, val=1.3, flip_signal=True):
image = image.convert("RGB")
h, w = input_shape
rand_jit1 = rand(1 - jitter, 1 + jitter)
rand_jit2 = rand(1 - jitter, 1 + jitter)
new_ar = w / h * rand_jit1 / rand_jit2
# random scaling
scale = rand(0.9, 1.1)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
# random flip
flip = rand() < .5
if flip and flip_signal:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
dx = int(rand(0, w - nw))
dy = int(rand(0, h - nh))
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image = new_image
# random rotation
rotate = rand() < .5
if rotate:
angle = np.random.randint(-5, 5)
a, b = w / 2, h / 2
M = cv2.getRotationMatrix2D((a, b), angle, 1)
image = cv2.warpAffine(np.array(image), M, (w, h), borderValue=[128, 128, 128])
# random hue and saturation adjustment
hue = rand(-hue, hue)
sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue * 360
x[..., 0][x[..., 0] > 1] -= 1
x[..., 0][x[..., 0] < 0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:, :, 0] > 360, 0] = 360
x[:, :, 1:][x[:, :, 1:] > 1] = 1
x[x < 0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255
if self.channel == 1:
image_data = Image.fromarray(np.uint8(image_data)).convert("L") # convert from array back to a (grayscale) image
return image_data
def __getitem__(self, index):
# images holds the anchor, positive and negative samples (N=3, C, H, W)
images = np.zeros((3, self.channel, self.image_height, self.image_width))
labels = np.zeros(3)
# ------------------------------#
# First get two face images of the same dog, used as anchor and positive
# Randomly choose one dog and collect the paths of all of its photos
# ------------------------------#
c = random.randint(0, self.num_classes - 1)
selected_path = self.paths[self.labels[:] == c]
while len(selected_path) < 2:
c = random.randint(0, self.num_classes - 1)
selected_path = self.paths[self.labels[:] == c]
# ------------------------------#
# randomly choose two of them
# ------------------------------#
image_indexes = np.random.choice(range(0, len(selected_path)), 2)
# 1st image
image = Image.open(selected_path[image_indexes[0]])
image = self.get_random_data(image, [self.image_height, self.image_width])
image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
if self.channel == 1:
images[0, 0, :, :] = image
else:
images[0, :, :, :] = image
labels[0] = c
# 2nd image
image = Image.open(selected_path[image_indexes[1]])
image = self.get_random_data(image, [self.image_height, self.image_width])
image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
if self.channel == 1:
images[1, 0, :, :] = image
else:
images[1, :, :, :] = image
labels[1] = c
# ------------------------------#
# take one image of a different dog as the negative
# ------------------------------#
different_c = list(range(self.num_classes))
different_c.pop(c) # remove the already-chosen dog
different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
current_c = different_c[different_c_index[0]]
selected_path = self.paths[self.labels == current_c]
while len(selected_path) < 1:
different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
current_c = different_c[different_c_index[0]]
selected_path = self.paths[self.labels == current_c]
# ------------------------------#
# randomly choose one
# ------------------------------#
image_indexes = np.random.choice(range(0, len(selected_path)), 1)
image = Image.open(selected_path[image_indexes[0]])
image = self.get_random_data(image, [self.image_height, self.image_width])
image = np.transpose(np.asarray(image).astype(np.float64), [2, 0, 1]) / 255
if self.channel == 1:
images[2, 0, :, :] = image
else:
images[2, :, :, :] = image
labels[2] = current_c
return images, labels
# --------------
# For visualisation: returns three PIL Image objects
# --------------
def get_one_triplet(self):
c = random.randint(0, self.num_classes - 1)
selected_path = self.paths[self.labels[:] == c]
while len(selected_path) < 2:
c = random.randint(0, self.num_classes - 1)
selected_path = self.paths[self.labels[:] == c]
image_indexes = np.random.choice(range(0, len(selected_path)), 2)
anchor = Image.open(selected_path[image_indexes[0]])
positive = Image.open(selected_path[image_indexes[1]])
different_c = list(range(self.num_classes))
different_c.pop(c) # remove the already-chosen dog
different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
current_c = different_c[different_c_index[0]]
selected_path = self.paths[self.labels == current_c]
while len(selected_path) < 1:
different_c_index = np.random.choice(range(0, self.num_classes - 1), 1)
current_c = different_c[different_c_index[0]]
selected_path = self.paths[self.labels == current_c]
image_indexes = np.random.choice(range(0, len(selected_path)), 1)
negative = Image.open(selected_path[image_indexes[0]])
return anchor, positive, negative
# ------------------------------------------
# Each sample consists of two images and is either positive or negative.
# Positive samples use photos of the same dog; negative samples use different dogs.
# An is_same flag is also returned to distinguish positive from negative samples.
# ------------------------------------------
class EvalDataset(Dataset):
def __init__(self, eval_set_path, pairs_path, image_size):
'''
:param eval_set_path: path to the evaluation dataset
:param pairs_path: path to the txt file listing the evaluation pairs
:param image_size: image size
'''
super(EvalDataset, self).__init__()
self.image_shape = image_size
self.pairs_path = pairs_path
self.samples_list = self.get_samples(eval_set_path)
def get_random_pair(self):
index = random.randint(0, len(self.samples_list) - 1)
return self.samples_list[index]
def get_samples(self, eval_set_path, file_ext='jpg'):
# positive pair: pairs_list[i] = ['Name', '1', '4'], where 1 is the dog's first image and 4 its fourth
# negative pair: pairs_list[j] = ['Name_1', '1', 'Name_2', '2']
pairs_list = []
with open(self.pairs_path, 'r') as f:
for line in f.readlines()[1:]: # start from the second line; the first records the number of folds and the pos/neg pair counts per fold
pair = line.strip().split()
pairs_list.append(pair)
samples_list = [] # each element is a tuple holding the two image paths and the positive/negative flag is_same
for i in range(len(pairs_list)):
pair = pairs_list[i]
if len(pair) == 3: # positive pair
path_1st_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
path_2nd_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[2]) + '.' + file_ext)
is_same_dog = True
elif len(pair) == 4: # negative pair
path_1st_dog = os.path.join(eval_set_path, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
path_2nd_dog = os.path.join(eval_set_path, pair[2], pair[2] + '_' + '%04d' % int(pair[3]) + '.' + file_ext)
is_same_dog = False
if os.path.exists(path_1st_dog) and os.path.exists(path_2nd_dog): # Only add the pair if both paths exist
samples_list.append((path_1st_dog, path_2nd_dog, is_same_dog))
return samples_list
def __len__(self):
return len(self.samples_list)
def __getitem__(self, index):
(path_1st_dog, path_2nd_dog, is_same_dog) = self.samples_list[index]
# letterbox padding
img_1st_dog, img_2nd_dog = Image.open(path_1st_dog), Image.open(path_2nd_dog)
img_1st_dog = letterbox_image(img_1st_dog, [self.image_shape[1], self.image_shape[0]])
img_2nd_dog = letterbox_image(img_2nd_dog, [self.image_shape[1], self.image_shape[0]])
# normalisation
img_1st_dog, img_2nd_dog = np.array(img_1st_dog) / 255, np.array(img_2nd_dog) / 255
img_1st_dog = np.transpose(img_1st_dog, [2, 0, 1])
img_2nd_dog = np.transpose(img_2nd_dog, [2, 0, 1])  # (assumed: the second image gets the same channel-first transpose)
return img_1st_dog, img_2nd_dog, is_same_dog
import os
import quaternion
import numpy as np
from simulator import Simulation
import torch
import json
#Helper functions
vec_to_rot_matrix = lambda x: quaternion.as_rotation_matrix(quaternion.from_rotation_vector(x))
rot_matrix_to_vec = lambda y: quaternion.as_rotation_vector(quaternion.from_rotation_matrix(y))
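# Quick sanity sketch (illustrative): a rotation vector of pi/2 about z maps to the
# usual 90-degree rotation matrix, and rot_matrix_to_vec inverts it:
#   R = vec_to_rot_matrix(np.array([0., 0., np.pi/2]))   # ~[[0,-1,0],[1,0,0],[0,0,1]]
#   v = rot_matrix_to_vec(R)                              # ~[0., 0., 1.5708]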
def convert_blender_to_sim_pose(pose):
#Incoming pose converts body canonical frame to world canonical frame. We want a pose conversion from body
#sim frame to world sim frame.
world2sim = np.array([[1., 0., 0.],
[0., 0., 1.],
[0., -1., 0.]])
body2cam = world2sim
rot = pose[:3, :3] #Rotation from body to world canonical
trans = pose[:3, 3]
rot_c2s = world2sim @ rot @ body2cam.T
trans_sim = world2sim @ trans
print('Trans', trans)
print('Trans sim', trans_sim)
c2w = np.zeros((4, 4))
# (assumed completion: pack the rotation and translation into a 4x4 homogeneous pose)
c2w[:3, :3] = rot_c2s
c2w[:3, 3] = trans_sim
c2w[3, 3] = 1.
return c2w
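# Usage sketch (assumed): `pose_blender` would be a 4x4 body-to-world pose exported from
# Blender; the function returns the equivalent 4x4 pose expressed in the simulator frame:
#   pose_sim = convert_blender_to_sim_pose(pose_blender)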
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 18:28:38 2018
@author: ning
decoding the order effect of the encoding period - there is no order effect
predict performance using encoding period signals
"""
if __name__ == '__main__':
import os
os.chdir('D:/working_memory/working_memory/scripts')
from helper_functions import make_clf#,prediction_pipeline
import numpy as np
import mne
from matplotlib import pyplot as plt
from matplotlib import colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
from mne.decoding import get_coef
from sklearn import metrics
from scipy import stats as stats
import pickle
import re
working_dir = 'D:/working_memory/encode_delay_prode_RSA_preprocessing/'
saving_dir = 'D:/working_memory/delay performance/'
if not os.path.exists(saving_dir):
os.mkdir(saving_dir)
from glob import glob
from tqdm import tqdm
from sklearn.model_selection import (StratifiedKFold,permutation_test_score,cross_val_score,LeaveOneOut,
StratifiedShuffleSplit,cross_val_predict)
from sklearn.multiclass import OneVsOneClassifier,OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from mne.decoding import Vectorizer,LinearModel
from sklearn.utils import shuffle
from imblearn import under_sampling,ensemble,over_sampling
from imblearn.pipeline import make_pipeline
from mne.decoding import GeneralizingEstimator,cross_val_multiscore,SlidingEstimator
def ST(X):
return (X - X.mean(0))/X.std(0)
def Comnt_dict(x):
if x == 'Correct Rejection':
return 1
elif x == 'Hit':
return 1
elif x == 'False Alarm':
return 0
elif x == 'Miss':
return 0
else:
return 0
"""
condition = 'load5'
event_dir = 'D:\\working_memory\\EVT_load5\\*_probe.csv'
epoch_files = glob(os.path.join(working_dir,'*%s*-epo.fif'%(condition)))
event_files = glob(event_dir)
# stack the normalized within subject data together
#X = np.concatenate([ST(mne.read_epochs(e).resample(100,n_jobs=4).copy().crop(-6,0).get_data()[:,:,:600]) for e in epoch_files[:-4]],axis=0)
X=[]
labels = []
for e,e_ in zip(epoch_files[:-3],event_files[:-3]):
epochs = mne.read_epochs(e,preload=True)
epochs.resample(100,n_jobs=4)
xx = stats.zscore(epochs.copy().crop(-6,0).get_data()[:,:,:600],axis=0,ddof=1)
X.append(xx)
event = epochs.events
sub,load,day = re.findall('\d+',e)
# get the order of the stimulu
trial_orders = pd.read_excel('D:\\working_memory\\working_memory\\EEG Load 5 and 2 Design Fall 2015.xlsx',sheetname='EEG_Load5_WM',header=None)
trial_orders.columns = ['load','image1','image2','image3','image4','image5','target','probe']
trial_orders['target'] = 1- trial_orders['target']
trial_orders["row"] = np.arange(1,41)
original_events = pd.read_csv('D:\\working_memory\\signal detection\\suj%s_wml%s_day%s-photo_WM_TS'%(sub,load,day),sep='\t')
original_events = original_events[np.abs(original_events['TriNo']-80)<5]
if original_events.shape == (0,6):
original_events = pd.read_csv('D:\\working_memory\\signal detection\\suj%s_wml%s_day%s-photo_WM_TS'%(sub,load,day),sep='\t')
original_events = original_events[original_events['TriNo']==8]
event = pd.DataFrame(event,columns=['tms','e','Comnt'])
event['trial']=[np.where(original_events['TMS']==time_)[0][0]+1 for time_ in event['tms']]
working_trial_orders = trial_orders.iloc[event['trial']-1]
working_events = original_events.iloc[event['trial']-1]
labels_ = working_events['Comnt'].apply(Comnt_dict)
print(working_events['Comnt'])
labels.append(labels_)
labels_load5 = np.concatenate(labels).astype(int)
X_load5 = np.concatenate(X).astype(np.float32)
condition = 'load2'
event_dir = 'D:\\working_memory\\EVT\\*_probe.csv'
epoch_files = glob(os.path.join(working_dir,'*%s*-epo.fif'%(condition)))
event_files = glob(event_dir)
missing = np.hstack([np.arange(11,17),[18]])#missing 26 and 64
X = []
labels = []
for e, e_ in zip(epoch_files[:-3],event_files[:-3]):
#e = epoch_files[0] # debugging stuff
#e_= event_files[0] # debugging stuff
sub,load,day = re.findall('\d+',e)
epochs = mne.read_epochs(e,preload=True)
epochs.resample(100,n_jobs=4)
event = epochs.events
# # experiment setting
trial_orders = pd.read_excel('D:\\working_memory\\working_memory\\EEG Load 5 and 2 Design Fall 2015.xlsx',sheetname='EEG_Load2_WM',header=None)
trial_orders.columns = ['load','image1','image2','target','probe']
trial_orders['target'] = 1- trial_orders['target']
trial_orders["row"] = np.arange(1,101)
sub,load,day = re.findall('\d+',e)
original_events = pd.read_csv('D:\\working_memory\\signal detection\\suj%s_wml%s_day%s-photo_WM_TS'%(sub,load,day),sep='\t')
original_events = original_events[np.abs(original_events['TriNo']-80)<5]
if original_events.shape == (0,6):
original_events = pd.read_csv('D:\\working_memory\\signal detection\\suj%s_wml%s_day%s-photo_WM_TS'%(sub,load,day),sep='\t')
# print(original_events['Comnt'])
event = pd.DataFrame(event,columns=['tms','e','Comnt'])
try:
event['trial']=[np.where(original_events['TMS']==time_)[0][0]+1 for time_ in event['tms']]
working_trial_orders = trial_orders.iloc[event['trial']-1]
working_events = original_events.iloc[event['trial']-1]
labels_ = working_events['Comnt'].apply(Comnt_dict)
print(working_events['Comnt'],labels_)
labels.append(labels_)
X.append(stats.zscore(epochs.copy().crop(-6,0).get_data()[:,:,:600],axis=0,ddof=1))
except:
print(sub,load,day)
pass
# temp1 = []
# for time_ in event['tms']:
# if len(np.where(original_events['TMS']==time_)[0])>0:
# temp1.append(np.where(original_events['TMS']==time_)[0][0]+1)
# temp2 = []
# for time_ in original_events['TMS']:
# if len(np.where(event['tms']==time_)[0])>0:
# temp2.append(np.where(event['tms']==time_)[0][0]+1)
# temp=list(set(temp1) & set(temp2))
# event['trial']=temp
X_load2 = np.concatenate(X).astype(np.float32)
labels_load2 = np.concatenate(labels).astype(int)
data = {'load2':X_load2,'load5':X_load5,'l2':labels_load2,'l5':labels_load5}
pickle.dump(data,open(saving_dir+'delay_performance_25','wb'))
"""
data = pickle.load(open(saving_dir+'delay_performance_25','rb'))
X_load2,X_load5,labels_load2,labels_load5=data['load2'],data['load5'],data['l2'],data['l5']
cv = StratifiedShuffleSplit(n_splits=10,random_state=12345,test_size=.35)
vec = Vectorizer()
sm = under_sampling.RandomUnderSampler(random_state=12345)
est = SVC(kernel='linear',class_weight='balanced',random_state=12345)
clf = make_pipeline(vec,sm,est)
# fit in load 2
clf.fit(X_load2,labels_load2)
# test in load 5
print(metrics.classification_report(labels_load5,clf.predict(X_load5)))
print(metrics.roc_auc_score(labels_load5,clf.predict(X_load5)))
# fit in load 5
clf.fit(X_load5,labels_load5)
# test in load 2
print(metrics.classification_report(labels_load2,clf.predict(X_load2)))
print(metrics.roc_auc_score(labels_load2,clf.predict(X_load2)))
# train test in load 2
cv = StratifiedShuffleSplit(n_splits=10,random_state=12345,test_size=.35)
vec = Vectorizer()
sm = under_sampling.RandomUnderSampler(random_state=12345)
est = SVC(kernel='linear',class_weight='balanced',random_state=12345)
clf = make_pipeline(vec,sm,est)
scores_within_load2 = []
scores_cross_load5 = []
for train,test in cv.split(X_load2,labels_load2):
time_gen = GeneralizingEstimator(clf,scoring='roc_auc',n_jobs=4)
time_gen.fit(X_load2[train],labels_load2[train])
scores_=time_gen.score(X_load2[test],labels_load2[test])
scores__ = time_gen.score(X_load5,labels_load5)
scores_within_load2.append(scores_)
scores_cross_load5.append(scores__)
scores_within_load2 = np.array(scores_within_load2)
scores_cross_load5 = np.array(scores_cross_load5)
pickle.dump(scores_within_load2,open(saving_dir+'scores_within_load2.p','wb'))
pickle.dump(scores_cross_load5,open(saving_dir+'scores_cross_load5','wb'))
# train test in load 5
scores_within_load5 = []
scores_cross_load2 = []
for train,test in cv.split(X_load5,labels_load5):
time_gen = GeneralizingEstimator(clf,scoring='roc_auc',n_jobs=4)
time_gen.fit(X_load5[train],labels_load5[train])
scores_=time_gen.score(X_load5[test],labels_load5[test])
scores__ = time_gen.score(X_load2,labels_load2)
scores_within_load5.append(scores_)
scores_cross_load2.append(scores__)
scores_within_load5 = np.array(scores_within_load5)
scores_cross_load2 = np.array(scores_cross_load2)
pickle.dump(scores_within_load5,open(saving_dir+'scores_within_load5','wb'))
pickle.dump(scores_cross_load2,open(saving_dir+'scores_cross_load2','wb'))
###############################################################################################################################################
########################### plotting ######################################################################################################
###############################################################################################################################################
scores_within_load2 = pickle.load(open(saving_dir+'scores_within_load2.p','rb'))
scores_cross_load5 = pickle.load(open(saving_dir+'scores_cross_load5','rb'))
scores_within_load5 = pickle.load(open(saving_dir+'scores_within_load5','rb'))
scores_cross_load2 = pickle.load(open(saving_dir+'scores_cross_load2','rb'))
vmax = .57
fig,axes = plt.subplots(figsize=(25,20),nrows=2,ncols=2)
ax = axes[0][0] # train-test in load 2
im = ax.imshow(scores_within_load2.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r)
ax.set(ylabel='Train in load2\n\n\ntrain time (ms)',title='Test in Load 2',xticks=[])
ax = axes[0][1] # train in load 2 and test in load 5
im = ax.imshow(scores_cross_load5.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r)
ax.set(title='Test in load 5',yticks=[],xticks=[])
ax = axes[1][0] # train in load 5 and test in load 2
im = ax.imshow(scores_cross_load2.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r)
ax.set(ylabel='Train in load5\n\n\ntrain time (ms)',xlabel='test time (ms)',)
ax = axes[1][1]# train in load 5 and test in load 5
im = ax.imshow(scores_within_load5.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r)
ax.set(xlabel='test time (ms)',yticks=[])
fig.tight_layout()
fig.subplots_adjust(bottom=0.1, top=0.96, left=0.1, right=0.8,
wspace=0.02, hspace=0.02)
# add an axes, lower left corner in [0.83, 0.1] measured in figure coordinate with
# axes width 0.02 and height 0.8
cb_ax = fig.add_axes([.83, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im, cax=cb_ax)
fig.suptitle('Cross Condition Temporal Generalization Decoding\nCorrect VS. Incorrect')
fig.savefig(saving_dir+'Cross Condition Temporal Generalization Decoding_Correct VS Incorrect.png',dpi=600)
#### interpolate
vmax = .57
interpolate = 'hamming'
fig,axes = plt.subplots(figsize=(25,20),nrows=2,ncols=2)
ax = axes[0][0] # train-test in load 2
im = ax.imshow(scores_within_load2.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r,interpolation=interpolate)
ax.set(ylabel='Train in load2\n\n\ntrain time (ms)',title='Test in Load 2',xticks=[])
ax = axes[0][1] # train in load 2 and test in load 5
im = ax.imshow(scores_cross_load5.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r,interpolation=interpolate)
ax.set(title='Test in load 5',yticks=[],xticks=[])
ax = axes[1][0] # train in load 5 and test in load 2
im = ax.imshow(scores_cross_load2.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r,interpolation=interpolate)
ax.set(ylabel='Train in load5\n\n\ntrain time (ms)',xlabel='test time (ms)',)
ax = axes[1][1]# train in load 5 and test in load 5
im = ax.imshow(scores_within_load5.mean(0),origin='lower',aspect='auto',extent=[0,6000,0,6000],
vmin=.5,vmax=vmax,cmap=plt.cm.RdBu_r,interpolation=interpolate)
ax.set(xlabel='test time (ms)',yticks=[])
fig.tight_layout()
fig.subplots_adjust(bottom=0.1, top=0.96, left=0.1, right=0.8,
wspace=0.02, hspace=0.02)
# add an axes, lower left corner in [0.83, 0.1] measured in figure coordinate with
# axes width 0.02 and height 0.8
cb_ax = fig.add_axes([.83, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im, cax=cb_ax)
fig.suptitle('Cross Condition Temporal Generalization Decoding\nCorrect VS. Incorrect')
fig.savefig(saving_dir+'Cross Condition Temporal Generalization Decoding_Correct VS Incorrect (interpolate).png',dpi=600)
# temporal decoding of load 2
# temporal decoding of load 5
cv = StratifiedShuffleSplit(n_splits=10,random_state=12345,test_size=.35)
vec = Vectorizer()
sm = under_sampling.RandomUnderSampler(random_state=12345)
est = SVC(kernel='linear',class_weight='balanced',random_state=12345)
clf = make_pipeline(vec,sm,est)
time_dec = SlidingEstimator(clf,scoring='roc_auc')
sc2 = cross_val_multiscore(time_dec,X_load2,labels_load2,cv=cv,n_jobs=4)
time_dec = SlidingEstimator(clf,scoring='roc_auc')
sc5 = cross_val_multiscore(time_dec,X_load5,labels_load5,cv=cv,n_jobs=4)
fig, axes = plt.subplots(figsize=(20,12),nrows=2)
ax = axes[0]
ax.plot(np.linspace(0,6000,sc2.shape[1]),sc2.mean(0),color='k',alpha=1.,label='Decoding Scores')
ax.fill_between(np.linspace(0,6000,sc2.shape[1]),
sc2.mean(0)-sc2.std(0)/np.sqrt(10),
sc2.mean(0)+sc2.std(0)/np.sqrt(10),
color='red',alpha=.5,label='Standard Error')
ax.legend(loc='best')
ax.axhline(0.5,linestyle='--',color='blue',alpha=.7,label='Chance Level')
ax.set(xlabel='Time (ms)',ylabel='Classifi.Score (ROC AUC)',title='Temporal Decoding [load 2]',xlim=(0,6000))
ax = axes[1]
ax.plot(np.linspace(0,6000,sc5.shape[1]),sc5.mean(0),color='k',alpha=1.,label='Decoding Scores')
ax.fill_between(np.linspace(0,6000,sc5.shape[1]),
sc5.mean(0)-sc5.std(0)/np.sqrt(10),
sc5.mean(0)+sc5.std(0)/np.sqrt(10),
color='red',alpha=.5,label='Standard Error')
ax.legend(loc='best')
ax.axhline(0.5,linestyle='--',color='blue',alpha=.7,label='Chance Level')
ax.set(xlabel='Time (ms)',ylabel='Classifi.Score (ROC AUC)',title='Temporal Decoding [load 5]',xlim=(0,6000))
fig.savefig(saving_dir+'Temporal Decoding.png',dpi=600)
# patterns in load 2
patterns_2 = []
for train, test in tqdm(cv.split(X_load2,labels_load2),desc='load2'):
X = X_load2[train]
y = labels_load2[train]
clf = make_pipeline(vec,sm,LinearModel(est))
clfs = [make_pipeline(vec,sm,LinearModel(est)).fit(X[:,:,ii],y) for ii in range(X.shape[-1])]
patterns_ = [get_coef(clfs[ii],attr='patterns_',inverse_transform=True) for ii in range(X.shape[-1])]
patterns_2.append(np.array(patterns_))
# patterns in load 5
patterns_5 = []
for train, test in tqdm(cv.split(X_load5,labels_load5),desc='load5'):
X = X_load5[train]
y = labels_load5[train]
clf = make_pipeline(vec,sm,LinearModel(est))
clfs = [make_pipeline(vec,sm,LinearModel(est)).fit(X[:,:,ii],y) for ii in range(X.shape[-1])]
patterns_ = [get_coef(clfs[ii],attr='patterns_',inverse_transform=True) for ii in range(X.shape[-1])]
patterns_5.append(np.array(patterns_))
temp_ = mne.read_epochs('D:\\working_memory\\encode_delay_prode_RSA_preprocessing\\sub_11_load2_day2_encode_delay_probe-epo.fif',
preload=False)
info = temp_.info
patterns_2 = np.array(patterns_2)
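# Illustrative sketch (not part of the original script): assuming patterns_2 and
# patterns_5 hold arrays of shape (n_folds, n_times, n_channels), average the
# spatial patterns across folds and wrap them in an mne.EvokedArray (using the
# `info` loaded above) so the decoder topographies can be inspected over the
# 0-6000 ms window.
patterns_5 = np.array(patterns_5)
evoked_patterns_2 = mne.EvokedArray(patterns_2.mean(0).T, info, tmin=0.)
evoked_patterns_5 = mne.EvokedArray(patterns_5.mean(0).T, info, tmin=0.)
# e.g. evoked_patterns_2.plot_topomap(times=[1., 2., 4.]) to view a few time points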
#!/usr/bin/env python
'''
Uses VTK python to allow for editing point clouds associated with the contour
method. Full interaction requires a 3-button mouse and keyboard.
-------------------------------------------------------------------------------
Current mapping is as follows:
LMB - rotate about point cloud centroid.
MMB - pan
RMB - zoom/refresh window extents
1 - view 1, default, looks down z axis onto xy plane
2 - view 2, looks down x axis onto zy plane
3 - view 3, looks down y axis onto zx plane
r - enter/exit picking mode, LMB is used to generate a selection window. Exiting
picking mode will highlight selected points.
z - increase aspect ratio
x - decrease aspect ratio
c - return to default aspect
f - flip colors from white on dark to dark on white
i - save output to .png in current working directory
a - toggles axes
o - toggles outline (if present)
r - starts picking
-------------------------------------------------------------------------------
1.1 - Fixed array orientation, clipping issue, compass scaling and sped up writing output
Added ReadMask
1.2 - Fixed window handling, now exits cleanly
1.3 - Modified to run in Python 3.x, uses VTK keyboard interrupts to start picking, Qt button for this function has been commented out.
1.4 - Added the ability to 'level' incoming data based on AFRC input
1.5 - Added SVD analysis/transformations
1.6 - Added ability to read PC-DMIS csv files
1.7 - Added outline generation for unregistered point clouds & rotation of reference data
'''
__author__ = "<NAME>"
__version__ = "1.7"
__email__ = "<EMAIL>"
__status__ = "Experimental"
__copyright__ = "(c) <NAME>, 2014-2019"
import sys
import os.path
from pkg_resources import Requirement, resource_filename
import numpy as np
import scipy.io as sio
from scipy.spatial import Delaunay
import vtk
import vtk.util.numpy_support as vtk_to_numpy
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PyQt5 import QtCore, QtGui, QtWidgets
from pyCM.pyCMcommon import *
try:
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
except ImportError:
print('Package missing for outline processing.')
nosio=False
def mask_def(*args,**kwargs):
"""
Main function, builds qt interaction
"""
app = QtWidgets.QApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
spl_fname=resource_filename("pyCM","meta/pyCM_logo.png")
splash_pix = QtGui.QPixmap(spl_fname,'PNG')
splash = QtWidgets.QSplashScreen(splash_pix)
splash.setMask(splash_pix.mask())
splash.show()
app.processEvents()
window = pnt_interactor(None)
if len(args)==2:
pnt_interactor.get_input_data(window,args[0],args[1])
elif len(args)==1:
pnt_interactor.get_input_data(window,args[0],None)
else:
pnt_interactor.get_input_data(window,None,None)
window.show()
splash.finish(window)
window.iren.Initialize() # Need this line to actually show the render inside Qt
ret = app.exec_()
if sys.stdin.isatty() and not hasattr(sys,'ps1'):
sys.exit(ret)
else:
return window
class pt_main_window(object):
"""
Class to build qt interaction, including VTK widget
setupUi builds, initialize starts VTK widget
"""
def setupUi(self, MainWindow):
MainWindow.setWindowTitle(("pyCM - Point editor v%s" %__version__))
MainWindow.setWindowIcon(QtGui.QIcon(resource_filename("pyCM","meta/pyCM_icon.png")))
self.centralWidget = QtWidgets.QWidget(MainWindow)
if hasattr(MainWindow,'setCentralWidget'):
MainWindow.setCentralWidget(self.centralWidget)
else:
self.centralWidget=MainWindow
self.mainlayout=QtWidgets.QGridLayout(self.centralWidget)
self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)
mainUiBox = QtWidgets.QGridLayout()
self.vtkWidget.setMinimumSize(QtCore.QSize(1050, 600))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.vtkWidget.sizePolicy().hasHeightForWidth())
self.vtkWidget.setSizePolicy(sizePolicy)
self.statLabel=QtWidgets.QLabel("Idle")
self.statLabel.setWordWrap(True)
self.statLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.statLabel.setMinimumWidth(100)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.statLabel.sizePolicy().hasHeightForWidth())
self.statLabel.setSizePolicy(sizePolicy)
headFont=QtGui.QFont("Helvetica [Cronyx]",weight=QtGui.QFont.Bold)
#define buttons/widgets
self.reloadButton = QtWidgets.QPushButton('New profile')
scalingLabel=QtWidgets.QLabel("Active axis for scaling")
scalingLabel.setFont(headFont)
self.xsButton=QtWidgets.QRadioButton("x")
self.ysButton=QtWidgets.QRadioButton("y")
self.zsButton=QtWidgets.QRadioButton("z")
self.zsButton.setChecked(True)
self.scalingButtonGroup = QtWidgets.QButtonGroup()
self.scalingButtonGroup.addButton(self.xsButton)
self.scalingButtonGroup.addButton(self.ysButton)
self.scalingButtonGroup.addButton(self.zsButton)
self.scalingButtonGroup.setExclusive(True)
scaleBoxlayout = QtWidgets.QGridLayout()
scaleBoxlayout.addWidget(self.xsButton,1,1)
scaleBoxlayout.addWidget(self.ysButton,1,2)
scaleBoxlayout.addWidget(self.zsButton,1,3)
self.levelButton=QtWidgets.QRadioButton("Translate to mean z value")
rotateZlabel=QtWidgets.QLabel("Rotate")
self.rotateZ= QtWidgets.QDoubleSpinBox()
self.rotateZ.setToolTip('Degrees, positive is clockwise')
self.rotateZ.setValue(0)
self.rotateZ.setMaximum(180)
self.rotateZ.setMinimum(-180)
self.impose_rotation = QtWidgets.QPushButton('Apply')
self.impose_rotation.setToolTip('Manually impose rotation about z axis')
self.auto_rotate = QtWidgets.QPushButton('Auto')
self.auto_rotate.setToolTip('Align current bounding box to closest major axis by rotating about z axis')
zRotationBoxlayout = QtWidgets.QGridLayout()
zRotationBoxlayout.addWidget(rotateZlabel,1,1)
zRotationBoxlayout.addWidget(self.rotateZ,1,2)
zRotationBoxlayout.addWidget(self.impose_rotation,1,3)
zRotationBoxlayout.addWidget(self.auto_rotate,1,4)
svdLabel=QtWidgets.QLabel("Perform SVD reorientation")
svdLabel.setFont(headFont)
self.rxButton_pos=QtWidgets.QRadioButton("Rx+")
self.ryButton_pos=QtWidgets.QRadioButton("Ry+")
self.rxButton_neg=QtWidgets.QRadioButton("Rx-")
self.ryButton_neg=QtWidgets.QRadioButton("Ry-")
svdButtonGroup = QtWidgets.QButtonGroup()
svdButtonGroup.addButton(self.rxButton_pos)
svdButtonGroup.addButton(self.ryButton_pos)
svdButtonGroup.addButton(self.rxButton_neg)
svdButtonGroup.addButton(self.ryButton_neg)
svdButtonGroup.setExclusive(False)
svdBoxlayout = QtWidgets.QGridLayout()
svdBoxlayout.addWidget(self.rxButton_pos,1,1)
svdBoxlayout.addWidget(self.rxButton_neg,1,2)
svdBoxlayout.addWidget(self.ryButton_pos,1,3)
svdBoxlayout.addWidget(self.ryButton_neg,1,4)
self.reduce = QtWidgets.QSpinBox()
self.reduce.setValue(0)
self.reduce.setMinimum(0)
self.reduce.setMaximum(99)
self.reduce.setToolTip('Percentage of points to keep')
self.reduceButton = QtWidgets.QPushButton('Reduce')
self.apply_reduce = QtWidgets.QPushButton('Apply')
self.revertButton = QtWidgets.QPushButton('Undo all/reload')
self.reduceButton.setEnabled(False)
self.apply_reduce.setEnabled(False)
self.reduce.setEnabled(False)
horizLine1=QtWidgets.QFrame()
horizLine1.setFrameStyle(QtWidgets.QFrame.HLine)
pickLabel=QtWidgets.QLabel("Pick options")
pickLabel.setFont(headFont)
self.pickHelpLabel=QtWidgets.QLabel("Press R to activate")
self.pickActiveLabel=QtWidgets.QLabel("Pick active")
self.pickActiveLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
self.pickActiveLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.undoLastPickButton=QtWidgets.QPushButton('Undo last pick')
horizLine2=QtWidgets.QFrame()
horizLine2.setFrameStyle(QtWidgets.QFrame.HLine)
horizLine3=QtWidgets.QFrame()
horizLine3.setFrameStyle(QtWidgets.QFrame.HLine)
outlineGenLabel=QtWidgets.QLabel("Outline")
outlineGenLabel.setFont(headFont)
self.triLabel = QtWidgets.QLabel("Triangulated")
self.triLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
self.triLabel.setFont(QtGui.QFont("Helvetica",italic=True))
self.z_cutoff = QtWidgets.QDoubleSpinBox()
self.z_cutoff.setValue(0)
self.z_cutoff.setMinimum(-1000)
self.z_cutoff.setMaximum(1000)
self.z_cutoff.setDecimals(3)
self.impose_z_cutoff = QtWidgets.QPushButton('z cutoff')
self.impose_z_cutoff.setToolTip('Points greater than this z value will be ignored')
self.apply_z_cutoff = QtWidgets.QPushButton('Apply')
self.norm_cutoff = QtWidgets.QDoubleSpinBox()
self.norm_cutoff.setValue(0.9)
self.norm_cutoff.setDecimals(3)
self.norm_cutoff.setMinimum(0.5)
self.norm_cutoff.setMaximum(0.999999)
self.impose_norm_cutoff = QtWidgets.QPushButton('z norm cutoff')
self.impose_norm_cutoff.setToolTip('Points comprising triangulation having a z normal component greater than this value will be ignored')
self.apply_norm_cutoff = QtWidgets.QPushButton('Apply')
self.alpha_cutoff = QtWidgets.QDoubleSpinBox()
self.alpha_cutoff.setMinimum(0.000001)
self.alpha_cutoff.setMaximum(10000)
self.alpha_cutoff.setDecimals(3)
self.alpha_cutoff.setValue(0)
self.genOutlineButton = QtWidgets.QPushButton('Generate outline')
self.genOutlineButton.setToolTip('Generate outline from triangulation semiperimeters greater than this value')
self.accept_outline = QtWidgets.QPushButton('Accept')
outlineBoxlayout = QtWidgets.QGridLayout()
outlineBoxlayout.addWidget(outlineGenLabel,0,0,1,3)
outlineBoxlayout.addWidget(self.reduce,1,0,1,1)
outlineBoxlayout.addWidget(self.reduceButton,1,1,1,1)
outlineBoxlayout.addWidget(self.apply_reduce,1,2,1,1)
outlineBoxlayout.addWidget(self.z_cutoff,2,0,1,1)
outlineBoxlayout.addWidget(self.impose_z_cutoff,2,1,1,1)
outlineBoxlayout.addWidget(self.apply_z_cutoff,2,2,1,1)
outlineBoxlayout.addWidget(self.triLabel,3,0,1,3)
outlineBoxlayout.addWidget(self.norm_cutoff,4,0,1,1)
outlineBoxlayout.addWidget(self.impose_norm_cutoff,4,1,1,1)
outlineBoxlayout.addWidget(self.apply_norm_cutoff,4,2,1,1)
outlineBoxlayout.addWidget(self.alpha_cutoff,5,0,1,1)
outlineBoxlayout.addWidget(self.genOutlineButton,5,1,1,1)
outlineBoxlayout.addWidget(self.accept_outline,5,2,1,1)
outlineBoxlayout.addLayout(zRotationBoxlayout,6,0,1,3)
outputLabel=QtWidgets.QLabel("Write output")
outputLabel.setFont(headFont)
self.refButton=QtWidgets.QRadioButton("Reference")
self.floatButton=QtWidgets.QRadioButton("Floating")
self.refButton.setChecked(True)
self.writeButtonGroup = QtWidgets.QButtonGroup()
self.writeButtonGroup.addButton(self.floatButton)
self.writeButtonGroup.addButton(self.refButton)
self.writeButtonGroup.setExclusive(True)
self.writeButton=QtWidgets.QPushButton('Write')
horizLine4=QtWidgets.QFrame()
horizLine4.setFrameStyle(QtWidgets.QFrame.HLine)
showLabel=QtWidgets.QLabel("Load result")
showLabel.setFont(headFont)
self.showRefButton=QtWidgets.QRadioButton("Reference")
self.showRefButton.setChecked(True)
self.showFloatButton=QtWidgets.QRadioButton("Floating")
self.showButtonGroup = QtWidgets.QButtonGroup()
self.showButtonGroup.addButton(self.showFloatButton)
self.showButtonGroup.addButton(self.showRefButton)
self.showButtonGroup.setExclusive(True)
self.showButton=QtWidgets.QPushButton("View")
horizLine5=QtWidgets.QFrame()
horizLine5.setFrameStyle(QtWidgets.QFrame.HLine)
horizLine6=QtWidgets.QFrame()
horizLine6.setFrameStyle(QtWidgets.QFrame.HLine)
#add widgets to ui
mainUiBox.addWidget(self.reloadButton,0,0,1,2)
mainUiBox.addWidget(scalingLabel,1,0,1,2)
mainUiBox.addLayout(scaleBoxlayout,2,0,1,2)
mainUiBox.addWidget(self.levelButton,3,0,1,2)
mainUiBox.addWidget(horizLine2,4,0,1,2)
mainUiBox.addLayout(outlineBoxlayout,5,0,1,2)
mainUiBox.addWidget(horizLine3,6,0,1,2)
mainUiBox.addWidget(svdLabel,7,0,1,2)
mainUiBox.addLayout(svdBoxlayout,8,0,1,2)
mainUiBox.addWidget(horizLine1,9,0,1,2)
mainUiBox.addWidget(pickLabel,10,0,1,2)
mainUiBox.addWidget(self.pickHelpLabel,11,0,1,1)
mainUiBox.addWidget(self.pickActiveLabel,11,1,1,1)
mainUiBox.addWidget(self.undoLastPickButton,12,0,1,1)
mainUiBox.addWidget(self.revertButton,12,1,1,1)
mainUiBox.addWidget(horizLine4,14,0,1,2)
mainUiBox.addWidget(outputLabel,15,0,1,2)
mainUiBox.addWidget(self.refButton,16,0,1,1)
mainUiBox.addWidget(self.floatButton,16,1,1,1)
mainUiBox.addWidget(self.writeButton,17,0,1,2)
mainUiBox.addWidget(horizLine5,18,0,1,2)
mainUiBox.addWidget(showLabel,19,0,1,2)
mainUiBox.addWidget(self.showRefButton,20,0,1,1)
mainUiBox.addWidget(self.showFloatButton,20,1,1,1)
mainUiBox.addWidget(self.showButton,21,0,1,2)
mainUiBox.addWidget(horizLine6,22,0,1,2)
lvLayout=QtWidgets.QVBoxLayout()
lvLayout.addLayout(mainUiBox)
lvLayout.addStretch(1)
self.mainlayout.addWidget(self.vtkWidget,0,0,1,1)
self.mainlayout.addLayout(lvLayout,0,1,1,1)
self.mainlayout.addWidget(self.statLabel,1,0,1,2)
def initialize(self):
self.vtkWidget.start()
class pnt_interactor(QtWidgets.QWidget):
def __init__(self, parent):
super(pnt_interactor,self).__init__(parent)
self.ui = pt_main_window()
self.ui.setupUi(self)
self.ren = vtk.vtkRenderer()
self.ren.SetBackground(0.1, 0.2, 0.4)
self.ui.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
style=vtk.vtkInteractorStyleTrackballCamera()
style.AutoAdjustCameraClippingRangeOn()
self.iren.SetInteractorStyle(style)
self.ren.GetActiveCamera().ParallelProjectionOn()
self.cp=self.ren.GetActiveCamera().GetPosition()
self.fp=self.ren.GetActiveCamera().GetFocalPoint()
self.iren.AddObserver("KeyPressEvent", self.keypress)
self.PointSize=2
self.LineWidth=1
self.Zaspect=1.0
self.limits=np.empty(6)
self.picking=False
self.refWritten = False
self.floatWritten = False
self.ui.reloadButton.clicked.connect(lambda: self.get_input_data(None,None))
self.ui.undoLastPickButton.clicked.connect(lambda: self.undo_pick())
self.ui.writeButton.clicked.connect(lambda: self.write_new())
self.ui.revertButton.clicked.connect(lambda: self.undo_revert())
self.ui.reduceButton.clicked.connect(lambda: self.reduce_pnts(None,'show'))
self.ui.apply_reduce.clicked.connect(lambda: self.reduce_pnts(None,None))
self.ui.levelButton.clicked.connect(lambda: self.level_pnts())
self.ui.rxButton_pos.clicked.connect(lambda: self.svd('x',False))
self.ui.ryButton_pos.clicked.connect(lambda: self.svd('y',False))
self.ui.rxButton_neg.clicked.connect(lambda: self.svd('x',True))
self.ui.ryButton_neg.clicked.connect(lambda: self.svd('y',True))
self.ui.impose_z_cutoff.clicked.connect(lambda: self.reduce_pnts(self.ui.z_cutoff.value(),'show'))
self.ui.apply_z_cutoff.clicked.connect(lambda: self.reduce_pnts(self.ui.z_cutoff.value(),None))
self.ui.impose_norm_cutoff.clicked.connect(lambda: self.norm_cutoff('show'))
self.ui.apply_norm_cutoff.clicked.connect(lambda: self.norm_cutoff(None))
self.ui.genOutlineButton.clicked.connect(lambda: self.process_outline('show'))
self.ui.accept_outline.clicked.connect(lambda: self.process_outline(None))
self.ui.impose_rotation.clicked.connect(lambda: self.rotate(self.ui.rotateZ.value()))
self.ui.auto_rotate.clicked.connect(lambda: self.rotate(None))
self.ui.showButton.clicked.connect(lambda: self.load_mat())
self.ui.floatButton.clicked.connect(lambda: self.deactivate_rotation(True))
self.ui.refButton.clicked.connect(lambda: self.deactivate_rotation(False))
def deactivate_rotation(self,state):
if state:
self.ui.auto_rotate.setEnabled(False)
self.ui.impose_rotation.setEnabled(False)
else:
self.ui.auto_rotate.setEnabled(True)
self.ui.impose_rotation.setEnabled(True)
def rotate(self,value):
'''
If no outline available, inform user on first call
If an outline and a value provided, rotate both outline and surface
If an outline and no value (None) then align based on the *current* bounding box so that the longest side is aligned to the x axis.
'''
if not hasattr(self,'outlineActor'):
msg=QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Generate outline first.")
msg.setWindowTitle("pyCM Error")
msg.exec_()
return
#move outline to centroid
color=(70, 171, 176)
centroid = np.mean(self.Outline, axis = 0)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
self.Outline = self.Outline - centroid
self.rawPnts = self.rawPnts - centroid
if value == None:
#Calculate 2D corners
d=np.array([])
for j in range(len(self.Outline[:,0])):
d=np.append(d,
np.sqrt((self.limits[0]-self.Outline[j,0])**2+(self.limits[2]-self.Outline[j,1])**2)
)
ind=np.where(d==np.amin(d))[0][0] #to avoid making ind an array
#reorder the points so that ind is first
self.Outline=np.vstack((self.Outline[ind::,:],self.Outline[0:ind+1,:]))
c_target=np.array([
[self.limits[0],self.limits[3]], #xmin,ymax
[self.limits[1],self.limits[3]], #xmax,ymax
[self.limits[1],self.limits[2]] #xmax,ymin
])
ind=np.array([])
for i in c_target:
d=np.array([])
for j in range(len(self.Outline[:,0])):
d=np.append(d,
np.sqrt((i[0]-self.Outline[j,0])**2+(i[1]-self.Outline[j,1])**2)
)
ind=np.append(ind,np.where(d==np.amin(d)))
corners = self.Outline[np.sort(np.append(ind,0)).astype(int),:]
#calculate side lengths - follow standard 2D element face numbering
s1 = corners[1,:] - corners[0,:]
s2 = corners[2,:] - corners[1,:]
s3 = corners[3,:] - corners[2,:]
s4 = corners[0,:] - corners[3,:]
s = np.vstack((s1,s2,s3,s4))
mag = np.sqrt((s*s).sum(axis=1))
#find u (x axis, longest)
u = s[mag == np.amax(mag),:][0]
u = u/np.linalg.norm(u)
#v vector will be the cross product of u and z axis
v = np.cross(u,[0,0,1])
#normalize
v = v/np.linalg.norm(v)
#make rotation matrix
R = np.array([[u[0],v[0], 0],[u[1],v[1], 0],[0,0,1]] )
else:
a=np.deg2rad(float(-value)) #negative for clockwise
R = np.identity(3)
R[0:2,0:2]=np.array([[np.cos(a),-np.sin(a)],[np.sin(a),np.cos(a)]])
self.Outline = R @ self.Outline.T
self.Outline = self.Outline.T + centroid
self.rawPnts = R @ self.rawPnts.T
self.rawPnts = self.rawPnts.T + centroid
#update both outline and actors
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array(color)/float(255)),self.PointSize)
#modify point coloration based on mask
#find points to be painted red
localind=np.asarray(range(len(self.bool_pnt)))
localind=localind[np.where(np.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
#get limits
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.pointActor.SetScale(s)
# self.outlineActor.SetScale(s)
self.pointActor.Modified()
self.outlineActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def svd(self,dir,reverse):
'''
Moves the point cloud and outline to the centroid of the point cloud, finds the SVD difference between the X & Y axes of the masked point cloud, applies the transformation, and then moves them back to the starting position.
'''
color=(70, 171, 176)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
#then move all points to have centroid at x,y=0
#get translation vector
t=np.mean(self.rawPnts,axis=0)
RP=self.rawPnts
RP[:,0]=RP[:,0]-t[0]
RP[:,1]=RP[:,1]-t[1]
RP[:,2]=RP[:,2]-t[2]
OP=self.Outline
OP[:,0]=OP[:,0]-t[0]
OP[:,1]=OP[:,1]-t[1]
OP[:,2]=OP[:,2]-t[2]
#debug
# _,_,vh = np.linalg.svd(RP) #vh is transpose from MATLAB's svd, returns normalised vectors
# #rows of vh are orthnormal vectors
# # print('X:',vh[0,:] / np.linalg.norm(vh[0,:]))
# # print('Y:',vh[1,:] / np.linalg.norm(vh[1,:]))
# # print('Z:',vh[2,:] / np.linalg.norm(vh[2,:]))
# #handles the case if the dataset is net convex vs. concave
# if vh[2,-1]<0:
# c=np.array([0,0,-1])
# else:
# c=np.array([0,0,1])
# vh_y_norm = np.array([vh[2,0],0,vh[2,2]]) / np.linalg.norm(np.array([vh[2,0],0,vh[2,2]])) #xz plane projection
# vh_x_norm = np.array([0,vh[2,1],vh[2,2]]) / np.linalg.norm(np.array([0,vh[2,1],vh[2,2]])) #yz plane projection
# #solve for angle, update console
# a_y=np.arccos(np.clip(np.dot(vh_y_norm,c), -1.0, 1.0))
# a_x=np.arccos(np.clip(np.dot(vh_x_norm,c), -1.0, 1.0))
# print('SVD difference about X and Y axis in degrees prior to transform:\n',a_x*57.3,a_y*57.3)
# Ry=np.matrix([[np.cos(-a_y),0,np.sin(-a_y)],[0,1,0],[-np.sin(-a_y),0,np.cos(-a_y)]])
# Rx=np.matrix([[1,0,0],[0,np.cos(-a_x),-np.sin(-a_x)],[0,np.sin(-a_x),np.cos(-a_x)]])
#debug
# if hasattr(self,'svd_arrow_actor'):
# self.ren.RemoveActor(self.svd_arrow_actor)
# self.ren.RemoveActor(self.ref1_arrow_actor)
# self.ren.RemoveActor(self.ref2_arrow_actor)
#arrow size is 10% max size of domain
# asize=np.maximum(self.limits[1]-self.limits[0],self.limits[3]-self.limits[2])*0.10
# self.svd_arrow_actor=draw_arrow(t,asize,-vh[2,:],self.ren,False,(1,0,0))
# self.ref1_arrow_actor=draw_arrow(t,asize,-vh[0,:],self.ren,False,(0,1,0)) #xaxis, green
# self.ref2_arrow_actor=draw_arrow(t,asize,-vh[1,:],self.ren,False,(0,0,3)) #yaxis, blue
#find rotation and pickup which rotation to apply based on masked points
print('Before SVD:')
Rx0,Ry0=get_svd_rotation_matrix(RP[self.bool_pnt,:])
if reverse:
Rx0,Ry0=np.linalg.inv(Rx0),np.linalg.inv(Ry0)
if dir == 'y':
RP = Ry0*RP.T
OP = Ry0*OP.T
else:
RP = Rx0*RP.T
OP = Rx0*OP.T
RP = RP.T
OP = OP.T
#check rotation
print('After SVD:')
Rx1,Ry1=get_svd_rotation_matrix(RP[self.bool_pnt,:])
# #add translation back on
RP[:,0]=RP[:,0]+t[0]
RP[:,1]=RP[:,1]+t[1]
RP[:,2]=RP[:,2]+t[2]
OP[:,0]=OP[:,0]+t[0]
OP[:,1]=OP[:,1]+t[1]
OP[:,2]=OP[:,2]+t[2]
#update status UI
if np.allclose(Rx1,np.eye(3)) and np.allclose(Ry1,np.eye(3)):
#returned identity matrix and therefore 'aligned'
self.ui.statLabel.setText("SVD completed. See console for results.")
#update everything
self.rawPnts = np.asarray(RP)
self.Outline = np.asarray(OP)
#update both outline and actors
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
#modify point coloration based on mask
#find points to be painted red
localind=np.asarray(range(len(self.bool_pnt)))
localind=localind[np.where(np.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
s,nl,axs=self.get_scale()
self.pointActor.SetScale(s)
self.outlineActor.SetScale(s)
self.pointActor.Modified()
self.outlineActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def undo_revert(self):
'''
Reloads all data based on filec & filep (if it exists), will re-initialize data read in from results file to be unmasked.
'''
try:
if self.filep == 'Not applicable':
self.get_input_data(self.filec,None)
else:
self.get_input_data(self.filep,self.filec)
self.unsaved_changes=True
except: #its been loaded from an existing results file
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"Existing mask of profile will be lost, continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
#flip all values in bool_pnt & update color
localind=np.asarray(range(len(self.bool_pnt)))
localind=localind[np.where(np.logical_not(self.bool_pnt))]
for i in localind:
#show them as being unmasked
self.colors.SetTuple(i,(70, 171, 176))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
#re-initialise the mask
self.bool_pnt=np.ones(self.bool_pnt.shape,dtype='bool')
#set flag on ui to show that data has been modified
self.unsaved_changes=True
self.manage_tri()
def level_pnts(self):
'''
Translates outline and profile by the mean of z so that scaling occurs about 0.
'''
color=(70, 171, 176)
self.ren.RemoveActor(self.pointActor)
self.ren.RemoveActor(self.outlineActor)
#adjust to z mean of outline
self.Outline[:,2]=self.Outline[:,2]-np.mean(self.Outline[:,2])
#adjust to z mean of point cloud
self.rawPnts[:,2]=self.rawPnts[:,2]-np.mean(self.rawPnts[:,2])
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array(color)/float(255)),self.PointSize)
#get limits
try:
self.limits = get_limits(np.vstack((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add axes
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_axis(self.ren,self.limits,[1,1,1])
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.ren.AddActor(self.pointActor)
self.ren.AddActor(self.outlineActor)
self.pointActor.Modified()
self.outlineActor.Modified()
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def reduce_pnts(self, z_value, state):
'''
Reduces or shows the number of points to be permanently discarded:
If no z_value: according to the percentage of what's in the spinbox
0 means no reduction; 10 means keep 90 percent of the points.
If z_value: according to what's in the spin box
If state is 'show' then paint them coral, if state is None, remove them.
'''
localind=np.asarray(range(len(self.rawPnts)))
if z_value is None:
red = (100-float(self.ui.reduce.value()))/100
ind = np.linspace(0, len(self.rawPnts[self.bool_pnt,:])-1, num=int(red*len(self.rawPnts[self.bool_pnt,:])))
ind = localind[ind.astype(int)]
else:
ind=self.rawPnts[self.bool_pnt,-1] > z_value
if state == None: #remove points and redraw
self.rawPnts = self.rawPnts[ind,:]
self.bool_pnt = self.bool_pnt[ind]
self.ren.RemoveActor(self.pointActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,(70, 171, 176),self.PointSize)
self.bool_pnt=np.ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.manage_tri()
#find points to be painted red
localind=np.asarray(range(len(self.bool_pnt)))
localind=localind[np.where(np.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.pointActor.SetScale(s)
self.pointActor.Modified()
self.ren.RemoveActor(self.axisActor)
self.axisActor = add_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
elif state == 'show':
for i in localind: #reset all points, then highlight those that will disappear
self.colors.SetTuple(i,(70, 171, 176))
for i in localind[np.invert(ind)]:
self.colors.SetTuple(i,(255,127,80))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
def manage_tri(self):
#debug
# print('Deleting triangulation.')
self.ui.triLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }")
if hasattr(self,'tri'):
del self.tri
del self.tri_normals
def load_mat(self):
"""
Loads the content of a *.mat file pertaining to this particular step
"""
color=(70, 171, 176)
if self.ui.showRefButton.isChecked():
str_d='ref'
if self.ui.showFloatButton.isChecked():
str_d='float'
if hasattr(self,'pointActor'):
self.ren.RemoveActor(self.pointActor)
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if not hasattr(self,'fileo'):
self.fileo, _, =get_file('*.mat')
if hasattr(self,'fileo'): #check variables
if self.fileo == None:
return
mat_contents = sio.loadmat(self.fileo)
#check contents
if 'ref' in mat_contents:
self.ui.refButton.setStyleSheet("background-color :rgb(77, 209, 97);")
self.refWritten = True
if 'float' in mat_contents:
self.ui.floatButton.setStyleSheet("background-color :rgb(77, 209, 97);")
self.floatWritten = True
try:
self.rawPnts=mat_contents[str_d]['rawPnts'][0][0]
self.bool_pnt=mat_contents[str_d]['mask'][0][0][0]
self.Outline=mat_contents[str_d]['x_out'][0][0]
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array(color)/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
#find points to be painted red
localind=np.asarray(range(len(self.bool_pnt)))
localind=localind[np.where(np.logical_not(self.bool_pnt))]
for i in localind:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ren.AddActor(self.pointActor)
except:
QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"The %s dataset could not be loaded."%(str_d))
#get limits
try:
self.limits = get_limits(np.vstack((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add axes
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_axis(self.ren,self.limits,[1,1,1])
#update
self.manage_tri()
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def write_new(self):
if self.ui.refButton.isChecked():
str_d='ref'
self.refWritten=True
if self.ui.floatButton.isChecked():
str_d='float'
self.floatWritten=True
if not hasattr(self,'fileo'):
self.fileo, _, = get_open_file('*.mat',os.getcwd())
if self.fileo:
x_o=self.rawPnts[self.bool_pnt,0]
y_o=self.rawPnts[self.bool_pnt,1]
z_o=self.rawPnts[self.bool_pnt,2]
sio.savemat(self.fileo,{str_d : {'x_out':self.Outline,'rawPnts':self.rawPnts,'mask': self.bool_pnt,'x':x_o,'y':y_o,'z':z_o,'fname':self.filec}})
if self.ui.refButton.isChecked():
self.ui.refButton.setStyleSheet("background-color :rgb(77, 209, 97);")
if self.ui.floatButton.isChecked():
self.ui.floatButton.setStyleSheet("background-color : rgb(77, 209, 97);")
#reset flag on ui to show that data has been modified
else:
if not self.fileo:
self.fileo, _, = get_open_file('*.mat',os.getcwd())
mat_vars=sio.whosmat(self.fileo)
if str_d in [item for sublist in mat_vars for item in sublist]: #tell the user that they might overwrite their data
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"There is already data for this step - doing this will invalidate all further existing analysis steps. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
mat_contents=sio.loadmat(self.fileo)
x_o=self.rawPnts[self.bool_pnt,0]
y_o=self.rawPnts[self.bool_pnt,1]
z_o=self.rawPnts[self.bool_pnt,2]
new={str_d : {'x_out':self.Outline,'rawPnts':self.rawPnts,'mask': self.bool_pnt,'x':x_o,'y':y_o,'z':z_o}}
mat_contents.update(new) #update the dictionary
if self.ui.refButton.isChecked():
self.ui.refButton.setStyleSheet("background-color : rgb(77, 209, 97);")
if self.ui.floatButton.isChecked():
self.ui.floatButton.setStyleSheet("background-color : rgb(77, 209, 97);")
sio.savemat(self.fileo,mat_contents)
#update status
self.ui.statLabel.setText("Wrote %s data to output file %s."%(str_d,self.fileo))
#check on write
if self.refWritten==True and self.floatWritten==True:
self.unsaved_changes=False
def undo_pick(self):
if hasattr(self,"lastSelectedIds"):
for i in range(self.lastSelectedIds.GetNumberOfTuples()):
#turn them from red to starting color
self.colors.SetTuple(self.lastSelectedIds.GetValue(i),(70, 171, 176))
self.bool_pnt[self.lastSelectedIds.GetValue(i)]=True
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
else:
self.ui.statLabel.setText("No picked selection to revert.")
self.manage_tri()
def picker_callback(self,obj,event):
extract = vtk.vtkExtractSelectedFrustum()
fPlanes=obj.GetFrustum() #collection of planes based on unscaled display
#scale frustum to account for the zaspect
scaledPlanes=vtk.vtkPlanes()
scaledNormals=vtk.vtkDoubleArray()
scaledNormals.SetNumberOfComponents(3)
scaledNormals.SetNumberOfTuples(6)
scaledOrigins=vtk.vtkPoints()
for j in range(6):
i=fPlanes.GetPlane(j)
k=i.GetOrigin()
q=i.GetNormal()
scaledOrigins.InsertNextPoint(k[0],k[1],k[2]/float(self.Zaspect))
scaledNormals.SetTuple(j,(q[0],q[1],q[2]*float(self.Zaspect)))
scaledPlanes.SetNormals(scaledNormals)
scaledPlanes.SetPoints(scaledOrigins)
extract.SetFrustum(scaledPlanes)
extract.SetInputData(self.vtkPntsPolyData)
extract.Update()
extracted = extract.GetOutput()
ids = vtk.vtkIdTypeArray()
ids = extracted.GetPointData().GetArray("vtkOriginalPointIds")
if ids:
#store them in an array for an undo operation
self.lastSelectedIds=ids
for i in range(ids.GetNumberOfTuples()):
#turn them red
self.colors.SetTuple(ids.GetValue(i),(255,0,0))
self.bool_pnt[ids.GetValue(i)]=False
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
#set flag on ui to show that data has been modified
self.unsaved_changes=True
self.manage_tri()
def show_picking(self):
#Updates when the 'r' button is pressed to provide a link between VTK & Qt hooks
if self.picking == True:
self.ui.pickActiveLabel.setStyleSheet("QLabel { background-color : red; color : white; }");
else:
self.ui.pickActiveLabel.setStyleSheet("QLabel { background-color : gray; color : darkGray; }");
def start_pick(self):
#Required to change interactor
style=vtk.vtkInteractorStyleRubberBandPick()
self.iren.SetInteractorStyle(style)
picker = vtk.vtkAreaPicker()
self.iren.SetPicker(picker)
picker.AddObserver("EndPickEvent", self.picker_callback)
def get_input_data(self,filep,filec):
'''
Read in a variety of different potential types of data, either a pair of files (outline/perimeter followed by point cloud) or an unregistered point cloud that requires outline processing. Can call activate_outline & generate a triangulation as required if unregistered.
'''
self.registered = True #whether or not an outline has been generated
self.activate_outline(False)
color=(70, 171, 176)
if hasattr(self,'pointActor'):
self.ren.RemoveActor(self.pointActor)
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if hasattr(self,'rActor'):
self.ren.RemoveActor(self.rActor)
if hasattr(self,'fActor'):
self.ren.RemoveActor(self.fActor)
self.ui.levelButton.setChecked(False)
if filep is None:
filep,startdir=get_file('*.txt')
if filep is None:
return
if not(os.path.isfile(filep)):
print('Data file invalid.')
return
#test if filep returned a dat file
_, ext = os.path.splitext(filep)
if ext.lower() == '.dat':
#then this is a nanofocus type file
self.registered = False
#return focus
self.ui.vtkWidget.setFocus()
if filec is None and self.registered:
filec,startdir=get_file('*.txt',startdir) #get filec
#catch if cancel was pressed on file dialog or if a bad path was specified
if filec != None and not(os.path.isfile(filec)) and self.registered:
if hasattr(self,'vtkPntsPolyData'):
print('No file selected, retaining current data.')
else:
return
print('Loading data . . .')
if filep != None: #because filediag can be cancelled
#identify route based on delimiter and registration
if self.registered:
with open(filep) as f:
first_line = f.readline()
if ',' in first_line: #NAMRC formatted file
self.Outline=np.genfromtxt(filep,delimiter=",")
print('NAMRC outline data type recognised.')
else:
self.Outline=np.genfromtxt(filep)
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array(color)/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.filep=filep
else:
self.rawPnts=np.genfromtxt(filep,skip_header=1) / 1e3 #convert from micron to mm
self.filep = 'Not applicable'
self.filec = filep #to eliminate getting another file
#activate outline processing
self.activate_outline(True)
if self.registered:
_, ext = os.path.splitext(filec)
if ext.lower() == '.txt':
self.rawPnts=np.genfromtxt(filec)
elif ext.lower() == '.csv':
self.rawPnts=np.genfromtxt(filec,skip_header=1,delimiter=',',usecols=(0,1,2))
self.filec=filec
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,color,self.PointSize)
self.bool_pnt=np.ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
print('Data read.')
#get limits
try:
self.limits = get_limits(np.vstack((self.Outline,self.rawPnts)))
except:
self.limits = get_limits(self.rawPnts)
#add axes
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_axis(self.ren,self.limits,[1,1,1])
#update status
self.ui.statLabel.setText("Current perimeter file:%s Current point cloud file:%s"%(self.filep,self.filec))
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def activate_outline(self,state):
'''
(De)Activates outline processing
'''
if state:
self.ui.z_cutoff.setEnabled(True)
self.ui.impose_z_cutoff.setEnabled(True)
self.ui.norm_cutoff.setEnabled(True)
self.ui.impose_norm_cutoff.setEnabled(True)
self.ui.alpha_cutoff.setEnabled(True)
self.ui.genOutlineButton.setEnabled(True)
self.ui.apply_z_cutoff.setEnabled(True)
self.ui.apply_norm_cutoff.setEnabled(True)
self.ui.accept_outline.setEnabled(True)
self.ui.reduceButton.setEnabled(True)
self.ui.apply_reduce.setEnabled(True)
self.ui.reduce.setEnabled(True)
else:
self.ui.z_cutoff.setEnabled(False)
self.ui.impose_z_cutoff.setEnabled(False)
self.ui.norm_cutoff.setEnabled(False)
self.ui.impose_norm_cutoff.setEnabled(False)
self.ui.alpha_cutoff.setEnabled(False)
self.ui.genOutlineButton.setEnabled(False)
self.ui.apply_z_cutoff.setEnabled(False)
self.ui.apply_norm_cutoff.setEnabled(False)
self.ui.accept_outline.setEnabled(False)
self.ui.reduceButton.setEnabled(False)
self.ui.apply_reduce.setEnabled(False)
self.ui.reduce.setEnabled(False)
def norm_cutoff(self, state):
'''
Creates a triangulation if there isn't one already. Filters this based on the normals of each triangle, and either paints points belonging to them coral, or removes them and updates rawPnts and bool_pnt as necessary, depending on state. Similar operation to reduce_pnts.
'''
if not hasattr(self,'tri'):
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"No triangulation of points recognised. This operation requires one and may take some time. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
print('Calculating Delaunay . . .')
self.tri = Delaunay(self.rawPnts[:,0:2])
print('Delaunay complete')
print('Calculating triangulation normals . . .')
self.tri_normals, dist = normal_z(self.rawPnts,self.tri)
print('Normal calculation complete')
self.ui.triLabel.setStyleSheet("background-color :rgb(77, 209, 97);")
self.ui.alpha_cutoff.setValue(4*dist)
localind=np.asarray(range(len(self.rawPnts)))
filt_tri = self.tri_normals > self.ui.norm_cutoff.value()
ind = np.unique(self.tri.simplices[filt_tri,:].copy().flatten())
if state == None:
self.rawPnts = self.rawPnts[ind,:]
self.bool_pnt = self.bool_pnt[ind]
self.ren.RemoveActor(self.pointActor)
self.vtkPntsPolyData, \
self.pointActor, self.colors = \
gen_point_cloud(self.rawPnts,(70, 171, 176),self.PointSize)
self.bool_pnt=np.ones(len(self.rawPnts), dtype=bool)
self.ren.AddActor(self.pointActor)
self.limits = get_limits(self.rawPnts)
s,nl,axs=self.get_scale()
self.manage_tri()
for i in localind[np.where(np.logical_not(self.bool_pnt))]:
#turn them red
self.colors.SetTuple(i,(255,0,0))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.pointActor.SetScale(s)
self.pointActor.Modified()
try: self.ren.RemoveActor(self.axisActor)
except: pass
self.axisActor = add_axis(self.ren,nl,axs)
#update
self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
elif state == 'show':
for i in localind:#turn everything that will change blue
self.colors.SetTuple(i,(70, 171, 176))
#turn everything that will disappear coral
for i in np.setdiff1d(localind,localind[ind]):
self.colors.SetTuple(i,(255,127,80))
self.vtkPntsPolyData.GetPointData().SetScalars(self.colors)
self.vtkPntsPolyData.Modified()
self.ui.vtkWidget.update()
def process_outline(self,state):
'''
Based on current *masked* rawPnts, call the outline processor in pyCommon and update the interactor to either show the resulting outline, or to impose it permanently, writing the necessary data objects.
'''
if not hasattr(self,'tri'):
ret=QtWidgets.QMessageBox.warning(self, "pyCM Warning", \
"No triangulation of points recognised. This operation requires one and may take some time. Continue?", \
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if ret == QtWidgets.QMessageBox.No: #don't overwrite
return
else:
print('Calculating Delaunay . . .')
self.tri = Delaunay(self.rawPnts[self.bool_pnt][:,0:2])
print('Delaunay complete')
self.tri_normals,dist = normal_z(self.rawPnts,self.tri)
self.ui.triLabel.setStyleSheet("background-color :rgb(77, 209, 97);")
if self.ui.alpha_cutoff.value() == 0:
self.ui.alpha_cutoff.setValue(4*dist)
if state == 'show':
#if it has an outline already, remove it
if hasattr(self,'outlineActor'):
self.ren.RemoveActor(self.outlineActor)
if 'Delaunay' in sys.modules: print('Import happened.')
print('Calculating hull . . .')
# try:
chull = alpha_shape(self.rawPnts[self.bool_pnt][:,0:2],self.tri,self.ui.alpha_cutoff.value())
x,y = chull.exterior.coords.xy
# except Exception as e:
# print('Hull failed, try increasing cutoff.')
# print(e)
# return
print('Hull calculated.')
self.Outline = np.column_stack((x,y,np.zeros(len(x)))) #outline appears at z=0
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array((255,127,80))/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
else:
if hasattr(self,'outlineActor'):
self.outlineActor.GetProperty().SetColor(tuple(np.array((70, 171, 176))/float(255)))
else:
print('Calculating hull . . .')
try:
chull = alpha_shape(self.rawPnts[self.bool_pnt][:,0:2],self.tri,self.ui.alpha_cutoff.value())
x,y = chull.exterior.coords.xy
except:
print('Hull failed, try increasing cutoff.')
return
print('Hull calculated.')
self.Outline = np.column_stack((x,y,np.zeros(len(x)))) #outline appears at z=0
self.outlineActor, _ =gen_outline(self.Outline,tuple(np.array((70,171,176))/float(255)),self.PointSize)
self.ren.AddActor(self.outlineActor)
self.activate_outline(False)
#update
# self.ren.ResetCamera()
self.ui.vtkWidget.update()
self.ui.vtkWidget.setFocus()
def get_scale(self):
'''
Returns array for the keypress function based on what radio button is selected.
'''
if self.ui.xsButton.isChecked():
s=np.array([self.Zaspect,1,1])
nl=np.append([self.limits[0]*self.Zaspect,self.limits[1]*self.Zaspect],self.limits[2:])
axs=np.array([1/self.Zaspect,1,1])
elif self.ui.ysButton.isChecked():
s=np.array([1,self.Zaspect,1])
nl=np.append(self.limits[0:2],([self.limits[2]*self.Zaspect,self.limits[3]*self.Zaspect],self.limits[4:]))
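#The lines below are a sketch of the remainder of this (truncated) method, inferred
#from the x-axis branch above: the y branch still needs its axis scale factors, and
#the z branch (the default) scales the third component before returning.
axs=np.array([1,1/self.Zaspect,1])
else:
s=np.array([1,1,self.Zaspect])
nl=np.append(self.limits[0:4],[self.limits[4]*self.Zaspect,self.limits[5]*self.Zaspect])
axs=np.array([1,1,1/self.Zaspect])
return s,nl,axs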
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gates that target four qubits."""
from typing import Optional, Union, Tuple
import numpy
import cirq
def state_swap_eigen_component(x: str, y: str, sign: int = 1):
"""The +/- eigen-component of the operation that swaps states x and y.
For example, state_swap_eigen_component('01', '10', ±1) returns
┌ ┐
│0 0 0 0│
│0 0.5 ±0.5 0│
│0 ±0.5 0.5 0│
│0 0 0 0│
└ ┘
Args:
x, y: The states to swap, as bitstrings.
sign: The sign of the off-diagonal elements (indicated by +/-1).
Returns: The eigen-component.
Raises:
ValueError:
* x and y have different lengths
* x or y contains a character other than '0' and '1'
* x and y are the same
* sign is not -1 or 1
TypeError: x or y is not a string
"""
if not (isinstance(x, str) and isinstance(y, str)):
raise TypeError('not (isinstance(x, str) and isinstance(y, str))')
if len(x) != len(y):
raise ValueError('len(x) != len(y)')
if set(x).union(y).difference('01'):
raise ValueError('Arguments must be 0-1 strings.')
if x == y:
raise ValueError('x == y')
if sign not in (-1, 1):
raise ValueError('sign not in (-1, 1)')
dim = 2 ** len(x)
i, j = int(x, 2), int(y, 2)
component = numpy.zeros((dim, dim))
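# Plausible completion of this (truncated) snippet, following the matrix shown in the
# docstring: the +/- eigen-component has 0.5 at (i, i) and (j, j) and sign * 0.5 at
# (i, j) and (j, i).
component[i, i] = component[j, j] = 0.5
component[i, j] = component[j, i] = sign * 0.5
return component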
from __future__ import print_function, division
import numpy as np
from .helpers import ensure_rng, unique_rows
def _hashable(x):
""" ensure that an point is hashable by a python dict """
return tuple(map(float, x))
class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.observe_point(x)
>>> assert space.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, steps, constraints, constraintParams, extraParam, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# Some function to be optimized
self.target_func = target_func
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Create an array with parameters bounds
self.bounds = np.array(list(pbounds.values()), dtype=float)
self.steps = np.array(list(steps.values()), dtype=float)
self.constraints = constraints
self.constraintParams = constraintParams
self.extraParam = extraParam
# Find number of parameters
self.dim = len(self.keys)
# preallocated memory for X and Y points
self._Xarr = None
self._Yarr = None
# Number of observations
self._length = 0
# Views of the preallocated arrays showing only populated data
self._Xview = None
self._Yview = None
self._cache = {} # keep track of unique points we have seen so far
@property
def X(self):
return self._Xview
@property
def Y(self):
return self._Yview
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
return self._length
def _dict_to_points(self, points_dict):
"""
Example:
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> points_dict = {'p1': [0, .5, 1], 'p2': [0, 1, 2]}
>>> space._dict_to_points(points_dict)
[[0, 0], [1, 0.5], [2, 1]]
"""
# Consistency check
param_tup_lens = []
for key in self.keys:
param_tup_lens.append(len(list(points_dict[key])))
if all([e == param_tup_lens[0] for e in param_tup_lens]):
pass
else:
raise ValueError('The same number of initialization points '
'must be entered for every parameter.')
# Turn into list of lists
all_points = []
for key in self.keys:
all_points.append(points_dict[key])
# Take transpose of list
points = list(map(list, zip(*all_points)))
return points
def observe_point(self, x):
"""
Evaluates a single point x to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = np.asarray(x).ravel()
assert x.size == self.dim, 'x must have the same dimensions'
if x in self:
# Lookup previously seen point
y = self._cache[_hashable(x)]
else:
# measure the target function
params = dict(zip(self.keys, x))
y = self.target_func(self.extraParam,**params)
self.add_observation(x, y)
return y
def add_observation(self, x, y):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in amortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
if self._length >= self._n_alloc_rows:
self._allocate((self._length + 1) * 2)
x = np.asarray(x).ravel()
# Insert data into unique dictionary
self._cache[_hashable(x)] = y
# Insert data into preallocated arrays
self._Xarr[self._length] = x
self._Yarr[self._length] = y
# Expand views to encompass the new data point
self._length += 1
# Create views of the data
self._Xview = self._Xarr[:self._length]
self._Yview = self._Yarr[:self._length]
def _allocate(self, num):
"""
Allocate enough memory to store `num` points
"""
if num <= self._n_alloc_rows:
raise ValueError('num must be larger than current array length')
self._assert_internal_invariants()
# Allocate new memory
_Xnew = np.empty((num, self.bounds.shape[0]))
_Ynew = np.empty(num)
# Copy the old data into the new
if self._Xarr is not None:
_Xnew[:self._length] = self._Xarr[:self._length]
_Ynew[:self._length] = self._Yarr[:self._length]
self._Xarr = _Xnew
self._Yarr = _Ynew
# Create views of the data
self._Xview = self._Xarr[:self._length]
self._Yview = self._Yarr[:self._length]
@property
def _n_alloc_rows(self):
""" Number of allocated rows """
return 0 if self._Xarr is None else self._Xarr.shape[0]
def random_points(self, num):
"""
Creates random points within the bounds of the space
Parameters
----------
num : int
Number of random points to create
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self.keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(3)
array([[ 55.33253689, 0.54488318],
[ 71.80374727, 0.4236548 ],
[ 60.67357423, 0.64589411]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((num, self.dim))
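# Illustrative completion of this (truncated) method: draw each column uniformly
# between its lower and upper bound and return the array. Handling of the extra
# `steps`/`constraints` machinery of this modified class is not shown here.
for col, (lower, upper) in enumerate(self.bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=num)
return data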
from GA_TOPMD import GaTopMd
from PSO_TOP import PSO
import gc
from datetime import datetime
import os
import re
import numpy as np
paths = [
'GATOPMD/mapas/artigo/mapa_4r_40_1d.txt',
]
prizes = [
'GATOPMD/mapas/artigo/premio_4r_40_1d.txt',
]
size_population = [.1,
]
costs = [
[20, 23, 25, 30],
]
points_init = [
[0, 0, 0, 0],
]
points_end = [
[0, 0, 0, 0],
]
deposits = [
[0, 1, 2, 3, 4],
]
number_executions = 30
main_path = './GATOPMD/Result/'
data = datetime.now()
execucao = str(data.strftime(("%d-%m-%Y_%H-%M-%S_execucao")))
result_folder = main_path + '' + 'grafico'
os.mkdir(result_folder)
print(os.getcwd())
for i in range(len(paths)):
name = 'path_' + str(i + 1)
path_current = paths[i]
prize_current = prizes[i]
cost_current = costs[i]
current_init = points_init[i]
current_end = points_end[i]
current_deposits = deposits[i]
population_current = size_population[i]
# ga_execution = GaTopMd(
# generation=1000,
# population=100,
# limit_population=20,
# crossover_rate= .6,
# mutation_rate=.8,
# cost_rate=2,
# prizes_rate=5,
# map_points=path_current,
# prizes=prize_current,
# max_cost=cost_current,
# start_point=current_init,
# end_point=current_end,
# depositos=current_deposits)
folder_cenary = result_folder + '/results_' + re.findall('([\w]+)\.', path_current)[0]
folder_chart = folder_cenary+'/charts'+name
if not os.path.exists(folder_cenary):
os.mkdir(folder_cenary)
if not os.path.exists(folder_chart):
os.mkdir(folder_chart)
with open(folder_cenary + '/Results_Execution.txt', 'a+') as out:
out.write('Scenario: ' + path_current + '\n')
print('Scenario: ' + path_current + '\n')
with open(folder_cenary + '/Results_Execution_melhor_elemento_custo_premio.csv', 'a+') as out:
out.write(name + '\n')
for numberExecution in range(number_executions):
pso_execution = PSO(
iterations=1,
size_population=1,
beta=.3,
alfa=.8,
cost_rate=2,
prizes_rate=5,
map_points=path_current,
prizes=prize_current,
max_cost=cost_current,
start_point=current_init,
end_point=current_end,
depositos=current_deposits)
print('####### Start of execution: ' + str(numberExecution))
gbest, primeiro, ultimo = pso_execution.run()
mapaa = list()
mapaa.append(np.fromstring('0, 19, 18, 12, 11, 7, 8, 13, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 20, 14, 9, 5, 15, 16, 21, 24, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 28, 29, 27, 34, 33, 37, 41, 38, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 25, 31, 32, 26, 40, 39, 43, 44, 36, 30, 0', dtype=int, sep=','))
pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(
1))
mapaa = list()
mapaa.append(np.fromstring('0, 35, 38, 41, 37, 34, 27, 29, 28, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 13, 8, 7, 11, 6, 12, 23, 18, 19, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 30, 36, 44, 43, 39, 40, 26, 32, 31, 25, 0', dtype=int, sep=','))
mapaa.append(np.fromstring('', dtype=int, sep=','))
pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(
2))
mapaa = list()
mapaa.append(np.fromstring('0, 23, 18, 19, 13, 8, 7, 11, 12, 6, 1', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 20, 14, 9, 5, 15, 16, 21, 17, 10, 2', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 28, 35, 42, 41, 38, 34, 29, 27, 33, 37, 3', dtype=int, sep=','))
mapaa.append(np.fromstring('0, 25, 24, 26, 32, 31, 30, 36, 44, 43, 39, 40, 4', dtype=int, sep=','))
pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(
3))
mapaa = list()
mapaa.append(np.fromstring('0 14 9 5 15 20 0', dtype=int, sep=' '))
mapaa.append(np.fromstring('0 13 7 11 6 12 18 19 0', dtype=int, sep=' '))
mapaa.append(np.fromstring('0 28 29 34 38 41 37 33 27 0', dtype=int, sep=' '))
mapaa.append(np.fromstring('0 30 31 43 39 40 26 25 24 0', dtype=int, sep=' '))
pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(
4))
mapaa = list()
mapaa.append(np.fromstring('0 13 7 11 6 12 18 19 0', dtype=int, sep=' '))
mapaa.append(np.fromstring('0 28 29 34 38 41 37 33 27 0', dtype=int, sep=' '))
mapaa.append(np.fromstring('0 30 44 43 39 40 26 31 25 0', dtype=int, sep=' '))
pso_execution.plota_rotas_TOP(cidades=pso_execution.map_points, rota=mapaa, file_plot=True,
name_file_plot=folder_chart + '/Plot_Path_melhor_elemento_' + name + '_execution_' + str(
5))
mapaa = list()
mapaa.append(np.fromstring('0 23 18 19 13 8 7 11 12 6 1', dtype=int, sep=' '))
#!/usr/bin/env python
"""Very simple SVG rasterizer
NOT SUPPORTED:
- markers
- symbol
- color-interpolation and filter-color-interpolation attributes
PARTIALLY SUPPORTED:
- text (textPath is not supported)
- fonts
- font resolution logic is very basic
- style font attribute is not parsed only font-* attrs are supported
KNOWN PROBLEMS:
- multiple paths going over the same pixels break antialiasing
(would draw all pixels with multiplied AA coverage (clamped)).
"""
from __future__ import annotations
import builtins
import gzip
import io
import math
import numpy as np
import numpy.typing as npt
import os
import re
import struct
import sys
import textwrap
import time
import warnings
import xml.etree.ElementTree as etree
import zlib
from functools import reduce, partial
from typing import Any, Callable, NamedTuple, List, Tuple, Optional, Dict
EPSILON = sys.float_info.epsilon
FLOAT_RE = re.compile(r"[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?")
FLOAT = np.float64
# ------------------------------------------------------------------------------
# Layer
# ------------------------------------------------------------------------------
COMPOSE_OVER = 0
COMPOSE_OUT = 1
COMPOSE_IN = 2
COMPOSE_ATOP = 3
COMPOSE_XOR = 4
COMPOSE_PRE_ALPHA = {COMPOSE_OVER, COMPOSE_OUT, COMPOSE_IN, COMPOSE_ATOP, COMPOSE_XOR}
BBox = Tuple[float, float, float, float]
FNDArray = npt.NDArray[FLOAT]
class Layer(NamedTuple):
image: np.ndarray[Tuple[int, int, int], FLOAT]
offset: Tuple[int, int]
pre_alpha: bool
linear_rgb: bool
@property
def x(self) -> int:
return self.offset[0]
@property
def y(self) -> int:
return self.offset[1]
@property
def width(self) -> int:
return self.image.shape[1]
@property
def height(self) -> int:
return self.image.shape[0]
@property
def channels(self) -> int:
return self.image.shape[2]
@property
def bbox(self) -> BBox:
return (*self.offset, *self.image.shape[:2])
def translate(self, x: int, y: int) -> Layer:
offset = (self.x + x, self.y + y)
return Layer(self.image, offset, self.pre_alpha, self.linear_rgb)
def color_matrix(self, matrix: np.ndarray) -> Layer:
"""Apply color matrix transformation"""
if not isinstance(matrix, np.ndarray) or matrix.shape != (4, 5):
raise ValueError("expected 4x5 matrix")
layer = self.convert(pre_alpha=False, linear_rgb=True)
M = matrix[:, :4]
B = matrix[:, 4]
image = np.matmul(layer.image, M.T) + B
np.clip(image, 0, 1, out=image)
return Layer(image, layer.offset, pre_alpha=False, linear_rgb=True)
def convolve(self, kernel: np.ndarray) -> Layer:
"""Convlve layer"""
try:
from scipy.signal import convolve
layer = self.convert(pre_alpha=False, linear_rgb=True)
kw, kh = kernel.shape
image = convolve(layer.image, kernel[..., None])
x, y = int(layer.x - kw / 2), int(layer.y - kh / 2)
return Layer(image, (x, y), pre_alpha=False, linear_rgb=True)
except ImportError:
warnings.warn("Layer::convolve requires `scipy`")
return self
def morphology(self, x: int, y: int, method: str) -> Layer:
"""Morphology filter operation
Morphology is essentially {min|max} pooling with [1, 1] stride
"""
layer = self.convert(pre_alpha=True, linear_rgb=True)
image = pooling(layer.image, ksize=(x, y), stride=(1, 1), method=method)
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=True)
def convert(self, pre_alpha=None, linear_rgb=None) -> Layer:
"""Convert image if needed to specified alpha and colorspace"""
pre_alpha = self.pre_alpha if pre_alpha is None else pre_alpha
linear_rgb = self.linear_rgb if linear_rgb is None else linear_rgb
if self.channels == 1:
# single channel value assumed to be alpha
return Layer(self.image, self.offset, pre_alpha, linear_rgb)
in_image, out_offset, out_pre_alpha, out_linear_rgb = self
out_image = None
if out_linear_rgb != linear_rgb:
out_image = in_image.copy()
# convert to straight alpha first if needed
if out_pre_alpha:
out_image = color_pre_to_straight_alpha(out_image)
out_pre_alpha = False
if linear_rgb:
out_image = color_srgb_to_linear(out_image)
else:
out_image = color_linear_to_srgb(out_image)
out_linear_rgb = linear_rgb
if out_pre_alpha != pre_alpha:
if out_image is None:
out_image = in_image.copy()
if pre_alpha:
out_image = color_straight_to_pre_alpha(out_image)
else:
out_image = color_pre_to_straight_alpha(out_image)
out_pre_alpha = pre_alpha
if out_image is None:
return self
return Layer(out_image, out_offset, out_pre_alpha, out_linear_rgb)
def background(self, color: np.ndarray) -> Layer:
layer = self.convert(pre_alpha=True, linear_rgb=True)
image = canvas_compose(COMPOSE_OVER, color[None, None, ...], layer.image)
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=True)
def opacity(self, opacity: float, linear_rgb=False) -> Layer:
"""Apply additinal opacity"""
layer = self.convert(pre_alpha=True, linear_rgb=linear_rgb)
image = layer.image * opacity
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=linear_rgb)
@staticmethod
def compose(layers: List[Layer], method=COMPOSE_OVER, linear_rgb=False) -> Optional[Layer]:
"""Compose multiple layers into one with specified `method`
        Composition in linear RGB is the correct approach, but SVG composes in sRGB
        by default; only filters compose in linear RGB by default.
"""
if not layers:
return None
elif len(layers) == 1:
return layers[0]
images = []
pre_alpha = method in COMPOSE_PRE_ALPHA
for layer in layers:
layer = layer.convert(pre_alpha=pre_alpha, linear_rgb=linear_rgb)
images.append((layer.image, layer.offset))
blend = partial(canvas_compose, method)
if method == COMPOSE_IN:
result = canvas_merge_intersect(images, blend)
        elif method == COMPOSE_OVER:
            result = canvas_merge_union(images, full=False, blend=blend)
else:
result = canvas_merge_union(images, full=True, blend=blend)
if result is None:
return None
image, offset = result
return Layer(image, offset, pre_alpha=pre_alpha, linear_rgb=linear_rgb)
def write_png(self, output=None):
if self.channels != 4:
raise ValueError("Only RGBA layers are supported")
layer = self.convert(pre_alpha=False, linear_rgb=False)
return canvas_to_png(layer.image, output)
def __repr__(self):
return "Layer(x={}, y={}, w={}, h={}, pre_alpha={}, linear_rgb={})".format(
self.x, self.y, self.width, self.height, self.pre_alpha, self.linear_rgb
)
def show(self, format=None):
"""Show layer on terminal if `imshow` if available
NOTE: used only for debugging
"""
try:
from imshow import show
layer = self.convert(pre_alpha=False, linear_rgb=False)
show(layer.image, format=format)
except ImportError:
warnings.warn("to be able to show layer on terminal imshow is required")
def canvas_create(width, height, bg=None):
"""Create canvas of a specified size
Returns (canvas, transform) tuple:
canvas - float64 ndarray of (height, width, 4) shape
transform - transform from (x, y) to canvas pixel coordinates
"""
if bg is None:
canvas = np.zeros((height, width, 4), dtype=FLOAT)
else:
canvas = np.broadcast_to(bg, (height, width, 4)).copy()
return canvas, Transform().matrix(0, 1, 0, 1, 0, 0)
def canvas_to_png(canvas, output=None):
"""Convert (height, width, rgba{float64}) to PNG"""
def png_pack(output, tag, data):
checksum = 0xFFFFFFFF & zlib.crc32(data, zlib.crc32(tag))
output.write(struct.pack("!I", len(data)))
output.write(tag)
output.write(data)
output.write(struct.pack("!I", checksum))
height, width, _ = canvas.shape
data = io.BytesIO()
comp = zlib.compressobj(level=9)
for row in np.round(canvas * 255.0).astype(np.uint8):
data.write(comp.compress(b"\x00"))
data.write(comp.compress(row.tobytes()))
data.write(comp.flush())
output = io.BytesIO() if output is None else output
output.write(b"\x89PNG\r\n\x1a\n")
png_pack(output, b"IHDR", struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(output, b"IDAT", data.getvalue()),
png_pack(output, b"IEND", b"")
return output
def canvas_compose(mode, dst, src):
"""Compose two alpha premultiplied images
https://ciechanow.ski/alpha-compositing/
http://ssp.impulsetrain.com/porterduff.html
"""
src_a = src[..., -1:] if len(src.shape) == 3 else src
dst_a = dst[..., -1:] if len(dst.shape) == 3 else dst
if mode == COMPOSE_OVER:
return src + dst * (1 - src_a)
elif mode == COMPOSE_OUT:
return src * (1 - dst_a)
elif mode == COMPOSE_IN:
return src * dst_a
elif mode == COMPOSE_ATOP:
return src * dst_a + dst * (1 - src_a)
elif mode == COMPOSE_XOR:
return src * (1 - dst_a) + dst * (1 - src_a)
elif isinstance(mode, tuple) and len(mode) == 4:
k1, k2, k3, k4 = mode
return (k1 * src * dst + k2 * src + k3 * dst + k4).clip(0, 1)
raise ValueError(f"invalid compose mode: {mode}")
canvas_compose_over = partial(canvas_compose, COMPOSE_OVER)
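# Worked example for Porter-Duff "over" on single premultiplied pixels: a
# half-opaque white source over an opaque black destination yields mid grey
# with full alpha.
def _compose_over_example():
    dst = np.array([[[0.0, 0.0, 0.0, 1.0]]])  # opaque black (premultiplied)
    src = np.array([[[0.5, 0.5, 0.5, 0.5]]])  # 50% white (premultiplied)
    return canvas_compose(COMPOSE_OVER, dst, src)  # -> [[[0.5, 0.5, 0.5, 1.0]]]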
def canvas_merge_at(base, overlay, offset, blend=canvas_compose_over):
"""Alpha blend `overlay` on top of `base` at offset coordintate
Updates `base` with `overlay` in place.
"""
x, y = offset
b_h, b_w = base.shape[:2]
o_h, o_w = overlay.shape[:2]
clip = lambda v, l, h: l if v < l else h if v > h else v
b_x_low, b_x_high = clip(x, 0, b_h), clip(x + o_h, 0, b_h)
b_y_low, b_y_high = clip(y, 0, b_w), clip(y + o_w, 0, b_w)
effected = base[b_x_low:b_x_high, b_y_low:b_y_high]
if effected.size == 0:
return
o_x_low, o_x_high = clip(-x, 0, o_h), clip(b_h - x, 0, o_h)
o_y_low, o_y_high = clip(-y, 0, o_w), clip(b_w - y, 0, o_w)
overlay = overlay[o_x_low:o_x_high, o_y_low:o_y_high]
if overlay.size == 0:
return
effected[...] = blend(effected, overlay).clip(0, 1)
return base
def canvas_merge_union(layers, full=True, blend=canvas_compose_over):
"""Blend multiple `layers` into single large enough image"""
if not layers:
raise ValueError("can not blend zero layers")
elif len(layers) == 1:
return layers[0]
min_x, min_y, max_x, max_y = None, None, None, None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
if min_x is None:
min_x, min_y = x, y
max_x, max_y = x + w, y + h
else:
min_x, min_y = min(min_x, x), min(min_y, y)
max_x, max_y = max(max_x, x + w), max(max_y, y + h)
width, height = max_x - min_x, max_y - min_y
if full:
output = None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
ox, oy = x - min_x, y - min_y
image_full = np.zeros((width, height, 4), dtype=FLOAT)
image_full[ox : ox + w, oy : oy + h] = image
if output is None:
output = image_full
else:
output = blend(output, image_full)
else:
# this is optimization for method `over` blending
output = np.zeros((max_x - min_x, max_y - min_y, 4), dtype=FLOAT)
for index, (image, offset) in enumerate(layers):
x, y = offset
w, h = image.shape[:2]
ox, oy = x - min_x, y - min_y
effected = output[ox : ox + w, oy : oy + h]
if index == 0:
effected[...] = image
else:
effected[...] = blend(effected, image)
return output, (min_x, min_y)
def canvas_merge_intersect(layers, blend=canvas_compose_over):
"""Blend multiple `layers` into single image coverd by all layers"""
if not layers:
raise ValueError("can not blend zero layers")
elif len(layers) == 1:
return layers[0]
min_x, min_y, max_x, max_y = None, None, None, None
for layer, offset in layers:
x, y = offset
w, h = layer.shape[:2]
if min_x is None:
min_x, min_y = x, y
max_x, max_y = x + w, y + h
else:
min_x, min_y = max(min_x, x), max(min_y, y)
max_x, max_y = min(max_x, x + w), min(max_y, y + h)
if min_x >= max_x or min_y >= max_y:
return None # empty intersection
(first, (fx, fy)), *rest = layers
output = first[min_x - fx : max_x - fx, min_y - fy : max_y - fy]
w, h, c = output.shape
if c == 1:
output = np.broadcast_to(output, (w, h, 4))
output = output.copy()
for layer, offset in rest:
x, y = offset
output[...] = blend(output, layer[min_x - x : max_x - x, min_y - y : max_y - y])
return output, (min_x, min_y)
def pooling(mat, ksize, stride=None, method="max", pad=False):
"""Overlapping pooling on 2D or 3D data.
<mat>: ndarray, input array to pool.
<ksize>: tuple of 2, kernel size in (ky, kx).
<stride>: tuple of 2 or None, stride of pooling window.
If None, same as <ksize> (non-overlapping pooling).
    <method>: str, 'max' for max-pooling, 'min' for min-pooling,
              'mean' for mean-pooling.
<pad>: bool, pad <mat> or not. If no pad, output has size
(n-f)//s+1, n being <mat> size, f being kernel size, s stride.
if pad, output has size ceil(n/s).
Return <result>: pooled matrix.
"""
m, n = mat.shape[:2]
ky, kx = ksize
if stride is None:
stride = (ky, kx)
sy, sx = stride
if pad:
nx = int(np.ceil(n / float(sx)))
ny = int(np.ceil(m / float(sy)))
size = ((ny - 1) * sy + ky, (nx - 1) * sx + kx) + mat.shape[2:]
mat_pad = np.full(size, np.nan)
mat_pad[:m, :n, ...] = mat
else:
mat_pad = mat[: (m - ky) // sy * sy + ky, : (n - kx) // sx * sx + kx, ...]
# Get a strided sub-matrices view of an ndarray.
s0, s1 = mat_pad.strides[:2]
m1, n1 = mat_pad.shape[:2]
m2, n2 = ksize
view_shape = (1 + (m1 - m2) // stride[0], 1 + (n1 - n2) // stride[1], m2, n2) + mat_pad.shape[
2:
]
strides = (stride[0] * s0, stride[1] * s1, s0, s1) + mat_pad.strides[2:]
view = np.lib.stride_tricks.as_strided(mat_pad, view_shape, strides=strides)
if method == "max":
result = np.nanmax(view, axis=(2, 3))
elif method == "min":
result = np.nanmin(view, axis=(2, 3))
elif method == "mean":
result = np.nanmean(view, axis=(2, 3))
else:
raise ValueError(f"invalid poll method: {method}")
return result
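# Usage sketch for `pooling`: overlapping 2x2 max-pooling with stride 1 on a
# small matrix produces a (3, 3) result of local maxima.
def _pooling_example():
    mat = np.arange(16, dtype=FLOAT).reshape(4, 4)
    return pooling(mat, ksize=(2, 2), stride=(1, 1), method="max")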
def color_pre_to_straight_alpha(rgba):
"""Convert from premultiplied alpha inplace"""
rgb = rgba[..., :-1]
alpha = rgba[..., -1:]
np.divide(rgb, alpha, out=rgb, where=alpha > 0.0001)
np.clip(rgba, 0, 1, out=rgba)
return rgba
def color_straight_to_pre_alpha(rgba):
"""Convert to premultiplied alpha inplace"""
rgba[..., :-1] *= rgba[..., -1:]
return rgba
def color_linear_to_srgb(rgba):
"""Convert pixels from linear RGB to sRGB inplace"""
rgb = rgba[..., :-1]
small = rgb <= 0.0031308
rgb[small] = rgb[small] * 12.92
large = ~small
rgb[large] = 1.055 * np.power(rgb[large], 1.0 / 2.4) - 0.055
return rgba
def color_srgb_to_linear(rgba):
"""Convert pixels from sRGB to linear RGB inplace"""
rgb = rgba[..., :-1]
small = rgb <= 0.04045
rgb[small] = rgb[small] / 12.92
large = ~small
rgb[large] = np.power((rgb[large] + 0.055) / 1.055, 2.4)
return rgba
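# Round-trip sanity sketch for the colorspace helpers: sRGB -> linear -> sRGB
# should approximately recover the original pixel values (alpha is untouched).
def _srgb_roundtrip_example():
    rgba = np.array([[[0.2, 0.5, 0.9, 1.0]]])
    out = color_linear_to_srgb(color_srgb_to_linear(rgba.copy()))
    return np.allclose(out, rgba)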
# ------------------------------------------------------------------------------
# Transform
# ------------------------------------------------------------------------------
class Transform:
__slots__: List[str] = ["m", "_m_inv"]
m: np.ndarray[Tuple[int, int], FLOAT]
_m_inv: np.ndarray[Tuple[int, int], FLOAT]
def __init__(self, matrix=None, matrix_inv=None):
if matrix is None:
self.m = np.identity(3)
self._m_inv = self.m
else:
self.m = matrix
self._m_inv = matrix_inv
def __matmul__(self, other: Transform) -> Transform:
return Transform(self.m @ other.m)
@property
def invert(self) -> Transform:
if self._m_inv is None:
self._m_inv = np.linalg.inv(self.m)
return Transform(self._m_inv, self.m)
def __call__(self, points: FNDArray) -> FNDArray:
if len(points) == 0:
return points
return points @ self.m[:2, :2].T + self.m[:2, 2]
def apply(self) -> Callable[[FNDArray], FNDArray]:
M = self.m[:2, :2].T
B = self.m[:2, 2]
return lambda points: points @ M + B
def matrix(self, m00, m01, m02, m10, m11, m12):
return Transform(self.m @ np.array([[m00, m01, m02], [m10, m11, m12], [0, 0, 1]]))
def translate(self, tx: float, ty: float) -> Transform:
return Transform(self.m @ np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]))
def scale(self, sx, sy=None):
sy = sx if sy is None else sy
return Transform(self.m @ np.array([[sx, 0, 0], [0, sy, 0], [0, 0, 1]]))
def rotate(self, angle):
cos_a = math.cos(angle)
sin_a = math.sin(angle)
return Transform(self.m @ np.array([[cos_a, -sin_a, 0], [sin_a, cos_a, 0], [0, 0, 1]]))
def skew(self, ax, ay):
return Transform(
np.matmul(self.m, np.array([[1, math.tan(ax), 0], [math.tan(ay), 1, 0], [0, 0, 1]]))
)
def __repr__(self):
return str(np.around(self.m, 4).tolist()[:2])
def no_translate(self):
m = self.m.copy()
m[0, 2] = 0
m[1, 2] = 0
return Transform(m)
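# Usage sketch for Transform: chain translate/scale/rotate and apply the
# resulting matrix to a small array of points.
def _transform_example():
    tr = Transform().translate(10, 20).scale(2).rotate(math.pi / 2)
    pts = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=FLOAT)
    return tr(pts)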
# ------------------------------------------------------------------------------
# Render scene
# ------------------------------------------------------------------------------
RENDER_FILL = 0
RENDER_STROKE = 1
RENDER_GROUP = 2
RENDER_OPACITY = 3
RENDER_CLIP = 4
RENDER_TRANSFORM = 5
RENDER_FILTER = 6
RENDER_MASK = 7
class Scene(tuple):
__slots__: List[str] = []
def __new__(cls, type, args):
return tuple.__new__(cls, (type, args))
@classmethod
def fill(cls, path, paint, fill_rule=None):
return cls(RENDER_FILL, (path, paint, fill_rule))
@classmethod
def stroke(cls, path, paint, width, linecap=None, linejoin=None):
return cls(RENDER_STROKE, (path, paint, width, linecap, linejoin))
@classmethod
def group(cls, children):
if not children:
raise ValueError("group have to contain at least one child")
if len(children) == 1:
return children[0]
return cls(RENDER_GROUP, children)
def opacity(self, opacity):
if opacity > 0.999:
return self
return Scene(RENDER_OPACITY, (self, opacity))
def clip(self, clip, bbox_units=False):
return Scene(RENDER_CLIP, (self, clip, bbox_units))
def mask(self, mask, bbox_units=False):
return Scene(RENDER_MASK, (self, mask, bbox_units))
def transform(self, transform):
type, args = self
if type == RENDER_TRANSFORM:
target, target_transform = args
return Scene(RENDER_TRANSFORM, (target, transform @ target_transform))
else:
return Scene(RENDER_TRANSFORM, (self, transform))
def filter(self, filter):
return Scene(RENDER_FILTER, (self, filter))
def render(self, transform, mask_only=False, viewport=None, linear_rgb=False):
"""Render graph"""
type, args = self
if type == RENDER_FILL:
path, paint, fill_rule = args
if mask_only:
return path.mask(transform, fill_rule=fill_rule, viewport=viewport)
else:
return path.fill(
transform, paint, fill_rule=fill_rule, viewport=viewport, linear_rgb=linear_rgb
)
elif type == RENDER_STROKE:
path, paint, width, linecap, linejoin = args
stroke = path.stroke(width, linecap, linejoin)
if mask_only:
return stroke.mask(transform, viewport=viewport)
else:
return stroke.fill(transform, paint, viewport=viewport, linear_rgb=linear_rgb)
elif type == RENDER_GROUP:
layers, hulls = [], []
start = time.time()
for child in args:
layer = child.render(transform, mask_only, viewport, linear_rgb)
if layer is None:
continue
layer, hull = layer
layers.append(layer)
hulls.append(hull)
group = Layer.compose(layers, COMPOSE_OVER, linear_rgb)
if not group:
return None
return group, ConvexHull.merge(hulls)
elif type == RENDER_OPACITY:
target, opacity = args
layer = target.render(transform, mask_only, viewport, linear_rgb)
if layer is None:
return None
layer, hull = layer
return layer.opacity(opacity, linear_rgb), hull
elif type == RENDER_CLIP:
target, clip, bbox_units = args
image_result = target.render(transform, mask_only, viewport, linear_rgb)
if image_result is None:
return None
image, hull = image_result
if bbox_units:
transform = hull.bbox_transform(transform)
clip_result = clip.render(transform, True, viewport, linear_rgb)
if clip_result is None:
return None
mask, _ = clip_result
result = Layer.compose([mask, image], COMPOSE_IN, linear_rgb)
if result is None:
return None
return result, hull
elif type == RENDER_TRANSFORM:
            target, target_transform = args
            return target.render(transform @ target_transform, mask_only, viewport, linear_rgb)
elif type == RENDER_MASK:
target, mask_scene, bbox_units = args
image_result = target.render(transform, mask_only, viewport, linear_rgb)
if image_result is None:
return None
image, hull = image_result
if bbox_units:
transform = hull.bbox_transform(transform)
mask_result = mask_scene.render(transform, mask_only, viewport, linear_rgb)
if mask_result is None:
return None
mask, _ = mask_result
mask = mask.convert(pre_alpha=False, linear_rgb=linear_rgb)
mask_image = mask.image[..., :3] @ [0.2125, 0.7154, 0.072] * mask.image[..., 3]
mask = Layer(mask_image[..., None], mask.offset, pre_alpha=False, linear_rgb=linear_rgb)
result = Layer.compose([mask, image], COMPOSE_IN, linear_rgb)
if result is None:
return None
return result, hull
elif type == RENDER_FILTER:
target, filter = args
image_result = target.render(transform, mask_only, viewport, linear_rgb)
if image_result is None:
return None
image, hull = image_result
return filter(transform, image), hull
else:
raise ValueError(f"unhandled scene type: {type}")
def to_path(self, transform: Transform):
"""Try to convert whole scene to a path (used only for testing)"""
def to_path(scene, transform):
type, args = scene
if type == RENDER_FILL:
path, _paint, _fill_rule = args
yield path.transform(transform)
elif type == RENDER_STROKE:
path, paint, width, linecap, linejoin = args
yield path.transform(transform).stroke(width, linecap, linejoin)
elif type == RENDER_GROUP:
for child in args:
yield from to_path(child, transform)
elif type == RENDER_OPACITY:
target, _opacity = args
yield from to_path(target, transform)
elif type == RENDER_CLIP:
target, _clip, _bbox_units = args
yield from to_path(target, transform)
elif type == RENDER_TRANSFORM:
                target, target_transform = args
                yield from to_path(target, transform @ target_transform)
elif type == RENDER_MASK:
target, _mask_scene, _bbox_units = args
yield from to_path(target, transform)
elif type == RENDER_FILTER:
target, _filter = args
yield from to_path(target, transform)
else:
raise ValueError(f"unhandled scene type: {type}")
subpaths = [spath for path in to_path(self, transform) for spath in path.subpaths]
return Path(subpaths)
def __repr__(self) -> str:
def repr_rec(scene, output, depth):
output.write(indent * depth)
type, args = scene
if type == RENDER_FILL:
path, paint, fill_rule = args
if isinstance(paint, np.ndarray):
paint = format_color(paint)
output.write(f"FILL fill_rule:{fill_rule} paint:{paint}\n")
output.write(textwrap.indent(repr(path), indent * (depth + 1)))
output.write("\n")
elif type == RENDER_STROKE:
path, paint, width, linecap, linejoin = args
if isinstance(paint, np.ndarray):
paint = format_color(paint)
output.write(f"STROKE ")
output.write(f"width:{width} ")
output.write(f"linecap:{linecap} ")
output.write(f"linejoin:{linejoin} ")
output.write(f"paint:{paint}\n")
output.write(textwrap.indent(repr(path), indent * (depth + 1)))
output.write("\n")
elif type == RENDER_GROUP:
output.write("GROUP\n")
for child in args:
repr_rec(child, output, depth + 1)
elif type == RENDER_OPACITY:
target, opacity = args
output.write(f"OPACITY {opacity}\n")
repr_rec(target, output, depth + 1)
elif type == RENDER_CLIP:
target, clip, bbox_units = args
output.write(f"CLIP bbox_units:{bbox_units}\n")
output.write(indent * (depth + 1))
output.write("CLIP_PATH\n")
repr_rec(clip, output, depth + 2)
output.write(indent * (depth + 1))
output.write("CLIP_TARGET\n")
repr_rec(target, output, depth + 2)
elif type == RENDER_MASK:
target, mask, bbox_units = args
output.write(f"MASK bbox_units:{bbox_units}\n")
output.write(indent * (depth + 1))
output.write("MAKS_PATH\n")
repr_rec(mask, output, depth + 2)
output.write(indent * (depth + 1))
output.write("MASK_TARGET\n")
repr_rec(target, output, depth + 2)
elif type == RENDER_TRANSFORM:
target, transform = args
output.write(f"TRANSFORM {transform}\n")
repr_rec(target, output, depth + 1)
elif type == RENDER_FILTER:
target, filter = args
output.write(f"FILTER {filter}\n")
repr_rec(target, output, depth + 1)
else:
raise ValueError(f"unhandled scene type: {type}")
return output
def format_color(cs):
return "#" + "".join(f"{c:0<2x}" for c in (cs * 255).astype(np.uint8))
indent = " "
return repr_rec(self, io.StringIO(), 0).getvalue()[:-1]
# ------------------------------------------------------------------------------
# Path
# ------------------------------------------------------------------------------
PATH_LINE = 0
PATH_QUAD = 1
PATH_CUBIC = 2
PATH_ARC = 3
PATH_CLOSED = 4
PATH_UNCLOSED = 5
PATH_LINES = {PATH_LINE, PATH_CLOSED, PATH_UNCLOSED}
PATH_FILL_NONZERO = "nonzero"
PATH_FILL_EVENODD = "evenodd"
STROKE_JOIN_MITER = "miter"
STROKE_JOIN_ROUND = "round"
STROKE_JOIN_BEVEL = "bevel"
STROKE_CAP_BUTT = "butt"
STROKE_CAP_ROUND = "round"
STROKE_CAP_SQUARE = "square"
class Path:
"""Single rendering unit that can be filled or converted to stroke path
`subpaths` is a list of tuples:
- `(PATH_LINE, (p0, p1))` - line from p0 to p1
- `(PATH_CUBIC, (p0, c0, c1, p1))` - cubic bezier curve from p0 to p1 with control c0, c1
- `(PATH_QUAD, (p0, c0, p1))` - quadratic bezier curve from p0 to p1 with control c0
    - `(PATH_ARC, (center, rx, ry, phi, eta, eta_delta))` - arc with a center and two radii rx, ry
      rotated by the phi angle, going from the initial eta to the eta + eta_delta angle.
    - `(PATH_CLOSED | PATH_UNCLOSED, (p0, p1))` - last segment of a subpath: `"closed"` if
      the path was closed and `"unclosed"` if it was not. p0 - end of this subpath,
      p1 - beginning of this subpath.
"""
__slots__ = ["subpaths"]
subpaths: List[List[Tuple[int, Tuple[Any, ...]]]]
def __init__(self, subpaths):
self.subpaths = subpaths
def __iter__(self):
"""Itearte over subpaths"""
return iter(self.subpaths)
def __bool__(self) -> bool:
return bool(self.subpaths)
def mask(
self,
transform: Transform,
fill_rule: Optional[str] = None,
viewport: Optional[BBox] = None,
):
"""Render path as a mask (alpha channel only image)"""
# convert all curves to cubic curves and lines
lines_defs, cubics_defs = [], []
for path in self.subpaths:
if not path:
continue
for cmd, args in path:
if cmd in PATH_LINES:
lines_defs.append(args)
elif cmd == PATH_CUBIC:
cubics_defs.append(args)
elif cmd == PATH_QUAD:
cubics_defs.append(bezier2_to_bezier3(args))
elif cmd == PATH_ARC:
cubics_defs.extend(arc_to_bezier3(*args))
else:
raise ValueError(f"unsupported path type: `{cmd}`")
# transform all curves into presentation coordinate system
lines = transform(np.array(lines_defs, dtype=FLOAT))
cubics = transform(np.array(cubics_defs, dtype=FLOAT))
        # flatten (convert to lines) all curves
if cubics.size != 0:
# flatness of 0.1px gives good accuracy
if lines.size != 0:
lines = np.concatenate([lines, bezier3_flatten_batch(cubics, 0.1)])
else:
lines = bezier3_flatten_batch(cubics, 0.1)
if lines.size == 0:
return
# calculate size of the mask
min_x, min_y = np.floor(lines.reshape(-1, 2).min(axis=0)).astype(int) - 1
max_x, max_y = np.ceil(lines.reshape(-1, 2).max(axis=0)).astype(int) + 1
if viewport is not None:
vx, vy, vw, vh = viewport
min_x, min_y = max(vx, min_x), max(vy, min_y)
max_x, max_y = min(vx + vw, max_x), min(vy + vh, max_y)
width = max_x - min_x
height = max_y - min_y
if width <= 0 or height <= 0:
return
# create trace (signed coverage)
trace = np.zeros((width, height), dtype=FLOAT)
for points in lines - np.array([min_x, min_y]):
line_signed_coverage(trace, points)
# render mask
mask = np.cumsum(trace, axis=1)
if fill_rule is None or fill_rule == PATH_FILL_NONZERO:
mask = np.fabs(mask).clip(0, 1)
elif fill_rule == PATH_FILL_EVENODD:
mask = np.fabs(np.remainder(mask + 1.0, 2.0) - 1.0)
else:
raise ValueError(f"Invalid fill rule: {fill_rule}")
        mask[mask < 1e-6] = 0  # round very small mask values down to zero
output = Layer(mask[..., None], (min_x, min_y), pre_alpha=True, linear_rgb=True)
return output, ConvexHull(lines)
def fill(self, transform, paint, fill_rule=None, viewport=None, linear_rgb=True):
"""Render path by fill-ing it."""
if paint is None:
return None
# create a mask
mask = self.mask(transform, fill_rule, viewport)
if mask is None:
return None
mask, hull = mask
# create background with specified paint
if isinstance(paint, np.ndarray) and paint.shape == (4,):
if not linear_rgb:
paint = color_pre_to_straight_alpha(paint.copy())
paint = color_linear_to_srgb(paint)
paint = color_straight_to_pre_alpha(paint)
output = Layer(mask.image * paint, mask.offset, pre_alpha=True, linear_rgb=linear_rgb)
elif isinstance(paint, (GradLinear, GradRadial)):
if paint.bbox_units:
user_tr = hull.bbox_transform(transform).invert
else:
user_tr = transform.invert
# convert grad pixels to user coordinate system
pixels = user_tr(grad_pixels(mask.bbox))
if paint.linear_rgb is not None:
linear_rgb = paint.linear_rgb
image = paint.fill(pixels, linear_rgb=linear_rgb)
# NOTE: consider optimizing calculation of grad only for unmasked points
# masked = mask.image > EPSILON
# painted = paint.fill(
# pixels[np.broadcast_to(masked, pixels.shape)].reshape(-1, 2),
# linear_rgb=linear_rgb,
# )
# image = np.zeros((mask.width, mask.height, 4), dtype=FLOAT)
# image[np.broadcast_to(masked, image.shape)] = painted.reshape(-1)
background = Layer(image, mask.offset, pre_alpha=True, linear_rgb=linear_rgb)
# use `canvas_compose` directly to avoid needless allocation
background = background.convert(pre_alpha=True, linear_rgb=linear_rgb)
mask = mask.convert(pre_alpha=True, linear_rgb=linear_rgb)
image = canvas_compose(COMPOSE_IN, mask.image, background.image)
output = Layer(image, mask.offset, pre_alpha=True, linear_rgb=linear_rgb)
elif isinstance(paint, Pattern):
# render pattern
pat_tr = transform.no_translate()
if paint.scene_view_box:
if paint.bbox_units:
px, py, pw, ph = paint.bbox()
_hx, _hy, hw, hh = hull.bbox(transform)
bbox = (px * hw, py * hh, pw * hw, ph * hh)
else:
bbox = paint.bbox()
pat_tr @= svg_viewbox_transform(bbox, paint.scene_view_box)
elif paint.scene_bbox_units:
pat_tr = hull.bbox_transform(pat_tr)
pat_tr @= paint.transform
result = paint.scene.render(pat_tr, linear_rgb=linear_rgb)
if result is None:
return None
pat_layer, _pat_hull = result
# repeat pattern
repeat_tr = transform
if paint.bbox_units:
repeat_tr = hull.bbox_transform(repeat_tr)
repeat_tr @= paint.transform
repeat_tr = repeat_tr.no_translate()
offsets = repeat_tr.invert(grad_pixels(mask.bbox))
offsets = repeat_tr(
np.remainder(offsets - [paint.x, paint.y], [paint.width, paint.height])
)
offsets = offsets.astype(int)
corners = repeat_tr(
[
[0, 0],
[paint.width, 0],
[0, paint.height],
[paint.width, paint.height],
]
)
max_x, max_y = corners.max(axis=0).astype(int)
min_x, min_y = corners.min(axis=0).astype(int)
w, h = max_x - min_x, max_y - min_y
offsets -= [min_x, min_y]
pat = np.zeros((w + 1, h + 1, 4))
pat = canvas_merge_at(pat, pat_layer.image, (pat_layer.x - min_x, pat_layer.y - min_y))
image = canvas_compose(COMPOSE_IN, mask.image, pat[offsets[..., 0], offsets[..., 1]])
output = Layer(
image, mask.offset, pre_alpha=pat_layer.pre_alpha, linear_rgb=pat_layer.linear_rgb
)
else:
warnings.warn(f"fill method is not implemented: {paint}")
return None
return output, hull
def stroke(self, width, linecap=None, linejoin=None) -> "Path":
"""Convert path to stroked path"""
curve_names = {2: PATH_LINE, 3: PATH_QUAD, 4: PATH_CUBIC}
dist = width / 2
outputs = []
for path in self:
if not path:
continue
# offset curves
forward, backward = [], []
for cmd, args in path:
if cmd == PATH_LINE or cmd == PATH_CLOSED:
line = np.array(args)
line_forward = line_offset(line, dist)
if line_forward is None:
continue
forward.append(line_forward)
backward.append(line_offset(line, -dist))
elif cmd == PATH_CUBIC:
cubic = np.array(args)
forward.extend(bezier3_offset(cubic, dist))
backward.extend(bezier3_offset(cubic, -dist))
elif cmd == PATH_QUAD:
cubic = bezier2_to_bezier3(args)
forward.extend(bezier3_offset(cubic, dist))
backward.extend(bezier3_offset(cubic, -dist))
elif cmd == PATH_ARC:
for cubic in arc_to_bezier3(*args):
forward.extend(bezier3_offset(cubic, dist))
backward.extend(bezier3_offset(cubic, -dist))
elif cmd == PATH_UNCLOSED:
continue
else:
raise ValueError(f"unsupported path type: `{cmd}`")
closed = cmd == PATH_CLOSED
if not forward:
continue
# connect curves
curves = []
for curve in forward:
if not curves:
curves.append(curve)
continue
curves.extend(stroke_line_join(curves[-1], curve, linejoin))
curves.append(curve)
# complete subpath if path is closed or add line cap
if closed:
curves.extend(stroke_line_join(curves[-1], curves[0], linejoin))
outputs.append([(curve_names[len(curve)], np.array(curve)) for curve in curves])
curves = []
else:
curves.extend(stroke_line_cap(curves[-1][-1], backward[-1][-1], linecap))
# extend subpath with backward path
while backward:
curve = list(reversed(backward.pop()))
if not curves:
curves.append(curve)
continue
curves.extend(stroke_line_join(curves[-1], curve, linejoin))
curves.append(curve)
# complete subpath
if closed:
curves.extend(stroke_line_join(curves[-1], curves[0], linejoin))
else:
curves.extend(stroke_line_cap(curves[-1][-1], curves[0][0], linecap))
outputs.append([(curve_names[len(curve)], np.array(curve)) for curve in curves])
return Path(outputs)
def transform(self, transform: Transform) -> "Path":
"""Apply transformation to a path
This method is usually not used directly but rather transformation is
passed to mask/fill method.
"""
paths_out = []
for path_in in self.subpaths:
path_out = []
if not path_in:
continue
for cmd, args in path_in:
if cmd == PATH_ARC:
cubics = arc_to_bezier3(*args)
for cubic in transform(cubics):
path_out.append((PATH_CUBIC, cubic.tolist()))
else:
points = transform(np.array(args)).tolist()
path_out.append((cmd, points))
paths_out.append(path_out)
return Path(paths_out)
def to_svg(self) -> str:
"""Convert to SVG path"""
output = io.StringIO()
for path in self.subpaths:
if not path:
continue
cmd_prev = None
for cmd, args in path:
if cmd == PATH_LINE:
(x0, y0), (x1, y1) = args
if cmd_prev != cmd:
if cmd_prev is None:
output.write(f"M{x0:g},{y0:g} ")
else:
output.write("L")
output.write(f"{x1:g},{y1:g} ")
cmd_prev = PATH_LINE
elif cmd == PATH_QUAD:
(x0, y0), (x1, y1), (x2, y2) = args
if cmd_prev != cmd:
if cmd_prev is None:
output.write(f"M{x0:g},{y0:g} ")
output.write("Q")
output.write(f"{x1:g},{y1:g} {x2:g},{y2:g} ")
cmd_prev = PATH_QUAD
elif cmd in {PATH_CUBIC, PATH_ARC}:
if cmd == PATH_ARC:
cubics = arc_to_bezier3(*args)
else:
cubics = [args]
for args in cubics:
(x0, y0), (x1, y1), (x2, y2), (x3, y3) = args
if cmd_prev != cmd:
if cmd_prev is None:
output.write(f"M{x0:g},{y0:g} ")
output.write("C")
output.write(f"{x1:g},{y1:g} {x2:g},{y2:g} {x3:g},{y3:g} ")
cmd_prev = PATH_CUBIC
elif cmd == PATH_CLOSED:
output.write("Z ")
cmd_prev = None
elif cmd == PATH_UNCLOSED:
cmd_prev = None
else:
raise ValueError("unhandled path type: `{cmd}`")
output.write("\n")
return output.getvalue()[:-1]
@staticmethod
def from_svg(input: str) -> "Path":
"""Parse SVG path
For more info see [SVG spec](https://www.w3.org/TR/SVG11/paths.html)
"""
input_len = len(input)
input_offset = 0
WHITESPACE = set(" \t\r\n,")
COMMANDS = set("MmZzLlHhVvCcSsQqTtAa")
def position(is_relative, pos, dst):
return [pos[0] + dst[0], pos[1] + dst[1]] if is_relative else dst
def smooth(points):
px, py = points[-1]
cx, cy = points[-2]
return [px * 2 - cx, py * 2 - cy]
# parser state
paths = []
path = []
args = []
cmd = None
pos = [0.0, 0.0]
        first = True  # true if this is the first command
start = [0.0, 0.0]
smooth_cubic = None
smooth_quad = None
while input_offset <= input_len:
char = input[input_offset] if input_offset < input_len else None
if char in WHITESPACE:
# remove whitespaces
input_offset += 1
elif char is None or char in COMMANDS:
# process current command
cmd_args, args = args, []
if cmd is None:
pass
elif cmd in "Mm":
# terminate current path
if path:
path.append((PATH_UNCLOSED, [pos, start]))
paths.append(path)
path = []
is_relative = cmd == "m"
(move, *lineto) = chunk(cmd_args, 2)
pos = position(is_relative and not first, pos, move)
start = pos
for dst in lineto:
dst = position(is_relative, pos, dst)
path.append((PATH_LINE, [pos, dst]))
pos = dst
# line to
elif cmd in "Ll":
for dst in chunk(cmd_args, 2):
dst = position(cmd == "l", pos, dst)
path.append((PATH_LINE, [pos, dst]))
pos = dst
# vertical line to
elif cmd in "Vv":
if not cmd_args:
raise ValueError(f"command '{cmd}' expects at least one argument")
is_relative = cmd == "v"
for dst in cmd_args:
dst = position(is_relative, pos, [0 if is_relative else pos[0], dst])
path.append((PATH_LINE, [pos, dst]))
pos = dst
# horizontal line to
elif cmd in "Hh":
if not cmd_args:
raise ValueError(f"command '{cmd}' expects at least one argument")
is_relative = cmd == "h"
for dst in cmd_args:
dst = position(is_relative, pos, [dst, 0 if is_relative else pos[1]])
path.append((PATH_LINE, [pos, dst]))
pos = dst
# cubic bezier curve
elif cmd in "Cc":
for points in chunk(cmd_args, 6):
points = [position(cmd == "c", pos, point) for point in chunk(points, 2)]
path.append((PATH_CUBIC, [pos, *points]))
pos = points[-1]
smooth_cubic = smooth(points)
# smooth cubic bezier curve
elif cmd in "Ss":
for points in chunk(cmd_args, 4):
points = [position(cmd == "s", pos, point) for point in chunk(points, 2)]
if smooth_cubic is None:
smooth_cubic = pos
path.append((PATH_CUBIC, [pos, smooth_cubic, *points]))
pos = points[-1]
smooth_cubic = smooth(points)
# quadratic bezier curve
elif cmd in "Qq":
for points in chunk(cmd_args, 4):
points = [position(cmd == "q", pos, point) for point in chunk(points, 2)]
path.append((PATH_QUAD, [pos, *points]))
pos = points[-1]
smooth_quad = smooth(points)
# smooth quadratic bezier curve
elif cmd in "Tt":
for points in chunk(cmd_args, 2):
points = position(cmd == "t", pos, points)
if smooth_quad is None:
smooth_quad = pos
points = [pos, smooth_quad, points]
path.append((PATH_QUAD, points))
pos = points[-1]
smooth_quad = smooth(points)
# elliptical arc
elif cmd in "Aa":
# NOTE: `large_f`, and `sweep_f` are not float but flags which can only be
# 0 or 1 and as the result some svg minimizers merge them with next
# float which may break current parser logic.
for points in chunk(cmd_args, 7):
rx, ry, x_axis_rot, large_f, sweep_f, dst_x, dst_y = points
dst = position(cmd == "a", pos, [dst_x, dst_y])
src, pos = pos, dst
if rx == 0 or ry == 0:
path.append((PATH_LINE, [pos, dst]))
else:
path.append(
(
PATH_ARC,
arc_svg_to_parametric(
src,
dst,
rx,
ry,
x_axis_rot,
large_f > 0.001,
sweep_f > 0.001,
),
)
)
# close current path
elif cmd in "Zz":
if cmd_args:
raise ValueError(f"`z` command does not accept any argmuents: {cmd_args}")
path.append((PATH_CLOSED, [pos, start]))
if path:
paths.append(path)
path = []
pos = start
else:
raise ValueError(f"unsuppported command '{cmd}' at: {input_offset}")
if cmd is not None and cmd not in "CcSs":
smooth_cubic = None
if cmd is not None and cmd not in "QqTt":
smooth_quad = None
first = False
input_offset += 1
cmd = char
else:
# parse float arguments
match = FLOAT_RE.match(input, input_offset)
if match:
match_str = match.group(0)
args.append(float(match_str))
input_offset += len(match_str)
else:
raise ValueError(f"not recognized command '{char}' at: {input_offset}")
if path:
path.append((PATH_UNCLOSED, [pos, start]))
paths.append(path)
return Path(paths)
def is_empty(self):
return not bool(self.subpaths)
def __repr__(self):
if not self.subpaths:
return "EMPTY"
output = io.StringIO()
for subpath in self.subpaths:
for type, coords in subpath:
if type == PATH_LINE:
output.write(f"LINE {repr_coords(coords)}\n")
elif type == PATH_CUBIC:
output.write(f"CUBIC {repr_coords(coords)}\n")
elif type == PATH_QUAD:
output.write(f"QUAD {repr_coords(coords)}\n")
elif type == PATH_ARC:
center, rx, ry, phi, eta, eta_delta = coords
output.write(f"ARC ")
output.write(f"{repr_coords([center])} ")
output.write(f"{rx:.4g} {ry:.4g} ")
output.write(f"{phi:.3g} {eta:.3g} {eta_delta:.3g}\n")
elif type == PATH_CLOSED:
output.write("CLOSE\n")
return output.getvalue()[:-1]
def repr_coords(coords):
return " ".join(f"{x:.4g},{y:.4g}" for x, y in coords)
# offset along tangent to approximate circle with four bezier3 curves
CIRCLE_BEIZER_OFFSET = 4 * (math.sqrt(2) - 1) / 3
def stroke_line_cap(p0, p1, linecap=None):
"""Generate path connecting two curves p0 and p1 with a cap"""
if linecap is None:
linecap = STROKE_CAP_BUTT
if np.allclose(p0, p1):
return []
if linecap == STROKE_CAP_BUTT:
        return [np.array([p0, p1])]
import os
import sys
import colorsys
sys.path.insert(0, './')
import glob
import string
import numpy as np
import pyvista as pv
import tensorflow as tf
from utils import helpers, tf_utils
def rotate_boxes(boxes, centers, theta):
pts_out = np.zeros((boxes.shape[0], 8, 3), np.float32)
for i, (b, c, r) in enumerate(zip(boxes, centers, theta)):
pts_out[i] = helpers.rotate_box(b, c, r)
return pts_out
def plot(pts, colors, labels):
labels_mask = labels.astype(np.bool)[:, 0]
labels = labels[labels_mask]
centers = labels[:, :3]
ext = labels[:, 3:6]
theta = labels[:, 6:8]
boxes_min = centers - (ext / 2)
boxes_max = centers + (ext / 2)
boxes = np.hstack((boxes_min, boxes_max))
obj_pts = rotate_boxes(boxes, centers, theta)
plot = pv.Plotter()
plot.view_xy()
# Remove ceiling
colors = colors[pts[:, 2] < np.max(pts[:, 2])-1.]
pts = pts[pts[:, 2] < np.max(pts[:, 2])-1.]
plot.add_points(pts, scalars=colors, rgb=True, render_points_as_spheres=True, point_size=15)
plot.add_points(labels[:, :3], color=[0, 0, 1], render_points_as_spheres=True, point_size=20)
classes = np.linspace(0, 1, obj_pts.shape[0]+1)
rgb_classes = np.array([colorsys.hsv_to_rgb(c, 0.8, 0.8) for c in classes])
for i, pts in enumerate(obj_pts):
lines = helpers.make_lines(pts)
for l in lines:
plot.add_mesh(l, color=rgb_classes[i], line_width=6)
plot.show()
def create_example(pts, colors, labels):
n_inst = labels.shape[0] if len(labels.shape) > 0 else 0
feature = {
'points' : tf_utils.float_list_feature(pts.reshape(-1, 1)),
'colors' : tf_utils.float_list_feature(colors.reshape(-1, 1)),
'labels' : tf_utils.float_list_feature(labels.reshape(-1, 1)),
'n_inst' : tf_utils.int64_feature(n_inst)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def crop_s3dis():
filelist = glob.glob(os.path.join(config['in_dir'], '*.npy'))
box_size = config['box_size']
overlap = config['overlap']
saved = 0
with tf.io.TFRecordWriter(config['out_train_file']) as train_writer, tf.io.TFRecordWriter(config['out_test_file']) as test_writer:
bar = helpers.progbar(len(filelist))
bar.start()
max_labels = 0
rotations = np.radians(np.array([0, 90, 180, 270])) if config['rotate'] == True else np.array([0.])
for i, f in enumerate(filelist):
bar.update(i+1)
scene = np.load(f)
area = '_'.join(f.split('/')[-1].split('_')[:2])
room = '_'.join(f.split('/')[-1].split('.')[0].split('_')[2:])
area_n = int(f.split('/')[-1].split('_')[1])
object_paths = glob.glob(os.path.join(config['root_dir'], area, room, 'Annotations', '*{}*.npy'.format(config['label_object'])))
objects = np.array([np.load(o_f)[:, :3] for o_f in object_paths])
object_means_orig = np.array([np.mean(o, axis=0) for o in objects])
if object_means_orig.shape[0] == 0: continue
object_thetas_orig, object_extents = helpers.get_oabb(objects)
area = int(f.split('/')[-1].split('_')[1])
scene_extent = [
np.min(scene[:, 0]), np.min(scene[:, 1]), np.min(scene[:, 2]),
np.max(scene[:, 0]), np.max(scene[:, 1]), np.max(scene[:, 2])
]
x_stride_len = box_size[0]
y_stride_len = box_size[1]
num_xstrides = int(np.ceil((scene_extent[3] - scene_extent[0])/box_size[0]))
num_ystrides = int(np.ceil((scene_extent[4] - scene_extent[1])/box_size[1]))
for x_stride in range(num_xstrides):
for y_stride in range(num_ystrides):
bbox = [
scene_extent[0] + (x_stride*x_stride_len) - overlap[0]/2,
scene_extent[1] + (y_stride*y_stride_len) - overlap[0]/2,
-1e10,
scene_extent[0] + ((x_stride*x_stride_len) + x_stride_len) + overlap[0]/2,
scene_extent[1] + ((y_stride*y_stride_len) + y_stride_len) + overlap[0]/2,
1e10
]
scene_crop_orig = helpers.crop_bbox(scene, bbox)
if scene_crop_orig.shape[0] < config['n_pts'] / 2: continue
for angle in rotations:
_, scene_crop = helpers.get_fixed_pts(scene_crop_orig, config['n_pts'])
object_means = object_means_orig.copy()
object_thetas = object_thetas_orig.copy()
scene_crop[:, :3] = helpers.rotate_euler(scene_crop[:, :3], angle)
object_means = helpers.rotate_euler(object_means_orig, angle)
                        radians = np.arctan2(object_thetas[:, 1], object_thetas[:, 0])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# new feature selection for MNIST dataset
# labels (index) as before (no change), see notebook 'data_mnist'
# version data_mnist_comp: max features (150 x 3 = 450)
# the version was extended and used to create data with max features (200 x 3 = 600)
# In[ ]:
import gzip
import numpy as np
import matplotlib.pyplot as plt
import copy
from scipy import ndimage, misc
threshold = 180
num_angles = 230
# In[2]:
# produce a raster (random)
# random seed: inserted only later
np.random.seed(30)
raster = np.zeros((num_angles, 5))
raster[:, 0] = np.random.randint(0, 360, num_angles)
raster[:, 1] = np.random.randint(0, 27, num_angles) # choose a row
raster[:, 2] = np.random.randint(0, 27, num_angles)
raster[:, 3] = np.random.randint(0, 27, num_angles)
raster[:, 4] = np.random.randint(0, 18, num_angles) # initial position (column) for cutting out samples of length 10, between 0 and 18
# In[5]:
# READ AND GET FEATURES TRAINING DATA
f = gzip.open('train-images-idx3-ubyte.gz','r')
num_images = 60000 #number of images to read out
image_size = 28 #image size
f.read(16) #related to position of image
buf = f.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = data.reshape(num_images, image_size, image_size, 1)
res = np.zeros((num_images, num_angles * 3, 10))
res_2 = np.zeros((num_images, num_angles * 3))
# -*- coding: utf-8 -*-
'''
Commonly used metrics for evaluating saliency map performance.
Most metrics are ported from Matlab implementation provided by http://saliency.mit.edu/
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (n.d.). MIT Saliency Benchmark.
Python implementation: Chencan Qian, Sep 2014
'''
from functools import partial
import numpy as np
from numpy import random
from skimage import exposure, img_as_float
from skimage.transform import resize
try:
from cv2 import cv
except ImportError:
print('please install Python binding of OpenCV to compute EMD')
EPS = 2.2204e-16
def normalize(x, method='standard', axis=None):
x = np.array(x, copy=True)
if axis is not None:
y = np.rollaxis(x, axis).reshape([x.shape[axis], -1])
shape = np.ones(len(x.shape))
shape[axis] = x.shape[axis]
if method == 'standard':
res = (x - np.mean(y, axis=1).reshape(shape)) / np.std(y, axis=1).reshape(shape)
elif method == 'range':
res = (x - np.min(y, axis=1).reshape(shape)) / (np.max(y, axis=1) - np.min(y, axis=1)).reshape(shape)
elif method == 'sum':
res = x / np.float_(np.sum(y, axis=1).reshape(shape))
else:
raise ValueError('method not in {"standard", "range", "sum"}')
else:
if method == 'standard':
res = (x - np.mean(x)) / np.std(x)
elif method == 'range':
res = (x - np.min(x)) / (np.max(x) - np.min(x))
elif method == 'sum':
res = x / float(np.sum(x))
else:
raise ValueError('method not in {"standard", "range", "sum"}')
return res
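# Usage sketch for `normalize`: the three supported methods applied to a
# random saliency map.
def _normalize_example():
    s_map = random.rand(8, 8)
    z = normalize(s_map, method='standard')  # zero mean, unit std
    r = normalize(s_map, method='range')     # values rescaled to [0, 1]
    p = normalize(s_map, method='sum')       # values sum to 1
    return z, r, p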
def match_hist(image, cdf, bin_centers, nbins=256):
'''Modify pixels of input image so that its histogram matches target image histogram, specified by:
cdf, bin_centers = cumulative_distribution(target_image)
Parameters
----------
image : array
Image to be transformed.
cdf : 1D array
Values of cumulative distribution function of the target histogram.
bin_centers ; 1D array
Centers of bins of the target histogram.
nbins : int, optional
Number of bins for image histogram.
Returns
-------
out : float array
Image array after histogram matching.
References
----------
[1] Matlab implementation histoMatch(MTX, N, X) by Simoncelli, 7/96.
'''
image = img_as_float(image)
old_cdf, old_bin = exposure.cumulative_distribution(image, nbins) # Unlike [1], we didn't add small positive number to the histogram
new_bin = np.interp(old_cdf, cdf, bin_centers)
out = np.interp(image.ravel(), old_bin, new_bin)
return out.reshape(image.shape)
def AUC_Judd(saliency_map, fixation_map, jitter=True):
s_map = np.array(saliency_map, copy=True)
f_map = np.array(fixation_map, copy=True) > 0.5
# If there are no fixation to predict, return NaN
if not np.any(f_map):
print('no fixation to predict')
return np.nan
# Make the saliency_map the size of the fixation_map
if s_map.shape != f_map.shape:
s_map = resize(s_map, f_map.shape, order=3, mode='nearest')
# Jitter the saliency map slightly to disrupt ties of the same saliency value
if jitter:
s_map += random.rand(*s_map.shape) * 1e-7
# Normalize saliency map to have values between [0,1]
s_map = normalize(s_map, method='range')
S = s_map.ravel()
F = f_map.ravel()
S_fix = S[F] # Saliency map values at fixation locations
n_fix = len(S_fix)
n_pixels = len(S)
# Calculate AUC
thresholds = sorted(S_fix, reverse=True)
tp = np.zeros(len(thresholds)+2)
fp = np.zeros(len(thresholds)+2)
tp[0] = 0; tp[-1] = 1
fp[0] = 0; fp[-1] = 1
for k, thresh in enumerate(thresholds):
above_th = np.sum(S >= thresh) # Total number of saliency map values above threshold
tp[k+1] = (k + 1) / float(n_fix) # Ratio saliency map values at fixation locations above threshold
fp[k+1] = (above_th - k - 1) / float(n_pixels - n_fix) # Ratio other saliency map values above threshold
return np.trapz(tp, fp) # y, x
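# Minimal sketch of scoring a random saliency map against a few random
# fixation locations with AUC_Judd (values are synthetic, for illustration).
def _auc_judd_example():
    saliency_map = random.rand(32, 32)
    fixation_map = np.zeros((32, 32))
    fixation_map[random.randint(0, 32, 5), random.randint(0, 32, 5)] = 1
    return AUC_Judd(saliency_map, fixation_map)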
def AUC_Borji(saliency_map, fixation_map, n_rep=100, step_size=0.1, rand_sampler=None):
s_map = np.array(saliency_map, copy=True)
f_map = np.array(fixation_map, copy=True) > 0.5
# If there are no fixation to predict, return NaN
if not np.any(f_map):
print('no fixation to predict')
return np.nan
# Make the saliency_map the size of the fixation_map
if s_map.shape != f_map.shape:
s_map = resize(s_map, f_map.shape, order=3, mode='nearest')
# Normalize saliency map to have values between [0,1]
s_map = normalize(s_map, method='range')
S = s_map.ravel()
F = f_map.ravel()
S_fix = S[F] # Saliency map values at fixation locations
n_fix = len(S_fix)
n_pixels = len(S)
# For each fixation, sample n_rep values from anywhere on the saliency map
if rand_sampler is None:
r = random.randint(0, n_pixels, [n_fix, n_rep])
S_rand = S[r] # Saliency map values at random locations (including fixated locations!? underestimated)
else:
S_rand = rand_sampler(S, F, n_rep, n_fix)
# Calculate AUC per random split (set of random locations)
auc = np.zeros(n_rep) * np.nan
for rep in range(n_rep):
thresholds = np.r_[0:np.max(np.r_[S_fix, S_rand[:,rep]]):step_size][::-1]
tp = np.zeros(len(thresholds)+2)
fp = np.zeros(len(thresholds)+2)
tp[0] = 0; tp[-1] = 1
fp[0] = 0; fp[-1] = 1
for k, thresh in enumerate(thresholds):
tp[k+1] = np.sum(S_fix >= thresh) / float(n_fix)
fp[k+1] = np.sum(S_rand[:,rep] >= thresh) / float(n_fix)
auc[rep] = np.trapz(tp, fp)
return np.mean(auc) # Average across random splits
def AUC_shuffled(saliency_map, fixation_map, other_map, n_rep=100, step_size=0.1):
s_map = np.array(saliency_map, copy=True)
f_map = np.array(fixation_map, copy=True) > 0.5
o_map = np.array(other_map, copy=True) > 0.5
if other_map.shape != fixation_map.shape:
raise ValueError('other_map.shape != fixation_map.shape')
if not np.any(f_map):
print('no fixation to predict')
return np.nan
if s_map.shape != f_map.shape:
s_map = resize(s_map, f_map.shape, order=3, mode='nearest')
s_map = normalize(s_map, method='range')
S = s_map.ravel()
F = f_map.ravel()
Oth = o_map.ravel()
S_fix = S[F] # Saliency map values at fixation locations
n_fix = len(S_fix)
ind = np.nonzero(Oth)[0]
n_ind = len(ind)
n_fix_oth = min(n_fix,n_ind)
r = random.randint(0, n_ind, [n_ind, n_rep])[:n_fix_oth,:]
S_rand = S[ind[r]]
auc = np.zeros(n_rep) * np.nan
for rep in range(n_rep):
thresholds = np.r_[0:np.max(np.r_[S_fix, S_rand[:,rep]]):step_size][::-1]
tp = np.zeros(len(thresholds)+2)
fp = np.zeros(len(thresholds)+2)
tp[0] = 0; tp[-1] = 1
fp[0] = 0; fp[-1] = 1
for k, thresh in enumerate(thresholds):
tp[k+1] = np.sum(S_fix >= thresh) / float(n_fix)
fp[k+1] = np.sum(S_rand[:,rep] >= thresh) / float(n_fix_oth)
auc[rep] = np.trapz(tp, fp)
return np.mean(auc)
def NSS(saliency_map, fixation_map):
s_map = np.array(saliency_map, copy=True)
f_map = np.array(fixation_map, copy=True) > 0.5
if s_map.shape != f_map.shape:
s_map = resize(s_map, f_map.shape)
# Normalize saliency map to have zero mean and unit std
s_map = normalize(s_map, method='standard')
# Mean saliency value at fixation locations
return np.mean(s_map[f_map])
def KLD(saliency_map1, saliency_map2):
map1 = np.array(saliency_map1, copy=True)
map2 = np.array(saliency_map2, copy=True)
if map1.shape != map2.shape:
map1 = resize(map1, map2.shape, order=3, mode='nearest') # bi-cubic/nearest is what Matlab imresize() does by default
# Normalize the two maps to have zero mean and unit std
map1 = normalize(map1, method='sum')
map2 = normalize(map2, method='sum')
return np.sum(map2 * np.log(EPS + map2 / (map1+EPS)))
def CC(saliency_map1, saliency_map2):
map1 = np.array(saliency_map1, copy=True)
map2 = np.array(saliency_map2, copy=True)
if map1.shape != map2.shape:
map1 = resize(map1, map2.shape, order=3, mode='nearest') # bi-cubic/nearest is what Matlab imresize() does by default
# Normalize the two maps to have zero mean and unit std
map1 = normalize(map1, method='standard')
map2 = normalize(map2, method='standard')
# Compute correlation coefficient
return np.corrcoef(map1.ravel(), map2.ravel())[0,1]
def SIM(saliency_map1, saliency_map2):
map1 = np.array(saliency_map1, copy=True)
map2 = np.array(saliency_map2, copy=True)
if map1.shape != map2.shape:
map1 = resize(map1, map2.shape, order=3, mode='nearest') # bi-cubic/nearest is what Matlab imresize() does by default
# Normalize the two maps to have values between [0,1] and sum up to 1
map1 = normalize(map1, method='range')
map2 = normalize(map2, method='range')
map1 = normalize(map1, method='sum')
map2 = normalize(map2, method='sum')
# Compute histogram intersection
intersection = np.minimum(map1, map2)
return np.sum(intersection)
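# Sketch comparing two saliency maps with the distribution-based metrics
# defined above; identical maps give CC = 1 and SIM = 1.
def _similarity_metrics_example():
    map1 = random.rand(16, 16)
    map2 = map1.copy()
    return CC(map1, map2), SIM(map1, map2), KLD(map1, map2)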
def EMD(saliency_map1, saliency_map2, sub_sample=1/32.0):
map2 = np.array(saliency_map2, copy=True)
# Reduce image size for efficiency of calculation
map2 = resize(map2, np.round(np.array(map2.shape)*sub_sample), order=3, mode='nearest')
map1 = resize(saliency_map1, map2.shape, order=3, mode='nearest')
# Histogram match the images so they have the same mass
map1 = match_hist(map1, *exposure.cumulative_distribution(map2))
# Normalize the two maps to sum up to 1,
# so that the score is independent of the starting amount of mass / spread of fixations of the fixation map
map1 = normalize(map1, method='sum')
map2 = normalize(map2, method='sum')
# Compute EMD with OpenCV
# - http://docs.opencv.org/modules/imgproc/doc/histograms.html#emd
# - http://stackoverflow.com/questions/5101004/python-code-for-earth-movers-distance
# - http://stackoverflow.com/questions/12535715/set-type-for-fromarray-in-opencv-for-python
r, c = map2.shape
x, y = np.meshgrid(range(c), range(r))
signature1 = cv.CreateMat(r*c, 3, cv.CV_32FC1)
signature2 = cv.CreateMat(r*c, 3, cv.CV_32FC1)
cv.Convert(cv.fromarray(np.c_[map1.ravel(), x.ravel(), y.ravel()]), signature1)
cv.Convert(cv.fromarray(np.c_[map2.ravel(), x.ravel(), y.ravel()]), signature2)
return cv.CalcEMD2(signature2, signature1, cv.CV_DIST_L2)
def InfoGain(saliencyMap, fixationMap, baselineMap):
map1 = np.array(saliencyMap, copy=True)
mapb = np.array(baselineMap, copy=True)
map1 = resize(map1, fixationMap.shape, order=3, mode='nearest')
mapb = resize(mapb, fixationMap.shape, order=3, mode='nearest')
map1 = normalize(map1, method='range')
mapb = normalize(mapb, method='range')
map1 = normalize(map1, method='sum')
mapb = normalize(mapb, method='sum')
    locs = np.array(fixationMap, copy=True)
import os
import sys
from itertools import cycle
import h5py
import numpy as np
from keras.models import Model, load_model
from keras.layers import Convolution2D, Deconvolution2D, Input, Reshape, Flatten, Activation, merge
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# Total width and height of the wrapped area used
# as input to the convolutional network.
WIDTH = 50
HEIGHT = 50
# How many frames to take into account in each batch.
BATCH_SIZE = 256
# Fraction of data sample used for validation.
VALIDATION_SPLIT = 0.3
# How many previous frames to use as input.
LOOKBACK = 0
# For reproducibility.
np.random.seed(0)
def gated_unit(x):
'''A single layer of the convolutional network
using a gated activation unit.'''
c = Convolution2D(8, 3, 3, border_mode='same')(x)
s = Activation('sigmoid')(Convolution2D(8, 1, 1)(c))
t = Activation('tanh')(Convolution2D(8, 1, 1)(c))
m = merge([s, t], mode='mul')
residual = Convolution2D(8, 1, 1, activation='relu')(m)
skip = Convolution2D(8, 1, 1, activation='relu')(m)
return residual, skip
def create_model():
'''Returns the complete Keras model.'''
input_batch = Input(shape=(WIDTH, HEIGHT, 4 + 3 * LOOKBACK))
x = Convolution2D(8, 1, 1, activation='relu')(input_batch)
skipped = []
for i in range(8):
x, skip = gated_unit(x)
skipped.append(skip)
out1 = merge(skipped, mode='sum')
out2 = Convolution2D(8, 1, 1)(out1)
out3 = Convolution2D(5, 1, 1)(out2)
output = Reshape((WIDTH, HEIGHT, 5))(Activation('softmax')(Reshape((WIDTH * HEIGHT, 5))(out3)))
model = Model(input=input_batch, output=output)
model.compile('nadam', 'categorical_crossentropy', metrics=['accuracy'])
return model
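# Usage sketch (synthetic input): build the network and run a single all-zero
# frame through it to check the per-pixel move distribution shape.
def _model_example():
    model = create_model()
    frame = np.zeros((1, WIDTH, HEIGHT, 4 + 3 * LOOKBACK))
    return model.predict(frame).shape  # (1, WIDTH, HEIGHT, 5)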
def prepare_data(group):
'''Preprocess replay data so that it can be used
as input and target of the network.'''
# Copy data from file and transform
player = group['player'][:]
strength = group['strength'][:] / 255
production = group['production'][:] / 20
moves = group['moves'][:]
n_frames = len(player)
# Find the winner (the player with most territory at the end)
players, counts = np.unique(player[-1], return_counts=True)
winner_id = players[counts.argmax()]
if winner_id == 0:
return None
# Broadcast production array to each time frame
production = np.repeat(production[np.newaxis], n_frames, axis=0)
production = production[:,:,:,np.newaxis]
is_winner = player == winner_id
is_loser = (player != winner_id) & (player != 0)
batch = np.array([is_winner, is_loser, strength])
batch = np.transpose(batch, (1, 2, 3, 0))
lookback = []
for i in range(1, LOOKBACK + 1):
back = np.pad(batch[:-i], ((i, 0), (0, 0), (0, 0), (0, 0)), mode='edge')
lookback.append(back)
batch = np.concatenate([batch] + lookback + [production], axis=3)
# One-hot encode the moves
moves = np.eye(5)[np.array(moves)]
nb, nx, ny, nc = np.shape(batch)
if nx > WIDTH or ny > HEIGHT:
# We don't want to work with maps larger than this
return None
pad_x = int((WIDTH - nx) / 2)
extra_x = int(WIDTH - nx - 2 * pad_x)
pad_y = int((HEIGHT - ny) / 2)
extra_y = int(HEIGHT - ny - 2 * pad_y)
batch = np.pad(batch, ((0, 0), (pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)), 'wrap')
moves = np.pad(moves, ((0, 0), (pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)), 'wrap')
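    # Worked example of the padding arithmetic above: for nx = 33,
    # pad_x = int(17/2) = 8 and extra_x = 1, so the map is wrap-padded by 8
    # cells on one side and 9 on the other; together with pad_y this brings
    # every map to exactly WIDTH x HEIGHT.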
# Only moves for the winning player have to be predicted.
# If all entries are zero, this pixel won't contribute to
# the loss.
moves[batch[:,:,:,0] == 0] = 0
return batch, moves
def load_data(games):
    '''Generator that yields batches of at least BATCH_SIZE
    frames from the specified games.'''
xs = []
ys = []
size = 0
for g in cycle(games):
out = prepare_data(f[g])
if out is None:
continue
X, y = out
size += len(X)
xs.append(X)
ys.append(y)
if size >= BATCH_SIZE:
x_ = np.concatenate(xs, axis=0)
y_ = | np.concatenate(ys, axis=0) | numpy.concatenate |
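# Hedged sketch (not part of the original file): one plausible way the model,
# the data generator and the callbacks imported above could be combined, using
# the Keras 1.x fit_generator API. It assumes load_data() goes on to yield
# (inputs, targets) batches after the truncation above and that the h5py file
# `f` it reads from is already open; the function and file names are
# illustrative assumptions.
def train_sketch(games, samples_per_epoch, nb_epoch=50):
    model = create_model()
    callbacks = [
        EarlyStopping(monitor='loss', patience=5),
        ModelCheckpoint('model.h5', monitor='loss', save_best_only=True),
        ReduceLROnPlateau(monitor='loss', factor=0.5, patience=2),
    ]
    model.fit_generator(load_data(games),
                        samples_per_epoch=samples_per_epoch,
                        nb_epoch=nb_epoch,
                        callbacks=callbacks)
    return model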
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
import numpy as np
from collections import deque
from math import sqrt
class RollingStandardDeviation:
def __init__(self):
self.K = 0
self.n = 0
self.ex = 0
self.ex2 = 0
def add_variable(self, x):
if | np.isnan(x) | numpy.isnan |
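# Hedged sketch (not the original continuation, which is cut off above): the
# K / n / ex / ex2 attributes suggest the standard "shifted data" streaming
# variance algorithm; a complete version might look like this, reusing the
# numpy and sqrt imports above. Class and method names are assumptions.
class ShiftedDataStd:
    def __init__(self):
        self.K = 0.0    # shift value (first finite sample seen)
        self.n = 0      # number of samples accumulated
        self.ex = 0.0   # running sum of (x - K)
        self.ex2 = 0.0  # running sum of (x - K)**2

    def add_variable(self, x):
        if np.isnan(x):
            return
        if self.n == 0:
            self.K = x
        self.n += 1
        self.ex += x - self.K
        self.ex2 += (x - self.K) ** 2

    def standard_deviation(self):
        if self.n < 2:
            return 0.0
        variance = (self.ex2 - self.ex ** 2 / self.n) / (self.n - 1)
        return sqrt(max(variance, 0.0))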
import pytest
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
from time_series_experiments.pipeline.tasks import (
Wrap,
TaskData,
OrdCat,
OneHot,
DateFeatures,
TargetLag,
)
from time_series_experiments.pipeline.data import take_columns, ColumnType
from time_series_experiments.pipeline.dataset import VarType
def test_imputer_wrapper():
x = np.random.random((1000, 1))
nans = np.random.choice(x.shape[0], size=100)
x[nans] = np.nan
data = TaskData(X=x, column_names=["x"], column_types=[0])
task = Wrap(SimpleImputer(strategy="constant", fill_value=-1))
res = task.fit_transform(data)
assert np.unique(res.X[nans]).shape[0] == 1
assert np.unique(res.X[nans])[0] == -1
task = Wrap(SimpleImputer(strategy="mean"))
res = task.fit_transform(data)
assert np.unique(res.X[nans]).shape[0] == 1
assert np.isclose(np.unique(res.X[nans])[0], np.mean(x[~np.isnan(x)]))
task = Wrap(SimpleImputer(strategy="median", add_indicator=True))
res = task.fit_transform(data)
assert res.X.shape[1] == 2
assert np.all(np.isclose(np.unique(res.X[:, 1][nans]), np.array([1])))
assert np.isclose(np.unique(res.X[:, 0][nans])[0], np.median(x[~np.isnan(x)]))
def test_imputer_wrapper_multiple_cols():
xs = []
for i in range(3):
x = np.random.random((1000, 1))
nans = np.random.choice(x.shape[0], size=100)
x[nans] = np.nan
xs.append(x)
x = np.concatenate(xs, axis=1)
data = TaskData(X=x, column_names=["x1", "x2", "x3"], column_types=[0])
task = Wrap(SimpleImputer(strategy="median", add_indicator=True))
res = task.fit_transform(data)
assert res.X.shape[1] == 6
assert res.column_names == ["SimpleImputer-{}".format(i) for i in range(6)]
@pytest.mark.parametrize("use_other", [True, False])
def test_ordcat_task(use_other):
x1 = np.random.choice(["a", "b", "c"], size=1000)
x2 = np.random.choice(["1", "2", "3", "4", "5", "6"], size=1000)
x = np.hstack([np.reshape(x1, (-1, 1)), np.reshape(x2, (-1, 1))])
data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
task = OrdCat(min_support=0, use_other=use_other, handle_unknown="error")
res = task.fit_transform(data)
assert res.column_names == ["x1", "x2"]
assert res.column_types == [
ColumnType(VarType.CAT, level=5 if use_other else 4),
ColumnType(VarType.CAT, level=8 if use_other else 7),
]
expected = OrdinalEncoder().fit_transform(data.X)
if use_other:
expected = expected + 2
else:
expected = expected + 1
assert np.all(np.isclose(res.X, expected))
def test_ordcat_task_handle_unknown():
x1 = np.random.choice(["a", "b", "c"], size=1000)
x2 = np.random.choice(["1", "2", "3", "4", "5", "6"], size=1000)
x = np.hstack([np.reshape(x1, (-1, 1)), np.reshape(x2, (-1, 1))])
data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
task = OrdCat(min_support=0, use_other=False, handle_unknown="missing")
res = task.fit_transform(data)
assert res.column_names == ["x1", "x2"]
assert res.column_types == [
ColumnType(VarType.CAT, level=4),
ColumnType(VarType.CAT, level=7),
]
expected = OrdinalEncoder().fit_transform(data.X)
expected = expected + 1
assert np.all(np.isclose(res.X, expected))
# transform with new categories
x1 = np.random.choice(["a", "c", "d"], size=1000)
x2 = np.random.choice(["2", "3", "5", "6", "7"], size=1000)
x = np.hstack([np.reshape(x1, (-1, 1)), np.reshape(x2, (-1, 1))])
new_data = TaskData(
X=x,
column_names=["x1", "x2"],
column_types=[ColumnType(VarType.NUM), ColumnType(VarType.NUM)],
)
res = task.transform(new_data)
mask = x1 == "d"
results = res.X[:, 0][mask]
assert | np.unique(results) | numpy.unique |
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
    # Occlusion colours and segmentation granularities for which explanations
    # of the classes above are generated
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
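# Expected image explanations: one dict per (granularity, occlusion colour)
# combination, in the same order as the generation loops in test_bilmey_image;
# the integer keys are the ImageNet class ids listed in explain_classes.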
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
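# Expected tabular explanations, keyed as
# '<instance name>&<explained class index>&<discretisation index>'
# to match the lookup in test_bilmey_tabular above.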
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': | np.array([-0.4329463382004908, -0.057167210150691136]) | numpy.array |
import os
from dataclasses import dataclass
import datetime
import tempfile
import warnings
import isce3
import numpy as np
from osgeo import gdal
# Other functionalities
def compute_az_carrier(burst, orbit, offset, position):
'''
    Estimate azimuth carrier and store in a numpy array
Parameters
----------
burst: Sentinel1BurstSlc
Sentinel1 burst object
orbit: isce3.core.Orbit
Sentinel1 orbit ephemerides
offset: float
Offset between reference and secondary burst
position: tuple
Tuple of locations along y and x directions
Returns
-------
carr: np.ndarray
Azimuth carrier
'''
# Get burst sensing mid relative to orbit reference epoch
fmt = "%Y-%m-%dT%H:%M:%S.%f"
orbit_ref_epoch = datetime.datetime.strptime(orbit.reference_epoch.__str__()[:-3], fmt)
t_mid = burst.sensing_mid - orbit_ref_epoch
_, v = orbit.interpolate(t_mid.total_seconds())
vs = np.linalg.norm(v)
ks = 2 * vs * burst.azimuth_steer_rate / burst.wavelength
y, x = position
n_lines, _ = burst.shape
eta = (y - (n_lines // 2) + offset) * burst.azimuth_time_interval
rng = burst.starting_range + x * burst.range_pixel_spacing
f_etac = np.array(
burst.doppler.poly1d.eval(rng.flatten().tolist())).reshape(rng.shape)
ka = np.array(
burst.azimuth_fm_rate.eval(rng.flatten().tolist())).reshape(rng.shape)
eta_ref = (burst.doppler.poly1d.eval(
burst.starting_range) / burst.azimuth_fm_rate.eval(
burst.starting_range)) - (f_etac / ka)
kt = ks / (1.0 - ks / ka)
carr = np.pi * kt * ((eta - eta_ref) ** 2)
return carr
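# Illustrative sketch, not part of the original module: the carrier computed by
# compute_az_carrier() follows carr = pi * kt * (eta - eta_ref)**2 with
# ks = 2 * vs * steer_rate / wavelength and kt = ks / (1 - ks / ka). The helper
# below reproduces that arithmetic with made-up scalar values (all numbers are
# assumptions, not real Sentinel-1 metadata) so the relationship between the
# Doppler rates and the carrier phase can be checked in isolation.
def _example_az_carrier_math():
    wavelength = 0.0556      # [m], assumed C-band wavelength
    vs = 7200.0              # [m/s], assumed platform speed
    steer_rate = 0.0276      # [rad/s], assumed azimuth steering rate
    f_etac0, ka0 = 35.0, -2310.0  # assumed Doppler centroid / FM rate at starting range
    f_etac, ka = 40.0, -2300.0    # assumed Doppler centroid / FM rate at the pixel range
    eta = np.linspace(-1.0, 1.0, 5)  # zero-Doppler azimuth times [s]
    ks = 2 * vs * steer_rate / wavelength
    kt = ks / (1.0 - ks / ka)
    eta_ref = (f_etac0 / ka0) - (f_etac / ka)
    return np.pi * kt * (eta - eta_ref) ** 2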
def polyfit(xin, yin, zin, azimuth_order, range_order,
sig=None, snr=None, cond=1.0e-12,
max_order=True):
"""
Fit 2-D polynomial
Parameters:
xin: np.ndarray
Array locations along x direction
yin: np.ndarray
Array locations along y direction
    zin: np.ndarray
        Values to fit at each (x, y) location
    azimuth_order: int
        Azimuth polynomial order
    range_order: int
        Slant range polynomial order
    sig: np.ndarray, optional
        Per-sample uncertainty; converted internally to a signal-to-noise
        ratio as snr = 1 + 1 / sig. Mutually exclusive with snr.
    snr: np.ndarray, optional
        Per-sample signal-to-noise ratio used to weight the least-squares fit
    cond: float
        Cutoff ratio for small singular values passed to np.linalg.lstsq
        (its rcond argument)
    max_order: bool
        If True, keep only polynomial terms whose combined order does not
        exceed max(azimuth_order, range_order)
Returns:
    poly: isce3.core.Poly2d
        Polynomial object representing a function of range 'x' and
        azimuth 'y'
"""
x = np.array(xin)
xmin = np.min(x)
xnorm = np.max(x) - xmin
if xnorm == 0:
xnorm = 1.0
x = (x - xmin) / xnorm
y = np.array(yin)
ymin = np.min(y)
ynorm = np.max(y) - ymin
if ynorm == 0:
ynorm = 1.0
y = (y - ymin) / ynorm
z = np.array(zin)
big_order = max(azimuth_order, range_order)
arr_list = []
for ii in range(azimuth_order + 1):
yfact = np.power(y, ii)
for jj in range(range_order + 1):
xfact = np.power(x, jj) * yfact
if max_order:
if ((ii + jj) <= big_order):
arr_list.append(xfact.reshape((x.size, 1)))
else:
arr_list.append(xfact.reshape((x.size, 1)))
A = np.hstack(arr_list)
if sig is not None and snr is not None:
raise Exception('Only one of sig / snr can be provided')
if sig is not None:
snr = 1.0 + 1.0 / sig
if snr is not None:
A = A / snr[:, None]
z = z / snr
    val, res, _, _ = np.linalg.lstsq(A, z, rcond=cond)
    if len(res) > 0:
        # res holds the summed squared residuals when the design matrix is full rank
        print('Chi squared: %f' % np.sqrt(res[0] / (1.0 * len(z))))
    else:
        print('No chi squared value....')
        print('Try reducing rank of polynomial.')
coeffs = []
count = 0
for ii in range(azimuth_order + 1):
row = []
for jj in range(range_order + 1):
if max_order:
if (ii + jj) <= big_order:
row.append(val[count])
count = count + 1
else:
row.append(0.0)
else:
row.append(val[count])
count = count + 1
coeffs.append(row)
poly = isce3.core.Poly2d(coeffs, xmin, ymin, xnorm, ynorm)
return poly
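def _demo_normalized_polyfit():
    """
    Illustrative sketch (added; not part of the original module): the same
    least-squares design-matrix idea used by `polyfit` above, written with
    plain numpy so it can run without isce3. The coordinate normalization is
    omitted for brevity, and all names and values here are made up.
    """
    rng = np.random.default_rng(0)
    x_pts = rng.uniform(0.0, 100.0, 200)
    y_pts = rng.uniform(0.0, 50.0, 200)
    # synthetic surface z = 1 + 0.5*x - 0.25*y + 0.01*x*y
    z_pts = 1.0 + 0.5 * x_pts - 0.25 * y_pts + 0.01 * x_pts * y_pts
    # monomials kept by polyfit for azimuth_order = range_order = 1, max_order=False
    design = np.column_stack([np.ones_like(x_pts), x_pts, y_pts, x_pts * y_pts])
    coeffs, *_ = np.linalg.lstsq(design, z_pts, rcond=None)
    assert np.allclose(coeffs, [1.0, 0.5, -0.25, 0.01])
    return coeffs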
@dataclass(frozen=True)
class Doppler:
poly1d: isce3.core.Poly1d
lut2d: isce3.core.LUT2d
@dataclass(frozen=True)
class Sentinel1BurstSlc:
'''Raw values extracted from SAFE XML.
'''
sensing_start: datetime.datetime
radar_center_frequency: float
wavelength: float
azimuth_steer_rate: float
azimuth_time_interval: float
slant_range_time: float
starting_range: float
iw2_mid_range: float
range_sampling_rate: float
range_pixel_spacing: float
    shape: tuple
azimuth_fm_rate: isce3.core.Poly1d
doppler: Doppler
range_bandwidth: float
polarization: str # {VV, VH, HH, HV}
burst_id: str # t{track_number}_iw{1,2,3}_b{burst_index}
platform_id: str # S1{A,B}
center: tuple # {center lon, center lat} in degrees
border: list # list of lon, lat coordinate tuples (in degrees) representing burst border
orbit: isce3.core.Orbit
orbit_direction: str
# VRT params
tiff_path: str # path to measurement tiff in SAFE/zip
i_burst: int
first_valid_sample: int
last_valid_sample: int
first_valid_line: int
last_valid_line: int
# window parameters
range_window_type: str
range_window_coefficient: float
rank: int # The number of PRI between transmitted pulse and return echo.
prf_raw_data: float # Pulse repetition frequency (PRF) of the raw data [Hz]
def as_isce3_radargrid(self):
'''Init and return isce3.product.RadarGridParameters.
Returns:
--------
_ : RadarGridParameters
RadarGridParameters constructed from class members.
'''
prf = 1 / self.azimuth_time_interval
length, width = self.shape
time_delta = datetime.timedelta(days=2)
ref_epoch = isce3.core.DateTime(self.sensing_start - time_delta)
# sensing start with respect to reference epoch
sensing_start = time_delta.total_seconds()
# init radar grid
return isce3.product.RadarGridParameters(sensing_start,
self.wavelength,
prf,
self.starting_range,
self.range_pixel_spacing,
isce3.core.LookSide.Right,
length,
width,
ref_epoch)
def slc_to_file(self, out_path: str, fmt: str = 'ENVI'):
'''Write burst to GTiff file.
Parameters:
-----------
out_path : string
Path of output GTiff file.
'''
if not self.tiff_path:
            warn_str = 'Unable to write SLC to file. Burst does not contain image data; only metadata.'
warnings.warn(warn_str)
return
# get output directory of out_path
dst_dir, _ = os.path.split(out_path)
# create VRT; make temporary if output not VRT
if fmt != 'VRT':
temp_vrt = tempfile.NamedTemporaryFile(dir=dst_dir)
vrt_fname = temp_vrt.name
else:
vrt_fname = out_path
self.slc_to_vrt_file(vrt_fname)
if fmt == 'VRT':
return
# open temporary VRT and translate to GTiff
src_ds = gdal.Open(vrt_fname)
gdal.Translate(out_path, src_ds, format=fmt)
# clean up
src_ds = None
def slc_to_vrt_file(self, out_path):
'''Write burst to VRT file.
Parameters:
-----------
out_path : string
Path of output VRT file.
'''
if not self.tiff_path:
            warn_str = 'Unable to write SLC to file. Burst does not contain image data; only metadata.'
warnings.warn(warn_str)
return
line_offset = self.i_burst * self.shape[0]
inwidth = self.last_valid_sample - self.first_valid_sample + 1
inlength = self.last_valid_line - self.first_valid_line + 1
outlength, outwidth = self.shape
yoffset = line_offset + self.first_valid_line
localyoffset = self.first_valid_line
xoffset = self.first_valid_sample
gdal_obj = gdal.Open(self.tiff_path, gdal.GA_ReadOnly)
fullwidth = gdal_obj.RasterXSize
fulllength = gdal_obj.RasterYSize
# TODO maybe cleaner to write with ElementTree
tmpl = f'''<VRTDataset rasterXSize="{outwidth}" rasterYSize="{outlength}">
<VRTRasterBand dataType="CFloat32" band="1">
<NoDataValue>0.0</NoDataValue>
<SimpleSource>
<SourceFilename relativeToVRT="1">{self.tiff_path}</SourceFilename>
<SourceBand>1</SourceBand>
<SourceProperties RasterXSize="{fullwidth}" RasterYSize="{fulllength}" DataType="CInt16"/>
<SrcRect xOff="{xoffset}" yOff="{yoffset}" xSize="{inwidth}" ySize="{inlength}"/>
<DstRect xOff="{xoffset}" yOff="{localyoffset}" xSize="{inwidth}" ySize="{inlength}"/>
</SimpleSource>
</VRTRasterBand>
</VRTDataset>'''
with open(out_path, 'w') as fid:
fid.write(tmpl)
def get_az_carrier_poly(self, offset=0.0, xstep=500, ystep=50,
az_order=5, rg_order=3, index_as_coord=False):
"""
        Estimate burst azimuth carrier polynomials
Parameters
----------
offset: float
Offset between reference and secondary bursts
xstep: int
Spacing along x direction
ystep: int
Spacing along y direction
az_order: int
Azimuth polynomial order
rg_order: int
Slant range polynomial order
index_as_coord: bool
If true, polyfit with az/range indices. Else, polyfit with az/range.
Returns
-------
poly: isce3.core.Poly2D
class represents a polynomial function of range
'x' and azimuth 'y'
"""
rdr_grid = self.as_isce3_radargrid()
lines, samples = self.shape
        x = np.arange(0, samples, xstep, dtype=int)
import h5py
import pickle
import numpy as np
def load_weights():
    fff = h5py.File('Mybase/mask_rcnn_coco.h5', 'r')  # open the h5 file
    #print(list(fff.keys()))
mydict = {}
mydict['global_step:0'] = 1000
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
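    # Note (added comment): the assignments above fold the conv bias `c` and the
    # batch-norm statistics into a single effective shift,
    #     h = gamma * (bias - moving_mean) / moving_variance + beta,
    # and the same conversion is repeated for every layer below (a conventional
    # batch-norm fold would divide by sqrt(moving_variance + eps) rather than the
    # raw variance).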
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
    g = np.array(a['moving_variance:0'], dtype=np.float32)
import copy
import os
import json
import importlib
from typing import List
from typing import Dict
from typing import Union
import numpy as np
from joblib import Parallel, delayed
from prettytable import PrettyTable
from ase import Atoms
from ase.io.jsonio import encode as atoms_encoder
from ase.io.jsonio import decode as atoms_decoder
from scipy import stats
from sklearn.gaussian_process import GaussianProcessRegressor
from dscribe.descriptors import SineMatrix
from autocat.learning.predictors import Predictor
from autocat.data.hhi import HHI
from autocat.data.segregation_energies import SEGREGATION_ENERGIES
Array = List[float]
class DesignSpaceError(Exception):
pass
class DesignSpace:
def __init__(
self, design_space_structures: List[Atoms], design_space_labels: Array,
):
"""
Constructor.
Parameters
----------
design_space_structures:
List of all structures within the design space
design_space_labels:
Labels corresponding to all structures within the design space.
If label not yet known, set to np.nan
"""
if len(design_space_structures) != design_space_labels.shape[0]:
msg = f"Number of structures ({len(design_space_structures)})\
and labels ({design_space_labels.shape[0]}) must match"
raise DesignSpaceError(msg)
self._design_space_structures = [
struct.copy() for struct in design_space_structures
]
self._design_space_labels = design_space_labels.copy()
def __repr__(self) -> str:
pt = PrettyTable()
pt.field_names = ["", "DesignSpace"]
pt.add_row(["total # of systems", len(self)])
num_unknown = sum(np.isnan(self.design_space_labels))
pt.add_row(["# of unlabelled systems", num_unknown])
pt.add_row(["unique species present", self.species_list])
max_label = max(self.design_space_labels)
pt.add_row(["maximum label", max_label])
min_label = min(self.design_space_labels)
pt.add_row(["minimum label", min_label])
pt.max_width = 70
return str(pt)
def __len__(self):
return len(self.design_space_structures)
# TODO: non-dunder method for deleting systems
def __delitem__(self, i):
"""
Deletes systems from the design space. If mask provided, deletes wherever True
"""
if isinstance(i, list):
i = np.array(i)
elif isinstance(i, int):
i = [i]
mask = np.ones(len(self), dtype=bool)
mask[i] = 0
self._design_space_labels = self.design_space_labels[mask]
structs = self.design_space_structures
masked_structs = [structs[j] for j in range(len(self)) if mask[j]]
self._design_space_structures = masked_structs
def __eq__(self, other: object) -> bool:
if isinstance(other, DesignSpace):
# check that they are the same length
if len(self) == len(other):
# check all their structures are equal
self_structs = self.design_space_structures
o_structs = other.design_space_structures
if not self_structs == o_structs:
return False
# check their labels are equal
self_labels = self.design_space_labels
o_labels = other.design_space_labels
return np.array_equal(self_labels, o_labels, equal_nan=True)
return False
def copy(self):
"""
Returns a copy of the design space
"""
acds = self.__class__(
design_space_structures=self.design_space_structures,
design_space_labels=self.design_space_labels,
)
return acds
@property
def design_space_structures(self):
return self._design_space_structures
@design_space_structures.setter
def design_space_structures(self, design_space_structures):
msg = "Please use `update` method to update the design space."
raise DesignSpaceError(msg)
@property
def design_space_labels(self):
return self._design_space_labels
@design_space_labels.setter
def design_space_labels(self, design_space_labels):
msg = "Please use `update` method to update the design space."
raise DesignSpaceError(msg)
@property
def species_list(self):
species_list = []
for s in self.design_space_structures:
# get all unique species
found_species = np.unique(s.get_chemical_symbols()).tolist()
new_species = [spec for spec in found_species if spec not in species_list]
species_list.extend(new_species)
return species_list
def update(self, structures: List[Atoms], labels: Array):
"""
Updates design space given structures and corresponding labels.
If structure already in design space, the label is updated.
Parameters
----------
structures:
List of Atoms objects structures to be added
labels:
Corresponding labels to `structures`
"""
if (structures is not None) and (labels is not None):
assert len(structures) == len(labels)
assert all(isinstance(struct, Atoms) for struct in structures)
for i, struct in enumerate(structures):
# if structure already in design space, update label
if struct in self.design_space_structures:
idx = self.design_space_structures.index(struct)
self._design_space_labels[idx] = labels[i]
# otherwise extend design space
else:
self._design_space_structures.append(struct)
self._design_space_labels = np.append(
self.design_space_labels, labels[i]
)
def to_jsonified_list(self) -> List:
"""
Returns a jsonified list representation
"""
collected_jsons = []
for struct in self.design_space_structures:
collected_jsons.append(atoms_encoder(struct))
# append labels to list of collected jsons
jsonified_labels = [float(x) for x in self.design_space_labels]
collected_jsons.append(jsonified_labels)
return collected_jsons
def write_json_to_disk(
self,
json_name: str = None,
write_location: str = ".",
write_to_disk: bool = True,
):
"""
Writes DesignSpace to disk as a json
"""
collected_jsons = self.to_jsonified_list()
# set default json name if needed
if json_name is None:
json_name = "acds.json"
# write out single json
if write_to_disk:
json_path = os.path.join(write_location, json_name)
with open(json_path, "w") as f:
json.dump(collected_jsons, f)
@staticmethod
def from_json(json_name: str):
with open(json_name, "r") as f:
all_data = json.load(f)
structures = []
for i in range(len(all_data) - 1):
atoms = atoms_decoder(all_data[i])
structures.append(atoms)
labels = np.array(all_data[-1])
return DesignSpace(
design_space_structures=structures, design_space_labels=labels,
)
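def _demo_design_space_usage():
    """
    Illustrative sketch (added; not part of the original module) of how a
    DesignSpace is typically built and updated. The structures and labels
    below are placeholders.
    """
    structs = [
        Atoms("Pt", positions=[[0.0, 0.0, 0.0]], cell=[10, 10, 10], pbc=True),
        Atoms("Cu", positions=[[0.0, 0.0, 0.0]], cell=[10, 10, 10], pbc=True),
    ]
    labels = np.array([0.3, np.nan])  # second structure not yet evaluated
    ds = DesignSpace(design_space_structures=structs, design_space_labels=labels)
    # fill in the unknown label once it has been computed
    ds.update([structs[1]], np.array([0.7]))
    return ds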
class SequentialLearnerError(Exception):
pass
# TODO: "kwargs" -> "options"?
class SequentialLearner:
def __init__(
self,
design_space: DesignSpace,
predictor_kwargs: Dict[str, Union[str, float]] = None,
candidate_selection_kwargs: Dict[str, Union[str, float]] = None,
sl_kwargs: Dict[str, int] = None,
):
# TODO: move predefined attributes (train_idx, candidate_idxs) to a
# different container (not kwargs)
self._design_space = None
self.design_space = design_space.copy()
# predictor arguments to use throughout the SL process
if predictor_kwargs is None:
predictor_kwargs = {
"model_class": GaussianProcessRegressor,
"featurizer_class": SineMatrix,
}
if "model_class" not in predictor_kwargs:
predictor_kwargs["model_class"] = GaussianProcessRegressor
if "featurizer_class" not in predictor_kwargs:
predictor_kwargs["featurizer_class"] = SineMatrix
if "featurization_kwargs" not in predictor_kwargs:
predictor_kwargs["featurization_kwargs"] = {}
ds_structs_kwargs = {
"design_space_structures": design_space.design_space_structures
}
predictor_kwargs["featurization_kwargs"].update(ds_structs_kwargs)
self._predictor_kwargs = None
self.predictor_kwargs = predictor_kwargs
self._predictor = Predictor(**predictor_kwargs)
# acquisition function arguments to use for candidate selection
if not candidate_selection_kwargs:
candidate_selection_kwargs = {"aq": "Random"}
self._candidate_selection_kwargs = None
self.candidate_selection_kwargs = candidate_selection_kwargs
# other miscellaneous kw arguments
self.sl_kwargs = sl_kwargs if sl_kwargs else {}
# variables that need to be propagated through the SL process
if "iteration_count" not in self.sl_kwargs:
self.sl_kwargs.update({"iteration_count": 0})
if "train_idx" not in self.sl_kwargs:
self.sl_kwargs.update({"train_idx": None})
if "train_idx_history" not in self.sl_kwargs:
self.sl_kwargs.update({"train_idx_history": None})
if "predictions" not in self.sl_kwargs:
self.sl_kwargs.update({"predictions": None})
if "predictions_history" not in self.sl_kwargs:
self.sl_kwargs.update({"predictions_history": None})
if "uncertainties" not in self.sl_kwargs:
self.sl_kwargs.update({"uncertainties": None})
if "uncertainties_history" not in self.sl_kwargs:
self.sl_kwargs.update({"uncertainties_history": None})
if "candidate_indices" not in self.sl_kwargs:
self.sl_kwargs.update({"candidate_indices": None})
if "candidate_index_history" not in self.sl_kwargs:
self.sl_kwargs.update({"candidate_index_history": None})
if "acquisition_scores" not in self.sl_kwargs:
self.sl_kwargs.update({"acquisition_scores": None})
def __repr__(self) -> str:
pt = PrettyTable()
pt.field_names = ["", "Sequential Learner"]
pt.add_row(["iteration count", self.iteration_count])
if self.candidate_structures is not None:
cand_formulas = [
s.get_chemical_formula() for s in self.candidate_structures
]
else:
cand_formulas = None
pt.add_row(["next candidate system structures", cand_formulas])
pt.add_row(["next candidate system indices", self.candidate_indices])
pt.add_row(["acquisition function", self.candidate_selection_kwargs.get("aq")])
pt.add_row(
[
"# of candidates to pick",
self.candidate_selection_kwargs.get("num_candidates_to_pick", 1),
]
)
pt.add_row(
["target maximum", self.candidate_selection_kwargs.get("target_max")]
)
pt.add_row(
["target minimum", self.candidate_selection_kwargs.get("target_min")]
)
pt.add_row(
["include hhi?", self.candidate_selection_kwargs.get("include_hhi", False)]
)
pt.add_row(
[
"include segregation energies?",
self.candidate_selection_kwargs.get("include_seg_ener", False),
]
)
return str(pt) + "\n" + str(self.design_space) + "\n" + str(self.predictor)
@property
def design_space(self):
return self._design_space
@design_space.setter
def design_space(self, design_space):
self._design_space = design_space
@property
def predictor_kwargs(self):
return self._predictor_kwargs
@predictor_kwargs.setter
def predictor_kwargs(self, predictor_kwargs):
if predictor_kwargs is None:
predictor_kwargs = {
"model_class": GaussianProcessRegressor,
"featurizer_class": SineMatrix,
}
if "model_class" not in predictor_kwargs:
predictor_kwargs["model_class"] = GaussianProcessRegressor
if "featurizer_class" not in predictor_kwargs:
predictor_kwargs["featurizer_class"] = SineMatrix
if "featurization_kwargs" not in predictor_kwargs:
predictor_kwargs["featurization_kwargs"] = {}
ds_structs_kwargs = {
"design_space_structures": self.design_space.design_space_structures
}
predictor_kwargs["featurization_kwargs"].update(ds_structs_kwargs)
self._predictor_kwargs = copy.deepcopy(predictor_kwargs)
self._predictor = Predictor(**predictor_kwargs)
@property
def predictor(self):
return self._predictor
@property
def candidate_selection_kwargs(self):
return self._candidate_selection_kwargs
@candidate_selection_kwargs.setter
def candidate_selection_kwargs(self, candidate_selection_kwargs):
if not candidate_selection_kwargs:
candidate_selection_kwargs = {}
self._candidate_selection_kwargs = candidate_selection_kwargs.copy()
@property
def iteration_count(self):
return self.sl_kwargs.get("iteration_count", 0)
@property
def train_idx(self):
return self.sl_kwargs.get("train_idx")
@property
def train_idx_history(self):
return self.sl_kwargs.get("train_idx_history", None)
@property
def predictions(self):
return self.sl_kwargs.get("predictions")
@property
def uncertainties(self):
return self.sl_kwargs.get("uncertainties")
@property
def candidate_indices(self):
return self.sl_kwargs.get("candidate_indices")
@property
def acquisition_scores(self):
return self.sl_kwargs.get("acquisition_scores", None)
@property
def candidate_structures(self):
idxs = self.candidate_indices
if idxs is not None:
return [self.design_space.design_space_structures[i] for i in idxs]
@property
def candidate_index_history(self):
return self.sl_kwargs.get("candidate_index_history", None)
@property
def predictions_history(self):
return self.sl_kwargs.get("predictions_history", None)
@property
def uncertainties_history(self):
return self.sl_kwargs.get("uncertainties_history", None)
def copy(self):
"""
Returns a copy
"""
acsl = self.__class__(design_space=self.design_space,)
acsl.predictor_kwargs = copy.deepcopy(self.predictor_kwargs)
acsl.sl_kwargs = copy.deepcopy(self.sl_kwargs)
return acsl
def iterate(self):
"""Runs the next iteration of sequential learning.
This process consists of:
- retraining the predictor
- predicting candidate properties and calculating candidate scores (if
fully explored returns None)
- selecting the next batch of candidates for objective evaluation (if
fully explored returns None)
"""
dstructs = self.design_space.design_space_structures
dlabels = self.design_space.design_space_labels
mask_nans = ~np.isnan(dlabels)
masked_structs = [struct for i, struct in enumerate(dstructs) if mask_nans[i]]
masked_labels = dlabels[np.where(mask_nans)]
self.predictor.fit(masked_structs, masked_labels)
train_idx = np.zeros(len(dlabels), dtype=bool)
train_idx[np.where(mask_nans)] = 1
self.sl_kwargs.update({"train_idx": train_idx})
train_idx_hist = self.sl_kwargs.get("train_idx_history")
if train_idx_hist is None:
train_idx_hist = []
train_idx_hist.append(train_idx)
self.sl_kwargs.update({"train_idx_history": train_idx_hist})
preds, unc = self.predictor.predict(dstructs)
# update predictions and store in history
self.sl_kwargs.update({"predictions": preds})
pred_hist = self.sl_kwargs.get("predictions_history")
if pred_hist is None:
pred_hist = []
pred_hist.append(preds)
self.sl_kwargs.update({"predictions_history": pred_hist})
# update uncertainties and store in history
self.sl_kwargs.update({"uncertainties": unc})
unc_hist = self.sl_kwargs.get("uncertainties_history")
if unc_hist is None:
unc_hist = []
unc_hist.append(unc)
self.sl_kwargs.update({"uncertainties_history": unc_hist})
# make sure haven't fully searched design space
if any([np.isnan(label) for label in dlabels]):
candidate_idx, _, aq_scores = choose_next_candidate(
dstructs,
dlabels,
train_idx,
preds,
unc,
**self.candidate_selection_kwargs,
)
# if fully searched, no more candidate structures
else:
candidate_idx = None
aq_scores = None
self.sl_kwargs.update({"candidate_indices": candidate_idx})
self.sl_kwargs.update({"acquisition_scores": aq_scores})
# update the candidate index history if new candidate
if candidate_idx is not None:
cand_idx_hist = self.sl_kwargs.get("candidate_index_history")
if cand_idx_hist is None:
cand_idx_hist = []
cand_idx_hist.append(candidate_idx)
self.sl_kwargs.update({"candidate_index_history": cand_idx_hist})
# update the SL iteration count
itc = self.sl_kwargs.get("iteration_count", 0)
self.sl_kwargs.update({"iteration_count": itc + 1})
def to_jsonified_list(self) -> List:
"""
Returns a jsonified list representation
"""
jsonified_list = self.design_space.to_jsonified_list()
# append kwargs for predictor
jsonified_pred_kwargs = {}
for k in self.predictor_kwargs:
if k in ["model_class", "featurizer_class"]:
mod_string = self.predictor_kwargs[k].__module__
class_string = self.predictor_kwargs[k].__name__
jsonified_pred_kwargs[k] = [mod_string, class_string]
elif k == "featurization_kwargs":
jsonified_pred_kwargs[k] = copy.deepcopy(self.predictor_kwargs[k])
# assumes design space will always match DesignSpace
del jsonified_pred_kwargs[k]["design_space_structures"]
else:
jsonified_pred_kwargs[k] = self.predictor_kwargs[k]
jsonified_list.append(jsonified_pred_kwargs)
# append kwargs for candidate selection
jsonified_list.append(self.candidate_selection_kwargs)
# append the acsl kwargs
jsonified_sl_kwargs = {}
for k in self.sl_kwargs:
if k != "iteration_count" and self.sl_kwargs[k] is not None:
jsonified_sl_kwargs[k] = [arr.tolist() for arr in self.sl_kwargs[k]]
elif k == "iteration_count":
jsonified_sl_kwargs["iteration_count"] = self.sl_kwargs[
"iteration_count"
]
elif self.sl_kwargs[k] is None:
jsonified_sl_kwargs[k] = None
jsonified_list.append(jsonified_sl_kwargs)
return jsonified_list
def write_json_to_disk(self, write_location: str = ".", json_name: str = None):
"""
Writes `SequentialLearner` to disk as a json
"""
jsonified_list = self.to_jsonified_list()
if json_name is None:
json_name = "acsl.json"
json_path = os.path.join(write_location, json_name)
with open(json_path, "w") as f:
json.dump(jsonified_list, f)
@staticmethod
def from_json(json_name: str):
with open(json_name, "r") as f:
all_data = json.load(f)
structures = []
for i in range(len(all_data) - 4):
atoms = atoms_decoder(all_data[i])
structures.append(atoms)
labels = np.array(all_data[-4])
acds = DesignSpace(
design_space_structures=structures, design_space_labels=labels,
)
predictor_kwargs = all_data[-3]
for k in predictor_kwargs:
if k in ["model_class", "featurizer_class"]:
mod = importlib.import_module(predictor_kwargs[k][0])
predictor_kwargs[k] = getattr(mod, predictor_kwargs[k][1])
candidate_selection_kwargs = all_data[-2]
raw_sl_kwargs = all_data[-1]
sl_kwargs = {}
for k in raw_sl_kwargs:
if raw_sl_kwargs[k] is not None:
if k in [
"predictions",
"uncertainties",
"acquisition_scores",
"candidate_indices",
]:
sl_kwargs[k] = np.array(raw_sl_kwargs[k])
elif k in [
"predictions_history",
"uncertainties_history",
"candidate_index_history",
]:
sl_kwargs[k] = [np.array(i) for i in raw_sl_kwargs[k]]
elif k == "iteration_count":
sl_kwargs[k] = raw_sl_kwargs[k]
elif k == "train_idx":
sl_kwargs[k] = np.array(raw_sl_kwargs[k], dtype=bool)
elif k == "train_idx_history":
sl_kwargs[k] = [np.array(i, dtype=bool) for i in raw_sl_kwargs[k]]
else:
sl_kwargs[k] = None
return SequentialLearner(
design_space=acds,
predictor_kwargs=predictor_kwargs,
candidate_selection_kwargs=candidate_selection_kwargs,
sl_kwargs=sl_kwargs,
)
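def _demo_sequential_learner_iteration():
    """
    Illustrative sketch (added; not part of the original module) of a single
    sequential-learning step on a toy design space. The structures, labels and
    kwargs are placeholders, and `iterate` relies on the module's
    `choose_next_candidate` helper being available.
    """
    structs = [
        Atoms("Pt", positions=[[0.0, 0.0, 0.0]], cell=[10, 10, 10], pbc=True),
        Atoms("Cu", positions=[[0.0, 0.0, 0.0]], cell=[10, 10, 10], pbc=True),
        Atoms("Au", positions=[[0.0, 0.0, 0.0]], cell=[10, 10, 10], pbc=True),
    ]
    labels = np.array([0.1, np.nan, 0.4])  # one structure still unlabelled
    ds = DesignSpace(design_space_structures=structs, design_space_labels=labels)
    learner = SequentialLearner(
        design_space=ds,
        candidate_selection_kwargs={"aq": "Random", "num_candidates_to_pick": 1},
    )
    learner.iterate()  # retrains, predicts, and proposes the next candidate(s)
    return learner.candidate_structures, learner.predictions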
def multiple_simulated_sequential_learning_runs(
full_design_space: DesignSpace,
number_of_runs: int = 5,
number_parallel_jobs: int = None,
predictor_kwargs: Dict[str, Union[str, float]] = None,
candidate_selection_kwargs: Dict[str, Union[str, float]] = None,
init_training_size: int = 10,
number_of_sl_loops: int = None,
write_to_disk: bool = False,
write_location: str = ".",
json_name_prefix: str = None,
) -> List[SequentialLearner]:
"""
Conducts multiple simulated sequential learning runs
Parameters
----------
full_design_space:
Fully labelled DesignSpace to simulate
being searched over
predictor_kwargs:
Kwargs to be used in setting up the predictor.
This is where model class, model hyperparameters, etc.
are specified.
candidate_selection_kwargs:
        Kwargs that specify the settings for candidate selection.
This is where acquisition function, targets, etc. are
specified.
init_training_size:
Size of the initial training set to be selected from
the full space.
Default: 10
number_of_sl_loops:
Integer specifying the number of sequential learning loops to be conducted.
This value cannot be greater than
`(DESIGN_SPACE_SIZE - init_training_size)/batch_size_to_add`
Default: maximum number of sl loops calculated above
number_of_runs:
Integer of number of runs to be done
Default: 5
number_parallel_jobs:
        Integer giving the number of cores to parallelize across
using `joblib`
Default: None (ie. will run in serial)
write_to_disk:
Boolean specifying whether runs history should be written to disk as jsons.
Default: False
write_location:
String with the location where runs history jsons should be written to disk.
Default: current directory
json_name_prefix:
Prefix used when writing out each simulated run as a json
The naming convention is `{json_name_prefix}_{run #}.json`
Default: acsl_run
Returns
-------
runs_history:
List of SequentialLearner objects for each simulated run
"""
if number_parallel_jobs is not None:
runs_history = Parallel(n_jobs=number_parallel_jobs)(
delayed(simulated_sequential_learning)(
full_design_space=full_design_space,
predictor_kwargs=predictor_kwargs,
candidate_selection_kwargs=candidate_selection_kwargs,
number_of_sl_loops=number_of_sl_loops,
init_training_size=init_training_size,
)
for i in range(number_of_runs)
)
else:
runs_history = [
simulated_sequential_learning(
full_design_space=full_design_space,
predictor_kwargs=predictor_kwargs,
candidate_selection_kwargs=candidate_selection_kwargs,
number_of_sl_loops=number_of_sl_loops,
init_training_size=init_training_size,
)
for i in range(number_of_runs)
]
# TODO: separate dictionary representation and writing to disk
if write_to_disk:
if not os.path.isdir(write_location):
os.makedirs(write_location)
if json_name_prefix is None:
json_name_prefix = "acsl_run"
for i, run in enumerate(runs_history):
name = json_name_prefix + "_" + str(i) + ".json"
run.write_json_to_disk(write_location=write_location, json_name=name)
print(f"SL histories written to {write_location}")
return runs_history
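# Illustrative call (added; the arguments below are placeholders):
#
#   runs = multiple_simulated_sequential_learning_runs(
#       full_design_space=fully_labelled_design_space,
#       number_of_runs=3,
#       predictor_kwargs={"model_class": GaussianProcessRegressor,
#                         "featurizer_class": SineMatrix},
#       candidate_selection_kwargs={"aq": "Random", "num_candidates_to_pick": 2},
#       init_training_size=5,
#   )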
def simulated_sequential_learning(
full_design_space: DesignSpace,
predictor_kwargs: Dict[str, Union[str, float]] = None,
candidate_selection_kwargs: Dict[str, Union[str, float]] = None,
init_training_size: int = 10,
number_of_sl_loops: int = None,
write_to_disk: bool = False,
write_location: str = ".",
json_name: str = None,
) -> SequentialLearner:
"""
Conducts a simulated sequential learning loop for a
fully labelled design space to explore.
Parameters
----------
full_design_space:
Fully labelled DesignSpace to simulate
being searched over
predictor_kwargs:
Kwargs to be used in setting up the predictor.
This is where model class, model hyperparameters, etc.
are specified.
candidate_selection_kwargs:
        Kwargs that specify the settings for candidate selection.
This is where acquisition function, targets, etc. are
specified.
init_training_size:
Size of the initial training set to be selected from
the full space.
Default: 10
number_of_sl_loops:
Integer specifying the number of sequential learning loops to be conducted.
This value cannot be greater than
`(DESIGN_SPACE_SIZE - init_training_size)/batch_size_to_add`
Default: maximum number of sl loops calculated above
write_to_disk:
Boolean specifying whether the resulting sequential learner should be
written to disk as a json.
Defaults to False.
write_location:
String with the location where the resulting sequential learner
should be written to disk.
Defaults to current directory.
Returns
-------
sl:
Sequential Learner after having been iterated as specified
by the input settings. Contains candidate, prediction,
and uncertainty histories for further analysis as desired.
"""
ds_size = len(full_design_space)
# check fully explored
if True in np.isnan(full_design_space.design_space_labels):
missing_label_idx = np.where(np.isnan(full_design_space.design_space_labels))[0]
msg = (
f"Design space must be fully explored."
f" Missing labels at indices: {missing_label_idx}"
)
raise SequentialLearnerError(msg)
# check that specified initial training size makes sense
if init_training_size > ds_size:
msg = f"Initial training size ({init_training_size})\
larger than design space ({ds_size})"
raise SequentialLearnerError(msg)
    batch_size_to_add = (candidate_selection_kwargs or {}).get("num_candidates_to_pick", 1)
max_num_sl_loops = int(np.ceil((ds_size - init_training_size) / batch_size_to_add))
if number_of_sl_loops is None:
number_of_sl_loops = max_num_sl_loops
# check that specified number of loops is feasible
if number_of_sl_loops > max_num_sl_loops:
msg = (
f"Number of SL loops ({number_of_sl_loops}) cannot be greater than"
f" ({max_num_sl_loops})"
)
raise SequentialLearnerError(msg)
# generate initial training set
init_idx = np.zeros(ds_size, dtype=bool)
    init_idx[np.random.choice(ds_size, init_training_size, replace=False)] = 1
from typing import List
import numpy as np
from numpy import sqrt
Gx_0 = np.array([
[0],
])
Gx_1 = np.array([
[0, 0, 0],
[0, 0, -1],
[0, 1, 0],
])
Gx_2 = np.array([
[0, 1, 0, 0, 0],
[-1, 0, 0, 0, 0],
[0, 0, 0, -sqrt(3), 0],
[0, 0, sqrt(3), 0, -1],
[0, 0, 0, 1, 0],
])
Gx_3 = np.array([
[0, sqrt(6)/2, 0, 0, 0, 0, 0],
[-sqrt(6)/2, 0, sqrt(10)/2, 0, 0, 0, 0],
[0, -sqrt(10)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(6), 0, 0],
[0, 0, 0, sqrt(6), 0, -sqrt(10)/2, 0],
[0, 0, 0, 0, sqrt(10)/2, 0, -sqrt(6)/2],
[0, 0, 0, 0, 0, sqrt(6)/2, 0],
])
Gx_4 = np.array([
[0, sqrt(2), 0, 0, 0, 0, 0, 0, 0],
[-sqrt(2), 0, sqrt(14)/2, 0, 0, 0, 0, 0, 0],
[0, -sqrt(14)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0],
[0, 0, -3*sqrt(2)/2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(10), 0, 0, 0],
[0, 0, 0, 0, sqrt(10), 0, -3*sqrt(2)/2, 0, 0],
[0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(14)/2, 0],
[0, 0, 0, 0, 0, 0, sqrt(14)/2, 0, -sqrt(2)],
[0, 0, 0, 0, 0, 0, 0, sqrt(2), 0],
])
Gx_5 = np.array([
[0, sqrt(10)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(10)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -3*sqrt(2)/2, 0, sqrt(6), 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(6), 0, sqrt(7), 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(7), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -sqrt(15), 0, 0, 0, 0],
[0, 0, 0, 0, 0, sqrt(15), 0, -sqrt(7), 0, 0, 0],
[0, 0, 0, 0, 0, 0, sqrt(7), 0, -sqrt(6), 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(6), 0, -3*sqrt(2)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(10)/2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(10)/2, 0],
])
Gx_6 = np.array([
[0, sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(3), 0, sqrt(22)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(22)/2, 0, sqrt(30)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(30)/2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -3, 0, sqrt(10), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(10), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -sqrt(21), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, sqrt(21), 0, -sqrt(10), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(10), 0, -3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -sqrt(30)/2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(30)/2, 0, -sqrt(22)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(22)/2, 0, -sqrt(3)],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(3), 0],
])
Gx_7 = np.array([
[0, sqrt(14)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(14)/2, 0, sqrt(26)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(26)/2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -3, 0, sqrt(11), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(11), 0, 5*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -5*sqrt(2)/2, 0, 3*sqrt(6)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -3*sqrt(6)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, -2*sqrt(7), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2*sqrt(7), 0, -3*sqrt(6)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(6)/2, 0, -5*sqrt(2)/2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 5*sqrt(2)/2, 0, -sqrt(11), 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(11), 0, -3, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -sqrt(26)/2, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(26)/2, 0, -sqrt(14)/2],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(14)/2, 0],
])
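def _check_generator_matrices():
    """
    Sanity-check sketch (added; not part of the original file): every Gx_l above
    should be a real antisymmetric matrix, which also guarantees a purely
    imaginary eigenvalue spectrum.
    """
    for G in (Gx_0, Gx_1, Gx_2, Gx_3, Gx_4, Gx_5, Gx_6, Gx_7):
        assert np.allclose(G, -G.T), "generator is not antisymmetric"
        eigs = np.linalg.eigvals(G.astype(float))
        assert np.allclose(eigs.real, 0.0, atol=1e-8), "spectrum is not purely imaginary"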
#!/usr/bin/env python
import lasagne.layers
import theano
import theano.tensor as T
import numpy as np
import warnings
from theano.sandbox.rng_mrg import MRG_RandomStreams
_srng = MRG_RandomStreams(42)
def _logit(x):
"""
Logit function in Theano. Useful for parameterizing alpha.
"""
return np.log(x/(1. - x))
def _check_p(p):
"""
Thanks to our logit parameterisation we can't accept p of greater than or
equal to 0.5 (or we get inf logitalphas). So we'll just warn the user and
scale it down slightly.
"""
if p == 0.5:
warnings.warn("Cannot set p to exactly 0.5, limits are: 0 < p < 0.5."
" Setting to 0.4999", RuntimeWarning)
return 0.4999
elif p > 0.5:
warnings.warn("Cannot set p to greater than 0.5, limits are: "
"0 < p < 0.5. Setting to 0.4999", RuntimeWarning)
return 0.4999
elif p <= 0.0:
warnings.warn("Cannot set p to less than or equal to 0.0, limits are: "
"0 < p < 0.5. Setting to 0.0001", RuntimeWarning)
return 0.0001
else:
return p
class VariationalDropout(lasagne.layers.Layer):
"""
Base class for variational dropout layers, because the noise sampling
and initialisation can be shared between type A and B.
Inits:
* p - initialisation of the parameters sampled for the noise
distribution.
* adaptive - one of:
* None - will not allow updates to the dropout rate
* "layerwise" - allow updates to a single parameter controlling the
updates
* "elementwise" - allow updates to a parameter for each hidden layer
* "weightwise" - allow updates to a parameter for each weight (don't
think this is actually necessary to replicate)
"""
def __init__(self, incoming, p=0.5, adaptive=None, nonlinearity=None,
**kwargs):
lasagne.layers.Layer.__init__(self, incoming, **kwargs)
self.adaptive = adaptive
p = _check_p(p)
# init based on adaptive options:
if self.adaptive == None:
# initialise scalar param, but don't register it
self.logitalpha = theano.shared(
value=np.array(_logit(np.sqrt(p/(1.-p)))).astype(theano.config.floatX),
name='logitalpha'
)
elif self.adaptive == "layerwise":
# initialise scalar param, allow updates
self.logitalpha = theano.shared(
value=np.array(_logit(np.sqrt(p/(1.-p)))).astype(theano.config.floatX),
name='logitalpha'
)
self.add_param(self.logitalpha, ())
elif self.adaptive == "elementwise":
# initialise param for each activation passed
self.logitalpha = theano.shared(
value=np.array(
np.ones(self.input_shape[1])*_logit(np.sqrt(p/(1.-p)))
).astype(theano.config.floatX),
name='logitalpha'
)
self.add_param(self.logitalpha, (self.input_shape[1],))
elif self.adaptive == "weightwise":
# not implemented yet
raise NotImplementedError("Not implemented yet, will have to "
"use DenseLayer inheritance.")
# if we get no nonlinearity, just put a non-function there
if nonlinearity == None:
self.nonlinearity = lambda x: x
else:
self.nonlinearity = nonlinearity
class WangGaussianDropout(lasagne.layers.Layer):
"""
Replication of the Gaussian dropout of Wang and Manning 2012.
To use this right, similarly to the above, this has to be applied
to the activations of the network _before the nonlinearity_. This means
that the prior layer must have _no nonlinearity_, and then you can
either apply a nonlinearity in this layer or afterwards yourself.
Uses some of the code and comments from the Lasagne GaussianNoiseLayer:
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
the layer feeding into this layer, or the expected input shape
p : float or tensor scalar, effective dropout probability
nonlinearity : a nonlinearity to apply after the noising process
"""
def __init__(self, incoming, p=0.5, nonlinearity=None, **kwargs):
lasagne.layers.Layer.__init__(self, incoming, **kwargs)
p = _check_p(p)
self.logitalpha = theano.shared(
value=np.array(_logit(np.sqrt(p/(1.-p)))).astype(theano.config.floatX),
name='logitalpha'
)
# if we get no nonlinearity, just put a non-function there
if nonlinearity == None:
self.nonlinearity = lambda x: x
else:
self.nonlinearity = nonlinearity
def get_output_for(self, input, deterministic=False, **kwargs):
"""
Parameters
----------
input : tensor
output from the previous layer
deterministic : bool
If true noise is disabled, see notes
"""
self.alpha = T.nnet.sigmoid(self.logitalpha)
if deterministic or T.mean(self.alpha).eval() == 0:
return self.nonlinearity(input)
else:
# sample from the Gaussian that dropout would produce:
mu_z = input
sigma_z = self.alpha*input
randn = _srng.normal(input.shape, avg=1.0, std=1.)
return self.nonlinearity(mu_z + sigma_z*randn)
class SrivastavaGaussianDropout(lasagne.layers.Layer):
"""
Replication of the Gaussian dropout of Srivastava et al. 2014 (section
10). Applies noise to the activations prior to the weight matrix
according to equation 11 in the Variational Dropout paper; to match the
adaptive dropout implementation.
Uses some of the code and comments from the Lasagne GaussianNoiseLayer:
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
the layer feeding into this layer, or the expected input shape
p : float or tensor scalar, effective dropout probability
"""
def __init__(self, incoming, p=0.5, **kwargs):
super(SrivastavaGaussianDropout, self).__init__(incoming, **kwargs)
p = _check_p(p)
self.logitalpha = theano.shared(
value=np.array(_logit(np.sqrt(p/(1.-p)))).astype(theano.config.floatX),
name='logitalpha'
)
def get_output_for(self, input, deterministic=False, **kwargs):
"""
Parameters
----------
input : tensor
output from the previous layer
deterministic : bool
If true noise is disabled, see notes
"""
self.alpha = T.nnet.sigmoid(self.logitalpha)
if deterministic or T.mean(self.alpha).eval() == 0:
return input
else:
return input + \
input*self.alpha*_srng.normal(input.shape,
avg=0.0, std=1.)
class VariationalDropoutA(VariationalDropout, SrivastavaGaussianDropout):
"""
Variational dropout layer, implementing correlated weight noise over the
output of a layer. Adaptive version of Srivastava's Gaussian dropout.
Inits:
* p - initialisation of the parameters sampled for the noise
distribution.
* adaptive - one of:
* None - will not allow updates to the dropout rate
* "layerwise" - allow updates to a single parameter controlling the
updates
* "elementwise" - allow updates to a parameter for each hidden layer
* "weightwise" - allow updates to a parameter for each weight (don't
think this is actually necessary to replicate)
"""
def __init__(self, incoming, p=0.5, adaptive=None, nonlinearity=None,
**kwargs):
VariationalDropout.__init__(self, incoming, p=p, adaptive=adaptive,
nonlinearity=nonlinearity, **kwargs)
class VariationalDropoutB(VariationalDropout, WangGaussianDropout):
"""
Variational dropout layer, implementing independent weight noise. Adaptive
version of Wang's Gaussian dropout.
Inits:
* p - initialisation of the parameters sampled for the noise
distribution.
* adaptive - one of:
* None - will not allow updates to the dropout rate
* "layerwise" - allow updates to a single parameter controlling the
updates
* "elementwise" - allow updates to a parameter for each hidden layer
* "weightwise" - allow updates to a parameter for each weight (don't
think this is actually necessary to replicate)
"""
def __init__(self, incoming, p=0.5, adaptive=None, nonlinearity=None,
**kwargs):
VariationalDropout.__init__(self, incoming, p=p, adaptive=adaptive,
nonlinearity=nonlinearity, **kwargs)
class SingleWeightSample(lasagne.layers.DenseLayer):
"""
    Monte Carlo estimate of the uncertainty in the weights, made by taking a
    single sample of the weight matrix and propagating it forwards.
"""
def __init__(self, incoming, num_units, p=0.5, **kwargs):
super(SingleWeightSample, self).__init__(incoming, num_units, **kwargs)
# then initialise the noise terms for each weight
p = _check_p(p)
self.logitalpha = theano.shared(
value=np.array(_logit(np.sqrt(p/(1.-p)))).astype(theano.config.floatX),
name='logitalpha'
)
self.alpha = T.nnet.sigmoid(self.logitalpha)
        self.epsilon = np.sqrt(1./num_units)
import os
import unittest
from unittest import mock
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
import redback
dirname = os.path.dirname(__file__)
class TestTransient(unittest.TestCase):
def setUp(self) -> None:
self.time = np.array([1, 2, 3])
self.time_err = np.array([0.2, 0.3, 0.4])
self.y = np.array([3, 4, 2])
self.y_err = np.sqrt(self.y)
self.redshift = 0.75
self.data_mode = 'counts'
self.name = "GRB123456"
self.photon_index = 2
self.use_phase_model = False
self.transient = redback.transient.transient.Transient(
time=self.time, time_err=self.time_err, counts=self.y,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model)
def tearDown(self) -> None:
del self.time
del self.time_err
del self.y
del self.y_err
del self.redshift
del self.data_mode
del self.name
del self.photon_index
del self.use_phase_model
del self.transient
def test_ttes_data_mode_setting(self):
bin_ttes = MagicMock(return_value=(self.time, self.y))
ttes = np.arange(0, 1, 1000)
self.data_mode = 'ttes'
self.bin_size = 0.1
self.transient = redback.transient.transient.Transient(
ttes=ttes, redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, bin_ttes=bin_ttes)
bin_ttes.assert_called_once()
def test_data_mode_switches(self):
self.assertTrue(self.transient.counts_data)
self.assertFalse(self.transient.luminosity_data)
self.assertFalse(self.transient.flux_data)
self.assertFalse(self.transient.flux_density_data)
self.assertFalse(self.transient.magnitude_data)
self.assertFalse(self.transient.tte_data)
def test_set_data_mode_switch(self):
self.transient.flux_data = True
self.assertTrue(self.transient.flux_data)
self.assertFalse(self.transient.counts_data)
def test_get_time_via_x(self):
self.assertTrue(np.array_equal(self.time, self.transient.x))
self.assertTrue(np.array_equal(self.time_err, self.transient.x_err))
def test_get_time_via_x_luminosity_data(self):
new_times = np.array([1, 2, 3])
new_time_errs = np.array([0.1, 0.2, 0.3])
self.transient.time_rest_frame = new_times
self.transient.time_rest_frame_err = new_time_errs
self.transient.data_mode = "luminosity"
self.assertTrue(np.array_equal(new_times, self.transient.x))
self.assertTrue(np.array_equal(new_time_errs, self.transient.x_err))
def test_x_same_as_time(self):
self.assertTrue(np.array_equal(self.transient.x, self.transient.time))
def test_xerr_same_as_time_err(self):
self.assertTrue(np.array_equal(self.transient.x_err, self.transient.time_err))
def test_set_use_phase_model(self):
self.assertFalse(self.transient.use_phase_model)
def test_xlabel(self):
self.assertEqual(r"Time since burst [days]", self.transient.xlabel)
self.transient.use_phase_model = True
self.assertEqual(r"Time [MJD]", self.transient.xlabel)
def test_ylabel(self):
self.assertEqual(r'Counts', self.transient.ylabel)
self.transient.luminosity_data = True
self.assertEqual(r'Luminosity [$10^{50}$ erg s$^{-1}$]', self.transient.ylabel)
self.transient.magnitude_data = True
self.assertEqual(r'Magnitude', self.transient.ylabel)
self.transient.flux_data = True
self.assertEqual(r'Flux [erg cm$^{-2}$ s$^{-1}$]', self.transient.ylabel)
self.transient.flux_density_data = True
self.assertEqual(r'Flux density [mJy]', self.transient.ylabel)
self.transient.flux_density_data = False
with self.assertRaises(ValueError):
_ = self.transient.ylabel
def test_use_phase_model_time_attribute(self):
self.transient = redback.transient.transient.Transient(
time_mjd=self.time, time_mjd_err=self.time_err, counts=self.y, redshift=self.redshift,
data_mode=self.data_mode, name=self.name, photon_index=self.photon_index,
use_phase_model=True)
self.assertTrue(np.array_equal(self.transient.time_mjd, self.transient.x))
self.assertTrue(np.array_equal(self.transient.time_mjd_err, self.transient.x_err))
def test_set_x(self):
new_x = np.array([2, 3, 4])
self.transient.x = new_x
self.assertTrue(np.array_equal(new_x, self.transient.x))
self.assertTrue(np.array_equal(new_x, self.transient.time))
def test_set_x_err(self):
new_x_err = np.array([3, 4, 5])
self.transient.x_err = new_x_err
self.assertTrue(np.array_equal(new_x_err, self.transient.x_err))
self.assertTrue(np.array_equal(new_x_err, self.transient.time_err))
def test_set_y(self):
new_y = np.array([7, 8, 9])
self.transient.y = new_y
self.assertTrue(np.array_equal(new_y, self.transient.y))
self.assertTrue(np.array_equal(new_y, self.transient.counts))
def test_set_y_err(self):
new_y_err = np.array([7, 8, 9])
self.transient.y_err = new_y_err
self.assertTrue(np.array_equal(new_y_err, self.transient.y_err))
self.assertTrue(np.array_equal(new_y_err, self.transient.counts_err))
def test_y_same_as_counts(self):
self.assertTrue(np.array_equal(self.transient.y, self.transient.counts))
def test_yerr_same_as_counts(self):
self.assertTrue(np.array_equal(self.transient.y_err, self.transient.counts_err))
def test_redshift(self):
self.assertEqual(self.redshift, self.transient.redshift)
def test_get_data_mode(self):
self.assertEqual(self.data_mode, self.transient.data_mode)
def test_set_data_mode(self):
new_data_mode = "luminosity"
self.transient.data_mode = new_data_mode
self.assertEqual(new_data_mode, self.transient.data_mode)
def test_set_illegal_data_mode(self):
with self.assertRaises(ValueError):
self.transient.data_mode = "abc"
def test_plot_lightcurve(self):
pass
# self.transient.plot_lightcurve(model=None)
def test_plot_data(self):
pass
# self.transient.plot_data()
class TestOpticalTransient(unittest.TestCase):
def setUp(self) -> None:
self.time = np.array([1, 2, 3])
self.time_err = np.array([0.2, 0.3, 0.4])
self.y = np.array([3, 4, 2])
self.y_err = np.sqrt(self.y)
self.redshift = 0.75
self.data_mode = 'flux_density'
self.name = "SN2000A"
self.photon_index = 2
self.use_phase_model = False
self.bands = np.array(['i', 'g', 'g'])
self.active_bands = np.array(['g'])
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands)
def tearDown(self) -> None:
del self.time
del self.time_err
del self.y
del self.y_err
del self.redshift
del self.data_mode
del self.name
del self.photon_index
del self.use_phase_model
del self.bands
del self.active_bands
del self.transient
def test_load_data_magnitude(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
processed_file_path = f"{transient_dir}/{name}.csv"
data_mode = "magnitude"
time_days, time_mjd, magnitude, magnitude_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = np.array([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = np.array([57982.9814, 57982.9902])
expected_magnitude = np.array([17.48, 18.26])
expected_magnitude_err = np.array([0.02, 0.15])
expected_bands = np.array(["i", "H"])
expected_system = np.array(["AB", "AB"])
self.assertTrue(np.allclose(expected_time_days, time_days))
self.assertTrue(np.allclose(expected_time_mjd, time_mjd))
self.assertTrue(np.allclose(expected_magnitude, magnitude))
self.assertTrue(np.allclose(expected_magnitude_err, magnitude_err))
self.assertTrue(np.array_equal(expected_bands, bands))
self.assertTrue(np.array_equal(expected_system, system))
def test_load_data_flux_density(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
data_mode = "flux_density"
processed_file_path = f"{transient_dir}/{name}.csv"
time_days, time_mjd, flux_density, flux_density_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = np.array([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = np.array([57982.9814, 57982.9902])
expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914])
expected_bands = np.array(["i", "H"])
expected_system = np.array(["AB", "AB"])
self.assertTrue(np.allclose(expected_time_days, time_days))
self.assertTrue(np.allclose(expected_time_mjd, time_mjd))
self.assertTrue(np.allclose(expected_flux_density, flux_density))
self.assertTrue(np.allclose(expected_flux_density_err, flux_density_err))
self.assertTrue(np.array_equal(expected_bands, bands))
self.assertTrue(np.array_equal(expected_system, system))
def test_load_data_all(self):
name = "optical_transient_test_data"
transient_dir = f"{dirname}/data"
processed_file_path = f"{transient_dir}/{name}.csv"
data_mode = "all"
time_days, time_mjd, flux_density, flux_density_err, magnitude, magnitude_err, bands, system = \
self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode)
expected_time_days = np.array([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = np.array([57982.9814, 57982.9902])
expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914])
expected_magnitude = np.array([17.48, 18.26])
expected_magnitude_err = np.array([0.02, 0.15])
expected_bands = np.array(["i", "H"])
expected_system = np.array(["AB", "AB"])
self.assertTrue(np.allclose(expected_time_days, time_days))
self.assertTrue(np.allclose(expected_time_mjd, time_mjd))
self.assertTrue(np.allclose(expected_flux_density, flux_density))
self.assertTrue(np.allclose(expected_flux_density_err, flux_density_err))
self.assertTrue(np.allclose(expected_magnitude, magnitude))
self.assertTrue(np.allclose(expected_magnitude_err, magnitude_err))
self.assertTrue(np.array_equal(expected_bands, bands))
self.assertTrue(np.array_equal(expected_system, system))
def test_get_from_open_access_catalogue(self):
with mock.patch("redback.transient.transient.OpticalTransient.load_data") as m:
expected_time_days = np.array([0.4813999999969383, 0.49020000000018626])
expected_time_mjd = np.array([57982.9814, 57982.9902])
expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559])
expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914])
expected_magnitude = np.array([17.48, 18.26])
expected_magnitude_err = np.array([0.02, 0.15])
expected_bands = np.array(["i", "H"])
expected_system = np.array(["AB", "AB"])
m.return_value = \
expected_time_days, expected_time_mjd, expected_flux_density, expected_flux_density_err, \
expected_magnitude, expected_magnitude_err, expected_bands, expected_system
name = "test"
transient = redback.transient.transient.OpticalTransient.from_open_access_catalogue(name=name)
self.assertTrue(transient.magnitude_data)
self.assertEqual(name, transient.name)
self.assertTrue(np.allclose(expected_time_days, transient.time))
self.assertTrue(np.allclose(expected_time_mjd, transient.time_mjd))
self.assertTrue(np.allclose(expected_flux_density, transient.flux_density))
self.assertTrue(np.allclose(expected_flux_density_err, transient.flux_density_err))
self.assertTrue(np.allclose(expected_magnitude, transient.magnitude))
self.assertTrue(np.allclose(expected_magnitude_err, transient.magnitude_err))
self.assertTrue(np.array_equal(expected_bands, transient.bands))
self.assertTrue(np.array_equal(expected_system, transient.system))
def test_set_active_bands(self):
self.assertTrue(np.array_equal(np.array(self.active_bands), self.transient.active_bands))
def test_set_active_bands_all(self):
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands='all')
self.assertTrue(np.array_equal(np.array(['g', 'i']), self.transient.active_bands))
def test_set_frequencies_from_bands(self):
expected = [1, 2, 2]
bands_to_frequency = MagicMock(return_value=expected)
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
active_bands=self.active_bands, bands_to_frequency=bands_to_frequency)
self.assertTrue(np.array_equal(expected, self.transient.frequency))
bands_to_frequency.assert_called_once()
def test_set_frequencies_default(self):
frequency = np.array([1, 2, 2])
self.transient = redback.transient.transient.OpticalTransient(
time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err,
redshift=self.redshift, data_mode=self.data_mode, name=self.name,
photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands,
frequency=frequency, active_bands=self.active_bands)
self.assertTrue(np.array_equal(frequency, self.transient.frequency))
def test_get_filtered_data(self):
filtered_x, filtered_x_err, filtered_y, filtered_y_err = self.transient.get_filtered_data()
expected_x = self.time[1:]
expected_x_err = self.time_err[1:]
expected_y = self.y[1:]
expected_y_err = self.y_err[1:]
self.assertTrue(np.array_equal(expected_x, filtered_x))
self.assertTrue(np.array_equal(expected_x_err, filtered_x_err))
self.assertTrue(np.array_equal(expected_y, filtered_y))
self.assertTrue(np.array_equal(expected_y_err, filtered_y_err))
def test_get_filtered_data_no_x_err(self):
self.transient.x_err = None
_, filtered_x_err, _, _ = self.transient.get_filtered_data()
self.assertIsNone(filtered_x_err)
def test_get_filtered_data_illegal_data_mode(self):
with self.assertRaises(ValueError):
self.transient.luminosity_data = True
self.transient.get_filtered_data()
def test_meta_data_not_available(self):
self.assertIsNone(self.transient.meta_data)
@mock.patch("pandas.read_csv")
def test_meta_data_from_csv(self, read_csv):
self.transient.directory_structure = redback.get_data.directory.DirectoryStructure(
directory_path='data', raw_file_path=None, processed_file_path=None)
expected = dict(a=1)
read_csv.return_value = expected
self.transient._set_data()
self.assertDictEqual(expected, self.transient.meta_data)
def test_transient_dir(self):
with mock.patch('redback.get_data.directory.open_access_directory_structure') as m:
expected = 'expected'
m.return_value = expected, '_', '_'
self.assertEqual(expected, self.transient.transient_dir)
def test_unique_bands(self):
expected = np.array(['g', 'i'])
self.assertTrue(np.array_equal(expected, self.transient.unique_bands))
def test_list_of_band_indices(self):
expected = [np.array([1, 2]), np.array([0])]
        self.assertTrue(np.array_equal(expected[0], self.transient.list_of_band_indices[0]))
import warnings
warnings.filterwarnings("ignore")
import os
import sys
# libraries
import time
import numpy as np
import pandas as pd
import argparse
import cv2
import PIL.Image
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from sklearn.metrics import roc_auc_score
from warmup_scheduler import GradualWarmupScheduler
import albumentations
import timm
from tqdm import tqdm
from model import *
from loss import *
device = torch.device('cuda')
image_size = 512
use_amp = True
data_dir = './input/hpa-single-cell-image-classification/'
image_folder = './input/hpa-512/train/'
p_drop_cell = 0.
batch_size = 32
num_workers = 36
init_lr = 1e-4
num_classes = 19
n_ch = 4
loss_type = 'BCE' # 'BCE' or 'CE'
freeze_epo = 0
warmup_epo = 1
cosine_epo = 14
n_epochs = freeze_epo + warmup_epo + cosine_epo
if use_amp:
use_torch_amp = torch.__version__ >= '1.6'
if use_torch_amp:
import torch.cuda.amp as amp
else:
from apex import amp
else:
use_torch_amp = False
log_dir = './output'
model_dir = './output'
os.makedirs(log_dir, exist_ok=True)
os.makedirs(model_dir, exist_ok=True)
ext_mean = [30.89923273, 153.09532163, 81.67066827, 230.55380814]
orig_mean = [239.93038613, 246.05603962, 250.16871503, 250.50623682]
df_train_all = pd.read_csv('./input/hpa-512/train_all.csv')
df_train_all['filepath'] = df_train_all.apply(lambda row: os.path.join(image_folder, row.ID + '.png'), axis=1)
print(os.path.exists(df_train_all.loc[0].filepath), df_train_all.loc[0].filepath)
print(os.path.exists(df_train_all.iloc[-1].filepath), df_train_all.iloc[-1].filepath)
class HpaImageDataSet1:
def __init__(self, df, transform=None):
self.df = df.reset_index()
self.transform = transform
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
row = self.df.iloc[index]
image = np.asarray(PIL.Image.open(row.filepath)).copy()
if self.transform is not None:
image = self.transform(image=image)['image']
image = image.astype(np.float32)
for ch in range(4):
if row.is_ext == 0 or row.is_ext == 2:
image[:,:,ch] /= orig_mean[ch]
else:
image[:,:,ch] /= ext_mean[ch]
image = image.transpose(2, 0, 1)
label = np.zeros(num_classes)
for l in (row.Label.split('|')):
label[int(l)] = 1.
return torch.tensor(image).float(), torch.tensor(label).float()
class HpaImageDataSet2:
def __init__(self, df, image_size=None, crop_size=None, transform=None, cutmix_neg=False, mix_color=False, random_ch=False):
self.df = df
self.image_size = image_size
self.crop_size = crop_size
self.transform = transform
self.cutmix_neg = cutmix_neg
self.mix_color = mix_color
self.random_ch = random_ch
def __len__(self):
return (self.df.shape[0])
def __getitem__(self, idx):
row = self.df.iloc[idx]
mask = None
image = np.asarray(PIL.Image.open(row.filepath)).copy()
image = cv2.resize(image,(self.image_size,self.image_size))
if self.crop_size is not None:
random_crop_size = int(np.random.uniform(self.crop_size, self.image_size))
x = int(np.random.uniform(0, self.image_size - random_crop_size))
y = int(np.random.uniform(0, self.image_size - random_crop_size))
image = image[x:x + random_crop_size, y:y + random_crop_size,:]
image = cv2.resize(image,(self.crop_size,self.crop_size))
if self.transform is not None:
image = self.transform(image=image)['image']
image = image.astype(np.float32)
image = np.transpose(image,(2,0,1))
for ch in range(4):
if row.is_ext == 0:
image[ch] = image[ch] / orig_mean[ch]
else:
image[ch] = image[ch] / ext_mean[ch]
add_neg_cell = False
mix_red = False
mix_blue = False
mix_yellow = False
rand_prob = np.random.rand()
if self.cutmix_neg and rand_prob < 0.05:
image[1,...] = image[1,...] * rand_prob * 2
add_neg_cell = True
elif self.mix_color and 0.05 < rand_prob < 0.075:
image[1,...] = image[0,...] * (1-(rand_prob-0.05)*16)
mix_red = True
elif self.mix_color and 0.075 < rand_prob < 0.1:
image[1,...] = image[3,...] * (1-(rand_prob-0.075)*16)
mix_yellow = True
elif self.random_ch and 0.1 < rand_prob < 0.15:
ch_probs = np.random.rand(4)*0.5+0.6
for ch in range(4):
image[ch] = image[ch]*ch_probs[ch]
# image = normed(image)
labels = np.zeros(num_classes)
for l in (row.Label.split('|')):
labels[int(l)] = 1.
if add_neg_cell:
labels[:] = 0.0
labels[18] = 1.0
elif mix_red:
labels[:] = 0.0
labels[10] = 1.0
elif mix_yellow:
labels[:] = 0.0
labels[6] = 1.0
return [torch.tensor(image, dtype=torch.float),torch.tensor(labels, dtype=torch.float)]
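# Minimal sketch of how the dataset above can be consumed, using the loader
# settings defined at the top of this script. The augmentation pipeline and the
# _example_* name are assumptions for illustration only; `df` is expected to
# carry the same columns as df_train_all (filepath, Label, is_ext).
def _example_build_train_loader(df):
    transforms_train = albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.VerticalFlip(p=0.5),
    ])
    dataset_train = HpaImageDataSet1(df, transform=transforms_train)
    return DataLoader(dataset_train, batch_size=batch_size, shuffle=True,
                      num_workers=num_workers, pin_memory=True)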
def mAP(pred, target):
""" Calculate the mean average precision with respect of classes
Args:
pred (torch.Tensor | np.ndarray): The model prediction with shape
(N, C), where C is the number of classes.
target (torch.Tensor | np.ndarray): The target of each prediction with
shape (N, C), where C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
difficult examples.
Returns:
float: A single float as mAP value.
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.numpy()
target = target.numpy()
elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):
        raise TypeError('pred and target should both be torch.Tensor or '
                        'np.ndarray')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
num_classes = pred.shape[1]
ap = np.zeros(num_classes)
for k in range(num_classes):
ap[k] = average_precision(pred[:, k], target[:, k])
mean_ap = ap.mean() * 100.0
return ap, mean_ap
def average_precision(pred, target):
""" Calculate the average precision for a single class
AP summarizes a precision-recall curve as the weighted mean of maximum
precisions obtained for any r'>r, where r is the recall:
    .. math::
\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
Note that no approximation is involved since the curve is piecewise
constant.
Args:
pred (np.ndarray): The model prediction with shape (N, ).
target (np.ndarray): The target of each prediction with shape (N, ).
Returns:
float: a single float as average precision value.
"""
eps = np.finfo(np.float32).eps
# sort examples
sort_inds = np.argsort(-pred)
sort_target = target[sort_inds]
# count true positive examples
pos_inds = sort_target == 1
tp = np.cumsum(pos_inds)
total_pos = tp[-1]
# count not difficult examples
pn_inds = sort_target != -1
pn = np.cumsum(pn_inds)
tp[np.logical_not(pos_inds)] = 0
precision = tp / np.maximum(pn, eps)
    ap = np.sum(precision) / np.maximum(total_pos, eps)
    return ap
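# Small sanity check for the metrics above (illustration only): with a perfect
# ranking every per-class AP is 1, so the mAP should come out as 100.
def _example_map_sanity_check():
    preds = np.array([[0.9, 0.1],
                      [0.8, 0.2],
                      [0.2, 0.7]])
    targets = np.array([[1, 0],
                        [1, 0],
                        [0, 1]])
    ap, mean_ap = mAP(preds, targets)
    print(ap, mean_ap)  # expected: [1. 1.] 100.0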
# -*- coding: utf-8 -*-
"""
Created on 4 Jun 2021
@author: Alexandre
"""
import numpy as np
from pyro.dynamic import statespace
###############################################################################
class SingleMass( statespace.StateSpaceSystem ):
"""Single Mass with linear spring and damper
Attributes
----------
"""
############################
def __init__(self, m=1, k=2, b=0):
""" """
# params
self.m = m
self.k = k
self.b = b
self.l1 = 2
self.l2 = 1
# Matrix ABCD
self.compute_ABCD()
# initialize standard params
statespace.StateSpaceSystem.__init__( self, self.A, self.B, self.C, self.D)
# Name and labels
self.name = 'Linear-Spring-Damper'
self.input_label = [ 'Force']
self.input_units = [ '[N]']
self.output_label = ['Position']
self.output_units = ['[m]']
self.state_label = [ 'Position','Velocity']
self.state_units = [ '[m]', '[m/s]']
self.linestyle = '-'
###########################################################################
def compute_ABCD(self):
"""
"""
self.A = np.array([ [ 0 , 1 ],
[ -self.k/self.m , -self.b/self.m ] ])
self.B = np.array([ [ 0 ],
[ 1 /self.m ]])
self.C = np.array([ [ 1 , 0 ]])
self.D = np.array([ [ 0 ]])
###########################################################################
# Graphical output
###########################################################################
#############################
def xut2q( self, x , u , t ):
""" Compute configuration variables ( q vector ) """
q = np.array([ x[0], u[0] ]) # Hack to illustrate force vector
return q
###########################################################################
def forward_kinematic_domain(self, q ):
"""
"""
l = self.l1 * 2
domain = [ (-l+self.l1,l+self.l1) , (-l,l) , (-l,l) ]#
return domain
###########################################################################
def forward_kinematic_lines(self, q ):
"""
Compute points p = [x;y;z] positions given config q
----------------------------------------------------
        - points of interest for plotting
        Outputs:
        lines_pts = [] : a list of arrays (n_pts x 3), one per line
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# ground line
pts = np.zeros(( 2 , 3 ))
pts[0,:] = np.array([-self.l1,-self.l2,0])
pts[1,:] = np.array([-self.l1,+self.l2,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# mass
pts = np.zeros(( 5 , 3 ))
pts[0,:] = np.array([q[0] - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([q[0] + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([q[0] + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([q[0] - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'b')
# spring
pts = np.zeros(( 15 , 3 ))
d = q[0] + self.l1 - self.l2/2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 - self.l1,0,0])
pts[1,:] = np.array([d*0.20 - self.l1,0,0])
pts[2,:] = np.array([d*0.25 - self.l1,+h,0])
pts[3,:] = np.array([d*0.30 - self.l1,-h,0])
pts[4,:] = np.array([d*0.35 - self.l1,+h,0])
pts[5,:] = np.array([d*0.40 - self.l1,-h,0])
pts[6,:] = np.array([d*0.45 - self.l1,+h,0])
pts[7,:] = np.array([d*0.50 - self.l1,-h,0])
pts[8,:] = np.array([d*0.55 - self.l1,+h,0])
pts[9,:] = np.array([d*0.60 - self.l1,-h,0])
pts[10,:] = np.array([d*0.65 - self.l1,+h,0])
pts[11,:] = np.array([d*0.70 - self.l1,-h,0])
pts[12,:] = np.array([d*0.75 - self.l1,+h,0])
pts[13,:] = np.array([d*0.80 - self.l1,0,0])
pts[14,:] = np.array([d*1.00 - self.l1,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
return lines_pts , lines_style , lines_color
###########################################################################
def forward_kinematic_lines_plus(self, x , u , t ):
"""
plots the force vector
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# force arrow
pts = np.zeros(( 5 , 3 ))
xf = x[0] # base of force x coordinate
f = u[0] # force amplitude
pts[0,:] = np.array([xf + self.l2/2,0,0])
pts[1,:] = np.array([xf + self.l2/2 + f,0,0])
pts[2,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,+self.l2/4*f,0])
pts[3,:] = np.array([xf + self.l2/2 + f,0,0])
pts[4,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,-self.l2/4*f,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'r')
return lines_pts , lines_style , lines_color
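# Quick illustrative check (a sketch, not part of the original class): with no
# damping the poles of the A matrix sit at +/- 1j*sqrt(k/m).
def _example_single_mass_poles():
    sys1 = SingleMass(m=1, k=2, b=0)
    poles, _ = np.linalg.eig(sys1.A)
    print(poles)  # expected: approximately +/- 1.414j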
###############################################################################
class TwoMass( statespace.StateSpaceSystem ):
"""Two Mass with linear spring and damper
Attributes
----------
"""
############################
def __init__(self, m=1, k=2, b=0.2, output_mass = 2):
""" """
# params
self.m1 = m
self.k1 = k
self.b1 = b
self.m2 = m
self.k2 = k
self.b2 = b
self.l1 = 2
self.l2 = 1
# sensor output
self.output_mass = output_mass
# Matrix ABCD
self.compute_ABCD()
# initialize standard params
statespace.StateSpaceSystem.__init__( self, self.A, self.B, self.C, self.D)
# Name and labels
self.name = 'Two mass with linear spring-dampers'
self.input_label = ['Force']
self.input_units = ['[N]']
self.output_label = ['x2']
self.output_units = ['[m]']
self.state_label = [ 'x1','x2', 'dx1', 'dx2']
self.state_units = [ '[m]', '[m]', '[m/s]', '[m/s]']
self.linestyle = '-'
###########################################################################
def compute_ABCD(self):
"""
"""
self.A = np.array([ [ 0, 0, 1, 0 ],
[ 0, 0, 0, 1 ],
[ -(self.k1+self.k2)/self.m1, +self.k2/self.m1, -self.b1/self.m1, 0],
[ +self.k2/self.m2, -self.k2/self.m2, 0, -self.b2/self.m2]])
self.B = np.array([ [ 0 ],
[ 0 ],
[ 0 ],
[ 1/self.m2 ]])
if self.output_mass == 2:
self.C = np.array([ [ 0 , 1 , 0 , 0 ]])
self.output_label = ['x2']
elif self.output_mass ==1:
self.C = np.array([ [ 1 , 0 , 0 , 0 ]])
self.output_label = ['x1']
else:
self.C = np.array([ [ 0 , 1 , 0 , 0 ]])
self.output_label = ['x2']
self.D = np.array([ [ 0 ]])
###########################################################################
# Graphical output
###########################################################################
#############################
def xut2q( self, x , u , t ):
""" Compute configuration variables ( q vector ) """
q = np.array([ x[0], x[1], u[0] ])
return q
###########################################################################
def forward_kinematic_domain(self, q ):
"""
"""
l = self.l1 * 3
domain = [ (-l+self.l1,l+self.l1) , (-l,l) , (-l,l) ]#
return domain
###########################################################################
def forward_kinematic_lines(self, q ):
"""
Compute points p = [x;y;z] positions given config q
----------------------------------------------------
        - points of interest for plotting
        Outputs:
        lines_pts = [] : a list of arrays (n_pts x 3), one per line
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# ground line
pts = np.zeros(( 2 , 3 ))
pts[0,:] = np.array([-self.l1*2,-self.l2,0])
pts[1,:] = np.array([-self.l1*2,+self.l2,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# mass 1
pts = np.zeros(( 5 , 3 ))
x1 = q[0] - self.l1
pts[0,:] = np.array([ x1 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([ x1 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([ x1 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([ x1 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'g')
# mass 2
pts = np.zeros(( 5 , 3 ))
x2 = q[1]
pts[0,:] = np.array([x2 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([x2 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([x2 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([x2 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'b')
# spring 1
pts = np.zeros(( 15 , 3 ))
d = q[0] + self.l1 - self.l2/2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 - self.l1*2,0,0])
pts[1,:] = np.array([d*0.20 - self.l1*2,0,0])
pts[2,:] = np.array([d*0.25 - self.l1*2,+h,0])
pts[3,:] = np.array([d*0.30 - self.l1*2,-h,0])
pts[4,:] = np.array([d*0.35 - self.l1*2,+h,0])
pts[5,:] = np.array([d*0.40 - self.l1*2,-h,0])
pts[6,:] = np.array([d*0.45 - self.l1*2,+h,0])
        pts[7,:] = np.array([d*0.50 - self.l1*2,-h,0])
from functools import reduce
from operator import add
import sys
from typing import List, Tuple
import numpy as np
from scipy.linalg import expm, kron
import quara.utils.matrix_util as mutil
from quara.objects.composite_system import CompositeSystem
from quara.objects.gate import (
Gate,
convert_hs,
convert_var_index_to_gate_index,
convert_gate_index_to_var_index,
convert_hs_to_var,
)
from quara.objects.matrix_basis import (
MatrixBasis,
get_comp_basis,
)
from quara.settings import Settings
class EffectiveLindbladian(Gate):
def __init__(
self,
c_sys: CompositeSystem,
hs: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
mode_proj_order: str = "eq_ineq",
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
):
"""Constructor
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
hs : np.ndarray
HS representation of this EffectiveLindbladian.
is_physicality_required : bool, optional
checks whether the EffectiveLindbladian is physically wrong, by default True.
if at least one of the following conditions is ``False``, the EffectiveLindbladian is physically wrong:
- EffectiveLindbladian is TP(trace-preserving map).
- EffectiveLindbladian is CP(Complete-Positivity-Preserving).
If you want to ignore the above requirements and create a EffectiveLindbladian object, set ``is_physicality_required`` to ``False``.
Raises
------
ValueError
HS representation is not square matrix.
ValueError
dim of HS representation is not square number.
ValueError
HS representation is not real matrix.
ValueError
dim of HS representation does not equal dim of CompositeSystem.
ValueError
``is_physicality_required`` is ``True`` and the gate is not physically correct.
"""
        # check the basis is an orthonormal Hermitian matrix basis with B_0 = I/sqrt(d)
if c_sys.is_orthonormal_hermitian_0thprop_identity == False:
raise ValueError(
"basis is not a orthonormal Hermitian matrix basis and 0th prop I."
)
super().__init__(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
mode_proj_order=mode_proj_order,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
# whether the EffectiveLindbladian is physically correct
# is_physical() is called in the parent class, so it is not checked here.
def calc_h_mat(self) -> np.ndarray:
"""calculates h matrix of this EffectiveLindbladian.
Returns
-------
np.ndarray
h matrix of this EffectiveLindbladian.
"""
basis = self.composite_system.basis()
comp_basis = self.composite_system.comp_basis()
lindbladian_cb = convert_hs(self.hs, basis, comp_basis)
identity = np.eye(self.dim)
tmp_h_mat = np.zeros((self.dim, self.dim), dtype=np.complex128)
for B_alpha in basis:
trace = np.trace(
lindbladian_cb
@ (np.kron(B_alpha, identity) - np.kron(identity, B_alpha.conj()))
)
h_alpha = 1j / (2 * self.dim) * trace
tmp_h_mat += h_alpha * B_alpha
return tmp_h_mat
def calc_j_mat(self) -> np.ndarray:
"""calculates j matrix of this EffectiveLindbladian.
Returns
-------
np.ndarray
j matrix of this EffectiveLindbladian.
"""
basis = self.composite_system.basis()
comp_basis = self.composite_system.comp_basis()
lindbladian_cb = convert_hs(self.hs, basis, comp_basis)
identity = np.eye(self.dim)
tmp_j_mat = np.zeros((self.dim, self.dim), dtype=np.complex128)
for alpha, B_alpha in enumerate(basis[1:]):
trace = np.trace(
lindbladian_cb
@ (np.kron(B_alpha, identity) + np.kron(identity, B_alpha.conj()))
)
delta = 1 if alpha == 0 else 0
j_alpha = 1 / (2 * self.dim * (1 + delta)) * trace
tmp_j_mat += j_alpha * B_alpha
return tmp_j_mat
def calc_k_mat(self) -> np.ndarray:
"""calculates k matrix of this EffectiveLindbladian.
Returns
-------
np.ndarray
k matrix of this EffectiveLindbladian.
"""
basis = self.composite_system.basis()
comp_basis = self.composite_system.comp_basis()
lindbladian_cb = convert_hs(self.hs, basis, comp_basis)
tmp_k_mat = np.zeros(
(self.dim ** 2 - 1, self.dim ** 2 - 1), dtype=np.complex128
)
for alpha, B_alpha in enumerate(basis[1:]):
for beta, B_beta in enumerate(basis[1:]):
tmp_k_mat[alpha, beta] = np.trace(
lindbladian_cb @ kron(B_alpha, B_beta.conj())
)
return tmp_k_mat
def _check_mode_basis(self, mode_basis: str):
if not mode_basis in ["hermitian_basis", "comp_basis"]:
raise ValueError(f"unsupported mode_basis={mode_basis}")
def calc_h_part(self, mode_basis: str = "hermitian_basis") -> np.ndarray:
"""calculates h part of this EffectiveLindbladian.
mode_basis allows the following values:
- hermitian_basis
- comp_basis
Parameters
----------
mode_basis : str, optional
basis for calculating h part, by default "hermitian_basis"
Returns
-------
np.ndarray
h part of this EffectiveLindbladian.
"""
self._check_mode_basis(mode_basis)
h_mat = self.calc_h_mat()
h_part = _calc_h_part_from_h_mat(h_mat)
if mode_basis == "hermitian_basis":
h_part = convert_hs(
h_part,
self.composite_system.comp_basis(),
self.composite_system.basis(),
)
h_part = _truncate_hs(h_part, self.eps_truncate_imaginary_part)
return h_part
def calc_j_part(self, mode_basis: str = "hermitian_basis") -> np.ndarray:
"""calculates j part of this EffectiveLindbladian.
mode_basis allows the following values:
- hermitian_basis
- comp_basis
Parameters
----------
mode_basis : str, optional
basis for calculating j part, by default "hermitian_basis"
Returns
-------
np.ndarray
j part of this EffectiveLindbladian.
"""
self._check_mode_basis(mode_basis)
j_mat = self.calc_j_mat()
j_part = _calc_j_part_from_j_mat(j_mat)
if mode_basis == "hermitian_basis":
j_part = convert_hs(
j_part,
self.composite_system.comp_basis(),
self.composite_system.basis(),
)
j_part = _truncate_hs(j_part, self.eps_truncate_imaginary_part)
return j_part
def calc_k_part(self, mode_basis: str = "hermitian_basis") -> np.ndarray:
"""calculates k part of this EffectiveLindbladian.
mode_basis allows the following values:
- hermitian_basis
- comp_basis
Parameters
----------
mode_basis : str, optional
basis for calculating k part, by default "hermitian_basis"
Returns
-------
np.ndarray
k part of this EffectiveLindbladian.
"""
self._check_mode_basis(mode_basis)
k_mat = self.calc_k_mat()
k_part = _calc_k_part_from_k_mat(k_mat, self.composite_system)
if mode_basis == "hermitian_basis":
k_part = convert_hs(
k_part,
self.composite_system.comp_basis(),
self.composite_system.basis(),
)
k_part = _truncate_hs(k_part, self.eps_truncate_imaginary_part)
return k_part
def calc_d_part(self, mode_basis: str = "hermitian_basis") -> np.ndarray:
"""calculates d part of this EffectiveLindbladian.
mode_basis allows the following values:
- hermitian_basis
- comp_basis
Parameters
----------
mode_basis : str, optional
basis for calculating d part, by default "hermitian_basis"
Returns
-------
np.ndarray
d part of this EffectiveLindbladian.
"""
self._check_mode_basis(mode_basis)
d_part = self.calc_j_part(mode_basis="comp_basis") + self.calc_k_part(
mode_basis="comp_basis"
)
if mode_basis == "hermitian_basis":
d_part = convert_hs(
d_part,
self.composite_system.comp_basis(),
self.composite_system.basis(),
)
d_part = _truncate_hs(d_part, self.eps_truncate_imaginary_part)
return d_part
def _generate_origin_obj(self):
# return HS matrix of the origin = diag(0, min, min,..,min) in R^{{dim ** 2}x{dim ** 2}}
min = sys.float_info.min_exp
diag_values = [0] + [min] * (self.dim ** 2 - 1)
origin_hs = np.diag(diag_values).real.astype(np.float64)
return origin_hs
def calc_gradient(self, var_index: int) -> "EffectiveLindbladian":
lindbladian = calc_gradient_from_effective_lindbladian(
self.composite_system,
self.hs,
var_index,
is_estimation_object=self.is_estimation_object,
on_para_eq_constraint=self.on_para_eq_constraint,
on_algo_eq_constraint=self.on_algo_eq_constraint,
on_algo_ineq_constraint=self.on_algo_ineq_constraint,
eps_proj_physical=self.eps_proj_physical,
eps_truncate_imaginary_part=self.eps_truncate_imaginary_part,
)
return lindbladian
def calc_proj_eq_constraint(self) -> "EffectiveLindbladian":
new_hs = self._copy()
new_hs[0, :] = 0
new_lindbladian = EffectiveLindbladian(
c_sys=self.composite_system,
hs=new_hs,
is_physicality_required=self.is_physicality_required,
is_estimation_object=self.is_estimation_object,
on_para_eq_constraint=self.on_para_eq_constraint,
on_algo_eq_constraint=self.on_algo_eq_constraint,
on_algo_ineq_constraint=self.on_algo_ineq_constraint,
eps_proj_physical=self.eps_proj_physical,
eps_truncate_imaginary_part=self.eps_truncate_imaginary_part,
)
return new_lindbladian
def calc_proj_ineq_constraint(self) -> "EffectiveLindbladian":
h_mat = self.calc_h_mat()
j_mat = self.calc_j_mat()
k_mat = self.calc_k_mat()
# project k_mat
eigenvals, eigenvecs = np.linalg.eig(k_mat)
for index in range(len(eigenvals)):
if eigenvals[index] < 0:
eigenvals[index] = 0
new_k_mat = eigenvecs @ np.diag(eigenvals) @ eigenvecs.T.conjugate()
new_lindbladian = generate_effective_lindbladian_from_hjk(
self.composite_system,
h_mat,
j_mat,
new_k_mat,
is_physicality_required=self.is_physicality_required,
is_estimation_object=self.is_estimation_object,
on_para_eq_constraint=self.on_para_eq_constraint,
on_algo_eq_constraint=self.on_algo_eq_constraint,
on_algo_ineq_constraint=self.on_algo_ineq_constraint,
eps_proj_physical=self.eps_proj_physical,
eps_truncate_imaginary_part=self.eps_truncate_imaginary_part,
)
return new_lindbladian
def is_tp(self, atol: float = None) -> bool:
"""returns whether the effective Lindbladian is TP(trace-preserving map).
Parameters
----------
atol : float, optional
the absolute tolerance parameter, uses :func:`~quara.settings.Settings.get_atol` by default.
this function checks ``absolute(trace after mapped - trace before mapped) <= atol``.
Returns
-------
bool
True where the effective Lindbladian is TP, False otherwise.
"""
atol = Settings.get_atol() if atol is None else atol
# for A:L^{gb}, "A is TP" <=> "1st row of A is zeros"
return np.allclose(self.hs[0], 0, atol=atol, rtol=0.0)
def is_cp(self, atol: float = None) -> bool:
"""returns whether effective Lindbladian is CP(Complete-Positivity-Preserving).
Parameters
----------
atol : float, optional
the absolute tolerance parameter, uses :func:`~quara.settings.Settings.get_atol` by default.
this function ignores eigenvalues close zero.
Returns
-------
bool
True where the effective Lindbladian is CP, False otherwise.
"""
atol = Settings.get_atol() if atol is None else atol
# for A:L^{gb}, "A is CP" <=> "k >= 0"
return mutil.is_positive_semidefinite(self.calc_k_mat(), atol=atol)
def to_kraus_matrices(self) -> List[Tuple[np.float64, np.ndarray]]:
"""returns Kraus matrices of EffectiveLindbladian.
        if :math:`A` is a Hermitian-preserving matrix, then :math:`A(X) = \\sum_i a_i A_i X A_i^{\\dagger}`, where :math:`a_i` are real numbers and :math:`A_i` are complex square matrices.
this function returns the list of :math:`(a_i, A_i)` sorted in descending order by :math:`a_i`.
Returns
-------
List[Tuple[np.float64, np.ndarray]]
Kraus matrices of EffectiveLindbladian.
"""
# step1. calc the eigenvalue decomposition of Choi matrix.
# Choi = \sum_{\alpha} c_{\alpha} |c_{\alpha}><c_{\alpha}| s.t. c_{\alpha} are eigenvalues and |c_{\alpha}> are eigenvectors of orthogonal basis.
choi = self.to_choi_matrix()
eigen_vals, eigen_vecs = np.linalg.eig(choi)
eigens = [
(eigen_vals[index], eigen_vecs[:, index])
for index in range(len(eigen_vals))
]
# filter non-zero eigen values
eigens = [
(eigen_val, eigen_vec)
for (eigen_val, eigen_vec) in eigens
if not np.isclose(eigen_val, 0, atol=Settings.get_atol())
]
# sort large eigenvalue order
eigens = sorted(eigens, key=lambda x: x[0], reverse=True)
# step2. convert to Kraus representaion.
# K_{\alpha} = {\sqrt{c_{\alpha}}, unvec(|c_{\alpha}>)}
kraus = [
(np.sqrt(eigen_val), eigen_vec.reshape((self.dim, self.dim)))
for (eigen_val, eigen_vec) in eigens
]
return kraus
def _generate_from_var_func(self):
return convert_var_to_effective_lindbladian
def to_gate(self) -> Gate:
"""returns the Gate corresponding to this EffectiveLindbladian.
Returns
-------
Gate
the Gate corresponding to this EffectiveLindbladian.
"""
new_hs = expm(self.hs)
gate = Gate(
self.composite_system,
new_hs,
is_physicality_required=self.is_physicality_required,
is_estimation_object=self.is_estimation_object,
on_para_eq_constraint=self.on_para_eq_constraint,
on_algo_eq_constraint=self.on_algo_eq_constraint,
on_algo_ineq_constraint=self.on_algo_ineq_constraint,
mode_proj_order=self.mode_proj_order,
eps_proj_physical=self.eps_proj_physical,
eps_truncate_imaginary_part=self.eps_truncate_imaginary_part,
)
return gate
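# Illustrative sketch (not part of the quara API): for an EffectiveLindbladian
# `el`, the generator splits into its h/j/k matrices via the methods above, and
# the h/j/k parts reassemble the generator in the computational basis.
def _example_inspect_lindbladian(el: EffectiveLindbladian):
    h_mat, j_mat, k_mat = el.calc_h_mat(), el.calc_j_mat(), el.calc_k_mat()
    print("TP:", el.is_tp(), "CP:", el.is_cp())
    total = (el.calc_h_part(mode_basis="comp_basis")
             + el.calc_j_part(mode_basis="comp_basis")
             + el.calc_k_part(mode_basis="comp_basis"))
    return h_mat, j_mat, k_mat, total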
def convert_var_index_to_effective_lindbladian_index(
c_sys: CompositeSystem, var_index: int, on_para_eq_constraint: bool = True
) -> Tuple[int, int]:
"""converts variable index to EffectiveLindbladian index.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
var_index : int
variable index.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
Returns
-------
Tuple[int, int]
index of EffectiveLindbladian.
first value of tuple is row number of HS representation of this EffectiveLindbladian.
second value of tuple is column number of HS representation of this EffectiveLindbladian.
"""
return convert_var_index_to_gate_index(
c_sys, var_index, on_para_eq_constraint=on_para_eq_constraint
)
def convert_effective_lindbladian_index_to_var_index(
c_sys: CompositeSystem,
effective_lindbladian_index: Tuple[int, int],
on_para_eq_constraint: bool = True,
) -> int:
"""converts effective_lindbladian_index index to variable index.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
effective_lindbladian_index : Tuple[int, int]
index of EffectiveLindbladian.
first value of tuple is row number of HS representation of this EffectiveLindbladian.
second value of tuple is column number of HS representation of this EffectiveLindbladian.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
Returns
-------
int
variable index.
"""
return convert_gate_index_to_var_index(
c_sys, effective_lindbladian_index, on_para_eq_constraint=on_para_eq_constraint
)
def convert_var_to_effective_lindbladian(
c_sys: CompositeSystem,
var: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
) -> EffectiveLindbladian:
"""converts vec of variables to EffectiveLindbladian.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
var : np.ndarray
vec of variables.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
EffectiveLindbladian
converted EffectiveLindbladian.
"""
dim = c_sys.dim
size = (dim ** 2 - 1, dim ** 2) if on_para_eq_constraint else (dim ** 2, dim ** 2)
reshaped = var.reshape(size)
hs = (
np.insert(reshaped, 0, np.eye(1, dim ** 2), axis=0)
if on_para_eq_constraint
else reshaped
)
lindbladian = EffectiveLindbladian(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return lindbladian
def convert_effective_lindbladian_to_var(
c_sys: CompositeSystem, hs: np.ndarray, on_para_eq_constraint: bool = True
) -> np.ndarray:
"""converts hs of EffectiveLindbladian to vec of variables.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
hs : np.ndarray
HS representation of this EffectiveLindbladian.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
Returns
-------
np.ndarray
vec of variables.
"""
return convert_hs_to_var(c_sys, hs, on_para_eq_constraint=on_para_eq_constraint)
def calc_gradient_from_effective_lindbladian(
c_sys: CompositeSystem,
hs: np.ndarray,
var_index: int,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
) -> EffectiveLindbladian:
"""calculates gradient from EffectiveLindbladian.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this gate.
hs : np.ndarray
HS representation of this gate.
var_index : int
variable index.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
EffectiveLindbladian
EffectiveLindbladian with gradient as hs.
"""
gradient = np.zeros((c_sys.dim ** 2, c_sys.dim ** 2), dtype=np.float64)
gate_index = convert_var_index_to_effective_lindbladian_index(
c_sys, var_index, on_para_eq_constraint
)
gradient[gate_index] = 1
lindbladian = EffectiveLindbladian(
c_sys,
gradient,
is_physicality_required=False,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return lindbladian
def _check_h_mat(h_mat: np.ndarray, dim: int) -> None:
    # whether h_mat is Hermitian
    if not mutil.is_hermitian(h_mat):
        raise ValueError(f"h_mat must be Hermitian. h_mat={h_mat}")
# whether dim of h_mat equals dim of CompositeSystem
size = h_mat.shape[0]
if dim != size:
raise ValueError(
f"dim of h_mat must equal dim of CompositeSystem. dim of h_mat is {size}. dim of CompositeSystem is {dim}"
)
def _calc_h_part_from_h_mat(h_mat: np.ndarray) -> np.ndarray:
identity = np.eye(h_mat.shape[0])
return -1j * (np.kron(h_mat, identity) - np.kron(identity, h_mat.conj()))
def _check_j_mat(j_mat: np.ndarray, dim: int) -> None:
    # whether j_mat is Hermitian
    if not mutil.is_hermitian(j_mat):
        raise ValueError(f"j_mat must be Hermitian. j_mat={j_mat}")
# whether dim of j_mat equals dim of CompositeSystem
size = j_mat.shape[0]
if dim != size:
raise ValueError(
f"dim of j_mat must equal dim of CompositeSystem. dim of j_mat is {size}. dim of CompositeSystem is {dim}"
)
def _calc_j_mat_from_k_mat(k_mat: np.ndarray, c_sys: CompositeSystem) -> np.ndarray:
return _calc_j_mat_from_k_mat_with_sparsity(k_mat, c_sys)
def _calc_j_mat_from_k_mat_with_sparsity(
k_mat: np.ndarray, c_sys: CompositeSystem
) -> np.ndarray:
j_mat_vec = c_sys.basishermitian_basis_T_from_1.dot(k_mat.flatten())
j_mat = j_mat_vec.reshape((c_sys.dim, c_sys.dim))
return -1 / 2 * j_mat
def _calc_j_mat_from_k_mat_slowly(
k_mat: np.ndarray, c_sys: CompositeSystem
) -> np.ndarray:
basis = c_sys.basis()
j_mat = np.zeros((c_sys.dim, c_sys.dim), dtype=np.complex128)
for row in range(k_mat.shape[0]):
for col in range(k_mat.shape[1]):
term = k_mat[row, col] * (basis[col + 1].T.conj() @ basis[row + 1])
j_mat += term
return -1 / 2 * j_mat
def _calc_j_part_from_j_mat(j_mat: np.ndarray) -> np.ndarray:
identity = np.eye(j_mat.shape[0])
return np.kron(j_mat, identity) + np.kron(identity, j_mat.conj())
def _check_k_mat(k_mat: np.ndarray, dim: int) -> None:
    # whether k_mat is Hermitian
    if not mutil.is_hermitian(k_mat):
        raise ValueError(f"k_mat must be Hermitian. k_mat={k_mat}")
# whether dim of k_mat equals dim of CompositeSystem
size = k_mat.shape[0]
if dim ** 2 - 1 != size:
raise ValueError(
f"dim of k_mat must equal 'dim of CompositeSystem' ** 2 -1 . dim of k_mat is {size}. dim of CompositeSystem is {dim}"
)
def _calc_k_part_from_k_mat(k_mat: np.ndarray, c_sys: CompositeSystem) -> np.ndarray:
return _calc_k_part_from_k_mat_with_sparsity(k_mat, c_sys)
def _calc_k_part_from_slowly(k_mat: np.ndarray, c_sys: CompositeSystem) -> np.ndarray:
basis = c_sys.basis()
k_part = np.zeros((c_sys.dim ** 2, c_sys.dim ** 2), dtype=np.complex128)
for row in range(k_mat.shape[0]):
for col in range(k_mat.shape[0]):
term = k_mat[row, col] * kron(basis[row + 1], basis[col + 1].conj())
k_part += term
return k_part
def _calc_k_part_from_k_mat_with_sparsity(
k_mat: np.ndarray, c_sys: CompositeSystem
) -> np.ndarray:
k_part_vec = c_sys.basis_basisconjugate_T_sparse_from_1.dot(k_mat.flatten())
k_part = k_part_vec.reshape((c_sys.dim ** 2, c_sys.dim ** 2))
return k_part
def _truncate_hs(
hs: np.ndarray,
eps_truncate_imaginary_part: float = None,
is_zero_imaginary_part_required: bool = True,
) -> np.ndarray:
tmp_hs = mutil.truncate_imaginary_part(hs, eps_truncate_imaginary_part)
if is_zero_imaginary_part_required == True and np.any(tmp_hs.imag != 0):
raise ValueError(
f"some imaginary parts of entries of matrix != 0. converted hs={tmp_hs}"
)
if is_zero_imaginary_part_required == True:
tmp_hs = tmp_hs.real.astype(np.float64)
truncated_hs = mutil.truncate_computational_fluctuation(
tmp_hs, eps_truncate_imaginary_part
)
return truncated_hs
def generate_hs_from_hjk(
c_sys: CompositeSystem,
h_mat: np.ndarray,
j_mat: np.ndarray,
k_mat: np.ndarray,
eps_truncate_imaginary_part: float = None,
) -> np.ndarray:
"""generates HS matrix of EffectiveLindbladian from h matrix, j matrix and k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
j_mat : np.ndarray
j matrix.
k_mat : np.ndarray
k matrix.
Returns
-------
np.ndarray
HS matrix of EffectiveLindbladian.
"""
dim = c_sys.dim
# calculate h_part
_check_h_mat(h_mat, dim)
h_part = _calc_h_part_from_h_mat(h_mat)
# calculate j_part
_check_j_mat(j_mat, dim)
j_part = _calc_j_part_from_j_mat(j_mat)
# calculate k_part
_check_k_mat(k_mat, dim)
k_part = _calc_k_part_from_k_mat(k_mat, c_sys)
# calculate hs(=Lindbladian for Hermitian basis)
lindbladian_comp_basis = h_part + j_part + k_part
lindbladian_tmp = convert_hs(
lindbladian_comp_basis, c_sys.comp_basis(), c_sys.basis()
)
lindbladian_hermitian_basis = _truncate_hs(
lindbladian_tmp, eps_truncate_imaginary_part
)
return lindbladian_hermitian_basis
def generate_effective_lindbladian_from_hjk(
c_sys: CompositeSystem,
h_mat: np.ndarray,
j_mat: np.ndarray,
k_mat: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
mode_proj_order: str = "eq_ineq",
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
):
"""generates EffectiveLindbladian from h matrix, j matrix and k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
j_mat : np.ndarray
j matrix.
k_mat : np.ndarray
k matrix.
is_physicality_required : bool, optional
whether this QOperation is physicality required, by default True
is_estimation_object : bool, optional
whether this QOperation is estimation object, by default True
on_para_eq_constraint : bool, optional
whether this QOperation is on parameter equality constraint, by default True
on_algo_eq_constraint : bool, optional
whether this QOperation is on algorithm equality constraint, by default True
on_algo_ineq_constraint : bool, optional
whether this QOperation is on algorithm inequality constraint, by default True
mode_proj_order : str, optional
the order in which the projections are performed, by default "eq_ineq"
eps_proj_physical : float, optional
epsilon that is projection algorithm error threshold for being physical, by default :func:`~quara.settings.Settings.get_atol` / 10.0
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
np.ndarray
EffectiveLindbladian.
"""
# generate HS
hs = generate_hs_from_hjk(c_sys, h_mat, j_mat, k_mat)
# init
effective_lindbladian = EffectiveLindbladian(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
mode_proj_order=mode_proj_order,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return effective_lindbladian
def generate_hs_from_h(
c_sys: CompositeSystem,
h_mat: np.ndarray,
eps_truncate_imaginary_part: float = None,
) -> np.ndarray:
"""generates HS matrix of EffectiveLindbladian from h matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
Returns
-------
np.ndarray
HS matrix of EffectiveLindbladian.
"""
dim = c_sys.dim
# calculate h_part
_check_h_mat(h_mat, dim)
h_part = _calc_h_part_from_h_mat(h_mat)
# calculate hs(=Lindbladian for Hermitian basis)
lindbladian_comp_basis = h_part
lindbladian_tmp = convert_hs(
lindbladian_comp_basis, c_sys.comp_basis(), c_sys.basis()
)
lindbladian_hermitian_basis = _truncate_hs(
lindbladian_tmp, eps_truncate_imaginary_part
)
return lindbladian_hermitian_basis
def generate_effective_lindbladian_from_h(
c_sys: CompositeSystem,
h_mat: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
mode_proj_order: str = "eq_ineq",
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
):
"""generates EffectiveLindbladian from h matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
is_physicality_required : bool, optional
whether this QOperation is physicality required, by default True
is_estimation_object : bool, optional
whether this QOperation is estimation object, by default True
on_para_eq_constraint : bool, optional
whether this QOperation is on parameter equality constraint, by default True
on_algo_eq_constraint : bool, optional
whether this QOperation is on algorithm equality constraint, by default True
on_algo_ineq_constraint : bool, optional
whether this QOperation is on algorithm inequality constraint, by default True
mode_proj_order : str, optional
the order in which the projections are performed, by default "eq_ineq"
eps_proj_physical : float, optional
epsilon that is projection algorithm error threshold for being physical, by default :func:`~quara.settings.Settings.get_atol` / 10.0
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
    EffectiveLindbladian
        the generated EffectiveLindbladian.
"""
# generate HS
hs = generate_hs_from_h(c_sys, h_mat)
# init
effective_lindbladian = EffectiveLindbladian(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
mode_proj_order=mode_proj_order,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return effective_lindbladian
def generate_hs_from_hk(
c_sys: CompositeSystem,
h_mat: np.ndarray,
k_mat: np.ndarray,
eps_truncate_imaginary_part: float = None,
) -> np.ndarray:
"""generates HS matrix of EffectiveLindbladian from h matrix and k matrix.
j matrix is calculated from k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
k_mat : np.ndarray
k matrix.
Returns
-------
np.ndarray
HS matrix of EffectiveLindbladian.
"""
dim = c_sys.dim
# calculate h_part
_check_h_mat(h_mat, dim)
h_part = _calc_h_part_from_h_mat(h_mat)
# calculate k_part
_check_k_mat(k_mat, dim)
k_part = _calc_k_part_from_k_mat(k_mat, c_sys)
# calculate j_part
j_mat = _calc_j_mat_from_k_mat(k_mat, c_sys)
j_part = _calc_j_part_from_j_mat(j_mat)
# calculate hs(=Lindbladian for Hermitian basis)
lindbladian_comp_basis = h_part + j_part + k_part
lindbladian_tmp = convert_hs(
lindbladian_comp_basis, c_sys.comp_basis(), c_sys.basis()
)
lindbladian_hermitian_basis = _truncate_hs(
lindbladian_tmp, eps_truncate_imaginary_part
)
return lindbladian_hermitian_basis
def generate_effective_lindbladian_from_hk(
c_sys: CompositeSystem,
h_mat: np.ndarray,
k_mat: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
mode_proj_order: str = "eq_ineq",
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
):
"""generates EffectiveLindbladian from h matrix and k matrix.
j matrix is calculated from k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
h_mat : np.ndarray
h matrix.
k_mat : np.ndarray
k matrix.
is_physicality_required : bool, optional
whether this QOperation is physicality required, by default True
is_estimation_object : bool, optional
whether this QOperation is estimation object, by default True
on_para_eq_constraint : bool, optional
whether this QOperation is on parameter equality constraint, by default True
on_algo_eq_constraint : bool, optional
whether this QOperation is on algorithm equality constraint, by default True
on_algo_ineq_constraint : bool, optional
whether this QOperation is on algorithm inequality constraint, by default True
mode_proj_order : str, optional
the order in which the projections are performed, by default "eq_ineq"
eps_proj_physical : float, optional
epsilon that is projection algorithm error threshold for being physical, by default :func:`~quara.settings.Settings.get_atol` / 10.0
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
    EffectiveLindbladian
        the generated EffectiveLindbladian.
"""
# generate HS
hs = generate_hs_from_hk(c_sys, h_mat, k_mat)
# init
effective_lindbladian = EffectiveLindbladian(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
mode_proj_order=mode_proj_order,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return effective_lindbladian
def generate_hs_from_k(
c_sys: CompositeSystem,
k_mat: np.ndarray,
eps_truncate_imaginary_part: float = None,
) -> np.ndarray:
"""generates HS matrix of EffectiveLindbladian from k matrix.
j matrix is calculated from k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
k_mat : np.ndarray
k matrix.
Returns
-------
np.ndarray
HS matrix of EffectiveLindbladian.
"""
dim = c_sys.dim
# calculate k_part
_check_k_mat(k_mat, dim)
k_part = _calc_k_part_from_k_mat(k_mat, c_sys)
# calculate j_part
j_mat = _calc_j_mat_from_k_mat(k_mat, c_sys)
j_part = _calc_j_part_from_j_mat(j_mat)
# calculate hs(=Lindbladian for Hermitian basis)
lindbladian_comp_basis = j_part + k_part
lindbladian_tmp = convert_hs(
lindbladian_comp_basis, c_sys.comp_basis(), c_sys.basis()
)
lindbladian_hermitian_basis = _truncate_hs(
lindbladian_tmp, eps_truncate_imaginary_part
)
return lindbladian_hermitian_basis
def generate_effective_lindbladian_from_k(
c_sys: CompositeSystem,
k_mat: np.ndarray,
is_physicality_required: bool = True,
is_estimation_object: bool = True,
on_para_eq_constraint: bool = True,
on_algo_eq_constraint: bool = True,
on_algo_ineq_constraint: bool = True,
mode_proj_order: str = "eq_ineq",
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
):
"""generates EffectiveLindbladian from k matrix.
j matrix is calculated from k matrix.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this EffectiveLindbladian.
k_mat : np.ndarray
k matrix.
is_physicality_required : bool, optional
whether this QOperation is physicality required, by default True
is_estimation_object : bool, optional
whether this QOperation is estimation object, by default True
on_para_eq_constraint : bool, optional
whether this QOperation is on parameter equality constraint, by default True
on_algo_eq_constraint : bool, optional
whether this QOperation is on algorithm equality constraint, by default True
on_algo_ineq_constraint : bool, optional
whether this QOperation is on algorithm inequality constraint, by default True
mode_proj_order : str, optional
the order in which the projections are performed, by default "eq_ineq"
eps_proj_physical : float, optional
epsilon that is projection algorithm error threshold for being physical, by default :func:`~quara.settings.Settings.get_atol` / 10.0
eps_truncate_imaginary_part : float, optional
threshold to truncate imaginary part, by default :func:`~quara.settings.Settings.get_atol`
Returns
-------
    EffectiveLindbladian
        the generated EffectiveLindbladian.
"""
# generate HS
hs = generate_hs_from_k(c_sys, k_mat)
# init
effective_lindbladian = EffectiveLindbladian(
c_sys,
hs,
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
on_algo_eq_constraint=on_algo_eq_constraint,
on_algo_ineq_constraint=on_algo_ineq_constraint,
mode_proj_order=mode_proj_order,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
return effective_lindbladian
def generate_j_part_cb_from_jump_operators(
jump_operators: List[np.ndarray],
) -> np.ndarray:
"""generates j part of EffectiveLindbladian from jump operators.
this j part is represented by computational basis.
Parameters
----------
jump_operators : List[np.ndarray]
jump operators to generate j part.
Returns
-------
np.ndarray
j part of EffectiveLindbladian.
"""
dim = jump_operators[0].shape[0]
identity = np.eye(dim)
terms = [
| np.kron(opertor, identity) | numpy.kron |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import joblib as jl
from code.plotting import parlabels
traces = jl.load('ramp_fits/traces/NGRIP.gz')
nevent = len(traces.coords['event'].values)
order_freq = np.zeros((nevent, 4, 4))
for i, event in enumerate(traces.coords['event'].values):
t0 = traces.sel(model='t0', event=event)
t0_order = np.argsort(t0, axis=1)
f = lambda x: np.bincount(x, minlength=4)
order_freq[i] = np.array(list(map(f, t0_order.values.T))) / 12000
mean_order = | np.mean(order_freq, axis=0) | numpy.mean |
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # add the parent directory to the module search path
import numpy as np
import pickle
from dataset.mnist import load_mnist
from common.functions import sigmoid, softmax
import time
import logging
# Logger
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create console handler and set level to debug
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
# add formatter to ch
handler.setFormatter(formatter)
logger.addHandler(handler)
'''
Run inference using the pre-trained model.
'''
def get_data():
(x_train, t_train), (x_test, t_test) = load_mnist(
normalize=False, flatten=True, one_hot_label=False)
return x_test, t_test
def init_network():
with open("sample_weight.pkl", 'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
    '''
    Perform inference (a forward pass) for input x with the trained network.

    Parameters
    ----------
    network : dict
        trained parameters: weights 'W1'..'W3' and biases 'b1'..'b3'
    x : np.ndarray
        flattened input image(s) of length 784

    Returns
    -------
    np.ndarray
        output-layer scores for the 10 digit classes
    '''
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = | np.dot(z1, W2) | numpy.dot |
import warnings
from functools import reduce
import cv2
import numpy as np
import tensorflow as tf
def compose(*funcs):
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
#---------------------------------------------------#
#   Resize the input image while keeping its aspect ratio (letterbox)
#---------------------------------------------------#
def letterbox_image(image, size):
ih, iw, _ = np.shape(image)
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw, nh))
new_image = np.ones([size[1], size[0], 3]) * 128
new_image[(h-nh)//2:nh+(h-nh)//2, (w-nw)//2:nw+(w-nw)//2] = image
return new_image
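# --- Hedged usage sketch (added for illustration; not in the original file) ----
# letterbox_image keeps the aspect ratio and pads the rest with grey (128)
# pixels, so an arbitrary BGR frame can be fed to a fixed-size network input.
# The file name below is an assumption.
#
#     img = cv2.imread("example.jpg")              # any BGR image
#     boxed = letterbox_image(img, (640, 640))     # -> (640, 640, 3) float array
# -------------------------------------------------------------------------------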
#-----------------------------------------------------------------#
#   Rescale the outputs back to the size of the original image
#-----------------------------------------------------------------#
def retinaface_correct_boxes(result, input_shape, image_shape):
new_shape = image_shape*np.min(input_shape/image_shape)
offset = (input_shape - new_shape) / 2. / input_shape
scale = input_shape / new_shape
scale_for_boxs = [scale[1], scale[0], scale[1], scale[0]]
scale_for_landmarks = [scale[1], scale[0], scale[1], scale[0], scale[1], scale[0], scale[1], scale[0], scale[1], scale[0]]
offset_for_boxs = [offset[1], offset[0], offset[1],offset[0]]
offset_for_landmarks = [offset[1], offset[0], offset[1], offset[0], offset[1], offset[0], offset[1], offset[0], offset[1], offset[0]]
result[:,:4] = (result[:, :4] - np.array(offset_for_boxs)) * np.array(scale_for_boxs)
result[:,5:] = (result[:, 5:] - np.array(offset_for_landmarks)) * np.array(scale_for_landmarks)
return result
class BBoxUtility(object):
def __init__(self, anchors=None, overlap_threshold = 0.35, top_k=300, nms_thresh = 0.45):
self.anchors = anchors
self.num_anchors = 0 if anchors is None else len(anchors)
self.overlap_threshold = overlap_threshold
self._top_k = top_k
self._nms_thresh = nms_thresh
def iou(self, box):
#---------------------------------------------#
        #   Compute the IoU between this ground-truth box and every anchor
        #   to judge how much they overlap
#---------------------------------------------#
inter_upleft = np.maximum(self.anchors[:, :2], box[:2])
inter_botright = np.minimum(self.anchors[:, 2:4], box[2:])
inter_wh = inter_botright - inter_upleft
inter_wh = np.maximum(inter_wh, 0)
inter = inter_wh[:, 0] * inter_wh[:, 1]
#---------------------------------------------#
        #   Area of the ground-truth box
#---------------------------------------------#
area_true = (box[2] - box[0]) * (box[3] - box[1])
#---------------------------------------------#
        #   Area of the anchor boxes
#---------------------------------------------#
area_gt = (self.anchors[:, 2] - self.anchors[:, 0])*(self.anchors[:, 3] - self.anchors[:, 1])
#---------------------------------------------#
        #   Compute the IoU
#---------------------------------------------#
union = area_true + area_gt - inter
iou = inter / union
return iou
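    # --- Hedged worked example (added; not in the original source) ------------
    # With a single anchor [0, 0, 2, 2] and a ground-truth box [1, 1, 3, 3]
    # (both as x1, y1, x2, y2), the intersection is the unit square [1, 1, 2, 2],
    # both areas are 4, so iou() returns 1 / (4 + 4 - 1) = 1/7 ≈ 0.143.
    # ---------------------------------------------------------------------------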
def encode_box(self, box, return_iou=True):
#---------------------------------------------#
        #   Compute how much the current ground-truth box overlaps each anchor
# iou [self.num_anchors]
# encoded_box [self.num_anchors, 5]
#---------------------------------------------#
iou = self.iou(box[:4])
encoded_box = np.zeros((self.num_anchors, 4 + return_iou + 10 + 1))
#---------------------------------------------#
        #   Find the anchors that overlap this ground-truth box strongly;
        #   those anchors become responsible for predicting the box
#---------------------------------------------#
assign_mask = iou > self.overlap_threshold
#---------------------------------------------#
        #   If no anchor has an IoU above self.overlap_threshold,
        #   take the anchor with the largest IoU as the positive sample
#---------------------------------------------#
if not assign_mask.any():
assign_mask[iou.argmax()] = True
#---------------------------------------------#
        #   Assign values using the IoU
#---------------------------------------------#
if return_iou:
encoded_box[:, 4][assign_mask] = iou[assign_mask]
#---------------------------------------------#
        #   Pick out the matched anchors
#---------------------------------------------#
assigned_anchors = self.anchors[assign_mask]
#----------------------------------------------------#
        #   Inverse encoding: convert the ground-truth box into the format of the
        #   Retinaface predictions; first compute the box center and width/height
#----------------------------------------------------#
box_center = 0.5 * (box[:2] + box[2:4])
box_wh = box[2:4] - box[:2]
#---------------------------------------------#
        #   Then compute the center and width/height of the matched anchors
#---------------------------------------------#
assigned_anchors_center = 0.5 * (assigned_anchors[:, :2] +
assigned_anchors[:, 2:4])
assigned_anchors_wh = (assigned_anchors[:, 2:4] -
assigned_anchors[:, :2])
#------------------------------------------------#
        #   Work backwards to the regression targets the network should predict
#------------------------------------------------#
encoded_box[:, :2][assign_mask] = box_center - assigned_anchors_center
encoded_box[:, :2][assign_mask] /= assigned_anchors_wh
encoded_box[:, :2][assign_mask] /= 0.1
encoded_box[:, 2:4][assign_mask] = np.log(box_wh / assigned_anchors_wh)
encoded_box[:, 2:4][assign_mask] /= 0.2
ldm_encoded = np.zeros_like(encoded_box[:, 5: -1][assign_mask])
ldm_encoded = np.reshape(ldm_encoded, [-1, 5, 2])
ldm_encoded[:, :, 0] = box[[4, 6, 8, 10, 12]] - np.repeat(assigned_anchors_center[:, 0: 1], 5, axis = -1)
ldm_encoded[:, :, 1] = box[[5, 7, 9, 11, 13]] - np.repeat(assigned_anchors_center[:, 1: 2], 5, axis = -1)
ldm_encoded[:, :, 0] /= np.repeat(assigned_anchors_wh[:,0:1], 5, axis=-1)
ldm_encoded[:, :, 1] /= np.repeat(assigned_anchors_wh[:,1:2], 5, axis=-1)
ldm_encoded[:, :, 0] /= 0.1
ldm_encoded[:, :, 1] /= 0.1
encoded_box[:, 5:-1][assign_mask] = np.reshape(ldm_encoded,[-1,10])
encoded_box[:, -1][assign_mask] = box[-1]
return encoded_box.ravel()
def assign_boxes(self, boxes):
#-----------------------------------------------------#
        #   assignment has three parts
        #   :4      box regression targets the network should predict
        #   4       whether an object is present
        #
        #   5:6     the probability that an object is present
        #   7       whether an object is present
        #
        #   8:-1    landmark regression targets the network should predict
        #   -1      whether an object is present
#-----------------------------------------------------#
assignment = np.zeros((self.num_anchors, 4 + 1 + 2 + 1 + 10 + 1))
#-----------------------------------------------------#
        #   Index 5 holds the background probability
#-----------------------------------------------------#
assignment[:, 5] = 1
if len(boxes) == 0:
return assignment
#-----------------------------------------------------#
        #   Encoded values and IoU for every ground-truth box
# encoded_boxes n, num_anchors, 16
#-----------------------------------------------------#
encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes)
encoded_boxes = encoded_boxes.reshape(-1, self.num_anchors, 16)
#-----------------------------------------------------#
        #   For each anchor, take the ground-truth box with the highest IoU
#-----------------------------------------------------#
best_iou = encoded_boxes[:, :, 4].max(axis=0)
best_iou_idx = encoded_boxes[:, :, 4].argmax(axis=0)
best_iou_mask = best_iou > 0
best_iou_idx = best_iou_idx[best_iou_mask]
assign_num = len(best_iou_idx)
#-----------------------------------------------------#
        #   Extract the encoded ground-truth boxes
#-----------------------------------------------------#
encoded_boxes = encoded_boxes[:, best_iou_mask, :]
assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx, np.arange(assign_num), :4]
#-----------------------------------------------------#
        #   Indices 4, 7 and -1 indicate whether the current anchor contains a target
#-----------------------------------------------------#
assignment[:, 4][best_iou_mask] = 1
#-----------------------------------------------------#
        #   5:6     the probability that an object is present
        #   7       whether an object is present
#-----------------------------------------------------#
assignment[:, 5][best_iou_mask] = 0
assignment[:, 6][best_iou_mask] = 1
assignment[:, 7][best_iou_mask] = 1
#-----------------------------------------------------#
        #   8:-1    landmark regression targets the network should predict
        #   -1      whether an object is present
#-----------------------------------------------------#
assignment[:, 8:][best_iou_mask] = encoded_boxes[best_iou_idx, np.arange(assign_num), 5:]
return assignment
def cal_iou(self, b1, b2):
b1_x1, b1_y1, b1_x2, b1_y2 = b1[0], b1[1], b1[2], b1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = b2[:, 0], b2[:, 1], b2[:, 2], b2[:, 3]
inter_rect_x1 = np.maximum(b1_x1, b2_x1)
inter_rect_y1 = np.maximum(b1_y1, b2_y1)
inter_rect_x2 = np.minimum(b1_x2, b2_x2)
inter_rect_y2 = np.minimum(b1_y2, b2_y2)
inter_area = np.maximum(inter_rect_x2 - inter_rect_x1, 0) * \
np.maximum(inter_rect_y2 - inter_rect_y1, 0)
area_b1 = (b1_x2-b1_x1)*(b1_y2-b1_y1)
area_b2 = (b2_x2-b2_x1)*(b2_y2-b2_y1)
iou = inter_area/np.maximum((area_b1+area_b2-inter_area),1e-6)
return iou
def decode_boxes(self, mbox_loc, mbox_ldm, mbox_anchorbox):
#-----------------------------------------------------#
        #   Width and height of the anchors
#-----------------------------------------------------#
anchor_width = mbox_anchorbox[:, 2] - mbox_anchorbox[:, 0]
anchor_height = mbox_anchorbox[:, 3] - mbox_anchorbox[:, 1]
#-----------------------------------------------------#
        #   Center point of the anchors
#-----------------------------------------------------#
anchor_center_x = 0.5 * (mbox_anchorbox[:, 2] + mbox_anchorbox[:, 0])
anchor_center_y = 0.5 * (mbox_anchorbox[:, 3] + mbox_anchorbox[:, 1])
#-----------------------------------------------------#
        #   Offset of the real box from the anchor center along the x and y axes
#-----------------------------------------------------#
decode_bbox_center_x = mbox_loc[:, 0] * anchor_width * 0.1
decode_bbox_center_x += anchor_center_x
decode_bbox_center_y = mbox_loc[:, 1] * anchor_height * 0.1
decode_bbox_center_y += anchor_center_y
#-----------------------------------------------------#
        #   Recover the width and height of the real box
#-----------------------------------------------------#
decode_bbox_width = | np.exp(mbox_loc[:, 2] * 0.2) | numpy.exp |
import cv2
from scipy.signal import filtfilt
import numpy as np
import os
import shutil
from scipy import signal
import sys
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from directorios import *
from visualizacion import *
from Simulaciones.Input.inicializacion import *
from Simulaciones.Recursos.evolucion import *
from scipy.optimize import curve_fit
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import os
from scipy.stats import linregress
from visualizacion import *
import time
# NAMING, SAVING AND LOADING DATA
def select_file(datos_path):
root = tk.Tk()
root.withdraw()
carpeta = filedialog.askopenfilename(parent=root,
initialdir=datos_path,
title='Selecciones el archivo')
return carpeta
def select_directory(datos_path):
root = tk.Tk()
root.withdraw()
carpeta = filedialog.askdirectory(parent=root,
initialdir=datos_path,
title='Selecciones la carpeta')
return carpeta
def crear_directorios_trabajo():
root = tk.Tk()
root.withdraw()
def crear_directorio(path):
if os.path.exists(path) == True:
print('Este archivo ya existe')
else:
os.makedirs(path)
print(path + ' creado')
detection_parent_file = filedialog.askdirectory(parent=root,
initialdir='C:/',
title='Detección multiple')
crear_directorio(detection_parent_file + '/mnustes_science/images/canned')
crear_directorio(detection_parent_file + '/mnustes_science/images/img_lab')
crear_directorio(detection_parent_file + '/mnustes_science/images/img_phantom')
crear_directorio(detection_parent_file + '/mnustes_science/experimental_data')
crear_directorio(detection_parent_file + '/mnustes_science/simulation_data')
main_directory = detection_parent_file + '/mnustes_science'
return main_directory
def guardar_txt(path, file, **kwargs):  # upgrade to a dictionary for variable names
if os.path.exists(path + file) == False:
os.makedirs(path + file)
for key, value in kwargs.items():
np.savetxt(path + file + '\\' + key + ".txt", value)
def cargar_txt(path, file, **kwargs):  # upgrade to a dictionary for variable names
array = []
for key, values in kwargs.items():
array_i = np.loadtxt(path + file + '\\' + key + ".txt")
array.append(array_i)
return array
def nombre_pndls_estandar(**kwargs):
mu = kwargs['mu']
L = kwargs['L']
L_name = str(L)
    if all(key not in kwargs for key in ('n', 'forcing_amp', 'forcing_freq', 'profundidad')):
sigma = kwargs['sigma']
nu = kwargs['nu']
gamma = kwargs['gamma']
        sigma_st = str(round(float(sigma), 3))
mu_st = str(round(float(mu), 3))
gamma_st = str(round(float(gamma), 3))
nu_st = str(round(float(nu), 3))
sigma_splited = sigma_st.split('.')
mu_splited = mu_st.split('.')
gamma_splited = gamma_st.split('.')
nu_splited = nu_st.split('.')
sigma_name = sigma_splited[0] + sigma_splited[1]
mu_name = mu_splited[0] + mu_splited[1]
gamma_name = gamma_splited[0] + gamma_splited[1]
nu_name = nu_splited[0] + nu_splited[1]
nombre = '\\gaussian\mu=' + mu_name + '\gamma=' + gamma_name + '_nu=' + nu_name + '\L=' + L_name + '\sigma=' + sigma_name
    elif all(key not in kwargs for key in ('alpha', 'beta', 'nu', 'gamma')):
d = kwargs['profundidad']
n = kwargs['n']
a = kwargs['forcing_amp']
w = kwargs['forcing_freq']
d_name = str(round(float(d), 2))
n_name = str(n)
a_name = str(round(float(a), 2))
w_name = str(round(float(w), 2))
nombre = '\\gaussian_exp\\d=' + d_name + '\\n=' + n_name + '\\f=' + w_name + '_a=' + a_name
return nombre
def nombre_pndls_bigaussian(gamma, mu, nu, sigma1, sigma2, dist, fase):
gamma_st = str(truncate(gamma, 3))
mu_st = str(truncate(mu, 3))
nu_st = str(truncate(nu, 3))
sigma1_st = str(truncate(sigma1, 2))
sigma2_st = str(truncate(sigma2, 2))
dist_st = str(truncate(dist, 2))
fase_st = str(truncate(fase / np.pi, 2)) + 'pi'
nombre = '\\bigaussian\\mu=' + mu_st + '\\gamma=' + gamma_st + '_nu=' + nu_st +'\\fase=' + fase_st +'\\sigma_1=' + sigma1_st +'\\sigma_2=' + sigma2_st + '\\distancia=' + dist_st
return nombre
def truncate(num, n):
integer = int(num * (10**n))/(10**n)
return float(integer)
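# Hedged worked example (added): truncate keeps n decimal digits without rounding,
# e.g. truncate(3.14159, 2) == 3.14 and truncate(2.999, 2) == 2.99.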
# DETECTION
def canny_prueba(sigma):
root = tk.Tk()
root.withdraw()
reference_image = filedialog.askopenfilename(parent=root,
initialdir="D:\mnustes_science",
title='Detección multiple')
print(str(reference_image))
im = cv2.imread(str(reference_image))
REC = cv2.selectROI(im)
rec = list(REC)
imCrop = im[rec[1]:(rec[1] + rec[3]), rec[0]:(rec[0] + rec[2])]
imBlur = cv2.GaussianBlur(imCrop, (3, 3), 0)
canned = auto_canny(imBlur, sigma)
cv2.imshow('Imagen de referencia', canned)
cv2.waitKey(delay=0)
cv2.destroyWindow('Imagen de referencia')
def canny_to_data():
canned_path = 'D:\mnustes_science\images\canned'
datos_path = 'D:\mnustes_science\experimental_data'
root = tk.Tk()
root.withdraw()
detection_file = filedialog.askdirectory(parent=root,
initialdir=canned_path,
title='Selecciones la carpeta canny')
if not detection_file:
sys.exit('No se seleccionó ninguna carpeta')
os.chdir(detection_file)
parent_file_name = os.path.basename(detection_file)
print('Se va a procesar la carpeta ' + detection_file)
IMGs = os.listdir(canned_path + '\\single_file\\' + parent_file_name)
X, T, PHI = datos_3d(IMGs, canned_path + '\\single_file\\' + parent_file_name, nivel='si')
guardar_txt(datos_path, '\\single_file\\' + parent_file_name + '\\', X=X, T=T, PHI=PHI)
def deteccion_contornos(tipo, sigma, img_format, **kwargs):
if tipo == 'multiple':
root = tk.Tk()
root.withdraw()
detection_parent_file = filedialog.askdirectory(parent=root,
initialdir="D:\mnustes_science",
title='Detección multiple')
if not detection_parent_file:
sys.exit('No se seleccionó ningún archivo')
os.chdir(detection_parent_file)
detection_files = os.listdir()
parent_file_name = os.path.basename(detection_parent_file)
print('Se va a procesar la carpeta ' + str(parent_file_name))
canned_path = 'D:\mnustes_science\images\canned'
datos_path = 'D:\mnustes_science\experimental_data'
reference_image = filedialog.askopenfilename(parent=root,
initialdir=detection_files,
title='Seleccionar imagen de referencia')
recs = ROI_select(reference_image)
for name in detection_files:
print('Procesando ' + str(name) + ' (' + str(detection_files.index(name)) + '/' + str(len(detection_files)) + ')')
if img_format == 'jpg':
deteccion_jpg(detection_parent_file + '\\' + name, canned_path + '\\' + parent_file_name + '\\' + name,
recs, sigma)
elif img_format == 'tiff':
deteccion_tiff(detection_parent_file + '\\' + name, canned_path + '\\' + parent_file_name + '\\' + name,
recs, sigma)
IMGs = os.listdir(canned_path + '\\' + parent_file_name + '\\' + name)
X, T, PHI = datos_3d(IMGs, canned_path + '\\' + parent_file_name + '\\' + name)
guardar_txt(datos_path, '\\' + parent_file_name + '\\' + name, X=X, T=T, PHI=PHI)
elif tipo == 'single_file':
root = tk.Tk()
root.withdraw()
zero_file = filedialog.askdirectory(parent=root,
initialdir="D:\mnustes_science",
title='Seleccione la carpeta del cero')
if not zero_file:
sys.exit('No se seleccionó ningún archivo')
detection_file = filedialog.askdirectory(parent=root,
initialdir="D:\mnustes_science",
title='Seleccione la carpeta para detección')
if not detection_file:
sys.exit('No se seleccionó ningún archivo')
os.chdir(detection_file)
parent_file_name = os.path.basename(detection_file)
os.chdir(detection_file)
zero_name = os.path.basename(zero_file)
canned_path = 'D:\mnustes_science\images\canned'
datos_path = 'D:\mnustes_science\experimental_data'
print('Se va a procesar la carpeta ' + detection_file)
reference_image = filedialog.askopenfilename(parent=root,
initialdir=detection_file,
title='Seleccionar imagen de referencia')
recs = ROI_select(reference_image)
if 'file_name' not in kwargs:
file_name = 'default'
else:
file_name = kwargs['file_name']
if img_format == 'jpg':
deteccion_jpg(zero_file, canned_path + '\\' + file_name + '\\' + parent_file_name + '\\' + zero_name, recs, sigma)
elif img_format == 'tiff':
deteccion_tiff(zero_file, canned_path + '\\' + file_name + '\\' + parent_file_name + '\\' + zero_name, recs, sigma)
IMGs = os.listdir(canned_path + '\\' + file_name + '\\' + parent_file_name + '\\' + zero_name)
X, T, ZERO = datos_3d(IMGs, canned_path + '\\' + file_name + '\\' + parent_file_name + '\\' + zero_name)
guardar_txt(datos_path, '\\' + file_name + '\\' + parent_file_name, ZERO=ZERO)
if img_format == 'jpg':
deteccion_jpg(detection_file, canned_path + '\\' + file_name + '\\' + parent_file_name, recs, sigma)
elif img_format == 'tiff':
deteccion_tiff(detection_file, canned_path + '\\' + file_name + '\\' + parent_file_name, recs, sigma)
IMGs = os.listdir(canned_path + '\\' + file_name + '\\' + parent_file_name)
X, T, PHI = datos_3d(IMGs, canned_path + '\\' + file_name + '\\' + parent_file_name)
guardar_txt(datos_path, '\\' + file_name + '\\' + parent_file_name , X=X, T=T, PHI=PHI)
return X, T, PHI
def auto_canny(image, sigma):
if sigma == 'fixed':
lower = 100
upper = 200
else:
v = np.median(image)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
return edged
def deteccion_jpg(file_i, file_o, REC, sigma):
    IMGs = os.listdir(file_i)  # list of the file names in the given folder
im = cv2.imread(file_i + '/cam000000.jpg')
rec = list(REC)
imCrop = im[rec[1]:(rec[1] + rec[3]), rec[0]:(rec[0] + rec[2])]
imBlur = cv2.GaussianBlur(imCrop, (7, 7), 0)
ddepth = cv2.CV_16S
scale = 1
delta = 0
grad_x = cv2.Sobel(imBlur, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType = cv2.BORDER_DEFAULT)
grad_y = cv2.Sobel(imBlur, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType = cv2.BORDER_DEFAULT)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
grad = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
edges = auto_canny(grad, sigma)
if os.path.exists(file_o) == True:
print('Este archivo de CANNY ya existe, ¿desea eliminarlo y continuar? (y/n)')
a = str(input())
if a == 'y':
shutil.rmtree(file_o)
elif a == 'n':
sys.exit("Proceso terminado, cambie de carpeta")
os.makedirs(file_o)
cv2.imwrite(os.path.join(file_o, IMGs[0]), edges)
for i in range(1, len(IMGs)):
im = cv2.imread(file_i + '\\' + IMGs[i])
imCrop = im[rec[1]:(rec[1] + rec[3]), rec[0]:(rec[0] + rec[2])]
imBlur = cv2.GaussianBlur(imCrop, (3, 3), 0)
# edges = cv2.Canny(imBlur,10,200)
edges = auto_canny(imBlur, sigma)
cv2.imwrite(os.path.join(file_o, IMGs[i]), edges)
return IMGs
def deteccion_tiff(file_i, file_o, REC, sigma):
    IMGs = os.listdir(file_i)  # list of the file names in the given folder
im = cv2.imread(file_i + '/cam000000.tif')
rec = list(REC)
imCrop = im[rec[1]:(rec[1] + rec[3]), rec[0]:(rec[0] + rec[2])]
imBlur = cv2.GaussianBlur(imCrop, (3, 3), 0)
edges = auto_canny(imBlur, sigma)
if os.path.exists(file_o) == True:
print('Este archivo de CANNY ya existe, ¿desea eliminarlo y continuar? (y/n)')
a = str(input())
if a == 'y':
shutil.rmtree(file_o)
elif a == 'n':
sys.exit("Proceso terminado, cambie de carpeta")
os.makedirs(file_o)
cv2.imwrite(os.path.join(file_o, IMGs[0]), edges)
for i in range(1, len(IMGs)):
im = cv2.imread(file_i + '\\' + IMGs[i])
imCrop = im[rec[1]:(rec[1] + rec[3]), rec[0]:(rec[0] + rec[2])]
imBlur = cv2.GaussianBlur(imCrop, (3, 3), 0)
# edges = cv2.Canny(imBlur,10,200)
edges = auto_canny(imBlur, sigma)
cv2.imwrite(os.path.join(file_o, IMGs[i]), edges)
return IMGs
def ROI_select(path):
im = cv2.imread(path)
RECs = cv2.selectROI(im)
return RECs
# IMAGES TO DATA
def phi_t(IMGs, file_o, l):
img = cv2.imread(file_o + '\\' + IMGs[l], 0)
rows, cols = img.shape
phi = []
i = cols - 1
while i != 0:
j = rows - 1
while j != 0:
n = 0
k = img[j, i]
if k == 255:
phi_i = rows - j
phi.append(phi_i)
j = 0
n = 1
elif k != 255:
j = j - 1
if j == 1 and n == 0:
if not phi:
phi_i = 0.5 * rows
phi.append(phi_i)
j = j - 1
else:
phi_i = phi[-1]
phi.append(phi_i)
j = j - 1
i = i - 1
x = []
for i in range(cols - 1):
x.append(i)
phi.reverse()
return phi, cols
def datos_3d(IMGS, FILE_OUT):
PHI = []
T = []
N_imgs = len(IMGS)
if N_imgs == 1:
phi, cols = phi_t(IMGS, FILE_OUT, 0)
t = [0]
PHI.append(phi)
T.append(t)
else:
for i in range(1, N_imgs):
phi, cols = phi_t(IMGS, FILE_OUT, i)
t = [i]
PHI.append(phi)
T.append(t)
X = np.arange(1, cols)
Y = np.array(T)
Z = np.array(PHI)
return X, Y, Z
# DATA PROCESSING
def drift_velocity(T_per, X_mm, Z_mm, window_l, window_u, t_inicial, t_final):
    ### DEFINITIONS: INITIAL WINDOW AND TIME INTERVAL TO ANALYSE ###
L_wind = window_u - window_l
    ### FINDING THE MAXIMA ###
t_array = []
x_array = []
for i in range(t_inicial, t_final):
j = window_l + np.argmax(Z_mm[i, window_l:window_u])
t_array.append(T_per[i])
x_array.append(X_mm[j])
window_l = int(j - L_wind / 2)
window_u = int(j + L_wind / 2)
t_np = np.array(t_array)
x_np = np.array(x_array)
    ### LINEAR REGRESSION ###
linear_fit = linregress(t_array, x_array)
x_fit = linear_fit.slope * t_np + linear_fit.intercept
return t_np, x_np, x_fit, linear_fit
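# --- Hedged usage sketch (added; not in the original file) ---------------------
# drift_velocity follows the maximum of Z inside a moving window and fits a
# straight line x(t); linear_fit.slope is then the drift velocity. The window
# bounds and time span below are placeholders chosen for illustration only.
#
#     t, x, x_fit, fit = drift_velocity(T_per, X_mm, Z_mm,
#                                       window_l=100, window_u=200,
#                                       t_inicial=0, t_final=500)
#     print('drift velocity =', fit.slope)
# -------------------------------------------------------------------------------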
def zero_fix(z_limit, mode, cargar, *args):
datos_path = 'D:\mnustes_science\experimental_data'
carpeta = select_directory(datos_path)
if mode == 'zero':
if cargar == 'si':
[X, T, PHI, zero] = cargar_txt(carpeta, '', X='X', T='T', PHI='PHI', ZERO='ZERO')
elif cargar == 'no':
zero = cargar_txt(carpeta, '', ZERO='ZERO')
[X, T, PHI] = [args[0], args[1], args[2]]
ZERO = np.ones((len(PHI[:, 0]), len(PHI[0, :])))
for i in range(len(T)):
ZERO[i, :] = zero
Z = PHI - ZERO
Z = np.array(Z)
guardar_txt(carpeta, '', Z=Z)
visualizacion(X, T, Z, tipo='colormap', guardar='si', path=carpeta,
file='', nombre='espaciotiempo_mean', cmap='seismic', vmin=-z_limit, vzero=0, vmax=z_limit)
plt.close()
elif mode == 'mean':
if cargar == 'si':
[X, T, PHI] = cargar_txt(carpeta, '', X='X', T='T', PHI='PHI')
elif cargar == 'no':
[X, T, PHI] = [args[0], args[1], args[2]]
Z = nivel_mean(PHI, X, T)
Z = np.array(Z)
guardar_txt(carpeta, '', Z=Z)
visualizacion(X, T, Z, tipo='colormap', guardar='si', path=carpeta,
file='', nombre='espaciotiempo_filt', cmap='seismic', vmin=-z_limit, vzero=0, vmax=z_limit)
plt.close()
return carpeta, X, T, Z
def nivel_mean(PHI, X, T):
mean = np.mean(PHI[:, 0])
#PHI = filtro_superficie(PHI, 3, 'X')
MEAN = mean * np.ones((len(PHI[:, 0]), len(PHI[0, :])))
Z = PHI - MEAN
mmin = Z[0, 0]
mmax = Z[0, -1]
pend = mmax - mmin
nivels = []
for i in range(len(X)):
y_i = (pend / len(X)) * X[i]
nivels.append(y_i)
nivels = np.array(nivels)
Z_new = []
for i in range(len(T)):
Z_new_i = Z[i, :] - nivels
Z_new_i = Z_new_i.tolist()
Z_new.append(Z_new_i)
return Z_new
def field_envelopes(X, T, Z, carpeta):
def envelopes(s):
q_u = np.zeros(s.shape)
q_l = np.zeros(s.shape)
u_x = [0, ]
u_y = [s[0], ]
l_x = [0, ]
l_y = [s[0], ]
for k in range(1, len(s) - 1):
if (np.sign(s[k] - s[k - 1]) == 1) and (np.sign(s[k] - s[k + 1]) == 1):
u_x.append(k)
u_y.append(s[k])
if (np.sign(s[k] - s[k - 1]) == -1) and ((np.sign(s[k] - s[k + 1])) == -1):
l_x.append(k)
l_y.append(s[k])
u_x.append(len(s) - 1)
u_y.append(s[-1])
l_x.append(len(s) - 1)
l_y.append(s[-1])
u_p = interp1d(u_x, u_y, kind='linear', bounds_error=False, fill_value=0.0)
l_p = interp1d(l_x, l_y, kind='linear', bounds_error=False, fill_value=0.0)
for k in range(0, len(s)):
q_u[k] = u_p(k)
q_l[k] = l_p(k)
q_u = q_u.tolist()
q_l = q_l.tolist()
return q_u, q_l
A = np.zeros((len(T), len(X)))
B = np.zeros((len(T), len(X)))
for i in range(len(X)):
print(i)
s = Z[:, i]
q_u, q_l =envelopes(s)
A[:, i] = q_u
B[:, i] = q_l
guardar_txt(carpeta, '', A=A, B=B)
visualizacion(X, T, A, tipo='colormap', guardar='si', path=carpeta,
file='', nombre='A_plot', cmap='seismic')
plt.close()
visualizacion(X, T, B, tipo='colormap', guardar='si', path=carpeta,
file='', nombre='B_plot', cmap='seismic')
plt.close()
def filtro_array(n, funcion):
# the larger n is, the smoother curve will be
b = [1.0 / n] * n
a = 1
phi_filtered = filtfilt(b, a, funcion)
return phi_filtered
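# --- Hedged usage sketch (added; not in the original file) ---------------------
# filtro_array is a zero-phase moving average: filtfilt applies the length-n
# averaging kernel forwards and backwards, so the smoothed curve is not shifted.
#
#     t = np.linspace(0, 1, 500)
#     noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(500)
#     smooth_sig = filtro_array(15, noisy)   # same length, smoother, no phase lag
# -------------------------------------------------------------------------------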
def filtro_superficie(Z, intensidad, sentido):
X_len = len(Z[:, 0])
Y_len = len(Z[0, :])
FILT = np.zeros((X_len, Y_len))
if sentido == 'X':
for i in range(X_len):
filtered = filtro_array(intensidad, Z[i, :])
FILT[i, :] = filtered
elif sentido == 'Y':
for i in range(Y_len):
filtered = filtro_array(intensidad, Z[:, i])
FILT[:, i] = filtered
elif sentido == 'XY':
for i in range(X_len):
filtered = filtro_array(intensidad, Z[i, :])
FILT[i, :] = filtered
for i in range(Y_len):
filtered = filtro_array(intensidad, FILT[:, i])
FILT[:, i] = filtered
elif sentido == 'YX':
for i in range(Y_len):
filtered = filtro_array(intensidad, Z[:, i])
FILT[:, i] = filtered
for i in range(X_len):
filtered = filtro_array(intensidad, FILT[i, :])
FILT[i, :] = filtered
return FILT
def proyeccion_maximos(Z):
def proyeccion(PHI):
PHIT = PHI.transpose()
rows, cols = PHIT.shape
PHIT_proy = np.zeros(rows)
for i in range(cols - 1):
PHIT_proy = PHIT_proy + np.absolute(PHIT[:, i])
PHIT_proy = (1 / cols) * PHIT_proy
return PHIT_proy
phi_inicial = Z[0, :]
    phi_max = np.argmax(phi_inicial)  # index of the maximum of the first contour
    maximo_temporal = Z[:, phi_max]  # time series of that maximum over the record
    frecuencias, power_density = signal.periodogram(maximo_temporal)  # periodogram of that series
    max_element = np.argmax(power_density)  # frequency matching the sinusoidal fit
    periodo = 1 / frecuencias[max_element]  # period associated with that frequency
    max_int = np.argmax(Z[0:int(periodo), phi_max])  # maximum within the first period
max_int = int(max_int)
A = []
for i in range(1, 2 * int(len(Z[:, phi_max])/periodo)):
if int(max_int * i/2) < len(Z[:, 0]):
A_i = np.absolute(Z[int(max_int * i / 2), :])
A.append(A_i)
A = A[1:]
A_np = | np.array(A) | numpy.array |
"""
Generate a bunch of trimesh objects, in meter radian
"""
import math
import numpy as np
import basis.trimesh.primitives as tp
import basis.trimesh as trm
import basis.robot_math as rm
import shapely.geometry as shpg
def gen_box(extent=np.array([1, 1, 1]), homomat=np.eye(4)):
"""
:param extent: x, y, z (origin is 0)
:param homomat: rotation and translation
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
return tp.Box(box_extents=extent, box_transform=homomat)
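# --- Hedged usage sketch (added for illustration) ------------------------------
# A 0.1 m cube whose center sits 0.05 m above the origin; the homogeneous matrix
# carries both the rotation (identity here) and the translation.
#
#     homomat = np.eye(4)
#     homomat[:3, 3] = np.array([0, 0, 0.05])
#     box = gen_box(extent=np.array([0.1, 0.1, 0.1]), homomat=homomat)
# -------------------------------------------------------------------------------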
def gen_stick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, type="rect", sections=8):
"""
interface to genrectstick/genroundstick
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param type: rect or round
:param sections: # of discretized sectors used to approximate a cylinder
:return:
author: weiwei
date: 20191228osaka
"""
if type == "rect":
return gen_rectstick(spos, epos, thickness, sections=sections)
if type == "round":
return gen_roundstick(spos, epos, thickness, count=[sections / 2.0, sections / 2.0])
def gen_rectstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=.005, sections=8):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections: # of discretized sectors used to approximate a cylinder
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
pos = spos
height = np.linalg.norm(epos - spos)
if np.allclose(height, 0):
rotmat = np.eye(3)
else:
rotmat = rm.rotmat_between_vectors(np.array([0, 0, 1]), epos - spos)
homomat = rm.homomat_from_posrot(pos, rotmat)
return tp.Cylinder(height=height, radius=thickness / 2.0, sections=sections, homomat=homomat)
def gen_roundstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, count=[8, 8]):
"""
:param spos:
:param epos:
:param thickness:
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
pos = spos
height = np.linalg.norm(epos - spos)
if np.allclose(height, 0):
rotmat = np.eye(3)
else:
rotmat = rm.rotmat_between_vectors(np.array([0, 0, 1]), epos - spos)
homomat = rm.homomat_from_posrot(pos, rotmat)
return tp.Capsule(height=height, radius=thickness / 2.0, count=count, homomat=homomat)
def gen_dashstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, lsolid=None, lspace=None,
sections=8, sticktype="rect"):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param lsolid: length of the solid section, 1*thickness if None
:param lspace: length of the empty section, 1.5*thickness if None
:return:
author: weiwei
date: 20191228osaka
"""
solidweight = 1.6
spaceweight = 1.07
if not lsolid:
lsolid = thickness * solidweight
if not lspace:
lspace = thickness * spaceweight
length, direction = rm.unit_vector(epos - spos, toggle_length=True)
nstick = math.floor(length / (lsolid + lspace))
vertices = np.empty((0, 3))
faces = np.empty((0, 3))
for i in range(0, nstick):
tmp_spos = spos + (lsolid * direction + lspace * direction) * i
tmp_stick = gen_stick(spos=tmp_spos,
epos=tmp_spos + lsolid * direction,
thickness=thickness,
type=sticktype,
sections=sections)
tmp_stick_faces = tmp_stick.faces + len(vertices)
vertices = np.vstack((vertices, tmp_stick.vertices))
faces = np.vstack((faces, tmp_stick_faces))
# wrap up the last segment
tmp_spos = spos + (lsolid * direction + lspace * direction) * nstick
tmp_epos = tmp_spos + lsolid * direction
final_length, _ = rm.unit_vector(tmp_epos - spos, toggle_length=True)
if final_length > length:
tmp_epos = epos
tmp_stick = gen_stick(spos=tmp_spos,
epos=tmp_epos,
thickness=thickness,
type=sticktype,
sections=sections)
tmp_stick_faces = tmp_stick.faces + len(vertices)
vertices = np.vstack((vertices, tmp_stick.vertices))
faces = np.vstack((faces, tmp_stick_faces))
return trm.Trimesh(vertices=vertices, faces=faces)
def gen_sphere(pos=np.array([0, 0, 0]), radius=0.02, subdivisions=2):
"""
:param pos: 1x3 nparray
:param radius: 0.02 m by default
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
return tp.Sphere(sphere_radius=radius, sphere_center=pos, subdivisions=subdivisions)
def gen_ellipsoid(pos=np.array([0, 0, 0]), axmat=np.eye(3), subdivisions=5):
"""
:param pos:
:param axmat: 3x3 mat, each column is an axis of the ellipse
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
homomat = rm.homomat_from_posrot(pos, axmat)
sphere = tp.Sphere(sphere_radius=1, sphere_center=pos, subdivisions=subdivisions)
vertices = rm.homomat_transform_points(homomat, sphere.vertices)
return trm.Trimesh(vertices=vertices, faces=sphere.faces)
def gen_dumbbell(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, sections=8, subdivisions=1):
"""
    NOTE: returning stick+spos_ball+epos_ball also works, but it is a bit slower
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections:
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
stick = gen_rectstick(spos=spos, epos=epos, thickness=thickness, sections=sections)
spos_ball = gen_sphere(pos=spos, radius=thickness, subdivisions=subdivisions)
epos_ball = gen_sphere(pos=epos, radius=thickness, subdivisions=subdivisions)
vertices = np.vstack((stick.vertices, spos_ball.vertices, epos_ball.vertices))
sposballfaces = spos_ball.faces + len(stick.vertices)
endballfaces = epos_ball.faces + len(spos_ball.vertices) + len(stick.vertices)
faces = np.vstack((stick.faces, sposballfaces, endballfaces))
return trm.Trimesh(vertices=vertices, faces=faces)
def gen_cone(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), radius=0.005, sections=8):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections: # of discretized sectors used to approximate a cylinder
:return:
author: weiwei
date: 20191228osaka
"""
height = np.linalg.norm(spos - epos)
pos = spos
rotmat = rm.rotmat_between_vectors(np.array([0, 0, 1]), epos - spos)
homomat = rm.homomat_from_posrot(pos, rotmat)
return tp.Cone(height=height, radius=radius, sections=sections, homomat=homomat)
def gen_arrow(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, sections=8, sticktype="rect"):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections: # of discretized sectors used to approximate a cylinder
:param sticktype: The shape at the end of the arrow stick, round or rect
:param radius:
:return:
author: weiwei
date: 20191228osaka
"""
direction = rm.unit_vector(epos - spos)
stick = gen_stick(spos=spos, epos=epos - direction * thickness * 4, thickness=thickness, type=sticktype,
sections=sections)
cap = gen_cone(spos=epos - direction * thickness * 4, epos=epos, radius=thickness, sections=sections)
vertices = np.vstack((stick.vertices, cap.vertices))
capfaces = cap.faces + len(stick.vertices)
faces = np.vstack((stick.faces, capfaces))
return trm.Trimesh(vertices=vertices, faces=faces)
def gen_dasharrow(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, lsolid=None, lspace=None,
sections=8, sticktype="rect"):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param lsolid: length of the solid section, 1*thickness if None
:param lspace: length of the empty section, 1.5*thickness if None
:return:
author: weiwei
date: 20191228osaka
"""
length, direction = rm.unit_vector(epos - spos, toggle_length=True)
cap = gen_cone(spos=epos - direction * thickness * 4, epos=epos, radius=thickness, sections=sections)
dash_stick = gen_dashstick(spos=spos,
epos=epos - direction * thickness * 4,
thickness=thickness,
lsolid=lsolid,
lspace=lspace,
sections=sections,
sticktype=sticktype)
tmp_stick_faces = dash_stick.faces + len(cap.vertices)
vertices = np.vstack((cap.vertices, dash_stick.vertices))
faces = np.vstack((cap.faces, tmp_stick_faces))
return trm.Trimesh(vertices=vertices, faces=faces)
def gen_axis(pos=np.array([0, 0, 0]), rotmat=np.eye(3), length=0.1, thickness=0.005):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:return:
author: weiwei
date: 20191228osaka
"""
directionx = rotmat[:, 0]
directiony = rotmat[:, 1]
directionz = rotmat[:, 2]
# x
endx = directionx * length
stickx = gen_stick(spos=pos, epos=endx, thickness=thickness)
capx = gen_cone(spos=endx, epos=endx + directionx * thickness * 4, radius=thickness)
# y
endy = directiony * length
sticky = gen_stick(spos=pos, epos=endy, thickness=thickness)
capy = gen_cone(spos=endy, epos=endy + directiony * thickness * 4, radius=thickness)
# z
endz = directionz * length
stickz = gen_stick(spos=pos, epos=endz, thickness=thickness)
capz = gen_cone(spos=endz, epos=endz + directionz * thickness * 4, radius=thickness)
vertices = np.vstack(
(stickx.vertices, capx.vertices, sticky.vertices, capy.vertices, stickz.vertices, capz.vertices))
capxfaces = capx.faces + len(stickx.vertices)
stickyfaces = sticky.faces + len(stickx.vertices) + len(capx.vertices)
capyfaces = capy.faces + len(stickx.vertices) + len(capx.vertices) + len(sticky.vertices)
stickzfaces = stickz.faces + len(stickx.vertices) + len(capx.vertices) + len(sticky.vertices) + len(capy.vertices)
capzfaces = capz.faces + len(stickx.vertices) + len(capx.vertices) + len(sticky.vertices) + len(
capy.vertices) + len(stickz.vertices)
faces = np.vstack((stickx.faces, capxfaces, stickyfaces, capyfaces, stickzfaces, capzfaces))
return trm.Trimesh(vertices=vertices, faces=faces)
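# --- Hedged usage sketch (added for illustration) ------------------------------
# gen_axis builds a coordinate frame from three stick+cone arrows. The rotation
# helper name below (rm.rotmat_from_axangle) is an assumption about
# basis.robot_math, not something defined in this file.
#
#     frame = gen_axis(pos=np.array([0, 0, 0]),
#                      rotmat=rm.rotmat_from_axangle(np.array([0, 0, 1]), math.pi / 4),
#                      length=0.05, thickness=0.003)
# -------------------------------------------------------------------------------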
def gen_torus(axis=np.array([1, 0, 0]),
starting_vector=None,
portion=.5,
center= | np.array([0, 0, 0]) | numpy.array |
import numpy as np
def sample_Z_l(batch_size, d_l, L, M):
outputs = []
for _ in range(batch_size):
Z_l = np.zeros(shape=(d_l, L, M))
for i in range(L):
for j in range(M):
sample = np.random.uniform(-1.0, 1.0, size=(d_l))
Z_l[:, i, j] = sample
outputs.append(Z_l)
return np.array(outputs)
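# --- Hedged usage sketch (added; not in the original file) ---------------------
# sample_Z_l draws an independent uniform(-1, 1) latent vector of length d_l for
# each of the L x M spatial cells, so the returned tensor has shape
# (batch_size, d_l, L, M).
#
#     z_local = sample_Z_l(batch_size=4, d_l=16, L=8, M=8)
#     assert z_local.shape == (4, 16, 8, 8)
# -------------------------------------------------------------------------------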
def sample_Z_g(batch_size, d_g, L, M):
outputs = []
for _ in range(batch_size):
z_g = | np.random.uniform(-1.0, 1.0, size=(d_g, 1, 1)) | numpy.random.uniform |
"""
feast_classes stores the basic classes used by FEAST.
Additional classes are stored in DetectionModules directory and leak_objects.
"""
import random
import numpy as np
from .leak_class_functions import leak_objects_generator as leak_obj_gen
import pickle
from .simulation_functions import set_kwargs_attrs
# Constants:
GROUND_TEMP = 300 # K
PRESSURE = 101325 # Pa
class GasField:
"""
GasField accommodates all data that defines a gas field at the beginning of a simulation.
"""
def __init__(self, initial_leaks=None, null_repair_rate=None, **kwargs):
"""
Input params:
initial_leaks The set of leaks that exist at the beginning of the simulation
null_repair_rate The rate at which leaks are repaired in the Null process (repairs/leak/day)
kwargs All attributes defined in the kwargs section below
"""
# -------------- Attributes that can be defined with kwargs --------------
# Type of distribution used to generate leak sizes
self.dist_type = 'bootstrap'
# Path to a LeakData object file
self.leak_data_path = 'fort_worth_leaks.p'
# Rate at which new leaks are produced
self.leak_production_rate = 1e-5 # new leaks per component per day
# Number of valves and connectors per well (736659 total components/1138 wells in the Fort Worth study)
self.components_per_site = 650
# Number of wells to be simulated
self.site_count = 100
# Maximum number of wells to be surveyed with a single capital investment
self.max_count = 6000
# Driving distance between wells
self.site_spacing = 700 # m
# Concentration of wells
self.well_density = 2 # wells per km^2
# Square root of the area over which a leak may be found per well. Based on satellite imagery.
self.well_length = 10 # m
# Maximum leak height
self.h0_max = 5 # m
# Plume temperature
self.t_plume = 300 # K
# Update any attributes defined by kwargs
set_kwargs_attrs(self, kwargs)
# -------------- Calculated parameters --------------
# Define functions and parameters related to leaks
self.leak_size_maker, self.leak_params, self.leaks_per_well = leak_obj_gen(self.dist_type, self.leak_data_path)
# Define the number of leaks in each well site
self.leaks_in_well = np.random.poisson(self.leaks_per_well, self.site_count)
n_leaks = int(sum(self.leaks_in_well))
# Define the initial set of leaks
if initial_leaks is None:
self.initial_leaks = self.leak_size_maker(n_leaks, self) # g/s
else:
self.initial_leaks = initial_leaks
# Total number of components in the simulation
self.component_count = self.components_per_site * self.site_count
# Null repair rate
if null_repair_rate is None:
# leaks repaired per leak per day
self.null_repair_rate = self.leak_production_rate * self.component_count/n_leaks
else:
self.null_repair_rate = null_repair_rate
# Distribution of leak costs
self.repair_cost_dist = pickle.load(open('InputData/DataObjectInstances/fernandez_leak_repair_costs_2006.p',
'rb'))
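# --- Hedged usage sketch (added for illustration) ------------------------------
# Building a GasField requires the pickled leak and repair-cost data referenced
# above to be present on disk; with those inputs in place a small field can be
# created by overriding a few kwargs:
#
#     gas_field = GasField(site_count=10, components_per_site=650)
#     print(gas_field.null_repair_rate)   # leaks repaired per leak per day
# -------------------------------------------------------------------------------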
# FinanceSettings stores all parameters relating to economic calculations
class FinanceSettings:
def __init__(self, gas_price=2E-4, discount_rate=0.08):
self.gas_price = gas_price # dollars/gram (2e-4 $/g=$5/mcf methane at STP)
self.discount_rate = discount_rate
class Atmosphere:
"""
Defines atmosphere variables for use in plume simulations
"""
def __init__(self, timesteps, wind_speed_path='arpae_wind.p', wind_direction_path='fort_worth_wind.p', **kwargs):
"""
Inputs
timesteps number of timesteps in the simulation
wind_speed_path path to a wind data object
wind_direction_path path to a wind data object (may or may not be the same as wind_speed_path)
"""
self.wind_speed, self.wind_direction, self.stab_class, self.r_y, self.r_z = [], [], [], [], []
speed_data = pickle.load(open('InputData/DataObjectInstances/' + wind_speed_path, 'rb'))
dir_data = pickle.load(open('InputData/DataObjectInstances/' + wind_direction_path, 'rb'))
a = np.array([927, 370, 283, 707, 1070, 1179])
l = np.array([0.102, 0.0962, 0.0722, 0.0475, 0.0335, 0.022])
q = np.array([-1.918, -0.101, 0.102, 0.465, 0.624, 0.700])
k = np.array([0.250, 0.202, 0.134, 0.0787, .0566, 0.0370])
p = np.array([0.189, 0.162, 0.134, 0.135, 0.137, 0.134])
self.wind_speed = np.zeros(timesteps)
self.wind_direction = np.zeros(timesteps)
self.stab_class = np.zeros(timesteps)
self.ground_temp = np.ones(timesteps) * GROUND_TEMP
self.a_temp = self.ground_temp - 20
self.pressure = np.ones(timesteps) * PRESSURE
# emissivities of the ground and air
self.e_a = np.ones(timesteps) * 0.1
self.e_g = np.ones(timesteps) * 0.5
# Stability classes are chosen randomly with equal probability, subject to constraints based on wind speed.
# Stability classes 5 and 6 are never chosen because they rarely occur during the day.
for ind in range(0, timesteps):
self.wind_speed[ind] = random.choice(speed_data.wind_speed)
self.wind_direction[ind] = random.choice(dir_data.wind_direction)
if self.wind_speed[ind] < 2:
self.stab_class[ind] = random.choice([0, 1])
elif self.wind_speed[ind] < 3:
self.stab_class[ind] = random.choice([0, 1, 2])
elif self.wind_speed[ind] < 5:
self.stab_class[ind] = random.choice([1, 2, 3])
else:
self.stab_class[ind] = random.choice([2, 3])
set_kwargs_attrs(self, kwargs)
self.a, self.l, self.q = np.zeros(timesteps), np.zeros(timesteps), np.zeros(timesteps)
self.k, self.p = np.zeros(timesteps), | np.zeros(timesteps) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 9 15:19:56 2017
Example for contour of ribbon electron beam
@author: Boytsov
"""
import numpy as np
import matplotlib.pyplot as plt
import h5py
SGSE_conv_unit_current_to_A = 3e10 * 0.1; #from current units SGSE to A
SI_conv_cm_to_m = 0.01;
SI_conv_g_to_kg = 0.001
SI_conv_Fr_to_C = 3.3356409519815207e-10
Si_conv_G_T = 0.0001
eps0 = 8.85e-12
def get_B_field( h5file ):
B_field = h5file["/ExternalFields/mgn_uni"].attrs["magnetic_uniform_field_z"][0]
return B_field * Si_conv_G_T
def get_source_current( h5file ):
time_step = h5file["/TimeGrid"].attrs["time_step_size"][0]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"][0]
particles_per_step = h5file[
"/ParticleSources/cathode_emitter"].attrs["particles_to_generate_each_step"][0]
current = np.abs(particles_per_step * charge / time_step)
return current / SGSE_conv_unit_current_to_A
def get_source_particle_parameters( h5file ):
mass = h5file["/ParticleSources/cathode_emitter"].attrs["mass"][0]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"][0]
momentum_z = h5file["/ParticleSources/cathode_emitter"].attrs["mean_momentum_z"][0]
return ( mass * SI_conv_g_to_kg,
charge * SI_conv_Fr_to_C,
momentum_z * SI_conv_g_to_kg * SI_conv_cm_to_m )
def get_source_geometry( h5file ):
    start_y = h5file["/ParticleSources/cathode_emitter"].attrs["box_y_top"][0]
    end_y = h5file["/ParticleSources/cathode_emitter"].attrs["box_y_bottom"][0]
    start_x = h5file["/ParticleSources/cathode_emitter"].attrs["box_x_left"][0]
    end_x = h5file["/ParticleSources/cathode_emitter"].attrs["box_x_right"][0]
length_of_cathode = start_y-end_y
half_width_of_cathode = (start_x-end_x) / 2
center_of_beam = (start_x+end_x) / 2
return ( length_of_cathode * SI_conv_cm_to_m,
half_width_of_cathode * SI_conv_cm_to_m,
center_of_beam * SI_conv_cm_to_m )
def get_zlim( h5file ):
    start_z = (h5file["/ParticleSources/cathode_emitter"].attrs["box_z_near"][0] + h5file["/ParticleSources/cathode_emitter"].attrs["box_z_far"][0]) / 2
    end_z = h5file["/SpatialMesh/"].attrs["z_volume_size"][0]
return( start_z * SI_conv_cm_to_m,
end_z * SI_conv_cm_to_m)
def get_voltage( momentum_z, mass, charge ):
energy = (momentum_z * momentum_z) / (2 * mass)
voltage = energy / np.abs(charge)
return voltage
def get_current_dens(current,length_of_cathode):
current_dens = current / length_of_cathode
return current_dens
def eta(charge,mass):
eta = np.abs(charge / mass )
return eta
def velocity(eta,voltage):
velocity = np.sqrt(2*eta*voltage)
return velocity
def R_const(half_thick, x0_const, velocity, angle, B):
R_const = half_thick * np.sqrt( (1 - x0_const/half_thick)**2)
return R_const
def lambda_const(eta, voltage ,B_field):
lambda_const = 4 * np.pi / (np.sqrt(2*eta)) * np.sqrt(voltage) / B_field
return lambda_const
def phi_const(x0_const, half_thick, velocity, angle, eta, B_field):
phi_const = -1 * np.arctan((1 - x0_const / half_thick) * (eta * B_field * half_thick) / (velocity * np.tan(angle)))
return phi_const
def x0_const(eta, current_dens, voltage, B_field, B_field_cathode, xk):
a0 = 1 / (2*2**0.5*eps0*eta**(3/2)) * current_dens / (B_field**2*voltage**0.5)
x0_const = a0 + B_field_cathode / B_field * xk
return x0_const
def contour( z_position , x0_const, R_const, lambda_const, phi_const):
contour = x0_const - R_const * np.sin(2*np.pi/lambda_const*z_position+phi_const)
return contour
def contour_2(z_position, x0_const, current_dens, mass, charge, velocity, B_field):
omega_const = charge * B_field / mass
c_const = current_dens * mass / (2*eps0*charge*velocity*B_field**2)
c_const = -c_const # todo: remove
angle_const=np.cos(omega_const*z_position/velocity)
contour = x0_const-c_const+c_const*angle_const
return contour
filename = "contour_0001000.h5"
h5 = h5py.File( filename, mode="r")
phi_shift = 0 # to combine phase
B_field = get_B_field( h5 )
B_field_cathode = B_field
current = get_source_current( h5 )
mass, charge, momentum_z = get_source_particle_parameters( h5 )
length_of_cathode, half_thick, center_of_beam = get_source_geometry( h5 )
start_z, end_z = get_zlim( h5 )
voltage = get_voltage( momentum_z, mass, charge )
current_dens = get_current_dens(current,length_of_cathode)
eta = eta(charge,mass)
velocity = velocity(eta,voltage)
conv_deg_to_rad = np.pi/180
angle = 0 * conv_deg_to_rad
x0_const = x0_const(eta, current_dens, voltage, B_field, B_field_cathode, half_thick)
R_const = R_const(half_thick, x0_const, velocity, angle, B_field)
lambda_const = lambda_const(eta, voltage ,B_field)
phi_const = phi_const(x0_const, half_thick, velocity, angle, eta, B_field) + phi_shift
print(x0_const)
steps_z = 100
position_z = np.arange(0,end_z-start_z,(end_z-start_z)/steps_z) # points in z direction, from 0 to (end_z - start_z) in steps_z equal steps [m]
contour = contour( position_z , x0_const, R_const, lambda_const, phi_const) # countour calculation, m
contour2 = contour_2(position_z, half_thick, current_dens, mass, charge, velocity, B_field)
h5 = h5py.File( filename , mode="r") # read h5 file
plt.tick_params(axis='both', which='major', labelsize=18)
plt.xlabel("Z position, mm",fontsize=18)
plt.ylabel("X position, mm",fontsize=18)
#plt.ylim(0.0002,0.0003)
x= | np.array([]) | numpy.array |
import sys, os
sys.path.append(os.pardir)
import numpy as np
from lib.functions import sigmoid, softmax, cross_entropy
class MatMulLayer():
def __init__(self):
self.X = None
self.W = None
def forward(self, X, W):
Y = np.dot(X, W)
self.X = X
self.W = W
return Y
def backward(self, dY):
dX = np.dot(dY, self.W.T)
dW = np.dot(self.X.T, dY)
return dX, dW
class MatAddLayer():
def forward(self, X, b):
Y = X + b
return Y
def backward(self, dY):
dA = dY
db = np.sum(dY, axis=0)
return dA, db
class DenseLayer():
def __init__(self, W, b):
self.W = W
self.b = b
self.dW = None
self.db = None
self.mat_mul_layer = MatMulLayer()
self.mat_add_layer = MatAddLayer()
def forward(self, X):
Y = self.mat_add_layer.forward(self.mat_mul_layer.forward(X, self.W), self.b)
return Y
def backward(self, dY):
_, self.db = self.mat_add_layer.backward(dY)
dX, self.dW = self.mat_mul_layer.backward(dY)
return dX
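# Minimal usage sketch (shapes and values here are illustrative, not part of the library):
#   W = np.random.randn(3, 2) * 0.01
#   b = np.zeros(2)
#   layer = DenseLayer(W, b)
#   Y = layer.forward(np.random.randn(5, 3))    # -> shape (5, 2)
#   dX = layer.backward(np.ones((5, 2)))        # -> shape (5, 3); layer.dW and layer.db are filled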
class ConvolutionLayer:
def __init__(self, W, b, padding=0, stride=1):
self.W = W
self.b = b
self.padding = padding
self.stride = stride
self.X = None
self.X_col = None
self.W_col = None
self.dW = None
self.db = None
def forward(self, X):
self.X = X
N_batch, H_in, W_in, C_in = X.shape
H_filter, W_filter, C_in, C_out = self.W.shape
H_out = (H_in + 2 * self.padding - H_filter) // self.stride + 1
W_out = (W_in + 2 * self.padding - W_filter) // self.stride + 1
if self.padding > 0:
X = np.pad(X, ((0, 0), (self.padding, self.padding), (self.padding, self.padding), (0, 0)), 'constant')
X_col = np.zeros((N_batch * H_out * W_out, H_filter * W_filter * C_in))
X_col_row_index = 0
for n_batch in range(N_batch): # TODO: Maybe I can remove this loop over N_batch?
for h in range(H_out):
for w in range(W_out):
h_start = h * self.stride
h_end = h_start + H_filter
w_start = w * self.stride
w_end = w_start + W_filter
X_slice = X[n_batch, h_start:h_end, w_start:w_end, :].transpose(2, 0, 1)
X_col[X_col_row_index, :] = X_slice.reshape(1, -1)
X_col_row_index += 1 # X_col_row_index = n_batch * (H_out * W_out) + h * W_out + w
W_col = self.W.transpose(2, 0, 1, 3).reshape(-1, C_out)
Y_col = np.dot(X_col, W_col)
Y = Y_col.reshape(N_batch, H_out, W_out, C_out) + self.b
self.X_col = X_col
self.W_col = W_col
return Y
def backward(self, dY):
N_batch, H_in, W_in, C_in = self.X.shape
H_filter, W_filter, _, C_out = self.W.shape
_, H_out, W_out, _ = dY.shape
# dY
dY_col = dY.reshape(-1, C_out)
# db
db = np.sum(dY, axis=(0, 1, 2))
# dW
dW_col = np.dot(self.X_col.T, dY_col)
dW = dW_col.reshape(C_in, H_filter, W_filter, C_out).transpose(1, 2, 0, 3)
# dX
dX_col = np.dot(dY_col, self.W_col.T)
dX = np.zeros((N_batch, H_in + 2 * self.padding, W_in + 2 * self.padding, C_in))
dX_col_row_index = 0
for n_batch in range(N_batch):
for h in range(H_out):
for w in range(W_out):
h_start = h * self.stride
h_end = h_start + H_filter
w_start = w * self.stride
w_end = w_start + W_filter
dX_col_slice = dX_col[dX_col_row_index, :].reshape(C_in, H_filter, W_filter).transpose(1, 2, 0)
dX[n_batch, h_start:h_end, w_start:w_end, :] += dX_col_slice
dX_col_row_index += 1 # dX_col_row_index = n_batch * (H_out * W_out) + h * W_out + w
if self.padding > 0:
dX = dX[:, self.padding:-self.padding, self.padding:-self.padding, :]
self.dW = dW
self.db = db
return dX
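# Shape sketch for the im2col-style convolution above (numbers are illustrative): with NHWC
# input X of shape (2, 8, 8, 3), filters W of shape (3, 3, 3, 4), padding=1 and stride=1,
# forward(X) returns Y of shape (2, 8, 8, 4); backward(dY) returns dX with X's shape and
# stores the filter and bias gradients in self.dW and self.db.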
class MaxPoolingLayer:
def __init__(self, stride):
self.stride = stride
self.X = None
def forward(self, X):
N_batch, H_in, W_in, C_in = X.shape
H_out = H_in // self.stride
W_out = W_in // self.stride
Y = np.zeros((N_batch, H_out, W_out, C_in))
for h in range(H_out):
h_start = h * self.stride
h_end = h_start + self.stride
for w in range(W_out):
w_start = w * self.stride
w_end = w_start + self.stride
X_slice = X[:, h_start:h_end, w_start:w_end, :]
Y[:, h, w, :] = np.max(X_slice, axis=(1, 2))
self.X = X
return Y
def backward(self, dY):
N_batch, H_in, W_in, C_in = self.X.shape
H_out = H_in // self.stride
W_out = W_in // self.stride
dX = np.zeros_like(self.X)
for n_batch in range(N_batch):
for h in range(H_out):
for w in range(W_out):
h_start = h * self.stride
h_end = h_start + self.stride
w_start = w * self.stride
w_end = w_start + self.stride
current_dY = dY[n_batch, h, w, :]
X_slice = self.X[n_batch, h_start:h_end, w_start:w_end, :]
flat_X_slice_by_channel = X_slice.transpose(2, 0, 1).reshape(C_in, -1)
max_index = np.argmax(flat_X_slice_by_channel, axis=1)
gradient = | np.zeros_like(flat_X_slice_by_channel) | numpy.zeros_like |
import numpy as np
def get_HNF_diagonals(n):
"""Finds the diagonals of the HNF that reach the target n value.
Args:
n (int): The target determinant for the HNF.
Retruns:
diags (list of lists): The allowed values of the determinant.
"""
diags = []
for i in range(1,n+1):
if not n%i == 0:
continue
else:
            q = n//i
for j in range(1,q+1):
if not q%j == 0:
continue
else:
                    diags.append([i,j,q//j])
return diags
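# Example (with integer division as above): get_HNF_diagonals(4) returns
# [[1, 1, 4], [1, 2, 2], [1, 4, 1], [2, 1, 2], [2, 2, 1], [4, 1, 1]],
# i.e. every (a, c, f) with a*c*f == 4, the allowed HNF diagonals for determinant 4.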
def forms_group(gens,pg):
"""Tests if the given generators forms a group.
Args:
gens (list of list): The generators to check.
pg (list of list): The group the generators form.
Returns:
corret_gens (bool): True if the generators form the group.
"""
correct_gens = False
group = []
for i in gens:
for j in gens:
test = np.matmul(i,j)
in_group = False
for k in group:
if np.allclose(test,k):
in_group = True
if not in_group:
group.append(test)
growing = True
while growing:
nfound = 0
for i in gens:
for j in group:
test = np.matmul(i,j)
in_group = False
for k in group:
if np.allclose(test,k):
in_group = True
if not in_group:
group.append(test)
nfound += 1
if nfound == 0:
growing = False
if not len(pg) == len(group):
correct_gens = False
else:
        for i in pg:
            in_group = False
            for k in group:
                if np.allclose(i,k):
                    in_group = True
                    break
            if in_group:
                correct_gens = True
            else:
                correct_gens = False
                break
return correct_gens
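# Quick illustrative check: the identity and the inversion form a group of order 2 that is
# generated by the inversion alone, so
#   I = np.identity(3)
#   forms_group([-I], [I, -I])   # -> True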
def find_gens_of_pg(pg):
"""This subroutine finds the generators of the point group.
Args:
pg (list of list): A list of the matrix form of the point group.
Returns:
gens (list of list): Those operations that will generate the
remainder of the group.
"""
from itertools import combinations
n_gens = 1
found_gens = False
while not found_gens:
possible_gens = list(combinations(range(len(pg)),r=n_gens))
for test in possible_gens:
test_gens = []
for i in test:
test_gens.append(pg[i])
if forms_group(test_gens,pg):
gens = test_gens
found_gens = True
break
n_gens += 1
return gens
def div_HNF(lat,n):
"""Finds the HNFs that preserve the symmetry of the lattice.
Args:
lat (numpy.ndarray): The vectors (as rows) of the parent lattice.
n (int): The volume factor for the supercell.
Returns:
HNFs (list of lists): The HNFs the preserve the symmetry.
"""
from phenum.symmetry import _get_lattice_pointGroup
diags = get_HNF_diagonals(n)
pg = _get_lattice_pointGroup(lat)
gens = find_gens_of_pg(pg)
# transpose the lattice so that it has the right form for the rest of the
# operations.
lat = np.transpose(lat)
lat_gens = []
for g in gens:
temp = np.matmul(np.linalg.inv(lat),np.matmul(g,lat))
lat_gens.append(np.transpose(temp))
x11 = []
x12 = []
x13 = []
x21 = []
x22 = []
x23 = []
x31 = []
x32 = []
x33 = []
for g in lat_gens:
# print("g",g)
x11.append(g[0][0])
x12.append(g[0][1])
x13.append(g[0][2])
x21.append(g[1][0])
x22.append(g[1][1])
x23.append(g[1][2])
x31.append(g[2][0])
x32.append(g[2][1])
x33.append(g[2][2])
x11 = np.array(x11)
x12 = np.array(x12)
x13 = np.array(x13)
x21 = np.array(x21)
x22 = np.array(x22)
x23 = np.array(x23)
x31 = np.array(x31)
x32 = np.array(x32)
x33 = np.array(x33)
count = 0
HNFs = []
for diag in diags:
print("diag",diag)
a = diag[0]
c = diag[1]
f = diag[2]
# a divides tests
if np.allclose((x13*f)%a,0):
d = None
e = None
b = None
if np.allclose(x13,0) and not np.allclose(x12,0):
# d and e are unknown and b=0.
if not np.allclose((x12*c)%a,0):
# print("c cond",(x12*c)%a)
continue
b = 0
al1 = b*x12/a
al2 = c*x12/a
al3 = f*x13/a
tHNFs = cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
count += 1
elif np.allclose(x12,0) and not np.allclose(x13,0):
# b is unkown but d and e can have same values.
vals = []
N = 0
xt = x13[np.nonzero(x13)]
val = np.unique(N*a/xt)
while any(abs(val) < f):
for v in val:
if v < f:
vals.append(v)
N += 1
val = np.unique(N*a/xt)
for d in vals:
for e in vals:
al1 = d*x13/a
al2 = e*x13/a
al3 = f*x13/a
tHNFs = cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
count += 1
else:
for e in range(f):
if np.allclose((c*x12 +e*x13)%a,0):
for b in range(c):
for d in range(f):
if np.allclose((b*x12+d*x13)%a,0):
al1 = (b*x12+d*x13)/a
al2 = (c*x12+e*x13)/a
al3 = f*x13/a
tHNFs = cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
count += 1
else:
continue
else:
continue
else:
# print("f cond")
continue
return HNFs
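# Illustrative call (requires phenum; the lattice below is only an example): lattice vectors
# are given as rows, so for an fcc cell and a volume factor of 4 one would use
#   fcc = np.array([[0., 0.5, 0.5], [0.5, 0., 0.5], [0.5, 0.5, 0.]])
#   hnfs = div_HNF(fcc, 4)
# and hnfs then holds the lower-triangular [[a,0,0],[b,c,0],[d,e,f]] matrices that were kept.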
def fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33):
"""Finds the f divides conditions for the symmetry preserving HNFs.
Args:
a (int): a from the HNF.
b (int): b from the HNF.
c (int): c from the HNF.
d (int): d from the HNF.
e (int): e from the HNF.
f (int): f from the HNF.
al1 (numpy.array): array of alpha1 values from write up.
al2 (numpy.array): array of alpha2 values from write up.
be1 (numpy.array): array of beta1 values from write up.
be2 (numpy.array): array of beta2 values from write up.
x11 (numpy.array): array of pg values for x(1,1) spot.
x22 (numpy.array): array of pg values for x(2,2) spot.
x31 (numpy.array): array of pg values for x(3,1) spot.
x32 (numpy.array): array of pg values for x(3,2) spot.
x33 (numpy.array): array of pg values for x(3,3) spot.
Returns:
HNFs (list of lists): The symmetry preserving HNFs.
"""
# print("***************enter fdivs")
# print("b: ",b," d: ",d," e: ",e)
    HNFs = []
    # precompute the two combinations that recur in the divisibility tests below
    xvar1 = (x33-x22-be2)
    xvar2 = (x33-x11-al1)
    if b == None and d == None and e == None:
for b in range(c):
for e in range(f):
if not np.allclose(xvar2,0):
N = min(np.round((a*x31+b*x32-be1*e)/f))
xt = xvar2[np.nonzero(xvar2)]
val = np.unique(np.reshape(np.outer(N*f-a*x31-b*x32+be1*e,1/xt),len(xt)*len(x32)))
while any(abs(val)<f):
for v in val:
if v < f and v >= 0 and np.allclose(v%1,0):
d = v
                                f1 = a*x31+b*x32+d*xvar2-be1*e
                                f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
N += 1
val = np.unique(np.reshape(np.outer(N*f-a*x31-b*x32+be1*e,1/xt),len(xt)*len(x32)))
elif not np.allclose(al2,0):
                    N = max(np.round((c*x32+e*xvar1)/f))
at = al2[np.nonzero(al2)]
                    val = np.unique(np.reshape(np.outer(-N*f+c*x32+e*xvar1,1/at),len(x32)*len(at)))
while any(abs(val)<f):
for v in val:
if v < f and v >= 0 and np.allclose(v%1,0):
d = v
                                f1 = a*x31+b*x32+d*xvar2-be1*e
                                f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
N -= 1
                        val = np.unique(np.reshape(np.outer(-N*f+c*x32+e*xvar1,1/at),len(x32)*len(at)))
else:
for d in range(f):
                        f1 = a*x31+b*x32+d*xvar2-be1*e
                        f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
elif b == None:
f2 = c*x32-d*al2+e*(x33-x22-be2)
if np.allclose(f2%f,0):
if not np.allclose(x32,0):
                N = min(np.round((a*x31+d*(x33-x11-al1))/f))
xt = x32[np.nonzero(x32)]
val = np.unique(np.reshape(np.outer(N*f-a*x31-d*(x33-x11-al1),1/xt),len(x33)*len(xt)))
while any(abs(val)<c):
for v in val:
if v<c and v>=0 and np.allclose(v%1,0):
b = v
                            f1 = a*x31 + b*x32 + e*be1 +d*(x33-x11-al1)
if np.allclose(f1%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
N += 1
val = np.unique(np.reshape(np.outer(N*f-a*x31-d*(x33-x11-al1),1/xt),len(x33)*len(xt)))
else:
for b in range(c):
                    f1 = a*x31 + b*x32 + e*be1 +d*(x33-x11-al1)
if np.allclose(f1%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
elif d==None and e == None:
for e in range(f):
if not np.allclose(xvar2,0):
N = min(np.round((a*x31+b*x32-be1*e)/f))
xt = xvar2[np.nonzero(xvar2)]
val = np.unique(np.reshape(np.outer(N*f-a*x31-b*x32+be1*e,1/xt),len(xt)*len(x32)))
while any(abs(val)<f):
for v in val:
if v < f and v >= 0 and np.allclose(v%1,0):
d = v
                            f1 = a*x31+b*x32+d*xvar2-be1*e
                            f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
N += 1
val = np.unique(np.reshape(np.outer(N*f-a*x31-b*x32+be1*e,1/xt),len(xt)*len(x32)))
elif not np.allclose(al2,0):
                N = max(np.round((c*x32+e*xvar1)/f))
at = al2[np.nonzero(al2)]
                val = np.unique(np.reshape(np.outer(-N*f+c*x32+e*xvar1,1/at),len(x32)*len(at)))
while any(abs(val)<f):
for v in val:
if v < f and v >= 0 and np.allclose(v%1,0):
d = v
                            f1 = a*x31+b*x32+d*xvar2-be1*e
                            f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
N -= 1
                    val = np.unique(np.reshape(np.outer(-N*f+c*x32+e*xvar1,1/at),len(x32)*len(at)))
else:
for d in range(f):
                    f1 = a*x31+b*x32+d*xvar2-be1*e
                    f2 = c*x32-d*al2+e*xvar1
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
else:
if e==None or d==None or b==None:
print("*****************ERROR IN fdivs**************")
else:
f2 = c*x32-d*al2+e*(x33-x22-be2)
f1 = a*x31 + b*x32 + e*be1 +d*(x33-x11-al1)
# if np.allclose(e,1) and np.allclose(d,1):
# print("e: ",e," d: ",d," b: ",b)
# print("x31: ",x31)
# print("x32: ",x32)
# print("al2: ",al2)
# print("x33: ",x33)
# print("x22: ",x22)
# print("be2: ",be2)
# print("al1: ",al1)
# print("f1: ",f1)
# print("f2: ",f2)
if np.allclose(f1%f,0) and np.allclose(f2%f,0):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
HNFs.append(HNF)
# print("***********exit fdivs**************")
return HNFs
def cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33):
"""Finds the c divides conditions for the symmetry preserving HNFs.
Args:
a (int): a from the HNF.
b (int): b from the HNF.
c (int): c from the HNF.
d (int): d from the HNF.
e (int): e from the HNF.
f (int): f from the HNF.
al1 (numpy.array): array of alpha1 values from write up.
al2 (numpy.array): array of alpha2 values from write up.
al3 (numpy.array): array of alpha3 values from write up.
x11 (numpy.array): array of pg values for x(1,1) spot.
x21 (numpy.array): array of pg values for x(2,1) spot.
x22 (numpy.array): array of pg values for x(2,2) spot.
x23 (numpy.array): array of pg values for x(2,3) spot.
x31 (numpy.array): array of pg values for x(3,1) spot.
x32 (numpy.array): array of pg values for x(3,2) spot.
x33 (numpy.array): array of pg values for x(3,3) spot.
Returns:
HNFs (list of lists): The symmetry preserving HNFs.
"""
HNFs = []
if np.allclose(x23,0):
if b == None:
# find the b values, d and e still unkown
if not np.allclose(al3, 0):
N=0
at = al3[np.nonzero(al3)]
val = np.unique(N*c/at)
while any(abs(val) <c):
for v in val:
                    if v < c and v >= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21 + b*(x22-al1-x11)
c2 =(-b*al2)
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 =c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
val = np.unique(N*c/at)
elif not np.allclose(al2,0):
N=0
at = al2[np.nonzero(al2)]
val = np.unique(N*c/at)
while any(abs(val) <c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c1 = a*x21 + b*(x22-al1-x11)
c3 =(-b*al3)
if np.allclose(c1%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 =-b*al2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
val = np.unique(N*c/at)
else:
if not np.allclose((x22-x11-al1),0):
N=0
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt)))
while any(abs(val) <c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c2 = -b*al2
c3 =(-b*al3)
if np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = (a*x21+b*(x22-x11-al1))/c
be2 =-b*al2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
                                    for t in tHNFs:
HNFs.append(t)
N += 1
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt)))
else:
                    c1 = a*x21
                    c2 = 0
                    c3 = 0
                    if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
                        be1 = c1/c
                        be2 = c2/c
                        tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
                        for t in tHNFs:
                            HNFs.append(t)
else:
            c1 = a*x21 + b*(x22-al1-x11)
            c2 = (-b*al2)
            c3 = (-b*al3)
            if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
                be1 = c1/c
                be2 = c2/c
                tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
                for t in tHNFs:
                    HNFs.append(t)
else:
if np.allclose(al3,0):
if np.allclose((f*x23)%c,0):
if b == None and e == None and d == None:
                    if np.allclose(al1,0) and np.allclose(al2,0) and np.allclose(al3,0):
N = 0
xt = x23[np.nonzero(x23)]
val = np.unique(N*c/xt)
while any(abs(val)<f):
for v in val:
if v <f and v>=0 and np.allclose(v%1,0):
e = v
for b in range(c):
N2 =0
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
d = v2
be1 = (a*x21+b*(x22-x11)+d*x23)/c
be2 = e*x23/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
                                                        HNFs.append(t)
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt)))
N += 1
val = np.unique(N*c/xt)
elif not np.allclose(al3,0):
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(al3)))
while any(abs(val) < c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
                                                while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
                                                            be2 = (e*x23-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(x22)*len(xt)))
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
else:
for b in range(c):
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2 >= 0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
                                        while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3 >= 0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
                                                    be2 = (e*x23-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(al2)*len(xt)))
elif b == None:
if not np.allclose(al3,0):
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
elif not np.allclose(al2,0):
N = max(np.round(e*x23/c))
at = al2[np.nonzero(al2)]
val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al2[np.nonzero(al2)]
val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at)))
else:
if not np.allclose((x22-x11-al1),0):
N = min(np.round((a*x21-d*x23)/c))
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
                            val = np.unique(np.reshape(np.outer(N*c-a*x21-d*x23,1/xt),len(x23)*len(xt)))
while any(abs(val) < c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
                                val = np.unique(np.reshape(np.outer(N*c-a*x21-d*x23,1/xt),len(x23)*len(xt)))
else:
c1 = a*x21+d*x23
c2 = e*x23
c3 = f*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
elif d == None and e == None:
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
                                            be2 = (e*x23-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
else:
c1 = a*x21+b*(x22-al1-x11)+d*x23
c2 = -b*al2+e*x23
c3 = -b*al3+f*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
# else:
# print("f: ",f)
# print("c: ",c)
# print("x32: ",x32)
# print("failed f*x32/c")
else:
if b==None and d==None and e==None:
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
elif b==None:
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
elif d==None and e==None:
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape( | np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt) | numpy.outer |
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from tfunet.image.utils import BaseDataProvider
class GrayScaleDataProvider(BaseDataProvider):
channels = 1
n_class = 2
def __init__(self, nx, ny, **kwargs):
super(GrayScaleDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class = 3
def _next_data(self):
return create_image_and_label(self.nx, self.ny, **self.kwargs)
class RgbDataProvider(BaseDataProvider):
channels = 3
n_class = 2
def __init__(self, nx, ny, **kwargs):
super(RgbDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class = 3
def _next_data(self):
data, label = create_image_and_label(self.nx, self.ny, **self.kwargs)
return to_rgb(data), label
def create_image_and_label(nx, ny, cnt=10, r_min=5, r_max=50, border=92, sigma=20, rectangles=False):
image = np.ones((nx, ny, 1))
    label = np.zeros((nx, ny, 3), dtype=bool)
    mask = np.zeros((nx, ny), dtype=bool)
for _ in range(cnt):
a = np.random.randint(border, nx - border)
b = np.random.randint(border, ny - border)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1, 255)
y, x = np.ogrid[-a:nx - a, -b:ny - b]
m = x * x + y * y <= r * r
mask = np.logical_or(mask, m)
image[m] = h
label[mask, 1] = 1
if rectangles:
        mask = np.zeros((nx, ny), dtype=bool)
for _ in range(cnt // 2):
a = np.random.randint(nx)
b = np.random.randint(ny)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1, 255)
            m = np.zeros((nx, ny), dtype=bool)
m[a:a + r, b:b + r] = True
mask = np.logical_or(mask, m)
image[m] = h
label[mask, 2] = 1
label[..., 0] = ~(np.logical_or(label[..., 1], label[..., 2]))
image += np.random.normal(scale=sigma, size=image.shape)
image -= np.amin(image)
image /= np.amax(image)
if rectangles:
return image, label
else:
return image, label[..., 1]
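# Illustrative call: with the defaults,
#   image, label = create_image_and_label(572, 572)
# yields a (572, 572, 1) float image normalised to [0, 1] and a (572, 572) boolean circle mask;
# with rectangles=True the label is instead (572, 572, 3) (background / circles / rectangles).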
def to_rgb(img):
img = img.reshape(img.shape[0], img.shape[1])
img[np.isnan(img)] = 0
img -= np.amin(img)
img /= np.amax(img)
blue = np.clip(4 * (0.75 - img), 0, 1)
red = | np.clip(4 * (img - 0.25), 0, 1) | numpy.clip |
# Large amount of credit goes to:
# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
# which I've used as a reference for this implementation
from __future__ import print_function, division
from functools import partial
import json
import os
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.layers.merge import _Merge
from keras.models import model_from_json, Sequential, Model
from keras.optimizers import RMSprop
from keras_gan.gan_base import GANBase
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
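# Typical wiring in the critic graph (tensor names here are assumptions, not part of this file):
#   interpolated_img = RandomWeightedAverage()([real_img, fake_img])
#   validity_interpolated = critic(interpolated_img)
# Note the batch size is hard-coded to 32 in K.random_uniform, so it must match the training batch size.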
def wasserstein_loss(y_true, y_pred):
return K.mean(y_true * y_pred)
def gradient_penalty_loss(_, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
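# Usage sketch, mirroring the keras-contrib example referenced above: Keras losses only receive
# (y_true, y_pred), so the interpolated samples are bound in with functools.partial:
#   partial_gp_loss = partial(gradient_penalty_loss, averaged_samples=interpolated_img)
#   partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires a named loss function
# and the critic is then compiled with losses [wasserstein_loss, wasserstein_loss, partial_gp_loss].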
class ModelBuilder(object):
    def build_layers(self, model):
        raise NotImplementedError
def build(self):
model = Sequential()
self.build_layers(model)
input_layer = Input(shape=(self.input_shape,))
output_layer = model(input_layer)
return Model(input_layer, output_layer)
class WGANGPGeneratorBuilder(ModelBuilder):
def __init__(self,
input_shape,
initial_n_filters=128,
initial_height=7,
initial_width=7,
n_layer_filters=(128, 64),
channels=1):
"""Example usage:
builder = WGANGPGeneratorBuilder()
generator_model = builder.build()
:param input_shape:
:param initial_n_filters:
:param initial_height:
:param initial_width:
:param n_layer_filters:
:param channels:
"""
self.input_shape = input_shape
self.initial_n_filters = initial_n_filters
self.initial_height = initial_height
self.initial_width = initial_width
self.n_layer_filters = n_layer_filters
self.initial_layer_shape = (self.initial_height, self.initial_width, self.initial_n_filters)
self.channels = channels
def build_first_layer(self, model):
model.add(Dense( | np.prod(self.initial_layer_shape) | numpy.prod |
import inaccel.coral as inaccel
import numpy as np
import time
class StereoBM:
def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None ):
# allocate mem for camera parameters for rectification and bm_state class
with inaccel.allocator:
if cameraMA_l is None:
self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)
else:
self.cameraMA_l_fl = | np.array(cameraMA_l, dtype=np.float32) | numpy.array |
# TODO:
# - Check ros dbw node to make sure all vehicle states are available (pose, speed, yaw rate)
from gekko import GEKKO
import numpy as np
from scipy import interpolate
from math import pi
import rospy
class LateralMPC(object):
def __init__(self, vehicle_mass, wheel_base, max_steer_angle, steer_ratio):
self.vehicle_mass = vehicle_mass
self.wheel_base = wheel_base
self.steer_ratio = steer_ratio
self.front_to_cg = 0.35*wheel_base
self.rear_to_cg = wheel_base - self.front_to_cg
self.yaw_inertial_moment = 2.86*vehicle_mass - 1315
self.max_steer = max_steer_angle
self.min_steer = -max_steer_angle
self.front_cornering_stiffness = 867*180/pi
self.rear_cornering_stiffness = 867*180/pi
self.pred_horizon = 20
self.pred_time = 0.1
self.ctrl_horizon = 1
def get_steering(self, current_steer, current_x, current_y, current_psi, current_velocity, current_lateral_velocity, current_yaw_rate, trajectory_x, trajectory_y, trajectory_psi):
# Translate vehicle and trajectory points to trajectory frame
x_t = trajectory_x[0]
y_t = trajectory_y[0]
current_x -= x_t
current_y -= y_t
for i in range(len(trajectory_x)):
trajectory_x[i] -= x_t
trajectory_y[i] -= y_t
# Rotate vehicle and trajectory points clockwise to trajectory frame
theta = -np.arctan2(trajectory_y[1], trajectory_x[1])
x0 = current_x*np.cos(theta) - current_y*np.sin(theta)
y0 = current_x*np.sin(theta) + current_y*np.cos(theta)
psi0 = current_psi + theta
        for i in range(len(trajectory_x)):
            # use the pre-rotation values for both components, otherwise the updated x leaks into y
            x_old = trajectory_x[i]
            y_old = trajectory_y[i]
            trajectory_x[i] = x_old*np.cos(theta) - y_old*np.sin(theta)
            trajectory_y[i] = x_old*np.sin(theta) + y_old*np.cos(theta)
import numpy as np
from scipy.integrate import quad
from scipy.special import binom
from scipy.special import gamma
from scipy.special import gammainc
from scipy.stats import binom
from numba import njit
def get_thetami_mat(mmax, beta, f=lambda m: 1, K=1, alpha=2, tmin=1, T=np.inf):
#uses an exponential dose distribution
#thetami = thetam(i/m-1)
thetami = np.zeros((mmax+1,mmax+1))
Z = (tmin**(-alpha)-T**(-alpha))/alpha
for m in range(2,mmax+1):
for i in range(1,m):
tau_c = K*(m-1)/(beta*i*f(m))
thetami[m,i] += np.exp(-tau_c)-np.exp(-tau_c/T)*T**(-alpha)
thetami[m,i] += tau_c**(-alpha)*gamma(alpha+1)*(\
gammainc(alpha+1,tau_c)-
gammainc(alpha+1,tau_c/T))
return thetami/(Z*alpha)
@njit
def get_binom(N,p):
if N > 0:
pmf = np.zeros(N+1)
pmf[0] = (1-p)**N
for i in range(N):
pmf[i+1] = pmf[i]*(N-i)*p/((i+1)*(1-p))
else:
pmf = np.array([1.])
return pmf
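# Example: the recursion reproduces the binomial pmf over 0..N,
#   get_binom(3, 0.5) -> array([0.125, 0.375, 0.375, 0.125])
# which matches scipy.stats.binom.pmf(np.arange(4), 3, 0.5).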
@njit
def get_thetam_bar(rho_bar,thetami,mvec,mmax):
thetam_bar = | np.zeros(mmax+1) | numpy.zeros |
# -*- coding: UTF-8 -*-
import sys
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from rklib.utils import dirDetectCreate
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from matplotlib import font_manager as fm
from matplotlib_venn import venn2,venn3
import itertools
from rblib import mutilstats
import scipy.cluster.hierarchy as sch
from rklib import utils
# for projection='3d'
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import fcluster; import pandas
### dynamically set fonts
###
from matplotlib.patches import Polygon
# to get kmeans and scipy.cluster.hierarchy
from scipy.cluster.vq import *
from scipy.cluster.hierarchy import *
###
from matplotlib.colors import LogNorm
## whitening (normalization) for kmeans: from scipy.cluster.vq import whiten
from scipy.cluster.vq import whiten
#mpl.style.use('ggplot')
from rblib import mplconfig
from rblib.mplconfig import styles,color_grad,rgb2hex,makestyles
def test_iter(num):
fig = plt.figure(dpi=300)
x = 1
y = 1
ax = fig.add_subplot(111)
ret_color,ret_lines,ret_marker = styles(num)
for i in range(num):
ax.plot([x,x+1,x+2,x+3,x+4],[y,y,y,y,y],color=ret_color[i],linestyle=ret_lines[i],marker=ret_marker[i],markeredgecolor=ret_color[i],markersize=12,alpha=0.8)
y += 1
plt.savefig("test_style.png",format='png',dpi=300)
plt.clf()
plt.close()
return 0
def admixture_plot():
return 0
def plot_enrich(resultmark,resultothers,fig_prefix,xlabel,ylabel):
fig = plt.figure(figsize=(8,6),dpi=300)
num = len(resultmark) + 1
ret_color,ret_lines,ret_marker = styles(num)
ax = fig.add_subplot(111)
maxlim = 0
for i in range(num-1):
#ax.plot(resultmark[i][1],resultmark[i][2],ret_color[i]+ret_marker[i],label=resultmark[i][0],markeredgecolor=ret_color[i],markersize=8,alpha=0.7)
ax.plot(resultmark[i][1],resultmark[i][2],color=ret_color[i],linestyle='',marker=ret_marker[i],label=resultmark[i][0],markeredgecolor=ret_color[i],markersize=10,alpha=0.7)
if resultmark[i][2] > maxlim:
maxlim = resultmark[i][2]
xarr = []
yarr = []
for ret in resultothers:
xarr.append(ret[0])
yarr.append(ret[1])
ax.plot(xarr,yarr,'ko',label="others",markeredgecolor='k',markersize=3,alpha=0.5)
art = []
lgd = ax.legend(bbox_to_anchor=(1.02, 1),loc=0,borderaxespad=0,numpoints=1,fontsize=6)
art.append(lgd)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_ylim(0,maxlim+2)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',additional_artists=art,bbox_inches="tight",dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',additional_artists=art,bbox_inches="tight",dpi=300)
plt.clf()
plt.close()
return 0
# 1425 ax1.scatter(xy[:,0],xy[:,1],c=colors)
#1426 ax1.scatter(res[:,0],res[:,1], marker='o', s=300, linewidths=2, c='none')
#1427 ax1.scatter(res[:,0],res[:,1], marker='x', s=300, linewidths=2)
def verrorbar(ynames,data,fig_prefix="simerrorbar",figsize=(5,4),log=False):
# data n , mean , lower, upper, sig
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
yaxis_locations = list(range(len(ynames)))
ax.errorbar(data[:,0], yaxis_locations, xerr=np.transpose(data[:,[1,2]]),markeredgewidth=1.25,elinewidth=1.25,capsize=3,fmt="s",c="k",markerfacecolor="white")
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(ynames)
if log == True:
ax.set_xscale("log")
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
return 0
def sim_scatter(X,Y,xlabel,ylabel,alpha=0.3,fig_prefix="simscatter"):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
ax.scatter(X,Y,marker='o',linewidths=0,color='gray',alpha=alpha)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def groups_scatter_flatdata(xvec,yvec,groups,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.6,colors=None,figsize=(5,4),markersize=10):
## groups is a list , like [0,0,0,0,1,1,1,1,2,2,2,2,2,4,4,4,4,4]
# ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
setgroups = sorted(list(set(groups)))
xs = []
ys = []
npgroups = np.asarray(groups)
for i in setgroups:
xs.append(xvec[npgroups == i])
ys.append(yvec[npgroups == i])
group_scatter(xs,ys,setgroups,xlabel,ylabel,addline,fig_prefix,alpha,figsize=figsize,markersize=markersize)
return 0
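# Illustrative call (data are made up): xvec/yvec are flat numpy arrays and `groups` labels
# each point, e.g.
#   xvec, yvec = np.random.randn(60), np.random.randn(60)
#   groups = [0]*20 + [1]*20 + [2]*20
#   groups_scatter_flatdata(xvec, yvec, groups, "PC1", "PC2", fig_prefix="demo_scatter")
# which splits the points by group label and forwards them to group_scatter().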
from matplotlib.patches import Ellipse
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
def group_scatter(xs,ys,groups,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.8,colors=None,figsize=(5,4),markersize=30,addEllipse=True,xlim=None,ylim=None):
if colors == None:
colors,lines,markers = styles(len(groups))
else:
lines,markers = styles(len(groups))[1:]
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
patchs = []
nstd = 2
for i in range(len(groups)):
group = groups[i]
x = xs[i]
y = ys[i]
#patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
#patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
patchs.append(patch)
if addline != None:
[x1,x2],[y1,y2] = addline[i]
ax.plot([x1,x2],[y1,y2],color=colors[i],ls='--',lw=1.0)
##
cov = np.cov(x, y)
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nstd * np.sqrt(vals)
if addEllipse:
ell = Ellipse(xy=(np.mean(x), np.mean(y)), width=w, height=h, angle=theta, edgecolor=colors[i],alpha=1.0,facecolor='none')
ax.add_patch(ell)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
#ax.set_ylim(-1.5,1.5)
ax.legend(patchs,groups,loc=0,fancybox=False,frameon=False,numpoints=1,handlelength=0.75)
ax.grid(True,ls='--')
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def scatter2(x,y,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.3,ylog=0,xlog=0,log=0,figsize=(10,3),marker='o',linewidths=0): # line is [[x1,x2],[y1,y2]] = addline
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
colors = styles(3)[0]
ax.scatter(x,y,marker=marker,linewidths=linewidths,color=colors[0],alpha=alpha) #,label=labels[0])
if addline is not None:
[x1,x2],[y1,y2] = addline
ax.plot([x1,x2],[y1,y2],color="gray",ls='--',lw=1.0) #ax.plot(xp,yp,color=colors[n-i-1],linestyle='--',lw=1.0)
#ax.set_xlim(x1,x2)
#ax.set_ylim(y1,y2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
if log:
ax.set_yscale("log")
ax.set_xscale("log")
if ylog:
ax.set_yscale("log")
if xlog:
ax.set_xscale("log")
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def volcanoplot(siglog2fc,siglogp,totlog2fc,totlogp,xlabel = "Log2 (Fold Change)", ylabel = "-Log10(q-value)",alpha=0.3,figprefix="test"):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
ax.scatter(totlog2fc,totlogp,marker='o',linewidth=0,color='gray',alpha=alpha)
ax.scatter(siglog2fc[siglog2fc>0],siglogp[siglog2fc>0],marker='o',linewidths=0,color='#F15B6C',alpha=alpha)
ax.scatter(siglog2fc[siglog2fc<0],siglogp[siglog2fc<0],marker='o',linewidths=0,color='#2A5CAA',alpha=alpha)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
plt.savefig(figprefix+".png",format='png',dpi=300)
plt.savefig(figprefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def scatter(xother,yother,xsig,ysig,xlabel="X",ylabel="Y",labels =["No differential","Up regulated","Down regulated"] ,fig_prefix="DEGs_scatter_plot",alpha=0.3):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xother = np.asarray(xother)
yother = np.asarray(yother)
xsig = np.asarray(xsig)
ysig = np.asarray(ysig)
ax.scatter(xother,yother,marker='^',linewidths=0,color='gray',alpha=alpha,label=labels[0])
ax.scatter(xsig[ysig>xsig],ysig[ysig>xsig],marker='o',linewidths=0,color='#F15B6C',alpha=alpha,label=labels[1]) ### up
ax.scatter(xsig[xsig>ysig],ysig[xsig>ysig],marker='o',linewidths=0,color='#2A5CAA',alpha=alpha,label=labels[2]) ### down
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
ax.legend(loc=0,scatterpoints=1)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def venn_plot(datalist,setnames,fig_prefix="venn_plot",hplot=None,figsize=(5,4)):
if len(setnames) == 2:
vennfun = venn2
colors_arr = ["magenta","cyan"]
elif len(setnames) == 3:
vennfun = venn3
colors_arr = ["magenta","cyan","blue"]
else:
sys.stderr.write("[Warning] Only support 2 or 3 sets' venn plot")
return 1
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
vennfun(datalist,setnames,normalize_to=1,set_colors=colors_arr,alpha=0.3)
plt.savefig(fig_prefix+"_venn.png",format='png',dpi=300)
plt.savefig(fig_prefix+"_venn.svg",format='svg',dpi=300)
plt.clf()
plt.close()
dirDetectCreate(fig_prefix+"_venn_list")
outdir = fig_prefix+"_venn_list"
if len(setnames) == 3:
f = open(outdir+"/"+setnames[0]+".specific.lst.xls","w")
f.write("\n".join(datalist[0]-(datalist[1] | datalist[2] )))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[1]+".specific.lst.xls","w")
f.write("\n".join(datalist[1]-(datalist[0] | datalist[2] )))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[2]+".specific.lst.xls","w")
f.write("\n".join(datalist[2]-(datalist[0] | datalist[1] )))
f.write("\n")
f.close()
comb = datalist[0] & datalist[2] & datalist[1]
f = open(outdir+"/"+setnames[0]+"_and_"+setnames[1]+".lst.xls","w")
f.write("\n".join(datalist[0] & datalist[1] - comb))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[1]+"_and_"+setnames[2]+".lst.xls","w")
f.write("\n".join(datalist[1] & datalist[2] - comb))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[0]+"_and_"+setnames[2]+".lst.xls","w")
f.write("\n".join(datalist[0] & datalist[2] - comb))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[0]+"_and_"+setnames[1]+"_and_"+setnames[2]+".lst.xls","w")
f.write("\n".join(datalist[0] & datalist[2] & datalist[1] ))
f.write("\n")
f.close()
if len(setnames) == 2:
f = open(outdir+"/"+setnames[0]+".specific.lst.xls","w")
f.write("\n".join(datalist[0]-datalist[1]))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[1]+".specific.lst.xls","w")
f.write("\n".join(datalist[1]-datalist[0] ))
f.write("\n")
f.close()
f = open(outdir+"/"+setnames[0]+"_and_"+setnames[1]+".lst.xls","w")
f.write("\n".join(datalist[0] & datalist[1]))
f.write("\n")
f.close()
return 0
def kdensity(var_arr,num = 500,fun='pdf',cdfstart=-np.inf):
"""
plot theory distribution
y = P.normpdf( bins, mu, sigma)
l = P.plot(bins, y, 'k--', linewidth=1.5)
"""
if fun not in ['cdf','pdf']:
sys.stderr.write("kdensity Fun should be 'cdf' or 'pdf'")
sys.exit(1)
#idx = mutilstats.check_vecnan(var_arr)
#if idx == None:
# return [0,0],[0,0]
#kden = stats.gaussian_kde(np.asarray(var_arr)[idx])
kden = stats.gaussian_kde(np.asarray(var_arr))
#kden.covariance_factor = lambda : .25
#kden._compute_covariance()
#============ never use min and max, but use the resample data
#min_a = np.nanmin(var_arr)
#max_a = np.nanmax(var_arr)
tmpmin = []
tmpmax = []
for i in range(30):
resample_dat = kden.resample(5000)
resample_dat.sort()
tmpmin.append(resample_dat[0,4])
tmpmax.append(resample_dat[0,-5])
min_a = np.mean(tmpmin)
max_a = np.mean(tmpmax)
xnew = np.linspace(min_a, max_a, num)
if fun == 'cdf':
ynew = np.zeros(num)
ynew[0] = kden.integrate_box_1d(cdfstart,xnew[0])
for i in range(1,num):
ynew[i] = kden.integrate_box_1d(cdfstart,xnew[i])
else: ynew = kden(xnew)
return xnew,ynew
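# Usage sketch: the returned grid/curve can be plotted directly, e.g.
#   xnew, ynew = kdensity(np.random.randn(1000), num=200, fun='pdf')
#   ax.plot(xnew, ynew)
# with fun='cdf' the same grid is returned with the integrated density instead.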
def hcluster(Xnp,samplenames,fig_prefix,figsize=(5,4)):
linkage_matrix = linkage(Xnp,'ward','euclidean')
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
#dendrogram(linkage_matrix,labels=samplenames,leaf_label_rotation=45) ## new version of scipy
dendrogram(linkage_matrix,labels=samplenames,orientation='right')
ax.grid(visible=False)
fig.tight_layout()
plt.savefig(fig_prefix+"_hcluster.png",format='png',dpi=300)
plt.savefig(fig_prefix+"_hcluster.svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def plot_hmc_curve(X,Y,colors,classlabels,figname_prefix="out",scale=0):
    # Harmonic curve (Andrews plot) generation
#X = n x p Y is list, colors is list
n,p = X.shape
if n == len(Y) and len(Y) == len(colors):pass
else: return 1
if scale ==1:
X = whiten(X)
step = 100
t = np.linspace(-np.pi, np.pi, num=step)
f = np.zeros((n,step))
for i in range(n):
f[i,:] = X[i,0]/np.sqrt(2)
for j in range(1,p):
if j%2 == 1:
f[i,:] += X[i,j]*np.sin(int((j+1)/2)*t)
else:
f[i,:] += X[i,j]*np.cos(int((j+1)/2)*t)
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
uniq_colors = []
for tmpcolor in colors:
if tmpcolor not in uniq_colors:
uniq_colors.append(tmpcolor)
idx = [colors.index(color) for color in uniq_colors]
labels = [classlabels[i] for i in idx]
for i in idx:
ax.plot(t,f[i,:],colors[i])
ax.legend(labels,loc=0)
for i in range(n):
ax.plot(t,f[i,:],colors[i])
ax.set_xlabel("$t(-\pi,\ \pi)$",fontstyle='italic')
ax.set_ylabel("$f(t)$",fontstyle='italic')
ax.grid(True)
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def plot_simple_lr(X,Y,xlabel,ylabel,color="bo",figname_prefix="out"):
slope,intercept,rvalue,pvalue,stderr = stats.linregress(X,Y)
tmpX = np.linspace(np.min(X),np.max(X),num=50)
tmpY = tmpX*slope+intercept
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(tmpX,tmpY,'k--')
ax.grid(True,color='k',alpha=0.5,ls=':')
ax.plot(X,Y,color,alpha=0.6)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title('slope:%.3g,intercept:%.3g,r:%.3g,p:%.3g,stderr:%.3g'%(slope,intercept,rvalue,pvalue,stderr))
ax.grid(True)
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def plot_linear_regress(X,Y,xlabel,ylabel,classnum,h_uniq_colors,h_uniq_classlabels,figname_prefix="out"):
##h_uniq_classlabels = {0:'class1',1:'class2'} , 0 and 1 must be the classnum
##h_uniq_colors = {0:'r^',1:'b.'}
#plt.style.use('grayscale')
if X.size != Y.size != len(classnum):
        sys.stderr.write("Error: X, Y should be same dimensions\n")
return 1
slope,intercept,rvalue,pvalue,stderr = stats.linregress(X,Y)
tmpX = np.linspace(np.min(X),np.max(X),num=50)
tmpY = tmpX*slope+intercept
uniq_classnum = list(set(classnum))
np_classnum = np.array(classnum)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(tmpX,tmpY,'k--')
ax.grid(True,color='k',alpha=0.5,ls=':')
for i in uniq_classnum:
try:
color = h_uniq_colors[i]
label = h_uniq_classlabels[i]
except:
plt.clf()
plt.close()
            sys.stderr.write("Error: key error\n")
return 1
idx = np.where(np_classnum == i)
ax.plot(X[idx],Y[idx],color,label=label,alpha=0.6)
ax.legend(loc=0,numpoints=1)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title('slope:%.3g,intercept:%.3g,r:%.3g,p:%.3g,stderr:%.3g'%(slope,intercept,rvalue,pvalue,stderr))
ax.grid(True)
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
#def plot_vec_boxplot(Xvecs,fig_prefix,xlabels,ylabel,xticks_labels,outshow=1,colors=None,ylim=0):
def CNVgenome(X,Y,segY,CNVstatus,fig_prefix,xlabel,ylabel,ylim=[],markersize=10,xlim=[]):
fig = plt.figure(dpi=300,figsize=(12,1.5))
ax = fig.add_subplot(111)
idx_base = CNVstatus == 2
ax.plot(X[idx_base],Y[idx_base],'o',markeredgecolor="None",markerfacecolor="gray",alpha=0.3,markersize=markersize)
ax.plot(X[idx_base],segY[idx_base],'o',markeredgecolor = "black", markerfacecolor = "black", alpha=0.5,markersize=max(markersize-7,1))
idx_base = CNVstatus >=3
ax.plot(X[idx_base],Y[idx_base],'o',markeredgecolor="None",markerfacecolor = "red",alpha=0.3,markersize=markersize)
ax.plot(X[idx_base],segY[idx_base],'o',markeredgecolor = "black",markerfacecolor="black",alpha=0.5,markersize=max(markersize-7,1))
idx_base = CNVstatus <=1
ax.plot(X[idx_base],Y[idx_base],'o',markeredgecolor="None",markerfacecolor = "blue",alpha=0.3,markersize=markersize)
ax.plot(X[idx_base],segY[idx_base],'o',markeredgecolor = "black",markerfacecolor="black",alpha=0.5,markersize=max(markersize-7,1))
# for freec result
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if ylim: ax.set_ylim(ylim)
if xlim: ax.set_xlim(xlim)
plt.savefig(fig_prefix+".png",format='png',dpi=300);plt.savefig(fig_prefix+".svg",format='svg',dpi=300);
plt.clf();plt.close()
return 0
def plot_boxplotscatter(X,fig_prefix,xlabel,ylabel,xticks_labels,colors=None,ylim=[],scatter=1,markersize=7,figsize=(5,4)):
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
bp = ax.boxplot(X)
colors = styles(len(xticks_labels))[0]
for box in bp['boxes']:
box.set( color='#7570b3', linewidth=2)
#box.set( facecolor = '#1b9e77')
for whisker in bp['whiskers']:
whisker.set(color='#7570b3', linewidth=2)
for median in bp['medians']:
median.set(color='red', linewidth=2)
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0)
if scatter:
for i in range(len(X)):
x = np.random.normal(i+1, 0.03, size=len(X[i]))
ax.plot(x, X[i], 'o',color=colors[i] ,alpha=0.3,markersize=markersize)
ax.set_xticklabels(xticks_labels,rotation=45,ha="right")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if ylim:
ax.set_ylim(ylim)
ax.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300);plt.savefig(fig_prefix+".svg",format='svg',dpi=300);
plt.clf();plt.close()
return 0
from rblib.plotx import retain_y,retain_xy
## statplot.CNVline(xdata,ydata,freqlables,11,"Chromsome %s"%(str(chrom)),11,sys.argv[2]+".chr%s"%(str(chrom)),[rawx_raw,],-0.5,0.5)
def chrom_scatterinfo(xdata,ydata,freqlables,xlabel,ylabel,figprefix,ylimmin=None,ylimmax=None,xlimmin=None,xlimmax=None):
fig = plt.figure(figsize=(8,3),dpi=300)
ax1 = fig.add_subplot(111)
numberscatter = len(freqlables)
hcolor = mplconfig.inscolor(freqlables)
for i in range(len(freqlables)):
ax1.plot(xdata,ydata[i],color=hcolor[freqlables[i]],linestyle="-",lw=2.0,label=freqlables[i])
ax1.set_xlabel(xlabel)
if ylimmin and ylimmax:
ax1.set_ylim(ylimmin,ylimmax)
if xlimmin is not None:
ax1.set_xlim(xlimmin,xlimmax)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_yscale("log",nonposy='clip')
retain_xy(ax1)
ax1.legend(loc=0)
ax1.grid(True,ls="--")
fig.tight_layout()
plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);plt.clf();plt.close();
return 0
def CNVline(xdata,ydata,zdata,xtickslabels,xlabel,ylabel,figprefix,lineplot,ylimmin=-1,ylimmax=1):
fig = plt.figure(figsize=(8,3),dpi=300)
ax1 = fig.add_subplot(111)
ax1.plot(xdata,ydata,color="#EF0000",linestyle="-",lw=2.0)
ax1.plot(xdata,zdata*1,color = "#0076AE",linestyle="-",lw=2.0)
ax1.fill(xdata,ydata,"#EF0000",xdata,zdata,"#0076AE")
#ax1.fill(xdata,zdata,color="#0076AE")
ax1.set_xlabel(xlabel)
for x in lineplot:
ax1.plot([x,x],[-1,1],color="gray",ls="--",linewidth=0.5)
ax1.set_ylim(ylimmin,ylimmax)
ax1.set_ylabel("Frequency")
retain_y(ax1)
#fig.tight_layout()
plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);
plt.clf();plt.close()
#ax.plot(xp,yp,color=colors[n-i-1],linestyle='--',lw=1.0)
return 0
#def plot_boxplotgroup(X,groups,legendgroups,fig_prefix,xlabel,ylabel,xticks_labels,outshow=1,colors=None,ylim=1):
def plotenrich_qipao(plotdatax,figprefix,xlabel,figsize=(8,6),aratio=1.0,color="#3E8CBF"):
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
x = []
y = []
area = []
labels = []
logpvalues = []
nums = []
xx = 0
for i in plotdatax:
item,logpvalue,logqvalue,num,M = i
print(item,logqvalue)
x.append(logqvalue)
xx += 1
y.append(xx)
area.append((M*10 + 20)*aratio)
labels.append(item)
logpvalues.append(logpvalue)
nums.append(M)
if color == None:
cminstance = cm.get_cmap("Spectral") ######("Purples")
cplot = ax.scatter(x,y,s=area,alpha=0.8, c=logpvalues,cmap=cminstance,vmin=0, vmax=np.max(logpvalues),edgecolors="black",linewidths=0.5)
cb = fig.colorbar(cplot,ax=ax,fraction=0.15,shrink=0.25,aspect=6,label='-log$_{10}$p-value')
#cb.ax.yaxis.set_ticks_position('right')
#print cb.ax
else:
cplot = ax.scatter(x,y,s=area,alpha=0.8,c=color)
ax.set_xlabel(xlabel)
ax.set_yticks(np.arange(len(plotdatax))+1)
ax.set_yticklabels(labels)
ax.grid(False)
ax.set_ylim(0,len(plotdatax) + 1)
a,b = ax.get_xlim()
ax.set_xlim(a-(b-a)*0.15,b+(b-a)*0.15)
fig.tight_layout()
plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);
plt.clf();plt.close()
return 0
def plot_scatter_qipao(x,y,pvalue,status,figprefix,xlabel,ylabel,figsize=(8,6)): # status = 1 and -1
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
pvaluetrans = np.log(pvalue) * -1 + 60
maxpvaluetrans = np.max(pvaluetrans)
minpvaluetrans = np.min(pvaluetrans)
#print pvaluetrans
#print maxpvaluetrans
#print minpvaluetrans
#print (pvaluetrans - minpvaluetrans) / (maxpvaluetrans - minpvaluetrans)
x = np.asarray(x)
y = np.asarray(y)
pvaluetransed = np.int64(4.5**((pvaluetrans - minpvaluetrans) / (maxpvaluetrans - minpvaluetrans) * 3))
#nx = np.asarray(x) + (np.random.rand(len(x))-0.5) * 0.05
#ny = np.asarray(y) + (np.random.rand(len(y))-0.5) * 0.05
#nx[nx<0] = 0
#ny[ny<0] = 0
#nx = x
#ny = y
cplot = ax.scatter(x,y,s=pvaluetransed*15,alpha=0.8,c = (np.asarray(y)-np.asarray(x))/2,cmap=cm.Spectral,edgecolors="black",linewidths=0.5)#cmap=cm.gist_earth)
#ax.plot([0,1],[0,1],'--',color="gray")
cb = fig.colorbar(cplot,ax=ax)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
min_all = min(np.min(x),np.min(y))-0.02
max_all = max(np.max(x),np.max(y))+0.02
ax.plot([min_all,max_all],[min_all,max_all],'--',color="gray")
#ax.set_xlim(-0.02,0.3)
#ax.set_ylim(-0.02,0.3)
ax.grid(True,ls='--')
fig.tight_layout()
plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);
plt.clf();plt.close();
return 0
def adjacent_values(xmin,xmax, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, xmax)
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, xmin, q1)
return lower_adjacent_value, upper_adjacent_value
def plot_dfboxplot(df,fig_prefix,xlabel,ylabel,outshow=False,colors=None,ylim=[],markersize=8,showmeans=False,showscatter=False,figsize=(3,8),violin=0,uniq_xticklabels=None,linewidths=0.0,rotation=45):
#plt.style.use('bmh')
if uniq_xticklabels is None:
uniq_xticklabels = sorted(set(df["xlabelticks"]))
nxticklabels = len(uniq_xticklabels)
uniq_legends = sorted(set(df["group"]))
nlegends = len(uniq_legends)
if colors == None:
colors = styles(nlegends)[0]
if nlegends == 1:
colors = styles(nxticklabels)[0]
xpos = np.arange(nxticklabels)
width = 0.9/nlegends
if showscatter: alpha=0.4
else:alpha=0.5
fig = plt.figure(figsize=figsize,dpi=300) # 3,8
ax = fig.add_subplot(111)
slplot = []
for i in range(nlegends):
x = []
for j in range(nxticklabels):
tmpdata = df[ (df["xlabelticks"] == uniq_xticklabels[j]) & (df["group"] == uniq_legends[i])]["data"].values
x.append(tmpdata)
if not violin:
print(i)
bp = ax.boxplot(x,widths=width,positions=xpos+width*i,showmeans=showmeans,meanline=showmeans,notch=False,showfliers=outshow)
## violinplot(data, pos, points=20, widths=0.3, showmeans=True, showextrema=True, showmedians=True)
plt.setp(bp['boxes'], color="black",linewidth=1.0); plt.setp(bp['whiskers'], color='black',linewidth=1.0); plt.setp(bp['medians'], color='black',linewidth=1.0)
if outshow: plt.setp(bp['fliers'], color=colors[i], marker='o',markersize=6)
#box = bp['boxes']
for j in range(nxticklabels):
if nlegends > 1: ploti = i
else: ploti = j
if showscatter:
tx = x[j]
ttx = np.random.normal(j+width*i, width/10, size=len(tx))
#ax.plot(ttx, tx, 'o',color=colors[ploti] ,alpha=0.3,markersize=markersize)
ax.scatter(ttx,tx,marker='o',color=colors[ploti],alpha=alpha,s=markersize,linewidths=linewidths)
box = bp['boxes'][j]
boxX = box.get_xdata().tolist(); boxY = box.get_ydata().tolist(); boxCoords = list(zip(boxX,boxY));
boxPolygon = Polygon(boxCoords, facecolor=colors[ploti],alpha=alpha)
ax.add_patch(boxPolygon)
sp, = ax.plot([1,1],'o',color=colors[ploti])
slplot.append(sp)
else:
vp = ax.violinplot(x,xpos+width*i,widths=width,showmeans=False, showmedians=False,showextrema=False)
for j in range(nxticklabels):
if nlegends > 1: ploti = i
else: ploti = j
pc = vp['bodies'][j]
pc.set_facecolor(colors[ploti]); pc.set_edgecolor('black');pc.set_alpha(0.5)
xmin = []; xmax = []; xquartile1 = []; xmedians = []; xquartile3 = []
for xi in x:
quartile1, medians, quartile3 = np.percentile(xi, [25, 50, 75])
xmin.append(np.min(xi)); xmax.append(np.max(xi))
xquartile1.append(quartile1); xmedians.append(medians); xquartile3.append(quartile3)
whiskers = np.array([adjacent_values(x_min,x_max, q1, q3) for x_min,x_max, q1, q3 in zip(xmin,xmax, xquartile1, xquartile3)])
whiskersMin, whiskersMax = whiskers[:, 0], whiskers[:, 1]
ax.scatter(xpos+width*i, xmedians, marker='o', color='black', s=30, zorder=3)
ax.vlines(xpos+width*i, xquartile1, xquartile3, color='white', linestyle='-', lw=5)
ax.vlines(xpos+width*i, whiskersMin, whiskersMax, color='white', linestyle='-', lw=1)
sp, = ax.plot([1,1],'o',color=colors[ploti]) # ax.plot(xarr,yarr,'ko',label="others",markeredgecolor='k',markersize=3,alpha=0.5)
#sp = ax.scatter([1,1],[1,1], marker='o',color=colors[ploti])
slplot.append(sp)
"""
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True)
fig.tight_layout(rect = [0,0,1,0.9])
"""
if nlegends > 1:
ax.legend(slplot,uniq_legends,loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True,numpoints=1)
for sp in slplot: sp.set_visible(False)
ax.set_xticks(xpos+width/2*(nlegends-1))
hafmt = "right" if rotation in [0,90] else "center" ### xticklabel position set
ax.set_xticklabels(uniq_xticklabels,rotation=rotation,ha="center")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0-width*1.4/2,xpos[-1]+1-width/2)
if ylim:ax.set_ylim(ylim[0],ylim[-1])
ax.grid(False)
#ax.grid(True,axis='y')
if nlegends > 1:
fig.tight_layout(rect = [0,0,1,0.9])
else:
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300);plt.savefig(fig_prefix+".svg",format='svg',dpi=300);plt.clf();plt.close();
return 0
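# Usage sketch for plot_dfboxplot (illustrative): the DataFrame must carry the three columns
# read above ("xlabelticks", "group", "data"); assumes pandas is imported as pd at module top.
# df = pd.DataFrame({"xlabelticks": ["t1"]*20 + ["t2"]*20,
#                    "group": (["ctrl"]*10 + ["case"]*10) * 2,
#                    "data": np.random.randn(40)})
# plot_dfboxplot(df, "expr_box", xlabel="", ylabel="Expression", showscatter=True)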
def plot_boxplot(Xnp,fig_prefix,xlabel,ylabel,xticks_labels,outshow=1,colors=None,ylim=1,figsize=(6,5)):
fig = plt.figure(dpi=300,figsize=figsize)
ax1 = fig.add_subplot(111)
if outshow == 1:
bp = ax1.boxplot(Xnp.T)
plt.setp(bp['boxes'], color='white')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
else:
bp = ax1.boxplot(Xnp.T,0,'')
n,p = Xnp.shape
if colors == None:
colors = color_grad(n,cm.Paired)
for i in range(n):
box = bp['boxes'][i]
boxX = box.get_xdata().tolist()
boxY = box.get_ydata().tolist()
        boxCoords = list(zip(boxX,boxY))
boxPolygon = Polygon(boxCoords, facecolor=colors[i])
ax1.add_patch(boxPolygon)
ax1.set_xticklabels(xticks_labels,rotation=45)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
if ylim:
ax1.set_ylim(-10,10)
ax1.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
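# Usage sketch for plot_boxplot (illustrative): rows of Xnp are the groups to compare, since
# Xnp.T is handed to boxplot; passing ylim=0 skips the hard-coded (-10,10) y-range.
# X = np.random.randn(3, 50)
# plot_boxplot(X, "group_box", xlabel="", ylabel="Value",
#              xticks_labels=["g1", "g2", "g3"], ylim=0)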
def plot_Xscore(Xnp,classnums,uniqclassnum,uniqcolor,uniqmarker,uniqclasslabel,fig_prefix,xlabel,ylabel,zlabel=None,dim=2,figsize=(5,4),markersize=30):
#plt.style.use('grayscale')
leng = len(uniqclassnum)
Xnp = np.asarray(Xnp)
fig = plt.figure(figsize=figsize,dpi=300)
if dim == 3:
ax1 = fig.add_subplot(111,projection ='3d')
elif dim==2:
ax1 = fig.add_subplot(111)
else:
sys.stderr.write("[ERROR] Dim '%d' plot failed\n"%dim)
return 1
for i in range(leng):
tmpclassidx = np.array(classnums) == uniqclassnum[i]
tmplabel = uniqclasslabel[i]
tmpcolor = uniqcolor[i%(len(uniqcolor))]
tmpmarker = uniqmarker[i%(len(uniqmarker))]
if dim == 2:
ax1.scatter(Xnp[tmpclassidx,0],Xnp[tmpclassidx,1],color=tmpcolor,marker=tmpmarker,label=tmplabel,alpha=0.7,s=markersize)
ax1.grid(True)
else:ax1.scatter(Xnp[tmpclassidx,0],Xnp[tmpclassidx,1],Xnp[tmpclassidx,2],color=tmpcolor,marker=tmpmarker,label=tmplabel,alpha=0.7,s=markersize) # markerfacecolor=tmpcolor
ax1.legend(loc=0,numpoints=1)
ax1.grid(True,ls='--')
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
if dim == 3 and zlabel !=None:
ax1.set_zlabel(zlabel)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def plot_XYscore(Xnp,Y,classnums,uniqclassnum,uniqcolor,uniqmarker,uniqclasslabel,fig_prefix,xlabel,ylabel,zlabel=None,dim=2,figsize=(5,4)):
Xnp[:,dim-1] = Y[:,0]
return plot_Xscore(Xnp,classnums,uniqclassnum,uniqcolor,uniqmarker,uniqclasslabel,fig_prefix,xlabel,ylabel,zlabel,dim,figsize=figsize)
def plot_markxy(X1,Y1,X2,Y2,xlabel,ylabel,fig_prefix):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
ax.plot(X1,Y1,'b+')
ax.plot(X2,Y2,'ro')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def draw_lines(data,xlabels,legends,ylabel,fig_prefix,colors=None,markers=None,lstyles=None,figsize=(5,4),linewidth=2.0,alpha=0.8,rotation=45):
n,p = data.shape
ret_color,ret_lines,ret_marker = styles(n)
if colors is not None:
ret_color = makestyles(colors,n)
if lstyles is not None:
ret_lines = makestyles(lstyles,n)
if markers is not None:
ret_marker= makestyles(markers,n)
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
xloc = list(range(p))
for i in range(n):
tmpdata = data[i,:]
ax.plot(xloc,tmpdata,ls=ret_lines[i],color=ret_color[i],label=legends[i])
#ax.plot(xloc,tmpdata,ls=ret_lines[i],marker=ret_marker[i],markerfacecolor=ret_color[i],markeredgecolor=ret_color[i],color=ret_color[i],label=legends[i])
# ls='--',marker='.',markerfacecolor=linecolor,markeredgecolor=linecolor,color=linecolor
ax.set_ylabel(ylabel)
    ax.set_xticks(np.arange(0,p))
    ax.set_xticklabels(xlabels,ha="right",rotation=rotation)
    ax.set_xlim(-0.5,p-0.5)
yrange = np.max(data) - np.min(data)
ax.set_ylim(np.min(data)-yrange*0.1,np.max(data)+yrange*0.1)
ax.legend(loc=0)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def plotline(Xvector,Ys,fig_prefix,xlabel,ylabel,colors,legends=None,title=None,xlimmax = None,ylimmax = None, figsize=(6,4),linewidth=1.0,xlim=[]):
n,p = Ys.shape
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
if legends is not None:
leng = len(legends)
else:
leng = 0
for i in range(n):
if i < leng:
tmplabel = legends[i]
ax.plot(Xvector,Ys[i,:],colors[i],label=tmplabel,linewidth=linewidth)
else:
ax.plot(Xvector,Ys[i,:],colors[i],linewidth=linewidth)
if legends != None:
ax.legend(loc=0)
#ax.grid()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if title != None:
ax.set_title(title)
if ylimmax:
ax.set_ylim(0,ylimmax)
if xlimmax:
ax.set_xlim(0,p)
if xlim:
ax.set_xlim(xlim[0],xlim[-1])
ax.grid(True,ls='--')
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def plot_time_pos(hdata,fig_prefix,xlabel,colors=None,t1color=None,t2color=None,figsize=(6,4),width=5.0,hcolor=None,hshape=None):
fig = plt.figure(dpi=300,figsize=figsize)
timepos = []
for pid in hdata:
timepos.append([pid,hdata[pid][0],hdata[pid][1]])
timepos_sort = utils.us_sort(timepos,1,2)
ax = fig.add_subplot(111)
idx = 0
vmin = np.inf
vmax = -np.inf
if colors is None:
pcolors = styles(len(hdata))[0]
else:
pcolors = [colors,] * len(hdata)
for pid,start,end in timepos_sort:
idx += 1
pdtp1tp2 = hdata[pid]
# ax.plot([start,end],[idx,idx],pcolors,linewidth=linewidth)
vmin = np.min([vmin,start])
vmax = np.max([vmax,end])
ax.arrow(start,idx,end-start,0,fc=pcolors[idx-1], ec=pcolors[idx-1],lw=0.5,ls='-',width=width,head_width=width,head_length=0,shape='full',alpha=0.2,length_includes_head=True)
# hcolor to plot, hshape to plot
tmpall = hdata[pid][2]
for ttimepos,tclin,tissue in tmpall:
ax.scatter([ttimepos],[idx,],marker=hshape[tclin],color=hcolor[tissue],s=30) # ax.scatter(ttx,tx,marker='o',color=colors[ploti],alpha=alpha,s=markersize,linewidths=linewidths)
ax.set_yticks(np.arange(1,idx+1,1))
ax.yaxis.set_ticks_position('left')
yticklabels = ax.set_yticklabels([t[0] for t in timepos_sort])
ax.set_ylim(0,idx+1)
ax.set_xlim(vmin,vmax)
ax.grid(True,ls='--',axis='y')
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def barh_dict_class(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.4,legends=[],colors=[],fmt="%.2f",ylog=0,rotation=0,plot_txt = 1):
data = []
yticklabels = []
classnames = []
classnumbers = [0] * len(hdata.keys())
if not colors:
color_class = cm.Paired(np.linspace(0, 1, len(hdata.keys())))
else:
color_class = colors
idx = 0
plot_idx = []
plot_start = 0
for classname in sorted(hdata.keys()):
classnames.append(classname)
for key in hdata[classname]:
if hdata[classname][key] <=0:continue
yticklabels.append(key)
classnumbers[idx] += 1
data.append(hdata[classname][key])
plot_idx.append([plot_start,len(data)])
plot_start += len(data)-plot_start
idx += 1
if len(data) > 16:
fig = plt.figure(figsize=(5,15),dpi=300)
fontsize_off = 2
else:
fig = plt.figure(figsize=(5,7),dpi=300)
ax = fig.add_subplot(111)
linewidth = 0
alpha=0.8
ylocations = np.arange(len(data))+width*2
rects = []
for i in range(len(plot_idx)):
s,e = plot_idx[i]
rect = ax.barh(ylocations[s:e],np.asarray(data[s:e]),width,color=color_class[i],linewidth=linewidth,alpha=alpha,align='center')
rects.append(rect)
ax.set_yticks(ylocations)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ylabelsL = ax.set_yticklabels(yticklabels)
ax.set_ylim(0,ylocations[-1]+width*2)
tickL = ax.yaxis.get_ticklabels()
for t in tickL:
t.set_fontsize(t.get_fontsize() - 2)
ax.xaxis.grid(True)
ax.legend(classnames,loc=0,fontsize=8)
#print fig.get_size_inches()
fig.set_size_inches(10,12)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def bar_dict_class(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.35,legends=[],colors=[],fmt="%.2f",ylog=0,rotation=0,plot_txt = 1):
data = []
xticklabels = []
classnames = []
classnumbers = [0] * len(hdata.keys())
if not colors:
color_class = cm.Paired(np.linspace(0, 1, len(hdata.keys())))
else:
color_class = colors
idx = 0
plot_idx = []
plot_start = 0
for classname in sorted(hdata.keys()):
flagxx = 0
for key in hdata[classname]:
if hdata[classname][key] <=0:continue
xticklabels.append(key)
classnumbers[idx] += 1
data.append(hdata[classname][key])
flagxx = 1
if flagxx:
plot_idx.append([plot_start,len(data)])
plot_start += len(data)-plot_start
idx += 1
classnames.append(classname)
fontsize_off = 2
if len(data) > 16:
fig = plt.figure(figsize=(10,5),dpi=300)
fontsize_off = 3
else:
fig = plt.figure(figsize=(7,5),dpi=300)
ax = fig.add_subplot(111)
if ylog:
        ax.set_yscale("log",nonpositive='clip')
linewidth = 0
alpha=0.8
xlocations = np.arange(len(data))+width*2
#rects = ax.bar(xlocations,np.asarray(data),width,color=plot_colors,linewidth=linewidth,alpha=alpha,align='center')
rects = []
for i in range(len(plot_idx)):
s,e = plot_idx[i]
rect = ax.bar(xlocations[s:e],np.asarray(data[s:e]),width,color=color_class[i],linewidth=linewidth,alpha=alpha,align='center')
rects.append(rect)
max_height = 0
if plot_txt:
for rk in rects:
for rect in rk:
height = rect.get_height()
if height < 0.1:continue
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=(8-fontsize_off))
ax.set_xticks(xlocations)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if rotation == 0 or rotation == 90:hafmt="center"
else:hafmt="right"
xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
#print xlocations
ax.set_xlim(0,xlocations[-1]+width*2)
tickL = ax.xaxis.get_ticklabels()
for t in tickL:
t.set_fontsize(t.get_fontsize() - 2)
ax.yaxis.grid(True)
#print classnames
if ylog:
ax.set_ylim(0.99,np.max(data)*2)
else:
ax.set_ylim(0,np.max(data)*1.35)
ax.legend(classnames,fancybox=True, loc=0, fontsize=(8-fontsize_off))
#ax.legend(classnames,loc='upper center', bbox_to_anchor=(0.5, 1.0),ncol=6,fancybox=True, shadow=True)
#else:
#ax.xaxis.set_major_locator(plt.NullLocator())
    plt.tick_params(axis='x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            labelbottom=True)  # labels along the bottom edge stay on
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
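# Usage sketch for bar_dict_class (illustrative): outer keys are the classes shown in the
# legend, inner keys the individual bars; non-positive values are skipped by the loop above.
# hdata = {"coding": {"exon": 120, "UTR": 45}, "noncoding": {"lincRNA": 60}}
# bar_dict_class(hdata, "feature_counts", xlabel="Feature", ylabel="Count", rotation=45)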
def lineraworder(data,xticklabels,fig_prefix,xlabel,ylabel,title = "",width=0.4,fmt="%.2f",ylog=0,rotation=0,linecolor="r",ls="--",marker='.'):
fig = plt.figure(figsize=(7,5),dpi=300)
ax = fig.add_subplot(111)
    if ylog: ax.set_yscale("log",nonpositive='clip')
linewidth = 0; alpha=1.0
if not linecolor:
linecolor = styles(len(data))[0]
xlocations = np.arange(len(data))+width*2
ax.plot(xlocations,data,ls=ls,marker=marker,markerfacecolor=linecolor,markeredgecolor=linecolor,color=linecolor)
ax.set_xticks(xlocations);ax.set_ylabel(ylabel); ax.set_xlabel(xlabel);
ax.set_xlim(0,xlocations[-1]+width*2);fig.tight_layout();
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def lineplot(data,labels,fig_prefix,xlabel,ylabel,title = "",width=0.4,fmt="%.2f",ylog=0,rotation=0):
fig = plt.figure(figsize=(7,6),dpi=300)
ax = fig.add_subplot(111)
    if ylog: ax.set_yscale("log",nonpositive='clip')
linewidth = 0; alpha=1.0
n,p = data.shape
linecolors,lses,markers = styles(p)
assert p >=2
for i in range(1,p):
ax.plot(data[:,0],data[:,i],ls=lses[i],marker=markers[i],markerfacecolor=linecolors[i],markeredgecolor=linecolors[i],color=linecolors[i],label=labels[i])
ax.set_ylabel(ylabel); ax.set_xlabel(xlabel);
ax.legend(loc=0,numpoints=1) # ax.legend(labels,loc=0,numpoints=1)
ax.grid(True)
fig.tight_layout();
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def barlineraworder(data,xticklabels,fig_prefix,xlabel,ylabel,title = "",width=0.4,colors=[],fmt="%.2f",ylog=0,rotation=0,linecolor="r",figsize=(7,5)):
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
    if ylog: ax.set_yscale("log",nonpositive='clip')
linewidth = 0; alpha=1.0
if not colors:
colors = styles(len(data))[0]
xlocations = np.arange(len(data))+width*2
rects = ax.bar(xlocations,np.asarray(data),width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
idxtmp = 0
for rect in rects:
height = rect.get_height()
idxtmp += 1
if height < 0.1:continue
if data[idxtmp-1] < 0:
height = -1 * height
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='top',fontsize=10)
else:
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=10)
ax.plot(xlocations,data,ls='--',marker='.',markerfacecolor=linecolor,markeredgecolor=linecolor,color=linecolor)
ax.set_xticks(xlocations)
ax.set_ylabel(ylabel); ax.set_xlabel(xlabel)
if rotation == 0 or rotation == 90:
hafmt='center'
else:hafmt = 'right'
xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
ax.set_title(title)
ax.set_xlim(0,xlocations[-1]+width*2)
#tickL = ax.xaxis.get_ticklabels()
#for t in tickL:
# t.set_fontsize(t.get_fontsize() - 2)
ax.yaxis.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def bar_dict(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.4,legends=[],colors=[],fmt="%.2f",ylog=0,hlist=None,rotation=0,filter_flag=1):
data = []
xticklabels = []
if hlist == None:
for key in sorted(hdata):
if hdata[key] <=0 and filter_flag:
continue
xticklabels.append(key)
data.append(hdata[key])
else:
for key in sorted(hlist):
if hdata[key] <=0 and filter_flag:
continue
xticklabels.append(key)
data.append(hdata[key])
fig = plt.figure(figsize=(7,5),dpi=300)
ax = fig.add_subplot(111)
if ylog:
        ax.set_yscale("log",nonpositive='clip')
linewidth = 0
alpha=1.0
if not colors:
colors = cm.Accent(np.linspace(0, 1, len(data)))
xlocations = np.arange(len(data))+width*2
rects = ax.bar(xlocations,np.asarray(data),width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
idxtmp = 0
for rect in rects:
height = rect.get_height()
idxtmp += 1
if height < 0.1:continue
if data[idxtmp-1] < 0:
height = -1 * height
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='top',fontsize=8)
else:
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
ax.set_xticks(xlocations)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if rotation == 0 or rotation == 90:
hafmt='center'
else:
hafmt = 'right'
xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
#if rotation:
# for label in xlabelsL:
# label.set_rotation(rotation)
ax.set_title(title)
ax.set_xlim(0,xlocations[-1]+width*2)
tickL = ax.xaxis.get_ticklabels()
for t in tickL:
t.set_fontsize(t.get_fontsize() - 2)
ax.yaxis.grid(True)
#ax.set_adjustable("datalim")
if ylog and filter_flag:
ax.set_ylim(0.99,np.max(data)*2)
elif filter_flag:
ax.set_ylim(0,np.max(data)*1.5)
#ax.set_ylim(ymin=0)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
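# Usage sketch for bar_dict (illustrative): keys become the x tick labels (sorted, or ordered
# by hlist when given) and non-positive values are dropped while filter_flag is set.
# hdata = {"deletion": 12, "insertion": 25, "SNV": 140}
# bar_dict(hdata, "variant_counts", xlabel="Variant type", ylabel="Count", rotation=45)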
def cluster_stackv_bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.7,legends=[],colors=[],scale=0,rotation=0,nocluster=0,noline=0):
Xnpdata = data.T.copy()
#Xnpdata = np.random.random((12,9))
lfsm = 4#8
if len(xticks_labels) > 40:
lfsm = int(len(xticks_labels) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
widsm = 8#8
fig = plt.figure(figsize=(widsm,lfsm))
stackmapGS = gridspec.GridSpec(1,2,wspace=0.0,hspace=0.0,width_ratios=[0.15,1])
if not nocluster:
col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnpdata,'euclidean'))
#print col_pairwise_dists
col_clusters = linkage(col_pairwise_dists,method='ward')
col_denAX = fig.add_subplot(stackmapGS[0,0])
col_denD = dendrogram(col_clusters,orientation='left')
col_denAX.set_axis_off()
n,p = data.shape
ind = np.arange(p)
if not nocluster:
tmp = np.float64(data[:,col_denD['leaves']])
else:
tmp = data
if scale:
tmp = tmp/np.sum(tmp,0)*100
if not colors:
colors = styles(n)[0]
lfsm = 8
stackvAX = fig.add_subplot(stackmapGS[0,1])
linewidth = 0
alpha=0.8
def plot_line_h(ax,rects):
for i in range(len(rects)-1):
rk1 = rects[i]
rk2 = rects[i+1]
x1 = rk1.get_x()+rk1.get_width()
y1 = rk1.get_y()+rk1.get_height()
x2 = rk2.get_x()+rk2.get_width()
y2 = rk2.get_y()
ax.plot([x1,x2],[y1,y2],'k-',linewidth=0.4)
return 0
for i in range(n):
if i:
cumtmp = cumtmp + np.asarray(tmp[i-1,:])[0]
rects = stackvAX.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,left=cumtmp,align='edge',label=legends[i])
if not noline:plot_line_h(stackvAX,rects)
else:
cumtmp = 0
rects = stackvAX.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='edge',label=legends[i])
if not noline:plot_line_h(stackvAX,rects)
stackvAX.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,fancybox=True, shadow=True)
stackvAX.set_ylim(0-(1-width),p)
#clean_axis(stackvAX)
#stackvAX.set_ylabel(xlabel)
#stackvAX.set_yticks(ind)
#stackvAX.set_yticklabels(xticks_labels,rotation=rotation)
if scale:
stackvAX.set_xlim(0,100)
if nocluster:
t_annonames = xticks_labels
else:
t_annonames = [xticks_labels[i] for i in col_denD['leaves']]
stackvAX.set_yticks(np.arange(p)+width/2)
stackvAX.yaxis.set_ticks_position('right')
stackvAX.set_yticklabels(t_annonames)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def stackv_bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.8,legends=[],colors=[],scale=0,rotation=45,orientation="vertical",legendtitle="",figsize=(8,6)):
"""orientation is "vertical" or horizontal"""
n,p = data.shape
ind = np.arange(p)
tmp = np.float64(data.copy())
#tmp = np.cumsum(data,0)
#print tmp - data
if scale:
tmp = tmp/np.sum(tmp,0)*100
#print tmp
#tmp = np.cumsum(tmp,0)
if not colors:
#colors = cm.Dark2(np.linspace(0, 1, n))
colors = styles(n)[0]
if figsize is None:
lfsm = 6
widsm = 8
if len(xticks_labels) > 40:
lfsm = int(len(xticks_labels) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
else:
widsm, lfsm = figsize
if orientation == "vertical":
fig = plt.figure(figsize=(widsm,lfsm),dpi=300)
elif orientation == "horizontal":
fig = plt.figure(figsize=(widsm,lfsm),dpi=300)
ax = fig.add_subplot(121)
linewidth = 0
alpha=1.0
def plot_line_h(ax,rects):
for i in range(len(rects)-1):
rk1 = rects[i]
rk2 = rects[i+1]
x1 = rk1.get_x()+rk1.get_width()
y1 = rk1.get_y()+rk1.get_height()
x2 = rk2.get_x()+rk2.get_width()
y2 = rk2.get_y()
ax.plot([x1,x2],[y1,y2],'k-',linewidth=0.4)
return 0
def plot_line_v(ax,rects):
for i in range(len(rects)-1):
rk1 = rects[i]
rk2 = rects[i+1]
x1 = rk1.get_y()+ rk1.get_height()
y1 = rk1.get_x()+rk1.get_width()
x2 = rk2.get_y()+rk2.get_height()
y2 = rk2.get_x()
ax.plot([y1,y2],[x1,x2],'k-',linewidth=0.4)
for i in range(n):
if i:
cumtmp = cumtmp + np.asarray(tmp[i-1,:])[0]
if orientation == "vertical":
rects = ax.bar(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,bottom=cumtmp,align='center',label=legends[i])
#for rk in rects:
# print "h",rk.get_height()
# print "w",rk.get_width()
# print "x",rk.get_x()
# print "y",rk.get_y()
#break
if scale:
plot_line_v(ax,rects)
elif orientation == "horizontal":
rects = ax.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,left=cumtmp,align='center',label=legends[i])
if scale:
plot_line_h(ax,rects)
#for rk in rects:
#print "h",rk.get_height()
#print "w",rk.get_width()
#print "x",rk.get_x()
#print "y",rk.get_y()
else:
cumtmp = 0
#print ind,np.asarray(tmp[i,:])[0]
if orientation == "vertical":
rects = ax.bar(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='center',label=legends[i])
if scale:
plot_line_v(ax,rects)
elif orientation == "horizontal":
rects = ax.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='center',label=legends[i])
if scale:
plot_line_h(ax,rects)
#ax.legend(loc=0)
if orientation == "vertical":
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_xticks(ind)
ax.set_xticklabels(xticks_labels,rotation=rotation,ha="right")
if scale:
ax.set_ylim(0,100)
ax.set_xlim(0-1,p)
else:
ax.set_xlim(0-1,p)
else:
ax.set_ylabel(xlabel)
ax.set_xlabel(ylabel)
ax.set_yticks(ind)
ax.set_yticklabels(xticks_labels,rotation=rotation)
if scale:
ax.set_xlim(0,100)
ax.set_ylim(0-1,p)
else:
ax.set_ylim(0-1,p)
ax.set_title(title)
#ax.grid(True)
#ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True, shadow=True, handlelength=1.1)
#ax.legend(loc=0, fancybox=True, bbox_to_anchor=(1.02, 1),borderaxespad=0)
plt.legend(title=legendtitle,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def bar_group(data,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="bar_group",title=None,width=0.3,ylog=0,text_rotation=0):
num_groups,p = data.shape
assert num_groups == len(group_label)
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(p)
rects = []
if colors == None:
"""
110 def color_grad(num,colorgrad=cm.Set2):
111 color_class = cm.Set2(np.linspace(0, 1, num))
112 return color_class
"""
colors = color_grad(num_groups,colorgrad="Dark2")
for i in range(num_groups):
rect=ax.bar(xlocations+width*i, np.asarray(data)[i,:], width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])
rects.append(rect)
for rk in rects:
for rect in rk:
height = rect.get_height()
if height < 0.0001:continue
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, "%.0f"%float(height),ha='center', va='bottom',fontsize=(8-0),rotation=text_rotation)
ax.legend(group_label,loc=0)
ax.set_xticks(xlocations+width/2*num_groups)
ax.set_xticklabels(xticklabel,ha="right",rotation=45)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if ylog:
ax.set_yscale("log")
ax.grid(True)
if title is not None:ax.set_title(title)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def err_line_group(data,error,group_label,xticklabel,xlabel,ylabel,colors,fig_prefix,title=None,xlim=None,ylim=None,figsize=(5,4)):
num_groups,p = data.shape
assert num_groups == len(group_label)
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
xlocations = np.arange(p) + 1
ret_color,ret_lines,ret_marker = styles(num_groups)
for i in range(num_groups):
ax.errorbar(xlocations,data[i,:],yerr=error[i,:],marker=ret_marker[i],ms=8,ls='dotted',color=ret_color[i],capsize=5,alpha=0.6,label=group_label[i])
ax.legend(group_label,loc=0)
ax.set_xticks(xlocations)
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if title is not None:ax.set_title(title)
xregion = (xlocations[-1] - xlocations[0]) * 0.1
if xlim == None:
ax.set_xlim(xlocations[0]-xregion,xlocations[-1]+xregion)
yregion = np.max(data) - np.min(data)
if ylim == None:
ax.set_ylim(np.min(data)-yregion*0.1,np.max(data) + yregion*0.1)
fig.tight_layout()
ax.grid(False)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def err_line_group_low_up(data,lower,upper,group_label,xticklabel,xlabel,ylabel,fig_prefix="test",title=None,xlim=None,ylim=None,figsize=(5,4),ylog=1):
num_groups,p = data.shape
assert num_groups == len(group_label)
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
xlocations = np.arange(p) + 1
ret_color,ret_lines,ret_marker = styles(num_groups)
tmperr = np.zeros((2,p))
tmpwidth = 0.95/num_groups
for i in range(num_groups):
tmperr[0,:] = lower[i,:]
tmperr[1,:] = upper[i,:]
ax.errorbar(xlocations + tmpwidth *i ,data[i,:],yerr=tmperr,marker=ret_marker[i],ms=8,color=ret_color[i],capsize=5,alpha=0.8,label=group_label[i])
ax.legend(group_label,loc=0)
ax.set_xticks(xlocations+tmpwidth*num_groups/2)
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if title is not None:ax.set_title(title)
xregion = (xlocations[-1]+0.95 - xlocations[0]) * 0.1
if xlim is None:
ax.set_xlim(xlocations[0]-xregion,xlocations[-1]+xregion)
yregion = np.max(upper) - np.min(lower)
if ylim is None:
ax.set_ylim(np.min(data)-yregion*0.1,np.max(data) + yregion*0.1)
if ylog:
ax.set_yscale('log')
fig.tight_layout()
ax.grid(False)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def bargrouperr(data,yerror=None,xlabel=None,ylabel=None,colors = None,fig_prefix="test",title=None,width=None,figsize=(5,4),rotation=0):
groupnames = data.columns
xticklabels = data.index
num_groups = len(groupnames)
if colors is None: colors = styles(num_groups)[0]
if width is None: width = 0.95/num_groups
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
xlocations = np.arange(len(xticklabels))
for i in range(num_groups):
groupname = groupnames[i]
if yerror is None:
ax.bar(xlocations+width*i, data.loc[:,groupname].tolist(),width=width,linewidth=1.0,facecolor=colors[i],edgecolor='black',alpha=0.6,label=groupnames[i])
else:
yerrlim = np.zeros((2,len(yerror.loc[:,groupname].tolist())))
yerrlim[1,:] = np.float64(yerror.loc[:,groupname].tolist())
ax.bar(xlocations+width*i, data.loc[:,groupname].tolist(),yerr=yerrlim,capsize=10,error_kw={'elinewidth':1.0,'capthick':1.0,},width=width,linewidth=1.0,facecolor=colors[i],edgecolor='black',ecolor=colors[i],alpha=0.6,label=groupnames[i])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True)
ax.set_xticks(xlocations+width/2*(num_groups-1))
ax.set_xticklabels(xticklabels,rotation=rotation)
if xlabel is not None:ax.set_xlabel(xlabel)
if ylabel is not None:ax.set_ylabel(ylabel)
if title is not None: ax.set_title(title)
ax.set_xlim(0-width*0.75,xlocations[-1]+(num_groups-1+0.75)*width)
fig.tight_layout()
#ax.grid(True,axis="y")
fig.tight_layout(rect = [0,0,1,0.9])
plt.savefig(fig_prefix+".png",format='png',dpi=300); plt.savefig(fig_prefix+".svg",format='svg',dpi=300); plt.clf();plt.close()
return 0
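# Usage sketch for bargrouperr (illustrative): columns of the DataFrame are the legend groups,
# the index gives the x tick labels; pass a same-shaped DataFrame as yerror to draw error bars.
# df = pd.DataFrame({"ctrl": [1.2, 2.3, 0.8], "case": [1.8, 2.9, 1.1]},
#                   index=["geneA", "geneB", "geneC"])
# bargrouperr(df, xlabel="Gene", ylabel="Mean expression", fig_prefix="grouped_bar", rotation=45)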
def bargroup(data,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="test",title=None,width=None): # group * xticks
num_groups,p = data.shape
if colors == None:
colors = styles(len(group_label))[0]
assert num_groups == len(group_label)
if width==None:
width = 0.95/num_groups
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(p)
for i in range(num_groups):
ax.bar(xlocations+width*i, data[i,:],width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True)
ax.set_xticks(xlocations+width/2*(num_groups-1))
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_xlim(-1,xlocations[-1]+1)
if title is not None:ax.set_title(title)
fig.tight_layout()
ax.grid(True,axis="y")
fig.tight_layout(rect = [0,0,1,0.9])
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def err_bar_group(data,error,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="test",title=None,width=0.3,ylog=0,rotation=0):
num_groups,p = data.shape
if colors == None:
colors = color_grad(len(group_label))
assert num_groups == len(group_label)
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(p)
for i in range(num_groups):
ax.bar(xlocations+width*i, data[i,:],yerr=error[i,:], width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])# capsize=5
#ax.legend(group_label,loc=0)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0)
ax.set_xticks(xlocations+width/2*num_groups)
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel,rotation=rotation)
if title is not None:ax.set_title(title)
    if ylog: ax.set_yscale("log",nonpositive='clip')
fig.tight_layout()
ax.grid(True)
fig.tight_layout(rect = [0,0,1,0.9])
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def err_bar(data,error,xlabel,ylabel,fig_prefix,title=None,mark_sig=None,mark_range=[[0,1],],width=0.3):
num = len(data)
assert num == len(error) == len(xlabel)
#colors = cm.Set3(np.linspace(0, 1, len(xlabel)))
#colors = ["black","gray"]
    if num == 2:
        colors = ["black","gray"]
    else:
        colors = styles(num)[0]
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(len(data))+width
ax.bar(xlocations, data, yerr=error, width=width,linewidth=0.5,ecolor='r',capsize=5,color=colors,alpha=0.5)
ax.set_xticks(xlocations+width/2)
ax.set_xticklabels(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0, xlocations[-1]+width*2)
if title is not None:ax.set_title(title)
if mark_sig is not None:
xlocations = xlocations+width/2
ybin = np.max(np.asarray(data)+np.asarray(error))
step = ybin/20
offset = ybin/40
assert len(mark_sig) == len(mark_range)
for i in range(len(mark_range)):
mark_r = mark_range[i]
sig_string = mark_sig[i]
xbin = np.asarray(mark_r)
ybin += step
ax.plot([xlocations[mark_r[0]],xlocations[mark_r[1]]],[ybin,ybin],color='gray',linestyle='-',alpha=0.5)
ax.text((xlocations[mark_r[0]]+xlocations[mark_r[1]])/2,ybin,sig_string)
ax.set_ylim(0,ybin+step*2.5)
ax.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
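# Usage sketch for err_bar (illustrative): here xlabel is the list of tick labels, and
# mark_sig/mark_range draw a significance bracket between the bars named by each index pair.
# err_bar([2.0, 3.5], [0.3, 0.4], ["ctrl", "case"], "Mean value", "mean_err_bar",
#         mark_sig=["**"], mark_range=[[0, 1]])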
def trendsiglabel(Xvec,Yvec,meansdata,totmean,color,xticklabel,fig_prefix="trend",rotation=45):
num = len(Xvec)
ngenes_sig,p = meansdata.shape
ngenes_tot,p = totmean.shape
assert num == len(Yvec) == len(xticklabel) == p
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
#ax.plot(Xvec,Yvec,color+'^-',markeredgecolor='None',markersize = 12)
for i in range(ngenes_tot):
#print i
ax.plot(Xvec,totmean[i,:],'g-',lw=0.5,alpha=0.3)
for i in range(ngenes_sig):
ax.plot(Xvec,meansdata[i,:],'b-',lw=0.5,alpha=0.3)
ax.plot(Xvec,Yvec,color+'^-',markeredgecolor=color,markersize = 5)
ax.set_xticks(np.arange(num))
xlabelsL = ax.set_xticklabels(xticklabel,rotation=rotation)
ax.grid(True)
#clean y
#ax.get_yaxis().set_ticks([])
#min_a = np.min(Yvec)
#max_a = np.max(Yvec)
#ax.set_ylim(min_a-1,max_a+1)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def twofactor_diff_plot(Xmeanarr,Xstdarr,xticklabel,fig_prefix="Sigplot",title=None,xlabel=None,ylabel="Expression",width=0.3,labels=None,ylimmin=-0.5):
num = Xmeanarr.shape[-1]
fmts = ['o-','^--','x-.','s--','v-.','+-.']
ecolors = ['r','b','g','c','m','y','k']
assert num == Xstdarr.shape[-1] == len(xticklabel)
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(num)+width
n,p = Xmeanarr.shape
for i in range(n):
ax.errorbar(xlocations, Xmeanarr[i,:], yerr=Xstdarr[i,:],fmt=fmts[i],ecolor=ecolors[i],markeredgecolor=ecolors[i])
if labels:
ax.legend(labels,loc=0,numpoints=1)
ax.set_xticks(xlocations)
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
if xlabel: ax.set_xlabel(xlabel)
ax.set_xlim(0, xlocations[-1]+width*2)
#ax.set_ylim(bottom=ylimmin)
if title is not None:ax.set_title(title)
ax.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def onefactor_diff_plot(Xmeanarr,Xstdarr,xticklabel,fig_prefix="Sigplot",title=None,xlabel=None,ylabel="Expression",width=0.3):
num = len(Xmeanarr)
assert num == len(Xstdarr) == len(xticklabel)
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
xlocations = np.arange(len(Xmeanarr))+width
ax.errorbar(xlocations, Xmeanarr, yerr=Xstdarr,fmt='o-',ecolor='r')
ax.set_xticks(xlocations)
ax.set_xticklabels(xticklabel)
ax.set_ylabel(ylabel)
if xlabel: ax.set_xlabel(xlabel)
ax.set_xlim(0, xlocations[-1]+width*2)
if title is not None:ax.set_title(title)
ax.grid(True)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.3,rotation=0,fmt='%.2f',ylog=0,colors=None):
ind = np.arange(len(data))
fig = plt.figure()
ax = fig.add_subplot(111)
if ylog:
        ax.set_yscale("log",nonpositive='clip')
linewidth = 0
alpha=0.5
if not colors:
colors = 'k'
rects = ax.bar(ind,data,width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_xticks(ind)
ax.yaxis.grid(True)
#ax.set_xticks(ind+width/2)
if rotation == 0 or rotation == 90:hafmt='center'
else:hafmt = 'right'
xlabelsL = ax.set_xticklabels(xticks_labels,ha=hafmt,rotation=rotation)
#rotate labels 90 degrees
if rotation:
for label in xlabelsL:
label.set_rotation(rotation)
ax.set_title(title)
for rect in rects:
height = rect.get_height()
if height < 0.1:continue
ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def MA_vaco_plot2(sfc,slogq,fc,logq,fig_prefix,xlabel,ylabel,xlim=None,ylim=None,title="MAplot",figsize=(5,4)):
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
ax.plot(sfc[sfc>0],slogq[sfc>0],'o',markersize=2.0,alpha=0.5,markeredgecolor='#BC3C29',markerfacecolor='#BC3C29')
ax.plot(sfc[sfc<0],slogq[sfc<0],'o',markersize=2.0,alpha=0.5,markeredgecolor='#00468B',markerfacecolor='#00468B')
ax.plot(fc,logq,'o',markersize=1.0,markeredgecolor='#9E9E9E',markerfacecolor='#9E9E9E')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True,ls='--')
if xlim is not None:ax.set_xlim(xlim[0],xlim[-1])
if ylim is not None:ax.set_ylim(ylim[0],ylim[-1])
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();
plt.close()
return 0
def MA_vaco_plot(avelogFC,logFC,totavelogFC,totlogFC,fig_prefix,xlabel,ylabel,xlim=None,ylim=None,title="MAplot",figsize=(5,4)):
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
ax.plot(avelogFC,logFC,'ro',markersize = 1.5,alpha=0.5,markeredgecolor='r')
ax.plot(totavelogFC,totlogFC,'bo',markersize = 1.5,alpha=0.5,markeredgecolor='b')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.grid(True,ls='--')
if xlim is not None:
ax.set_xlim(xlim[0],xlim[-1])
if ylim is not None:
ax.set_ylim(ylim[0],ylim[-1])
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def vaco_plot(X,Y,Xcut,Ycut,fig_prefix,xlabel,ylabel,title=None,figsize=(5,4)):
# X is rho or fc
Xcutx = [np.min(X),np.max(X)]
Ycuts = [Ycut,Ycut]
idx1 = (Y > Ycut) & (np.abs(X) > Xcut)
idx2 = ~idx1
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
ax.plot(X[idx1],Y[idx1],'ro',markersize = 5,alpha=0.5,markeredgecolor='None')
ax.plot(X[idx2],Y[idx2],'bo',markersize = 5,alpha=0.5,markeredgecolor='None')
ax.plot(Xcutx,Ycuts,'r--')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
#ax.set_xlim(-6,6)
if title != None:
ax.set_title(title)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def baohedu_plot(genes,reads,samples,fig_prefix,xlabel="number of reads",ylabel="number of detected genes",title=None,lim=0):
n1,p1 = genes.shape
n2,p2 = reads.shape
assert n1==n2 and p1==p2
"saturability"
#types = ['ro-','b^--','gs-.','kv:','c^-.','m*--','yp:']
ret_color,ret_lines,ret_marker = styles(n1)
fig = plt.figure(figsize=(8,6),dpi=300)
ax = fig.add_subplot(111)
for i in range(n1):
x = reads[i,:]
y = genes[i,:]
ax.plot(x,y,color=ret_color[i],linestyle=ret_lines[i],marker=ret_marker[i],markeredgecolor=ret_color[i],markersize = 4,alpha=0.7,label=samples[i])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if title != None: ax.set_title(title)
ax.legend(loc=0,numpoints=1)
ax.grid(True)
ax.set_ylim(bottom=0)
if lim:
ax.set_xlim(-1,101)
    plt.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
return 0
def plotyy(Xvector,Y1np,Y2np,fig_prefix,xlabel,ylabel1,ylabel2,title=None,figsize=(6,5)):
Y1np = np.asarray(Y1np)
Y2np = np.asarray(Y2np)
fig = plt.figure(figsize=figsize,dpi=300)
ax1 = fig.add_subplot(111)
try:
n1,p1 = Y1np.shape
except ValueError:
n1 = 1
try:
n2,p2 = Y2np.shape
except ValueError:
n2 = 1
for i in range(n1):
if n1 == 1:
ax1.plot(Xvector,Y1np, 'b-')
break
if i == 0:
ax1.plot(Xvector,Y1np[i,:], 'b-')
else:
ax1.plot(Xvector,Y1np[i,:], 'b--')
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel1, color='b')
if title: ax1.set_title(title)
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
for i in range(n2):
if n2 == 1:
ax2.plot(Xvector,Y2np, 'r-')
break
if i == 0:
ax2.plot(Xvector,Y2np[i,:], 'r-')
else:
ax2.plot(Xvector,Y2np[i,:], 'r-.')
ax2.set_ylabel(ylabel2, color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.grid(True,ls='--')
plt.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
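# Usage sketch for plotyy (illustrative): 1-D inputs are handled by the ValueError branch
# above, so a single series per axis can be passed directly.
# x = np.linspace(0.0, 10.0, 50)
# plotyy(x, np.sin(x), np.cos(x), "two_axis_demo", xlabel="t", ylabel1="sin(t)", ylabel2="cos(t)")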
def plotyy_barline(Xvec,Y1vec,Y2vec,fig_prefix,xlabel,ylabel1,ylabel2,figsize=(6,5),xticklabels=None):
assert len(Xvec) == len(Y1vec) == len(Y2vec) > 1
fig = plt.figure(figsize=figsize,dpi=300)
ax1 = fig.add_subplot(111)
width = np.abs(Xvec[-1]-Xvec[0]) / (len(Xvec)-1)
ax1.bar(Xvec,Y1vec,width*0.9,color='b',lw=1.0,alpha=0.5)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel1, color='b')
for tl in ax1.get_yticklabels(): tl.set_color('b')
if xticklabels is not None:
ax1.set_xticks(Xvec)
ax1.set_xticklabels(xticklabels,ha="right",rotation=45)
ax2 = ax1.twinx()
ax2.plot(Xvec,Y2vec,'r-',lw=1.0)
ax2.set_ylabel(ylabel2, color='r')
for tl in ax2.get_yticklabels():tl.set_color('r')
ax1.grid(True,ls='--')
plt.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def clean_axis(ax):
"""Remove ticks, tick labels, and frame from axis"""
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for spx in ax.spines.values():
spx.set_visible(False)
def density_plt(Xarr,colors,legendlabel,figname_prefix="density",xlabel=None,ylabel=None,fun="pdf",fill=0,title=None,exclude=0.0,xlog=0,xliml=None,xlimr=None):
"""not at the same scale """
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
n = len(Xarr)
assert len(colors) == len(legendlabel)
for i in range(n):
dat = np.asarray(Xarr[i])
xp,yp = kdensity(dat[dat != exclude],num = 400,fun=fun)
ax.plot(xp,yp,colors[i],label=legendlabel[i],markeredgecolor='None')
if fill:
ax.fill_between(xp,yp,y2=0,color=colors[i],alpha=0.2)
ax.legend(loc=0,numpoints=1)
if xliml is not None:
ax.set_xlim(left = xliml)
if xlimr is not None:
ax.set_xlim(right = xlimr)
#if xliml and xlimr:
# print "get"
# ax.set_xlim((xliml,xlimr))
if xlog:
ax.set_xscale("log")
if xlabel: ax.set_xlabel(xlabel)
if ylabel: ax.set_ylabel(ylabel)
if title: ax.set_title(title)
ax.grid(True)
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def exprs_density(Xnp,colors,classlabels,figname_prefix="out",xlabel=None,ylabel=None,fun="cdf",exclude=0.0,ylim=10,figsize=(6,5)):
n,p = Xnp.shape
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
uniq_colors = []
for tmpcolor in colors:
if tmpcolor not in uniq_colors:
uniq_colors.append(tmpcolor)
idx = [colors.index(color) for color in uniq_colors]
labels = [classlabels[i] for i in idx]
for i in idx:
dat = np.asarray(Xnp[i,:])
if fun == "cdf":
xp,yp = kdensity(dat[dat != exclude],fun="cdf")
elif fun == "pdf":
xp,yp = kdensity(dat[dat != exclude],fun="pdf")
ax.plot(xp,yp,colors[i])
ax.legend(labels,loc=0)
for i in range(n):
dat = np.asarray(Xnp[i,:])
if fun == "cdf":
xp,yp = kdensity(dat[dat != exclude],fun="cdf")
elif fun == "pdf":
xp,yp = kdensity(dat[dat != exclude],fun="pdf")
ax.plot(xp,yp,colors[i])
#print xp
#print yp
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
if ylim:
ax.set_xlim(0,ylim)
fig.tight_layout()
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def hist_groups(data,labels,xlabel,fig_prefix,bins=25,alpha=0.7,normed=True,colors=None,rwidth=1,histtype="stepfilled",linewidth=0.5,xlim=None,ylim=None,hist=True,figsize=(6,2)):
"""
histtype='bar', rwidth=0.8
stepfilled
"""
n = len(data)
assert n == len(labels)
if colors is None:
ret_color,ret_lines,ret_marker = styles(n)
colors = ret_color
if normed:ylabel = "Probability density"
else:ylabel = "Frequency"
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
for i in range(n):
xp,yp = kdensity(data[i],fun="pdf")
if hist:
ax.hist(data[i],histtype=histtype,rwidth=rwidth,linewidth=linewidth,bins=bins, alpha=alpha,density=normed,color=colors[n-i-1])
ax.plot(xp,yp,color=colors[n-i-1],linestyle='--',lw=1.0)
else:
ax.plot(xp,yp,color=colors[n-i-1],linestyle='-',lw=2.0)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend(labels,loc=0)
if xlim is not None:
ax.set_xlim(xlim[0],xlim[1])
if ylim is not None:
ax.set_ylim(ylim[0],ylim[1])
ax.grid(True,ls='--')
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
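# Usage sketch for hist_groups (illustrative): each entry of data is one sample set; relies on
# this module's kdensity() helper for the overlaid density curves.
# data = [np.random.randn(300), np.random.randn(300) + 1.0]
# hist_groups(data, ["control", "treated"], xlabel="log2 signal", fig_prefix="signal_hist")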
def hist_groups2(data,labels,xlabel,fig_prefix,bins=25,alpha=0.7,normed=True,colors=None,rwidth=1,histtype="stepfilled",linewidth=0.5,xlim=(0,10000),cutline = 0.54,observe=0.64,figsize=(4,2.5)):
n = len(data)
colors = styles(n)[0]
if normed:ylabel = "Density"
else:ylabel = "Frequency"
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
miny = 1
maxy = 0
for i in range(n):
xp,yp = kdensity(data[i],fun="pdf")
miny = min(miny,np.min(yp))
maxy = max(maxy,np.max(yp))
ax.plot(xp,yp,color=colors[n-i-1],linestyle='-',lw=1.0,label=labels[i])
ax.fill(xp,yp,color=colors[n-i-1],alpha=0.3)
ax.plot([cutline,cutline],[miny,maxy],linestyle="--",color="black",lw=2,label="cutoff")
ax.plot([observe,observe],[miny,maxy],linestyle="--",color="red",lw=3,label="your data")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend(loc=0)
if xlim:
ax.set_xlim(xlim[0],xlim[1])
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf(); plt.close()
return 0
def logdist(data,fig_prefix,cutline=0.54,observe=0.64):
# Theoretical
#x = np.linspace(-50,50,100)
#p = 1.0/(1+np.exp(x))
fig = plt.figure(dpi=300,figsize=(7,5))
ax = fig.add_subplot(111)
#ax.plot(x,p,color="black",linestyle='--',lw=1.0,label="Theoretical")
    ax.hist(data, 50, density=True, histtype='step', cumulative=True, label='Empirical')
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf(); plt.close()
return 0
def exprs_RLE(Xnp,mean="median",fig_prefix=None,samplenames=None,colors=None):
    ### Use the median so the reference stays robust to outliers.
    # Within one experiment, most genes should keep comparable expression levels even between the
    # control and treatment groups, and certainly between replicates. When Relative Log Expression
    # (RLE) boxplots are used to check quality across arrays, every box is expected to sit at
    # roughly the same vertical position (usually very close to 0). An array whose box clearly
    # deviates from its replicates is likely to have a quality problem.
n,p = Xnp.shape
if mean == "median":
Xmean =np.median(Xnp,axis=0)
elif mean == "mean":
Xmean =np.mean(Xnp,axis=0)
plot_boxplot(Xnp-Xmean,fig_prefix,"","Relative Log Expression",samplenames,colors=colors,ylim=0)
return 0
def exprs_NUSE():
    #1. hist
    #2. clustering
    #3. RLE
    #array corr
    #
    # Normalized Unscaled Standard Errors (NUSE) is a more sensitive quality check than RLE. If an
    # array looks suspicious in the RLE plot, the NUSE plot usually confirms it. NUSE is simply each
    # array's gene-level standard error relative to the standard error across the whole set: when all
    # arrays are of good quality their standard errors are close, so their NUSE values all sit around
    # 1. A problematic array deviates strongly from 1 and pushes the NUSE values of the other arrays
    # in the opposite direction. In the extreme case where most arrays are bad but share similar
    # standard errors, the few good arrays may instead appear to deviate from 1, so the RLE and NUSE
    # plots must be read together to reach the right conclusion.
return 0
#from itertools import izip
izip = zip
def show_values2(pc,markvalues,fmt="%.3f",fontsize=10,**kw):
pc.update_scalarmappable()
newmarkvalues = []
n,p = markvalues.shape
#for i in range(n-1,-1,-1):
for i in range(n):
newmarkvalues.extend(markvalues[i,:].tolist())
ax = pc.axes
count = 0
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
ax.text(x, y, fmt % newmarkvalues[count], ha="center", va="center", color=color,fontsize=fontsize)
count += 1
def show_values(pc, fmt="%.3f", **kw):
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
        ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
def pcolor_plot(Xnp,xsamplenames,ylabelnames,figname_prefix,txtfmt = "%.3f",figsize=(8,6),measure="correlation"):
n,p = Xnp.shape
print(n,p)
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
clean_axis(ax)
cplot = ax.pcolor(Xnp, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap = cm.Blues)
ax.set_yticks(np.arange(n)+ 0.5)
ax.set_yticklabels(ylabelnames)
ax.set_xticks(np.arange(p)+0.5)
xlabelsL = ax.set_xticklabels(xsamplenames)
for label in xlabelsL:
label.set_rotation(90)
cb = fig.colorbar(cplot,ax=ax)
cb.set_label(measure)
cb.outline.set_linewidth(0)
ax.grid(visible=False)
show_values(cplot,fmt=txtfmt)
#fig.tight_layout()
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
def exprs_corrarray(Xnp,samplenames,figname_prefix,txtfmt = "%.2f",plottext=1,Xdist=None,cbarlabel = "correlation",figsize=(7,6)):
"""
def show_values(pc, fmt="%.3f", **kw):
from itertools import izip
pc.update_scalarmappable()
ax = pc.get_axes()
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
"""
if type(Xdist) == type(None):
corr_coef = np.abs(np.corrcoef(Xnp))
else:
corr_coef = Xdist
n,p = corr_coef.shape
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
clean_axis(ax)
cplot = ax.pcolor(corr_coef, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap = 'RdBu_r')
#image_instance = ax.imshow(corr_coef,interpolation='nearest',aspect='auto',alpha=0.8,origin='lower',cmap=cm.coolwarm)
ax.set_yticks(np.arange(p)+ 0.5)
ax.set_yticklabels(samplenames)
ax.set_xticks(np.arange(n)+0.5)
xlabelsL = ax.set_xticklabels(samplenames)
for label in xlabelsL:
label.set_rotation(90)
cb = fig.colorbar(cplot,ax=ax)
cb.set_label(cbarlabel)
cb.outline.set_linewidth(0)
ax.grid(visible=False)
if plottext:
show_values(cplot,fmt=txtfmt,fontsize=4)
fig.tight_layout()
plt.savefig(figname_prefix+".png",format='png',dpi=300)
plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return corr_coef
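# Usage sketch for exprs_corrarray (illustrative): rows are samples, columns are features; the
# absolute Pearson correlation matrix is drawn unless a precomputed Xdist is supplied.
# X = np.random.randn(4, 500)
# exprs_corrarray(X, ["s1", "s2", "s3", "s4"], "sample_corr", plottext=1)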
def pie_plot(sizes,labels,fig_prefix="pie_plot",autopct='%1.1f%%',colors=None,explode=None,shadow=False, startangle=90,radius=1):
fig = plt.figure(figsize=(6,6),dpi=300)
ax5 = fig.add_subplot(111)
if not colors:
colors = cm.Paired(np.linspace(0, 1, len(labels)))
#patches, texts, autotexts = ax5.pie(sizes,explode,labels=labels, colors=colors,autopct=autopct, shadow=shadow, startangle=startangle,radius=radius)
patches, texts = ax5.pie(sizes,explode,colors=colors, shadow=shadow, startangle=startangle,radius=radius)
tmplabels = []
total = sum(sizes)
    for i in range(len(labels)):
        label = labels[i]
        size = float(sizes[i])/total*100
        #print(label+"("+ autopct+")")
        tmplabels.append((label+"("+ autopct+")")%size)
ax5.legend(patches,tmplabels,loc='best')
for w in patches:
w.set_linewidth(0.2)
w.set_edgecolor('white')
##plt.legend(patches, labels, loc="best")
#proptease = fm.FontProperties()
#proptease.set_size('xx-small')
#plt.setp(autotexts, fontproperties=proptease)
#plt.setp(texts, fontproperties=proptease)
plt.axis('equal')
plt.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
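# Usage sketch for pie_plot (illustrative): legend percentages are derived from the sizes, so
# raw counts can be passed directly.
# pie_plot([45, 30, 25], ["exon", "intron", "intergenic"], fig_prefix="region_pie")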
def polar_pie(percentage,labels,fig_prefix='polar_plot',figsize=(6,5),width=None,color = None,ylog=0):
n = len(percentage) # 0~100
assert np.max(percentage) <=100 and np.min(percentage) >= 0
theta = np.linspace(0.0, 2 * np.pi, n, endpoint=False)
radii = np.float64(percentage)
radiixx = radii*1.0
radiixx[radiixx<10] = 10.0
if width is None:
width = 2 * np.pi / n * (radiixx/np.max(radii))
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111,projection='polar')
bars = ax.bar(theta, radii, width=width, bottom=0.0)
if color is None:
for r, bar in zip(radii, bars):
bar.set_facecolor(cm.viridis(r/100.0))
bar.set_alpha(0.5)
else:
colors = styles(n,colorgrad=color)[0]
idx = 0
for r, bar in zip(radii, bars):
bar.set_facecolor(colors[idx])
idx +=1
bar.set_alpha(0.8)
## color use rhe str to get grade
if ylog:
ax.set_yscale('log')
ax.set_xticks(theta)
ax.set_xticklabels(labels)
ax.grid(True,ls='-',alpha=0.5,)
plt.savefig("%s.png"%fig_prefix,format='png',ppi=300)
plt.savefig("%s.svg"%fig_prefix,format='svg',ppi=300)
plt.clf()
plt.close()
return 0
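# Usage sketch for polar_pie (illustrative): values must already be percentages in [0, 100],
# as enforced by the assert above; wedge widths shrink with smaller values.
# polar_pie([85, 60, 35, 95], ["Q1", "Q2", "Q3", "Q4"], fig_prefix="coverage_polar")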
def cluster_pcolor_dist(Xdist,samplenames,annos,fig_prefix="test_cluster_pcolor",colornorm = True,normratio=0.1,nosample=False,noannos=False,plotxlabel=1,plotylabel=1,cbarlabel="scaled measures",usepcolor=1,cmcolor="coolwarm",spacelinewidth=1.0,markvalues = None,markfmt = "%.2f",markfontsize=12,colorbarfmt="%.1f",figsize=(12,10),metric='euclidean'):# show_values2(pc,markvalues,fmt="%.3f",**kw):
n,p = Xdist.shape
if n > p:
Xdist = Xdist.T
samplenames, annos = annos,samplenames
n,p = p,n
nosample,noannos = noannos,nosample
plotxlabel,plotylabel = plotylabel,plotxlabel
if markvalues is not None: markvalues = markvalues.T
if colornorm:
vmin = np.min(Xdist) # np.floor(np.min(Xdist))
vmax = np.max(Xdist) # np.ceil(np.max(Xdist))
#vmax = max([vmax,abs(vmin)])
vrange = (vmax - vmin) * normratio
my_norm = mpl.colors.Normalize(vmin-vrange, vmax+vrange)
else: my_norm = None
lfsm = 8
if len(samplenames) > 20:
lfsm = int(len(samplenames) * 1.0 * 4/40); lfsm = np.min([lfsm,8])
print(n,p)
rfsm = 8
if len(annos) > 20:
rfsm = int(len(annos) * 1.0 * 4/40); rfsm = np.min([rfsm,8])
print(lfsm,rfsm)
fig = plt.figure(figsize=figsize) # width, height, rfsm,lfsm ## 14,10
heatmapGS = gridspec.GridSpec(2,2,wspace=0.0,hspace=0.0,width_ratios=[0.14,p*1.0/n],height_ratios=[0.14,1])
if not nosample: # gene is col
row_clusters = linkage(Xdist,method='average',metric=metric)
row_denAX = fig.add_subplot(heatmapGS[1,0])
sch.set_link_color_palette(['black'])
row_denD = dendrogram(row_clusters,color_threshold=np.inf,orientation='left')
row_denAX.set_axis_off()
Xtmp = Xdist[row_denD['leaves'],:]
if markvalues is not None: markertmp = markvalues[row_denD['leaves'],:]
else:
Xtmp = Xdist
markertmp = markvalues
if not noannos:
col_clusters = linkage(Xdist.T,method='average',metric=metric)
col_denAX = fig.add_subplot(heatmapGS[0,1])
sch.set_link_color_palette(['black'])
col_denD = dendrogram(col_clusters,color_threshold=np.inf,)
col_denAX.set_axis_off()
Xtmp = Xtmp[:,col_denD['leaves']]
if markvalues is not None: markertmp = markertmp[:,col_denD['leaves']]
heatmapAX = fig.add_subplot(heatmapGS[1,1])
clean_axis(heatmapAX)
axi = heatmapAX.pcolor(np.asarray(Xtmp), edgecolors='gray', linestyle= '-', linewidths=spacelinewidth,norm=my_norm ,cmap = cmcolor)
#heatmapAX.grid(visible=False)
if markvalues is not None:
show_values2(axi,markertmp,markfmt,fontsize=markfontsize)
print(row_denD['leaves'])
print(samplenames)
if plotxlabel:
if not nosample:
t_samplenames = [samplenames[i] for i in row_denD['leaves']]
else: t_samplenames = samplenames
heatmapAX.set_yticks(np.arange(n)+0.5)
heatmapAX.yaxis.set_ticks_position('right')
heatmapAX.set_yticklabels(t_samplenames)
if plotylabel:
if not noannos: t_annonames = [annos[i] for i in col_denD['leaves']]
else: t_annonames = annos
heatmapAX.set_xticks(np.arange(p)+0.5)
xlabelsL = heatmapAX.set_xticklabels(t_annonames,rotation=90)
#for label in xlabelsL: label.set_rotation(90)
for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): l.set_markersize(0)
scale_cbAX = fig.add_subplot(heatmapGS[0,0])
scale_cbAX.set_axis_off()
cb = fig.colorbar(axi,ax=scale_cbAX,shrink=1.0,fraction=2.0,aspect=1.5)
font = {'size': 10}
tl = cb.set_label(cbarlabel,fontdict=font)
cb.ax.yaxis.set_ticks_position('right')
cb.ax.yaxis.set_label_position('right')
tmpticks = cb.ax.get_yticks()
cb.ax.yaxis.set_ticks([tmpticks[0],(tmpticks[0]+tmpticks[-1])/2.0,tmpticks[-1]])
cb.ax.yaxis.set_ticklabels(map(str,[colorbarfmt%vmin,colorbarfmt%((vmax+vmin)/2.0),colorbarfmt%vmax]))
cb.outline.set_linewidth(0)
tl = cb.set_label(cbarlabel,fontdict=font)
tickL = cb.ax.yaxis.get_ticklabels()
for t in tickL: t.set_fontsize(t.get_fontsize() - 3)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
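# Hedged usage sketch: cluster_pcolor_dist clusters both axes of a rectangular score
# matrix and draws an annotated pcolor heatmap. The matrix and names below are invented;
# the call assumes the module-level helpers (linkage, dendrogram, clean_axis, show_values2)
# that are imported or defined elsewhere in this file.
def _demo_cluster_pcolor_dist():
    scores = np.random.rand(6, 4)
    rows = ["sample%d" % i for i in range(6)]
    cols = ["pathway%d" % i for i in range(4)]
    cluster_pcolor_dist(scores, rows, cols, fig_prefix="demo_pcolor", markvalues=scores, cbarlabel="score")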
def cluster_heatmap_dist(Xdist,samplenames,fig_prefix="test_cluster_heatmap",colornorm = True,nosample=False,plotxlabel=1,plotylabel=1,cbarlabel="scaled measures",usepcolor=0,cmcolor="autumn"):
n,p = Xdist.shape
assert n == p
assert np.sum(np.isnan(Xdist)) == 0
if colornorm:
vmin = np.floor(np.min(Xdist))
vmax = np.ceil(np.max(Xdist))
vmax = max([vmax,abs(vmin)])
my_norm = mpl.colors.Normalize(vmin, vmax)
else:my_norm = None
lfsm = 8
if len(samplenames) > 20:
lfsm = int(len(samplenames) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
sys.stderr.write("[INFO] plot size is %dX%d\n"%(lfsm,lfsm))
fig = plt.figure(figsize=(lfsm,lfsm))
heatmapGS = gridspec.GridSpec(2,2,wspace=0.0,hspace=0.0,width_ratios=[0.15,1],height_ratios=[0.15,1])
if not nosample:
col_clusters = linkage(Xdist,method='average')
col_denAX = fig.add_subplot(heatmapGS[0,1])
sch.set_link_color_palette(['black'])
col_denD = dendrogram(col_clusters,color_threshold=np.inf,) # use color_threshold=np.inf not to show color
col_denAX.set_axis_off()
heatmapAX = fig.add_subplot(heatmapGS[1,1])
if nosample:pass
else:
Xtmp = Xdist[:,col_denD['leaves']]
Xtmp = Xtmp[col_denD['leaves'],:]
clean_axis(heatmapAX)
if not usepcolor:
axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmcolor)
else:
axi = heatmapAX.pcolor(np.asarray(Xtmp), edgecolors='k', linestyle= 'dashdot', linewidths=0.2, cmap = cmcolor) # cmap = cm.coolwarm
if plotxlabel:
if not nosample:
t_samplenames = [samplenames[i] for i in col_denD['leaves']]
else:
t_samplenames = samplenames
heatmapAX.set_xticks(np.arange(n)+0.5)
xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
for label in xlabelsL:
label.set_rotation(90)
for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
l.set_markersize(0)
#heatmapAX.grid()
scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[1,0],wspace=0.0,hspace=0.0)
scale_cbAX = fig.add_subplot(scale_cbGSSS[0,0])
scale_cbAX.set_axis_off()
cb = fig.colorbar(axi,ax=scale_cbAX,shrink=0.6,fraction=0.8,aspect=8)
font = {'size': 10}
tl = cb.set_label(cbarlabel,fontdict=font)
cb.ax.yaxis.set_ticks_position('right')
cb.ax.yaxis.set_label_position('right')
cb.outline.set_linewidth(0)
tl = cb.set_label(cbarlabel,fontdict=font)
tickL = cb.ax.yaxis.get_ticklabels()
for t in tickL:
t.set_fontsize(t.get_fontsize() - 2)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
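# Hedged usage sketch: cluster_heatmap_dist expects a square, NaN-free distance matrix,
# e.g. built with scipy's pdist/squareform. The sample names are invented for illustration.
def _demo_cluster_heatmap_dist():
    import scipy.spatial.distance as ssd
    pts = np.random.rand(5, 3)
    D = ssd.squareform(ssd.pdist(pts))
    cluster_heatmap_dist(D, ["s%d" % i for i in range(5)], fig_prefix="demo_dist")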
def highfreq_mutmap(topgenesmuted,mut_stack,samplenames,annonames,fig_prefix="test_cluster_muted",colornorm=True,nosample=False,nogene=False,plotxlabel= 1,plotylabel=1,cbarlabel="Mutation Frequency",genecolors=None,samplecolors=None,cmap='RdYlBu_r',tree=3,stacklegends=[],colorbarlabels=[]):
Xnp = topgenesmuted
n,p = Xnp.shape
assert n == len(samplenames) and p == len(annonames)
if colornorm:
vmin = np.floor(np.min(Xnp))
vmax = np.ceil(np.max(Xnp))
vmax = max([vmax,abs(vmin)])
my_norm = mpl.colors.Normalize(vmin, vmax)
else:my_norm = None
if len(samplenames)/3 <=9:rightx = 8
else:rightx = len(samplenames)/3
if len(annonames)/5 <=9: leftx = 8
else:
leftx = int(len(annonames)/4.5)
if len(samplenames) > 80:
rightx = 8;plotxlabel = 0
if len(annonames) > 80:
leftx = 8;plotylabel = 0
leftx = min(int(32700/300.0),leftx)
rightx = min(int(32700/300.0),rightx)
fig = plt.figure(figsize=(rightx,leftx))
sys.stderr.write("[INFO] plot size is %dX%d\n"%(leftx,rightx))
width_ratios = [0.07,0.115,1];height_ratios=[0.15,1]
samples_l = 3; genes_l = 2;
if samplecolors is not None:
samples_l += 1
width_ratios = [0.07,0.115,0.05,1]
if genecolors is not None:
genes_l = 3
height_ratios = [0.1,0.05,1]
heatmapGS = gridspec.GridSpec(samples_l,genes_l,wspace=0.0,hspace=0.0,width_ratios=height_ratios,height_ratios=width_ratios)
Xtmp = Xnp.T.copy()
if not nosample:
col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp))
col_clusters = linkage(col_pairwise_dists,method='average')
#cutted_trees = cut_tree(col_clusters)
col_denAX = fig.add_subplot(heatmapGS[0,genes_l-1])
col_denD = dendrogram(col_clusters)
col_denAX.set_axis_off()
Xtmp = Xtmp[:,col_denD['leaves']]
if not nogene:
row_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xtmp))
row_clusters = linkage(row_pairwise_dists,method='average')
#assignments = fcluster(row_clusters, cut_tree, 'distance')
#row_cluster_output = pandas.DataFrame({'team':annonames, 'cluster':assignments})
row_denAX = fig.add_subplot(heatmapGS[samples_l-1,0])
row_denD = dendrogram(row_clusters,orientation='left')
row_denAX.set_axis_off()
Xtmp = Xtmp[row_denD['leaves'],:]
# stack plot:
stackvAX = fig.add_subplot(heatmapGS[1,genes_l-1])
mut_stack = np.asmatrix(mut_stack)
stackn,stackp = mut_stack.shape
stackcolors = color_grad(3,cm.Dark2)
#mut_stackT = mut_stack.T
if not nosample: mut_stack = mut_stack[col_denD['leaves'],:]
ind = np.arange(stackn)
for i in range(stackp):
if i:
cumtmp = cumtmp + np.asarray(mut_stack[:,i-1].T)[0]
rects = stackvAX.bar(ind,np.asarray(mut_stack[:,i].T)[0],0.6,color=stackcolors[i],linewidth=0,alpha=0.7,align='center',bottom=cumtmp,label=stacklegends[i])
else:
cumtmp = 0
rects = stackvAX.bar(ind,np.asarray(mut_stack[:,i].T)[0],0.6,color=stackcolors[i],linewidth=0,alpha=0.7,align='center',label=stacklegends[i])
# ax.legend(alx,bbox_to_anchor=(1.02, 1),loc=0,borderaxespad=0,numpoints=1,fontsize=6)
stackvAX.legend(loc=0, fancybox=True, bbox_to_anchor=(1.02, 1),borderaxespad=0)
stackvAX.set_ylabel("Mutations")
stackvAX.set_xlim(-0.5,stackn-0.5)
heatmapAX = fig.add_subplot(heatmapGS[samples_l-1,genes_l-1])
if samplecolors is not None:
if not nosample:
tmpxxx = []
for x in col_denD['leaves']:
tmpxxx.append(samplecolors[x])
samplecolors = tmpxxx[:]
del tmpxxx
col_cbAX = fig.add_subplot(heatmapGS[2,genes_l-1])
col_axi = col_cbAX.imshow([list(samplecolors)],interpolation='nearest',aspect='auto',origin='lower')
clean_axis(col_cbAX)
if genecolors is not None:
if not nogene:
genecolors = genecolors[row_denD['leaves']]
row_cbAX = fig.add_subplot(heatmapGS[samples_l-1,1])
row_axi = row_cbAX.imshow([genecolors.tolist(),],interpolation='nearest',aspect='auto',origin='lower')
clean_axis(row_cbAX)
# cmap = 'RdBu_r'
#tmpmap = cm.Set2()
axi = heatmapAX.pcolor(Xtmp,edgecolors='w', linewidths=1,cmap="Set2")
#axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmap)
clean_axis(heatmapAX)
if plotylabel:
if not nogene:
t_annonames = [annonames[i] for i in row_denD['leaves']]
else:
t_annonames = annonames
heatmapAX.set_yticks(np.arange(p)+0.5)
heatmapAX.yaxis.set_ticks_position('right')
heatmapAX.set_yticklabels(t_annonames)
if plotxlabel:
if not nosample:
t_samplenames = [samplenames[i] for i in col_denD['leaves']]
else:
t_samplenames = samplenames
heatmapAX.set_xticks(np.arange(n)+0.5)
xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
for label in xlabelsL:
label.set_rotation(90)
for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
l.set_markersize(0)
heatmapAX.grid(False)
#scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[samples_l-1,0],wspace=0.0,hspace=0.0)
scale_cbAX = fig.add_subplot(heatmapGS[samples_l-1,0])
scale_cbAX.set_axis_off()
cb = fig.colorbar(axi,ax=scale_cbAX,fraction=0.5,shrink=0.6)
font = {'size': 8}
#tl = cb.set_label(cbarlabel,fontdict=font)
cb.ax.yaxis.set_ticks_position('left')
cb.ax.yaxis.set_label_position('left')
#cb.outline.set_linewidth(0)
#tickL = cb.ax.yaxis.get_ticklabels()
cb.set_ticks(np.arange(len(colorbarlabels)))
cb.set_ticklabels(colorbarlabels)
#for t in tickL:
# t.set_fontsize(t.get_fontsize() - 7)
fig.subplots_adjust(bottom = 0)
fig.subplots_adjust(top = 1)
fig.subplots_adjust(right = 1)
fig.subplots_adjust(left = 0)
plt.savefig(fig_prefix+".png",format='png',additional_artists=fig,bbox_inches="tight",dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',additional_artists=fig,bbox_inches="tight",dpi=300)
plt.clf()
plt.close()
return 0
def mesh_contour(X,Y,Z,xlabel,ylabel,zlabel,figprefix = "test",color=cm.coolwarm,alpha=0.3):
fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=alpha,linewidth=0, antialiased=False)
ax.plot_wireframe(X, Y, Z, rstride=8, cstride=8)
#linewidth=0, antialiased=False
cset = ax.contour(X, Y, Z, zdir='z', offset=-0.4, cmap=color)
cset = ax.contour(X, Y, Z, zdir='x', offset=-3, cmap=color)
cset = ax.contour(X, Y, Z, zdir='y', offset=3, cmap=color)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3)
ax.set_zlim(-0.4,0.4)
plt.savefig(figprefix+".png",format='png',dpi=300)
plt.savefig(figprefix+".svg",format='svg',dpi=300)
plt.clf()
plt.close()
return 0
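# Hedged usage sketch for mesh_contour: a smooth surface whose values stay inside the
# hard-coded axis limits above (x, y in [-3, 3], z in [-0.4, 0.4]). Labels are arbitrary.
def _demo_mesh_contour():
    X, Y = np.mgrid[-3:3:60j, -3:3:60j]
    Z = 0.4 * np.exp(-(X ** 2 + Y ** 2) / 4.0) * np.cos(X)
    mesh_contour(X, Y, Z, "x", "y", "z", figprefix="demo_mesh")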
def plot_contest(data,ynames,xlabel=None,ylabel=None,fig_prefix="plot_ContEst"):
"""
data = [[mean,low,up],...]
"""
meandat = []; lowdat = []; updat = []; rangedat = []; num = len(data); yoffset = []
for i in range(num):
meandat.append(data[i][0]); lowdat.append(data[i][1]); updat.append(data[i][2]); yoffset.append(i+1); rangedat.append(data[i][2]-data[i][1])
if num < 25: heightsize = 6
else: heightsize = int(num * 1.0 * 6/30)
widthsize = 6
fig = plt.figure(figsize=(widthsize,heightsize))
ax = fig.add_subplot(111)
ax.errorbar(meandat,yoffset,xerr=[lowdat,updat],ls="none", marker='o',color='r',markersize=4,markeredgecolor='None',capsize=2.2)
yoffset.append(num+1)
yoffset.insert(0,0)
# ls='',markerfacecolor=tmpcolor,marker=tmpmarker,label=tmplabel,markeredgecolor = tmpcolor,alpha=0.7
#ax.plot([1.5,1.5],[0,yoffset[-1]],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
ax.plot([1.0,1.0],[0,yoffset[-1]],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
ax.plot([5,5],[0,yoffset[-1]],ls='--',markerfacecolor=u'#988ED5',markeredgecolor = u'#988ED5', alpha=0.7)
#ax.plot([1.5,1.5],[yoffset,yoffset],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
#ax.fill_betweenx(yoffset,0,1.5,color=u'#E24A33',alpha=0.3)
#ax.fill_betweenx(yoffset,1.5,5,color=u'#348ABD',alpha=0.3)
#ax.fill_betweenx(yoffset,5,np.max(updat)+1,color=u'#988ED5',alpha=0.3)
ax.set_yticks(np.arange(1,num+1))
ax.yaxis.set_ticks_position('left')
ax.set_yticklabels(ynames)
ax.grid(True)
#ax.set_ylim(0,num+1)
#ax.set_xlim(0,np.max(updat)+1)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
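# Hedged usage sketch: plot_contest takes one [mean, lower error, upper error] triple per
# sample (the second and third entries are passed straight to errorbar as xerr sizes).
# The sample names and values below are invented.
def _demo_plot_contest():
    data = [[0.8, 0.2, 0.3], [2.1, 0.4, 0.5], [6.3, 1.0, 1.2]]
    ynames = ["sampleA", "sampleB", "sampleC"]
    plot_contest(data, ynames, xlabel="Contamination (%)", fig_prefix="demo_contest")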
def cluster_heatmap(Xnp,samplenames,annonames,fig_prefix="test_cluster_heatmap",colornorm = True,nosample=False,nogene=False,plotxlabel=1,plotylabel=1,cbarlabel="Expression",genecolors=None,samplecolors=None,cmap='RdYlBu_r', trees = 3,numshow=80,metric="euclidean",usepcolor=0,normratio=1.0,samplecolormap="Dark2"):
n,p = Xnp.shape
#print n,p,len(samplenames),len(annonames)
assert n == len(samplenames) and p == len(annonames)
# make norm
if colornorm:
vmin = np.floor(np.min(Xnp))
vmax = np.ceil(np.max(Xnp))
vmax = max([vmax,abs(vmin)]) # choose larger of vmin and vmax
#vmin = vmax * -1
vrange = (vmax - vmin) * (1-normratio) / normratio * 0.5
my_norm = mpl.colors.Normalize(vmin-vrange, vmax+vrange)
else:my_norm = None
# heatmap with row names
if len(samplenames)/3 <=9:
rightx = 8
else:
rightx = len(samplenames)/3
if len(annonames)/3 <=9:
leftx = 8
else:
leftx = int(len(annonames)/4.5)
if len(samplenames) > numshow:
rightx = 8
plotxlabel = 0
if len(annonames) > numshow:
leftx = 8
plotylabel = 0
#import pdb; pdb.set_trace()
leftx = min(int(32700/300.0),leftx)
rightx = min(int(32700/300.0),rightx)
sys.stderr.write("[INFO] plot size is %dX%d\n"%(leftx,rightx))
# rightx, leftx
fig = plt.figure(figsize=(14,8))
samples_l = 2; genes_l = 2;
width_ratios = [0.15,1];height_ratios=[0.15,1]
if samplecolors is not None:
samples_l= 3
width_ratios = [0.15,0.05,1]
if (genecolors is not None) or (not nogene):
genes_l = 5
height_ratios = [0.15,0.015,0.025,0.015,1]
heatmapGS = gridspec.GridSpec(samples_l,genes_l,wspace=0.0,hspace=0.0,width_ratios=height_ratios,height_ratios=width_ratios)
### col dendrogram ### col is sample cluster
#import pdb; pdb.set_trace()
if not nosample and n >1:
col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp,metric)) # 'correlation'
col_clusters = linkage(col_pairwise_dists,method='average')#ward, average
assignments = cut_tree(col_clusters,[trees,])
col_cluster_output = pandas.DataFrame({'team': samplenames, 'cluster':assignments.T[0]})
#print col_cluster_output
col_denAX = fig.add_subplot(heatmapGS[0,genes_l-1])
col_denD = dendrogram(col_clusters)
col_denAX.set_axis_off()
### fcluster(col_clusters,0.7*max(col_clusters[:,2]),'distance')
### to return the index of each sample for each cluster
### row dendrogram ### row is anno cluster
if not nogene and p > 1:
row_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp.T,metric))
row_clusters = linkage(row_pairwise_dists,method='average')
assignments = cut_tree(row_clusters,[trees,])
row_cluster_output = pandas.DataFrame({'team':annonames, 'cluster':assignments.T[0]})
#print row_cluster_output
numbergenescluter = len(set(assignments.T[0].tolist()))
row_denAX = fig.add_subplot(heatmapGS[samples_l-1,0])
row_denD = dendrogram(row_clusters,orientation='left')
row_denAX.set_axis_off()
### heatmap ####
heatmapAX = fig.add_subplot(heatmapGS[samples_l-1,genes_l-1])
if nogene:
Xtmp = Xnp.T.copy()
else:
Xtmp = Xnp.T[row_denD['leaves'],:]
if nosample:
pass
else:
Xtmp = Xtmp[:,col_denD['leaves']]
if samplecolors is not None:
if not nosample:
tmpxxx = []
for x in col_denD['leaves']:
tmpxxx.append(samplecolors[x])
samplecolors = tmpxxx[:]
del tmpxxx
col_cbAX = fig.add_subplot(heatmapGS[1,genes_l-1])
print(samplecolors)
if not usepcolor:
col_axi = col_cbAX.imshow([list(samplecolors)],interpolation='nearest',aspect='auto',origin='lower',cmap=samplecolormap)
else:
col_axi = col_cbAX.pcolor([list(samplecolors)],edgecolors='gray',linestyle= 'dashdot', linewidths=0.3, cmap = samplecolormap,norm=my_norm)
clean_axis(col_cbAX)
if (genecolors is not None) or (not nogene):
if not nogene:
uniqgenecolors = color_grad(numbergenescluter,colorgrad="Accent")
genecolors = [i for i in assignments.T[0]]
#print genecolors
genecolors = np.asarray(genecolors)[row_denD['leaves']]
#print genecolors
row_cbAX = fig.add_subplot(heatmapGS[samples_l-1,2])
print(np.asarray([genecolors.tolist(),]).T)
row_axi = row_cbAX.imshow(np.asarray([genecolors.tolist(),]).T,interpolation='nearest',aspect='auto',origin='lower',alpha=0.6)
clean_axis(row_cbAX)
tickoffset = 0
if not usepcolor:
axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmap)## 'RdBu_r' 'RdYlGn_r'
else:
tickoffset += 0.5
axi = heatmapAX.pcolor(Xtmp,edgecolors='gray',linestyle= 'dashdot', linewidths=0.3, cmap = cmap,norm=my_norm)
clean_axis(heatmapAX)
## row labels ##
if plotylabel:
if not nogene:
t_annonames = [annonames[i] for i in row_denD['leaves']]
else:
t_annonames = annonames
heatmapAX.set_yticks(np.arange(p) + tickoffset)
heatmapAX.yaxis.set_ticks_position('right')
heatmapAX.set_yticklabels(t_annonames)
## col labels ##
if plotxlabel:
if not nosample:
t_samplenames = [samplenames[i] for i in col_denD['leaves']]
else:
t_samplenames = samplenames
heatmapAX.set_xticks(np.arange(n) + tickoffset)
xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
#rotate labels 90 degrees
for label in xlabelsL:
label.set_rotation(90)
#remove the tick lines
for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
l.set_markersize(0)
heatmapAX.grid(False)
#cplot = ax.pcolor(corr_coef, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap = 'RdBu_r')
### scale colorbar ###
#scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=heatmapGS[0,0],wspace=0.0,hspace=0.0)
#scale_cbAX = fig.add_subplot(scale_cbGSSS[0,1])
scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[0,0],wspace=0.0,hspace=0.0)
scale_cbAX = fig.add_subplot(scale_cbGSSS[0,0])
scale_cbAX.set_axis_off()
cb = fig.colorbar(axi,ax=scale_cbAX,fraction=0.5,shrink=1.0)
font = {'size': 8}
tl = cb.set_label(cbarlabel,fontdict=font)
cb.ax.yaxis.set_ticks_position('left')
cb.ax.yaxis.set_label_position('left')
cb.outline.set_linewidth(0)
#print cb.get_ticks()
#print cb.ax.get_fontsize()
tickL = cb.ax.yaxis.get_ticklabels()
for t in tickL:
t.set_fontsize(t.get_fontsize() - 7)
#fig.tight_layout()
fig.subplots_adjust(bottom = 0)
fig.subplots_adjust(top = 1)
fig.subplots_adjust(right = 1)
fig.subplots_adjust(left = 0)
#plt.savefig(fig_prefix+".tiff",format='tiff',additional_artists=fig,bbox_inches="tight",dpi=300)
plt.savefig(fig_prefix+".png",format='png',additional_artists=fig,bbox_inches="tight",dpi=300)
#if n * p < 200000:
plt.savefig(fig_prefix+".svg",format='svg',additional_artists=fig,bbox_inches="tight",dpi=300)
plt.clf()
plt.close()
try:
return 0, row_cluster_output
except:
return 0, ''
def loess_testplot(x,y,ynp,labels=[]):
fig = plt.figure()
ax = fig.add_subplot(111)
n,p = ynp.shape
assert len(labels) == n
ret_color,ret_lines,ret_marker = styles(n)
ax.plot(x,y,"ko")
#for i in range(n)
def show_grad():
colors = mplconfig.__getallcolors()
numcolors = len(colors)
ns = 10
fig = plt.figure(figsize=(6,34))
ax = fig.add_subplot(111)
idx = 1
x = np.arange(10)
y = 0
for color in colors:
retcolors = styles(10,color)[0]
for i in range(10):
ax.plot([x[i],],y,'o',color=retcolors[i],markersize=12)
y += 1
ax.set_xlim(-1,10)
ax.set_ylim(-1,y+1)
ax.set_yticks(np.arange(0,y))
ax.set_yticklabels(colors)
fig.tight_layout()
plt.savefig("colorgrad_show.png",format='png',dpi=300)
plt.savefig("colorgrad_show.svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def __test():
X1 = np.random.normal(0,0.5,(3,3))
X2 = np.random.normal(3,0.5,(2,3))
X3 = np.random.normal(6,0.5,(4,3))
X = np.concatenate((X1,X2,X3))
Y = [0,0,0,1,1,2,2,2,2]
color = ['r-','k--','g+']
uniqclasslables= ['r3','k2','g4']
colors = [color[i] for i in Y]
classlabels = [uniqclasslables[i] for i in Y]
print(plot_hmc_curve(X,Y,colors,classlabels,"test_hmc_curve"))
def __testplot():
    ## plot the KDE-estimated probability density; test kdensity
#======================================
aa = np.random.randn(10000)
xn,yn = kdensity(aa.tolist())
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xn,yn,'r--',label="Scott Rule")
ax.legend(loc=0)
plt.savefig("test_density.png",format='png',dpi=300)
#plt.savefig("test_density.jpg",format='jpg',dpi=300)
#plt.savefig("test_density.tif",format='tif',dpi=300)
plt.savefig("test_density.svg",format='svg',dpi=300)
plt.savefig("test_density.pdf",format='pdf',dpi=300)
plt.clf()
plt.close()
##boxplot
#======================================
mm = np.array([np.random.randn(100).tolist(),np.random.lognormal(1,1, 100).tolist()])
mm = mm.transpose()
boxColors = ['darkkhaki','royalblue']
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.2)
bp = ax1.boxplot(mm)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
for i in range(2):
box = bp['boxes'][i]
boxX = box.get_xdata().tolist()
boxY = box.get_ydata().tolist()
        boxCoords = list(zip(boxX,boxY))
        boxPolygon = Polygon(boxCoords, facecolor=boxColors[i])
ax1.add_patch(boxPolygon)
#ax1.set_xticklabels(["Normal","Uniform"],rotation=45)
ax1.set_xticklabels(["Normal","Lognormal"],rotation=45)
ax1.set_title('Test Boxplot')
    #ax1.set_title(u'Boxplot')
ax1.set_xlabel('Distribution',fontstyle='italic')
#ax1.set_xlabel('Distribution',fontstyle='oblique')
ax1.set_ylabel('Values')
    #ax1.set_axis_off()  # hide the axes
plt.savefig("test_boxplot.png",format='png',dpi=300)
plt.savefig("test_boxplot.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#=====================================
##kmeans class plot
pt1 = np.random.normal(1, 0.2, (100,2))
pt2 = np.random.normal(2, 0.5, (300,2))
pt3 = np.random.normal(3, 0.3, (100,2))
pt2[:,0] += 1
pt3[:,0] -= 0.5
xy = np.concatenate((pt1, pt2, pt3))
    ## normalize with whiten: from scipy.cluster.vq import whiten
xy = whiten(xy)
    ## res holds the cluster-center coordinates, idx the cluster labels
res, idx = kmeans2(xy,3)
    ## a neat way to generate per-point colors
colors = ([([0.4,1,0.4],[1,0.4,0.4],[0.1,0.8,1])[i] for i in idx])
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(xy[:,0],xy[:,1],c=colors)
ax1.scatter(res[:,0],res[:,1], marker='o', s=300, linewidths=2, c='none')
ax1.scatter(res[:,0],res[:,1], marker='x', s=300, linewidths=2)
plt.savefig("test_kmeans.png",format='png',dpi=300)
plt.savefig("test_kmeans.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================
##plot hierarchy
mat1 = np.random.normal(0,1,(3,3))
mat2 = np.random.normal(2,1,(2,3))
mat = np.concatenate((mat1,mat2))
linkage_matrix = linkage(mat,'ward','euclidean')
fig = plt.figure()
#ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
dendrogram(linkage_matrix,labels=["N1","N2","N3","P1","P2"],leaf_rotation=45)
ax3 = fig.add_subplot(223)
dendrogram(linkage_matrix,labels=["N1","N2","N3","P1","P2"],orientation='right',leaf_rotation=45)
#ax4 = fig.add_subplot(224)
plt.savefig("test_hcluster.png",format='png',dpi=300)
plt.savefig("test_hcluster.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#======================================
##plot hierarchy with image
mat1 = np.random.normal(0,1,(4,10))
mat2 = np.random.normal(5,1,(3,10))
mat = np.concatenate((mat1,mat2))
mat[:,3:] -= 20
mat -= np.mean(mat,axis=0)
samplenames = ["N1","N2","N3","N4","P1","P2","P3"]
dimensions = ["A1","A2","A3","A4","A5","A6","A7","A8","A9","A10"]
cluster_heatmap(mat,samplenames,dimensions)
#===============================================
##bar plot and err barplot
N = 5
menMeans = (20, 35, 30, 35, 27)
womenMeans = (25, 32, 34, 20, 25)
menStd = (2, 3, 4, 1, 2)
womenStd = (3, 5, 2, 3, 3)
ind = np.arange(N)
width = 0.35
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(ind, menMeans, width, color='r', yerr=womenStd,label='Men')
ax.bar(ind, womenMeans, width, color='y',bottom=menMeans, yerr=menStd,label = 'Women')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width/2)
ax.set_xlim(left = -0.25)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
#ax.set_xticks(ind+width/2., ('G1', 'G2', 'G3', 'G4', 'G5'))
ax.set_yticks(np.arange(0,81,10))
ax.legend(loc=0)
plt.savefig("test_bar.png",format='png',dpi=300)
plt.savefig("test_bar.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#==============================================
##hist plot
mu=2
x = mu + np.random.randn(1000,3)
fig = plt.figure()
ax = fig.add_subplot(111)
    n,bins,patches = ax.hist(x, 15, density=True, histtype='bar',linewidth=0,color=['crimson', 'burlywood', 'chartreuse'],label=['Crimson', 'Burlywood', 'Chartreuse'])
ax.legend(loc=0)
plt.savefig("test_hist.png",format='png',dpi=300)
plt.savefig("test_hist.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##hist2D plot and image colorbar plot on the specific ax
x = np.random.randn(100000)
y = np.random.randn(100000)+5
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
counts, xedges, yedges, image_instance = ax4.hist2d(x, y, bins=40, norm=LogNorm())
ax1.set_axis_off()
plt.colorbar(image_instance,ax=ax1)
plt.savefig("test_hist2d.png",format='png',dpi=300)
plt.savefig("test_hist2d.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##image show plot
y,x = np.ogrid[-2:2:200j,-3:3:300j]
z = x*np.exp(-x**2 - y**2)
    extent = [np.min(x),np.max(x),np.min(y),np.max(y)]
fig = plt.figure()
ax1 = fig.add_subplot(111)
#alpha: scalar The alpha blending value, between 0 (transparent) and 1 (opaque)
    # make every panel's colormap and colorbar cover the same value range, i.e. normalize
    #norm = matplotlib.colors.Normalize(vmin=160, vmax=300), usage: imshow(norm=norm)
image_instance = ax1.imshow(z,extent=extent,cmap=cm.coolwarm,alpha=0.6,origin='lower')
plt.colorbar(image_instance,ax=ax1)
plt.savefig("test_image.png",format='png',dpi=300)
plt.savefig("test_image.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##contour map with image
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
#cs = ax1.contour(z,5,extent = extent,origin = 'lower',linestyles='dashed')
cs = ax2.contour(z,10,extent = extent,origin = 'lower',cmap=cm.coolwarm)
plt.clabel(cs,fmt = '%1.1f',ax=ax2)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
cs1 = ax4.contour(x.reshape(-1),y.reshape(-1),z,10,origin = 'lower',colors = 'k',linestyles='solid')
cs2 = ax4.contourf(x.reshape(-1),y.reshape(-1),z,10,origin = 'lower',cmap=cm.coolwarm)
plt.clabel(cs1,fmt = '%1.1f',ax=ax4)
plt.colorbar(cs2,ax=ax4)
plt.savefig("test_contour.png",format='png',dpi=300)
plt.savefig("test_contour.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##meshgird plot 3D
    # generate gridded data and interpolate with griddata
#grid_x, grid_y = np.mgrid[275:315:1, 0.60:0.95:0.01]
#from scipy.interpolate import griddata
#grid_z = griddata((LST,EMS), TBH, (grid_x, grid_y), method='cubic')
x,y = np.mgrid[-2:2:200j,-3:3:300j]
z = x*np.exp(-x**2 - y**2)
fig = plt.figure(figsize=(20,20), dpi=300)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222,projection ='3d')
ax3 = fig.add_subplot(223,projection ='3d')
ax4 = fig.add_subplot(224,projection ='3d')
cs1 = ax1.contour(x,y,z,10,extent = extent,origin = 'lower',cmap=cm.coolwarm)
    plt.clabel(cs1,fmt = '%1.1f',ax=ax1)
surf = ax2.plot_surface(x,y,z, rstride=20, cstride=20, cmap=cm.coolwarm,linewidth=1, antialiased=False)
fig.colorbar(surf,ax=ax2)
surf = ax3.plot_wireframe(x,y,z, rstride=20, cstride=20, cmap=cm.coolwarm)
    # elevation angle and azimuth angle
#ax.view_init(elevation, azimuth) ‘elev’ stores the elevation angle in the z plane, ‘azim’ stores the azimuth angle in the x,y plane.
ax4.plot_surface(x, y, z, rstride=20, cstride=20, alpha=0.3)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_zlim()[0],zdir='z',cmap=cm.coolwarm)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_xlim()[0],zdir='x',cmap=cm.coolwarm)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_ylim()[-1],zdir='y',cmap=cm.coolwarm)
plt.savefig("test_surface3d.png",format='png',dpi=300)
plt.savefig("test_surface3d.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## pie plot
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = np.array([15.2, 31, 42, 10.5])
#sizes = sizes/np.sum(sizes)
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.05, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
fig = plt.figure(figsize=(8,8),dpi=300)
ax5 = fig.add_subplot(111)
ax5.pie(sizes,explode,labels=labels, colors=colors,autopct='%1.1f%%', shadow=False, startangle=90)
plt.savefig("test_pie.png",format='png',dpi=300)
plt.savefig("test_pie.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## scatter
N = 100
r0 = 0.6
x = 0.9*np.random.rand(N)
y = 0.9* | np.random.rand(N) | numpy.random.rand |
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
import pandas as pd
import warnings
from multiprocessing import Pool
from numba import njit, prange
def euclidean_distance_per_feature(a, b):
"""Compute the euclidean distance per shared feature between two numpy arrays.
Parameters
----------
a: numpy array
b: numpy array
Returns
-------
    float
        Euclidean distance divided by the number of non-NaN shared features.
"""
diff=a-b
n_feature = len(diff)-np.isnan(diff).sum()
if n_feature == 0:
print("warning was about to divide by zero")
return 10000*len(diff)
return np.sqrt(np.nansum(diff*diff))/n_feature
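# Worked example (illustrative, not part of the original module): one NaN in the
# difference leaves two shared features, so the distance is divided by 2.
def _example_edpf():
    a = np.array([1.0, 2.0, np.nan])
    b = np.array([1.0, 0.0, 5.0])
    # diff = [0, 2, nan]; n_feature = 2; sqrt(0 + 4) / 2 = 1.0
    return euclidean_distance_per_feature(a, b)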
@njit(parallel=True)
def dist_edpf(XA,XB):
'''
dist(u=XA[i], v=XB[j]) is computed and stored in the ij'th entry.
where dist is the above euclidean_distance_per_feature
Parameters
----------
XA : numpy array
XB : numpy array
Returns
-------
arr : numpy array
'''
n_a = len(XA)
n_b = len(XB)
arr = np.empty((n_a,n_b))
for i in prange(n_a):
for j in prange(n_b):
diff=XA[i]-XB[j]
arr[i][j]=np.sqrt(np.nansum(diff*diff))/(len(diff)-np.isnan(diff).sum())
return arr
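# Hedged usage sketch: dist_edpf builds the full pairwise distance matrix (like
# scipy.spatial.distance.cdist) using the NaN-aware per-feature metric above. The
# arrays are invented for illustration; the first call triggers numba compilation.
def _example_dist_edpf():
    XA = np.array([[1.0, 2.0, np.nan], [0.0, 0.0, 0.0]])
    XB = np.array([[1.0, 0.0, 5.0]])
    return dist_edpf(XA, XB)  # shape (2, 1)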
class KMeans(object):
'''
    K-Means clustering

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    init : str, default: 'k-means++'
        Method for initialization. 'k-means++' selects initial cluster
        centers for k-means clustering in a smart way to speed up
        convergence.
    n_init : int, default: 1
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final result will be the best output of
        n_init consecutive runs in terms of inertia.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    tolerance : float, default: 1e-4
        Convergence tolerance for a single run.
Attributes
----------
centroids_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
'''
def __init__(self, n_clusters=8, init='k-means++', n_init=1,
max_iter=300, tolerance = 1e-4, verbose = False):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tolerance = tolerance
self.n_init = n_init
self.verbose = verbose
self.centroids_ = None
self.labels_ = None
def _initialize_centroids(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
k-means++ initialization for centroids
'''
# use Kmeans plus plus
self.centroids_ = self._kmeans_plus_plus(X)
def _kmeans_plus_plus(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
helper function to initialize centroids in a smart way
'''
k=self.n_clusters
centroids = np.empty((k, X.shape[1]))
for j in range(k):
if j == 0:
centroids[j] = X[ | np.random.choice(X.shape[0]) | numpy.random.choice |
import numpy as np
from loguru import logger
from colorama import init, Fore
from .agent import Agent
from .action import Action
class Game:
"""
    Class modeling a single tic-tac-toe game between two agents.
Attributes
----------
grid: numpy.ndarray
player_1: ttt.models.Agent
player_2: ttt.models.Agent
result: int
grid_is_full: bool
game_sequence: list
Parameters
----------
player_1: ttt.models.Agent
player_2: ttt.models.Agent
"""
def __init__(self, player_1: Agent, player_2: Agent) -> None:
self.grid = | np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]) | numpy.array |
import os
import cv2
import numpy as np
in_path = './imgs1'
files= os.listdir(in_path)
print(files)
def sepia(src_image):
gray = cv2.cvtColor(src_image, cv2.COLOR_BGR2GRAY)
normalized_gray = np.array(gray, np.float32)/255
#solid color
sepia = | np.ones(src_image.shape) | numpy.ones |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from pypex.poly2d.intersection import linter
from pypex.poly2d.point import Point
class LinterTestCase(unittest.TestCase):
@staticmethod
def intersection_equal(a, b):
for x, y, in zip(a, b):
assert_array_equal([x], [y])
def test_intersection_one_point_touch(self):
line1 = np.array([[0.0, 0.0], [1.0, 0.0]])
line2 = np.array([[1.0, 0.0], [1.0, 0.3]])
line3 = np.array([[0.5, 0.0], [0.3, 0.3]])
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1], in_touch=True))
expected = [True, True, Point(1.0, 0.0), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1], in_touch=False))
expected = [True, False, Point(1.0, 0.0), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
obtained = list(linter.intersection(line1[0], line1[1], line3[0], line3[1], in_touch=True))
expected = [True, True, Point(0.5, 0.0), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
obtained = list(linter.intersection(line1[0], line1[1], line3[0], line3[1], in_touch=False))
expected = [True, False, Point(0.5, 0.0), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
def test_overlap_in_single_point(self):
line1 = np.array([[1.2, 1.2], [1.5, 1.5]])
line2 = np.array([[1.5, 1.5], [11.3, 11.3]])
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1], in_touch=True))
expected = [True, True, np.nan, 0.0, 'OVERLAP']
self.intersection_equal(obtained, expected)
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1], in_touch=False))
expected = [True, False, np.nan, 0.0, 'OVERLAP']
self.intersection_equal(obtained, expected)
def test_intersection_intersect_common(self):
line1 = np.array([[-1, 0], [1, 0]])
line2 = np.array([[0, -1], [0, 1]])
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1]))
expected = (True, True, Point(0.0, 0.0), np.nan, 'INTERSECT')
self.intersection_equal(obtained, expected)
line1 = np.array([[-13, 10], [10, -3]])
line2 = np.array([[-5, -11], [15, 10]])
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1]))
obtained[2] = Point(round(obtained[2].x, 2), round(obtained[2].y, 2))
expected = [True, True, Point(5.20, -0.29), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
# reversed order
line1 = np.array([[-1, 0], [1, 0]])
line2 = np.array([[0, 1], [0, -1]])
obtained = linter.intersection(line1[0], line1[1], line2[0], line2[1])
expected = (True, True, Point(0.0, 0.0), np.nan, 'INTERSECT')
self.intersection_equal(obtained, expected)
def test_intersection_intersect_no_common(self):
line1 = np.array([[-0.5, -0.5], [0.5, 0.25]])
line2 = np.array([[0.5, 1.0], [1, 2]])
obtained = list(linter.intersection(line1[0], line1[1], line2[0], line2[1]))
obtained[2] = Point(round(obtained[2].x, 2), round(obtained[2].y, 2))
expected = [True, False, Point(-0.1, -0.2), np.nan, 'INTERSECT']
self.intersection_equal(obtained, expected)
def test_intersection_parallel(self):
line1 = np.array([[0, 1], [1, 1]])
line2 = np.array([[-1, 0], [10, 0]])
obtained = linter.intersection(line1[0], line1[1], line2[0], line2[1])
expected = (False, False, np.nan, 1.0, 'PARALLEL')
self.intersection_equal(obtained, expected)
line1 = | np.array([[0, 1], [1, 1]]) | numpy.array |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian circuit operations"""
# pylint: disable=duplicate-code,attribute-defined-outside-init
import numpy as np
from thewalrus.quantum import Xmat
from . import ops
from ..shared_ops import changebasis
class GaussianModes:
""" Base class for representing and operating on a collection of
continuous variable modes in the symplectic basis as encoded in a
covariance matrix and a mean vector.
    The modes are initialized in the (multimode) vacuum state.
The state of the modes is manipulated by calling the various methods."""
# pylint: disable=too-many-public-methods
def __init__(self, num_subsystems):
r"""The class is initialized by providing an integer indicating the number of modes
Unlike the "standard" covariance matrix for the Wigner function that uses symmetric ordering
as defined in e.g.
[1] Gaussian quantum information
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
Rev. Mod. Phys. 84, 621 – Published 1 May 2012
we define covariance matrices in terms of the following two quantities:
$$
N_{i,j} =\langle a_i^\dagger a_j \rangle
M_{i,j} = \langle a_i a_j \rangle
$$
Note that the matrix $N$ is hermitian and the matrix M is symmetric.
The mean displacements are stored as expectation values of the destruction operator $\alpha_i = \langle a_i \rangle$
We also provide functions that give the symmetric ordered covariance matrices and the mean displacement for the quadrature
operators $q = a+a^\dagger$ and $p = i(a^\dagger -a)$. Note that with these conventions $[q,p]=2 i$.
For vacuum one has $N_{i,j}=M_{i,j}=alpha_i =0$,
The quantities $N,M,\alpha$ are stored in the variable nmat, mmat, mean respectively
"""
# Check validity
if not isinstance(num_subsystems, int):
raise ValueError("Number of modes must be an integer")
self.hbar = 2
self.reset(num_subsystems)
def add_mode(self, n=1):
"""add mode to the circuit"""
newnlen = self.nlen + n
newnmat = np.zeros((newnlen, newnlen), dtype=complex)
newmmat = np.zeros((newnlen, newnlen), dtype=complex)
newmean = np.zeros(newnlen, dtype=complex)
newactive = list(np.arange(newnlen, dtype=int))
for i in range(self.nlen):
newmean[i] = self.mean[i]
newactive[i] = self.active[i]
for j in range(self.nlen):
newnmat[i, j] = self.nmat[i, j]
newmmat[i, j] = self.mmat[i, j]
self.mean = newmean
self.nmat = newnmat
self.mmat = newmmat
self.active = newactive
self.nlen = newnlen
def del_mode(self, modes):
""" delete mode from the circuit"""
if isinstance(modes, int):
modes = [modes]
for mode in modes:
if self.active[mode] is None:
raise ValueError("Cannot delete mode, mode does not exist")
self.loss(0.0, mode)
self.active[mode] = None
def reset(self, num_subsystems=None):
"""Resets the simulation state.
Args:
num_subsystems (int, optional): Sets the number of modes in the reset
circuit. None means unchanged.
"""
if num_subsystems is not None:
if not isinstance(num_subsystems, int):
raise ValueError("Number of modes must be an integer")
self.nlen = num_subsystems
self.nmat = np.zeros((self.nlen, self.nlen), dtype=complex)
self.mmat = np.zeros((self.nlen, self.nlen), dtype=complex)
self.mean = np.zeros(self.nlen, dtype=complex)
self.active = list(np.arange(self.nlen, dtype=int))
def get_modes(self):
"""return the modes currently active"""
return [x for x in self.active if x is not None]
def displace(self, r, phi, i):
""" Implements a displacement operation by the complex number `beta = r * np.exp(1j * phi)` in mode i"""
# Update displacement of mode i by the complex amount bet
if self.active[i] is None:
raise ValueError("Cannot displace mode, mode does not exist")
self.mean[i] += r * np.exp(1j * phi)
def squeeze(self, r, phi, k):
""" Implements a squeezing operation in mode k by the amount z = r*exp(1j*phi)."""
if self.active[k] is None:
raise ValueError("Cannot squeeze mode, mode does not exist")
phase = np.exp(1j * phi)
phase2 = phase * phase
sh = np.sinh(r)
ch = np.cosh(r)
sh2 = sh * sh
ch2 = ch * ch
shch = sh * ch
nk = np.copy(self.nmat[k])
mk = np.copy(self.mmat[k])
alphak = np.copy(self.mean[k])
# Update displacement of mode k
self.mean[k] = alphak * ch - phase * np.conj(alphak) * sh
# Update covariance matrix elements. Only the k column and row of nmat and mmat need to be updated.
# First update the diagonal elements
self.nmat[k, k] = (
sh2
- phase * shch * np.conj(mk[k])
- shch * np.conj(phase) * mk[k]
+ ch2 * nk[k]
+ sh2 * nk[k]
)
self.mmat[k, k] = (
-(phase * shch) + phase2 * sh2 * np.conj(mk[k]) + ch2 * mk[k] - 2 * phase * shch * nk[k]
)
# Update the column k
for l in np.delete(np.arange(self.nlen), k):
self.nmat[k, l] = -(sh * np.conj(phase) * mk[l]) + ch * nk[l]
self.mmat[k, l] = ch * mk[l] - phase * sh * nk[l]
# Update row k
self.nmat[:, k] = np.conj(self.nmat[k])
self.mmat[:, k] = self.mmat[k]
def phase_shift(self, phi, k):
""" Implements a phase shift in mode k by the amount phi."""
if self.active[k] is None:
raise ValueError("Cannot phase shift mode, mode does not exist")
phase = np.exp(1j * phi)
phase2 = phase * phase
# Update displacement of mode k
self.mean[k] = self.mean[k] * phase
# Update covariance matrix elements. Only the k column and row of nmat and mmat need to be updated.
# First update the diagonal elements
self.mmat[k][k] = phase2 * self.mmat[k][k]
# Update the column k
for l in np.delete(np.arange(self.nlen), k):
self.nmat[k][l] = np.conj(phase) * self.nmat[k][l]
self.mmat[k][l] = phase * self.mmat[k][l]
# Update row k
self.nmat[:, k] = np.conj(self.nmat[k])
self.mmat[:, k] = self.mmat[k]
def beamsplitter(self, theta, phi, k, l):
""" Implements a beam splitter operation between modes k and l by the amount theta, phi"""
if self.active[k] is None or self.active[l] is None:
raise ValueError("Cannot perform beamsplitter, mode(s) do not exist")
if k == l:
raise ValueError("Cannot use the same mode for beamsplitter inputs")
phase = np.exp(1j * phi)
phase2 = phase * phase
sh = np.sin(theta)
ch = np.cos(theta)
sh2 = sh * sh
ch2 = ch * ch
shch = sh * ch
# alpha1 = self.mean[0]
nk = np.copy(self.nmat[k])
mk = np.copy(self.mmat[k])
nl = np.copy(self.nmat[l])
ml = np.copy(self.mmat[l])
# Update displacement of mode k and l
alphak = np.copy(self.mean[k])
alphal = np.copy(self.mean[l])
self.mean[k] = ch * alphak + phase * sh * alphal
self.mean[l] = ch * alphal - np.conj(phase) * sh * alphak
# Update covariance matrix elements. Only the k and l columns and rows of nmat and mmat need to be updated.
# First update the (k,k), (k,l), (l,l), and (l,l) elements
self.nmat[k][k] = (
ch2 * nk[k] + phase * shch * nk[l] + shch * np.conj(phase) * nl[k] + sh2 * nl[l]
)
self.nmat[k][l] = (
-(shch * np.conj(phase) * nk[k])
+ ch2 * nk[l]
- sh2 * np.conj(phase2) * nl[k]
+ shch * np.conj(phase) * nl[l]
)
self.nmat[l][k] = np.conj(self.nmat[k][l])
self.nmat[l][l] = (
sh2 * nk[k] - phase * shch * nk[l] - shch * np.conj(phase) * nl[k] + ch2 * nl[l]
)
self.mmat[k][k] = ch2 * mk[k] + 2 * phase * shch * ml[k] + phase2 * sh2 * ml[l]
self.mmat[k][l] = (
-(shch * np.conj(phase) * mk[k]) + ch2 * ml[k] - sh2 * ml[k] + phase * shch * ml[l]
)
self.mmat[l][k] = self.mmat[k][l]
self.mmat[l][l] = (
sh2 * np.conj(phase2) * mk[k] - 2 * shch * np.conj(phase) * ml[k] + ch2 * ml[l]
)
# Update columns k and l
for i in np.delete(np.arange(self.nlen), (k, l)):
self.nmat[k][i] = ch * nk[i] + sh * np.conj(phase) * nl[i]
self.mmat[k][i] = ch * mk[i] + phase * sh * ml[i]
self.nmat[l][i] = -(phase * sh * nk[i]) + ch * nl[i]
self.mmat[l][i] = -(sh * np.conj(phase) * mk[i]) + ch * ml[i]
# Update rows k and l
self.nmat[:, k] = np.conj(self.nmat[k])
self.mmat[:, k] = self.mmat[k]
self.nmat[:, l] = np.conj(self.nmat[l])
self.mmat[:, l] = self.mmat[l]
def scovmatxp(self):
r"""Constructs and returns the symmetric ordered covariance matrix in the xp ordering.
The order for the canonical operators is :math:`q_1,..,q_n, p_1,...,p_n`.
This differs from the ordering used in [1] which is :math:`q_1,p_1,q_2,p_2,...,q_n,p_n`
Note that one ordering can be obtained from the other by using a permutation matrix.
Said permutation matrix is implemented in the function changebasis(n) where n is
the number of modes.
"""
mm11 = (
self.nmat
+ np.transpose(self.nmat)
+ self.mmat
+ np.conj(self.mmat)
+ np.identity(self.nlen)
)
mm12 = 1j * (
-np.transpose(self.mmat)
+ np.transpose(np.conj(self.mmat))
+ np.transpose(self.nmat)
- self.nmat
)
mm22 = (
self.nmat
+ np.transpose(self.nmat)
- self.mmat
- np.conj(self.mmat)
+ np.identity(self.nlen)
)
return np.concatenate(
(
np.concatenate((mm11, mm12), axis=1),
np.concatenate((np.transpose(mm12), mm22), axis=1),
),
axis=0,
).real
def smeanxp(self):
r"""Constructs and returns the symmetric ordered vector of mean in the xp ordering.
The order for the canonical operators is :math:`q_1, \ldots, q_n, p_1, \ldots, p_n`.
This differs from the ordering used in [1] which is :math:`q_1, p_1, q_2, p_2, \ldots, q_n, p_n`.
Note that one ordering can be obtained from the other by using a permutation matrix.
Said permutation matrix is implemented in the function changebasis(n) where n is
the number of modes.
"""
nmodes = self.nlen
r = np.empty(2 * nmodes)
r[0:nmodes] = 2 * self.mean.real
r[nmodes : 2 * nmodes] = 2 * self.mean.imag
return r
def scovmat(self):
"""Constructs and returns the symmetric ordered covariance matrix as defined in [1]
"""
rotmat = changebasis(self.nlen)
return np.dot(np.dot(rotmat, self.scovmatxp()), np.transpose(rotmat))
def smean(self):
r"""the symmetric mean $[q_1,p_1,q_2,p_2,...,q_n,p_n]$"""
r = np.empty(2 * self.nlen)
for i in range(self.nlen):
r[2 * i] = 2 * self.mean[i].real
r[2 * i + 1] = 2 * self.mean[i].imag
return r
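    # Illustrative check (an added comment, not part of the original class): for the freshly
    # initialized (vacuum) state nmat = mmat = 0 and mean = 0, so with hbar = 2 the symmetric
    # covariance matrix is the identity and the mean vector is zero, e.g.
    #   circ = GaussianModes(2)
    #   np.allclose(circ.scovmat(), np.identity(4))  # True
    #   np.allclose(circ.smean(), np.zeros(4))       # True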
def fromsmean(self, r, modes=None):
r"""Populates the means from a provided vector of means with hbar=2 assumed.
Args:
r (array): vector of means in :math:`(x_1,p_1,x_2,p_2,\dots)` ordering
modes (Sequence): sequence of modes corresponding to the vector of means
"""
mode_list = modes
if modes is None:
mode_list = range(self.nlen)
for idx, mode in enumerate(mode_list):
self.mean[mode] = 0.5 * (r[2 * idx] + 1j * r[2 * idx + 1])
def fromscovmat(self, V, modes=None):
r"""Updates the circuit's state when a standard covariance matrix is provided.
Args:
V (array): covariance matrix in symmetric ordering
modes (Sequence): sequence of modes corresponding to the covariance matrix
"""
if modes is None:
n = len(V) // 2
modes = np.arange(self.nlen)
if n != self.nlen:
raise ValueError(
"Covariance matrix is the incorrect size, does not match means vector."
)
else:
n = len(modes)
modes = np.array(modes)
if n > self.nlen:
raise ValueError("Covariance matrix is larger than the number of subsystems.")
# convert to xp ordering
rotmat = changebasis(n)
VV = np.dot(np.dot(np.transpose(rotmat), V), rotmat)
A = VV[0:n, 0:n]
B = VV[0:n, n : 2 * n]
C = VV[n : 2 * n, n : 2 * n]
Bt = np.transpose(B)
if n < self.nlen:
# reset modes to be prepared back to the vacuum state
for mode in modes:
self.loss(0.0, mode)
rows = modes.reshape(-1, 1)
cols = modes.reshape(1, -1)
self.nmat[rows, cols] = 0.25 * (A + C + 1j * (B - Bt) - 2 * np.identity(n))
self.mmat[rows, cols] = 0.25 * (A - C + 1j * (B + Bt))
def qmat(self, modes=None):
""" Construct the covariance matrix for the Q function"""
if modes is None:
modes = list(range(self.nlen))
rows = np.reshape(modes, [-1, 1])
cols = np.reshape(modes, [1, -1])
sigmaq = np.concatenate(
(
np.concatenate(
(self.nmat[rows, cols], np.conjugate(self.mmat[rows, cols])), axis=1
),
np.concatenate(
(self.mmat[rows, cols], np.conjugate(self.nmat[rows, cols])), axis=1
),
),
axis=0,
) + np.identity(2 * len(modes))
return sigmaq
def fidelity_coherent(self, alpha, modes=None):
""" Returns a function that evaluates the Q function of the given state """
if modes is None:
modes = list(range(self.nlen))
Q = self.qmat(modes)
Qi = np.linalg.inv(Q)
delta = self.mean[modes] - alpha
delta = np.concatenate((delta, np.conjugate(delta)))
return np.sqrt(np.linalg.det(Qi).real) * np.exp(
-0.5 * np.dot(delta, np.dot(Qi, np.conjugate(delta))).real
)
def fidelity_vacuum(self, modes=None):
"""fidelity of the current state with the vacuum state"""
if modes is None:
modes = list(range(self.nlen))
alpha = np.zeros(len(modes))
return self.fidelity_coherent(alpha)
def Amat(self):
""" Constructs the A matrix from Hamilton's paper"""
######### this needs to be conjugated
sigmaq = np.concatenate(
(
np.concatenate((np.transpose(self.nmat), self.mmat), axis=1),
np.concatenate((np.transpose(np.conjugate(self.mmat)), self.nmat), axis=1),
),
axis=0,
) + np.identity(2 * self.nlen)
return np.dot(Xmat(self.nlen), np.identity(2 * self.nlen) - np.linalg.inv(sigmaq))
def loss(self, T, k):
r"""Implements a loss channel in mode k by amplitude loss amount \sqrt{T}
(energy loss amount T)"""
if self.active[k] is None:
raise ValueError("Cannot apply loss channel, mode does not exist")
sqrtT = np.sqrt(T)
self.nmat[k] = sqrtT * self.nmat[k]
self.mmat[k] = sqrtT * self.mmat[k]
self.nmat[k][k] = sqrtT * self.nmat[k][k]
self.mmat[k][k] = sqrtT * self.mmat[k][k]
self.nmat[:, k] = np.conj(self.nmat[k])
self.mmat[:, k] = self.mmat[k]
self.mean[k] = sqrtT * self.mean[k]
def thermal_loss(self, T, nbar, k):
r""" Implements the thermal loss channel in mode k by amplitude loss amount \sqrt{T}
        unlike the loss channel, here the ancillary mode that goes into the second arm of the
        beam splitter is prepared in a thermal state with mean photon number nbar """
if self.active[k] is None:
raise ValueError("Cannot apply loss channel, mode does not exist")
self.loss(T, k)
self.nmat += (1 - T) * nbar
def init_thermal(self, population, mode):
""" Initializes a state of mode in a thermal state with the given population"""
self.loss(0.0, mode)
self.nmat[mode][mode] = population
def is_vacuum(self, tol=0.0):
""" Checks if the state is vacuum by calculating its fidelity with vacuum """
fid = self.fidelity_vacuum()
return np.abs(fid - 1) <= tol
def measure_dyne(self, covmat, indices, shots=1):
""" Performs the general-dyne measurement specified in covmat, the indices should correspond
with the ordering of the covmat of the measurement
covmat specifies a gaussian effect via its covariance matrix. For more information see
Quantum Continuous Variables: A Primer of Theoretical Methods
by <NAME> page 129
"""
if covmat.shape != (2 * len(indices), 2 * len(indices)):
raise ValueError("Covariance matrix size does not match indices provided")
for i in indices:
if self.active[i] is None:
raise ValueError("Cannot apply homodyne measurement, mode does not exist")
expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1))
mp = self.scovmat()
(A, B, C) = ops.chop_in_blocks(mp, expind)
V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B))
V1 = ops.reassemble(V, expind)
self.fromscovmat(V1)
r = self.smean()
(va, vc) = ops.chop_in_blocks_vector(r, expind)
vm = np.random.multivariate_normal(vc, C, size=shots)
# The next line is a hack in that it only updates conditioned on the first samples value
# should still work if shots = 1
va = va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm[0] - vc)
va = ops.reassemble_vector(va, expind)
self.fromsmean(va)
return vm
def homodyne(self, n, shots=1, eps=0.0002):
""" Performs a homodyne measurement by calling measure dyne an giving it the
covariance matrix of a squeezed state whose x quadrature has variance eps**2"""
covmat = np.diag(np.array([eps ** 2, 1.0 / eps ** 2]))
res = self.measure_dyne(covmat, [n], shots=shots)
return res
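    # Illustrative note (an added comment, not original): homodyne() approximates an ideal
    # x-quadrature measurement by handing measure_dyne the covariance of a strongly squeezed
    # Gaussian effect, diag([eps**2, 1/eps**2]); with shots > 1 several samples are drawn but,
    # as noted in measure_dyne, the state is conditioned only on the first one.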
def post_select_homodyne(self, n, val, eps=0.0002):
""" Performs a homodyne measurement but postelecting on the value vals for mode n """
if self.active[n] is None:
raise ValueError("Cannot apply homodyne measurement, mode does not exist")
covmat = np.diag(np.array([eps ** 2, 1.0 / eps ** 2]))
indices = [n]
expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1))
mp = self.scovmat()
(A, B, C) = ops.chop_in_blocks(mp, expind)
V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B))
V1 = ops.reassemble(V, expind)
self.fromscovmat(V1)
r = self.smean()
(va, vc) = ops.chop_in_blocks_vector(r, expind)
vm1 = np.random.normal(vc[1], np.sqrt(C[1][1]))
vm = np.array([val, vm1])
va = va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm - vc)
va = ops.reassemble_vector(va, expind)
self.fromsmean(va)
return val
def post_select_heterodyne(self, n, alpha_val):
""" Performs a homodyne measurement but postelecting on the value vals for mode n """
if self.active[n] is None:
raise ValueError("Cannot apply heterodyne measurement, mode does not exist")
covmat = np.identity(2)
indices = [n]
expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1))
mp = self.scovmat()
(A, B, C) = ops.chop_in_blocks(mp, expind)
V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B))
V1 = ops.reassemble(V, expind)
self.fromscovmat(V1)
r = self.smean()
(va, vc) = ops.chop_in_blocks_vector(r, expind)
vm = 2.0 * np.array([np.real(alpha_val), np.imag(alpha_val)])
va = va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm - vc)
va = ops.reassemble_vector(va, expind)
self.fromsmean(va)
return alpha_val
def apply_u(self, U):
""" Transforms the state according to the linear optical unitary that maps a[i] \to U[i, j]^*a[j]"""
self.mean = np.dot(np.conj(U), self.mean)
self.nmat = np.dot(np.dot(U, self.nmat), np.conj(np.transpose(U)))
self.mmat = np.dot(np.dot( | np.conj(U) | numpy.conj |
from typing import List
import numpy as np
from numpy import sqrt
Gx_0 = np.array([
[0],
])
Gx_1 = np.array([
[0, 0, 0],
[0, 0, -1],
[0, 1, 0],
])
Gx_2 = np.array([
[0, 1, 0, 0, 0],
[-1, 0, 0, 0, 0],
[0, 0, 0, -sqrt(3), 0],
[0, 0, sqrt(3), 0, -1],
[0, 0, 0, 1, 0],
])
Gx_3 = np.array([
[0, sqrt(6)/2, 0, 0, 0, 0, 0],
[-sqrt(6)/2, 0, sqrt(10)/2, 0, 0, 0, 0],
[0, -sqrt(10)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(6), 0, 0],
[0, 0, 0, sqrt(6), 0, -sqrt(10)/2, 0],
[0, 0, 0, 0, sqrt(10)/2, 0, -sqrt(6)/2],
[0, 0, 0, 0, 0, sqrt(6)/2, 0],
])
Gx_4 = np.array([
[0, sqrt(2), 0, 0, 0, 0, 0, 0, 0],
[-sqrt(2), 0, sqrt(14)/2, 0, 0, 0, 0, 0, 0],
[0, -sqrt(14)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0],
[0, 0, -3*sqrt(2)/2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(10), 0, 0, 0],
[0, 0, 0, 0, sqrt(10), 0, -3*sqrt(2)/2, 0, 0],
[0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(14)/2, 0],
[0, 0, 0, 0, 0, 0, sqrt(14)/2, 0, -sqrt(2)],
[0, 0, 0, 0, 0, 0, 0, sqrt(2), 0],
])
Gx_5 = np.array([
[0, sqrt(10)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(10)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -3*sqrt(2)/2, 0, sqrt(6), 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(6), 0, sqrt(7), 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(7), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -sqrt(15), 0, 0, 0, 0],
[0, 0, 0, 0, 0, sqrt(15), 0, -sqrt(7), 0, 0, 0],
[0, 0, 0, 0, 0, 0, sqrt(7), 0, -sqrt(6), 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(6), 0, -3*sqrt(2)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(10)/2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(10)/2, 0],
])
Gx_6 = np.array([
[0, sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[- | sqrt(3) | numpy.sqrt |
# MIT License
# Copyright (C) <NAME>-<NAME> (taoyil AT UCI EDU)
import numpy as np
class RotationalDataQueue(list):
def head_updated_callback(self):
pass
def __init__(self, window_size=10):
self._i = 0
self.window_size = window_size
super(RotationalDataQueue, self).__init__()
@property
def non_empty(self):
return sum([1 if d is not None else 0 for d in self])
def sort_time(self):
self.sort(key=lambda x: x.time)
@property
def time(self):
return np.array([d.time for d in self])
@property
def duration(self):
return | np.max(self.time) | numpy.max |
import torch
import matplotlib.pyplot as plt
import numpy as np
from torchvision.utils import make_grid
device = 'cuda' if torch.cuda.is_available() else 'cpu'
plt.interactive(False)
def show(img):
npimg = img.numpy()
plt.imshow( | np.transpose(npimg, (1, 2, 0)) | numpy.transpose |
"""
Use a BERT + BiLSTM + CRF network for processing.
"""
import json
import jieba
from django.db.models import Q
from algo.model.model import CustomModel
from algo.model.model_config import BertBilstmCrfConfig
from keras.models import Model
from keras.layers import Bidirectional, LSTM, Dense, Dropout
from keras.optimizers import Adam
from keras_contrib.layers import CRF
import keras_bert
import os
from algo.models import NerData
# vocabulary special-token flags
unk_flag = '[UNK]'
pad_flag = '[PAD]'
cls_flag = '[CLS]'
sep_flag = '[SEP]'
class BertBilstmCrf(CustomModel):
def __init__(self,
vocab_size: int,
n_class: int,
max_len: int = 100,
embedding_dim: int = 128,
rnn_units: int = 128,
drop_rate: float = 0.5,
):
self.vocab_size = vocab_size
self.n_class = n_class
self.max_len = max_len
self.embedding_dim = embedding_dim
self.rnn_units = rnn_units
self.drop_rate = drop_rate
self.config_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_config.json')
self.check_point_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_model.ckpt')
self.dict_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'vocab.txt')
self.epochs = 15
self.w2i = get_w2i() # word to index
self.one_hot = True
self.unk_index = self.w2i.get(unk_flag, 101)
self.pad_index = self.w2i.get(pad_flag, 1)
self.cls_index = self.w2i.get(cls_flag, 102)
self.sep_index = self.w2i.get(sep_flag, 103)
self.tag2index = get_tag2index() # tag to index
self.tag_size = len(self.tag2index)
def precess_data(self):
        # Read annotated samples from the database
queryset = NerData.objects.filter(~Q(human_tag=None))
# poses=[{"begin": 2, "end": 3, "pos": "LOC"}]
sentences = []
tags = []
        for q in queryset:
            sentence = q.text
            poses = json.loads(q.human_label)
            # Build the tag sequence from the span annotations
tag = ['O'] * len(sentence)
for pos in poses:
begin = int(pos['begin'])
end = int(pos['end'])
pos_tag = pos['pos']
tag[begin] = f"B-{pos_tag}"
if end > begin:
tag[begin+1:end] = (end-begin-1) * [f"I-{pos_tag}"]
tags.append(tag)
sentences.append(sentence)
        # Convert sentences and tags to index sequences
data = self.data_to_index(sentences)
label = self.label_to_index(tags)
        # Optionally one-hot encode the labels
if self.one_hot:
def label_to_one_hot(index: []) -> []:
data = []
for line in index:
data_line = []
for i, index in enumerate(line):
line_line = [0]*self.tag_size
line_line[index] = 1
data_line.append(line_line)
data.append(data_line)
return np.array(data)
data_label = label_to_one_hot(index=label)
else:
data_label = np.expand_dims(label, 2)
        # Split along the sample axis: first 80% for training, the rest for testing
        train_data_proportion = 0.8
        num = len(data[0])
        split = int(train_data_proportion * num)
        self.train_data = [data[0][:split], data[1][:split]]
        self.train_label = data_label[:split]
        self.test_data = [data[0][split:], data[1][split:]]
        self.test_label = data_label[split:]
def label_to_index(self, tags):
"""
        Convert the tag sequences to label indices.
:return:
"""
label_ids = []
line_label = []
for tag in tags:
for t in tag:
                # BERT takes token indices plus segment types; each sample here is a single sentence, so every type is 0
t_index = self.tag2index.get(t, 0)
line_label.append(t_index) # label index
max_len_buff = self.max_len-2
            if len(line_label) > max_len_buff:  # truncate first
line_label = line_label[:max_len_buff]
line_label = [0] + line_label + [0]
# padding
            if len(line_label) < self.max_len:  # pad to the maximum length
pad_num = self.max_len - len(line_label)
line_label = [0] * pad_num + line_label
label_ids.append(np.array(line_label))
line_label = []
return np.array(label_ids)
def data_to_index(self, sentences):
"""
        Convert the sentences to BERT token indices and segment types.
:return:
"""
data_ids = []
data_types = []
line_data_ids = []
line_data_types = []
for sentence in sentences:
for w in sentence:
                # BERT takes token indices plus segment types; each sample here is a single sentence, so every type is 0
w_index = self.w2i.get(w, self.unk_index)
line_data_ids.append(w_index) # index
line_data_types.append(0) # types
max_len_buff = self.max_len-2
            if len(line_data_ids) > max_len_buff:  # truncate first
line_data_ids = line_data_ids[:max_len_buff]
line_data_types = line_data_types[:max_len_buff]
line_data_ids = [self.cls_index] + line_data_ids + [self.sep_index]
line_data_types = [0] + line_data_types + [0]
# padding
            if len(line_data_ids) < self.max_len:  # pad to the maximum length
pad_num = self.max_len - len(line_data_ids)
line_data_ids = [self.pad_index]*pad_num + line_data_ids
line_data_types = [0] * pad_num + line_data_types
data_ids.append(np.array(line_data_ids))
data_types.append(np.array(line_data_types))
line_data_ids = []
line_data_types = []
        return [np.array(data_ids), np.array(data_types)]
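
    # A hedged sketch of the network the module docstring describes (BERT -> BiLSTM -> CRF).
    # It is an assumption about the missing build step, not the repository's original code;
    # layer sizes come from the constructor fields defined above.
    def build_model_sketch(self):
        bert = keras_bert.load_trained_model_from_checkpoint(
            self.config_path, self.check_point_path, seq_len=self.max_len)
        for layer in bert.layers:
            layer.trainable = False                     # keep the pretrained encoder frozen
        x = Bidirectional(LSTM(self.rnn_units, return_sequences=True))(bert.output)
        x = Dropout(self.drop_rate)(x)
        x = Dense(self.tag_size)(x)
        crf = CRF(self.tag_size, sparse_target=False)   # one-hot labels, see precess_data
        output = crf(x)
        model = Model(bert.inputs, output)
        model.compile(optimizer=Adam(1e-5), loss=crf.loss_function, metrics=[crf.accuracy])
        return model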
"""
pymc.distributions
A collection of common probability distributions. The objects associated
with a distribution called 'dist' are:
dist_like : function
The log-likelihood function corresponding to dist. PyMC's convention
is to sum the log-likelihoods of multiple input values, so all
log-likelihood functions return a single float.
rdist : function
The random variate generator corresponding to dist. These take a
'size' argument indicating how many variates should be generated.
dist_expval : function
Computes the expected value of a dist-distributed variable.
Dist : Stochastic subclass
Instances have dist_like as their log-probability function
and rdist as their random function.
"""
#-------------------------------------------------------------------
# Decorate fortran functions from pymc.flib to ease argument passing
#-------------------------------------------------------------------
# TODO: Add exponweib_expval
# TODO: categorical, mvhypergeometric
# TODO: __all__
__docformat__ = 'reStructuredText'
from . import flib, utils
import numpy as np
# from scipy.stats.kde import gaussian_kde
import scipy.stats as stats
gaussian_kde = stats.gaussian_kde
from .Node import ZeroProbability
from .PyMCObjects import Stochastic, Deterministic
from .CommonDeterministics import Lambda
from numpy import pi, inf
import itertools
import pdb
from . import utils
import warnings
from . import six
from .six import print_
xrange = six.moves.xrange
def poiscdf(a, x):
x = np.atleast_1d(x)
a = np.resize(a, x.shape)
values = np.array([flib.gammq(b, y) for b, y in zip(a.ravel(), x.ravel())])
return values.reshape(x.shape)
# Import utility functions
import inspect
import types
from copy import copy
random_number = np.random.random
inverse = np.linalg.pinv
sc_continuous_distributions = ['beta', 'cauchy', 'chi2',
'degenerate', 'exponential', 'exponweib',
'gamma', 'half_cauchy', 'half_normal',
'inverse_gamma', 'laplace', 'logistic',
'lognormal', 'noncentral_t', 'normal',
'pareto', 't', 'truncated_pareto', 'uniform',
'weibull', 'skew_normal', 'truncated_normal',
'von_mises']
sc_bool_distributions = ['bernoulli']
sc_discrete_distributions = ['binomial', 'betabin', 'geometric', 'poisson',
'negative_binomial', 'categorical', 'hypergeometric',
'discrete_uniform', 'truncated_poisson']
sc_nonnegative_distributions = ['bernoulli', 'beta', 'betabin', 'binomial', 'chi2', 'exponential',
'exponweib', 'gamma', 'half_cauchy', 'half_normal',
'hypergeometric', 'inverse_gamma', 'lognormal',
'weibull']
mv_continuous_distributions = ['dirichlet', 'mv_normal',
'mv_normal_cov', 'mv_normal_chol', 'wishart',
'wishart_cov']
mv_discrete_distributions = ['multivariate_hypergeometric', 'multinomial']
mv_nonnegative_distributions = ['dirichlet', 'wishart',
'wishart_cov', 'multivariate_hypergeometric',
'multinomial']
availabledistributions = (sc_continuous_distributions +
sc_bool_distributions +
sc_discrete_distributions +
mv_continuous_distributions +
mv_discrete_distributions)
# Changes lower case, underscore-separated names into "Class style"
# capitalized names For example, 'negative_binomial' becomes
# 'NegativeBinomial'
capitalize = lambda name: ''.join([s.capitalize() for s in name.split('_')])
# ==============================================================================
# User-accessible function to convert a logp and random function to a
# Stochastic subclass.
# ==============================================================================
# TODO Document this function
def bind_size(randfun, shape):
def newfun(*args, **kwargs):
try:
return np.reshape(randfun(size=shape, *args, **kwargs), shape)
except ValueError:
# Account for non-array return values
return randfun(size=shape, *args, **kwargs)
newfun.scalar_version = randfun
return newfun
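
# Illustrative note (not in the original source): the function returned by bind_size ignores
# any caller-supplied size and always reshapes its draws to the bound shape, e.g.
# bind_size(np.random.normal, (3, 2))(0., 1.).shape == (3, 2).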
def new_dist_class(*new_class_args):
"""
Returns a new class from a distribution.
:Parameters:
dtype : numpy dtype
The dtype values of instances of this class.
name : string
Name of the new class.
parent_names : list of strings
The labels of the parents of this class.
parents_default : list
The default values of parents.
docstr : string
The docstring of this class.
logp : function
The log-probability function for this class.
random : function
The random function for this class.
mv : boolean
A flag indicating whether this class represents array-valued
variables.
.. note::
stochastic_from_dist provides a higher-level version.
stochastic_from_data is suited for non-parametric distributions.
:SeeAlso:
stochastic_from_dist, stochastic_from_data
"""
(dtype,
name,
parent_names,
parents_default,
docstr,
logp,
random,
mv,
logp_partial_gradients) = new_class_args
class new_class(Stochastic):
__doc__ = docstr
def __init__(self, *args, **kwds):
(dtype,
name,
parent_names,
parents_default,
docstr,
logp,
random,
mv,
logp_partial_gradients) = new_class_args
parents = parents_default
# Figure out what argument names are needed.
arg_keys = [
'name',
'parents',
'value',
'observed',
'size',
'trace',
'rseed',
'doc',
'debug',
'plot',
'verbose']
arg_vals = [
None, parents, None, False, None, True, True, None, False, None, -1]
if 'isdata' in kwds:
warnings.warn(
'"isdata" is deprecated, please use "observed" instead.')
kwds['observed'] = kwds['isdata']
pass
# No size argument allowed for multivariate distributions.
if mv:
arg_keys.pop(4)
arg_vals.pop(4)
arg_dict_out = dict(zip(arg_keys, arg_vals))
args_needed = ['name'] + parent_names + arg_keys[2:]
# Sort positional arguments
for i in xrange(len(args)):
try:
k = args_needed.pop(0)
if k in parent_names:
parents[k] = args[i]
else:
arg_dict_out[k] = args[i]
except:
raise ValueError(
'Too many positional arguments provided. Arguments for class ' +
self.__class__.__name__ +
' are: ' +
str(args_needed))
# Sort keyword arguments
for k in args_needed:
if k in parent_names:
try:
parents[k] = kwds.pop(k)
except:
if k in parents_default:
parents[k] = parents_default[k]
else:
raise ValueError('No value given for parent ' + k)
elif k in arg_dict_out.keys():
try:
arg_dict_out[k] = kwds.pop(k)
except:
pass
# Remaining unrecognized arguments raise an error.
if len(kwds) > 0:
raise TypeError('Keywords ' +
str(kwds.keys()) +
' not recognized. Arguments recognized are ' +
str(args_needed))
# Determine size desired for scalar variables.
# Notes
# -----
# Case | init_val | parents | size | value.shape | bind size
# ------------------------------------------------------------------
# 1.1 | None | scalars | None | 1 | 1
# 1.2 | None | scalars | n | n | n
# 1.3 | None | n | None | n | 1
# 1.4 | None | n | n(m) | n (Error) | 1 (-)
# 2.1 | scalar | scalars | None | 1 | 1
# 2.2 | scalar | scalars | n | n | n
# 2.3 | scalar | n | None | n | 1
# 2.4 | scalar | n | n(m) | n (Error) | 1 (-)
# 3.1 | n | scalars | None | n | n
# 3.2 | n | scalars | n(m) | n (Error) | n (-)
# 3.3 | n | n | None | n | 1
# 3.4 | n | n | n(m) | n (Error) | 1 (-)
if not mv:
shape = arg_dict_out.pop('size')
shape = None if shape is None else tuple(np.atleast_1d(shape))
init_val = arg_dict_out['value']
init_val_shape = None if init_val is None else np.shape(
init_val)
if len(parents) > 0:
pv = [np.shape(utils.value(v)) for v in parents.values()]
biggest_parent = np.argmax(
[(np.prod(v) if v else 0) for v in pv])
parents_shape = pv[biggest_parent]
# Scalar parents can support any shape.
if np.prod(parents_shape) < 1:
parents_shape = None
else:
parents_shape = None
def shape_error():
raise ValueError(
'Shapes are incompatible: value %s, largest parent %s, shape argument %s' %
(shape, init_val_shape, parents_shape))
if init_val_shape is not None and shape is not None and init_val_shape != shape:
shape_error()
given_shape = init_val_shape or shape
bindshape = given_shape or parents_shape
# Check consistency of bindshape and parents_shape
if parents_shape is not None:
# Uncomment to leave broadcasting completely up to NumPy's random functions
# if bindshape[-np.alen(parents_shape):]!=parents_shape:
# Uncomment to limit broadcasting flexibility to what the
# Fortran likelihoods can handle.
if bindshape < parents_shape:
shape_error()
if random is not None:
random = bind_size(random, bindshape)
elif 'size' in kwds.keys():
raise ValueError(
'No size argument allowed for multivariate stochastic variables.')
# Call base class initialization method
if arg_dict_out.pop('debug'):
logp = debug_wrapper(logp)
random = debug_wrapper(random)
else:
Stochastic.__init__(
self,
logp=logp,
random=random,
logp_partial_gradients=logp_partial_gradients,
dtype=dtype,
**arg_dict_out)
new_class.__name__ = name
new_class.parent_names = parent_names
new_class.parents_default = parents_default
new_class.dtype = dtype
new_class.mv = mv
new_class.raw_fns = {'logp': logp, 'random': random}
return new_class
def stochastic_from_dist(
name, logp, random=None, logp_partial_gradients={}, dtype=np.float, mv=False):
"""
Return a Stochastic subclass made from a particular distribution.
:Parameters:
name : string
The name of the new class.
logp : function
The log-probability function.
random : function
The random function
dtype : numpy dtype
The dtype of values of instances.
mv : boolean
A flag indicating whether this class represents
array-valued variables.
:Example:
>>> Exponential = stochastic_from_dist('exponential',
logp=exponential_like,
random=rexponential,
dtype=np.float,
mv=False)
>>> A = Exponential(self_name, value, beta)
.. note::
new_dist_class is a more flexible class factory. Also consider
subclassing Stochastic directly.
stochastic_from_data is suited for non-parametric distributions.
:SeeAlso:
new_dist_class, stochastic_from_data
"""
(args, varargs, varkw, defaults) = inspect.getargspec(logp)
parent_names = args[1:]
try:
parents_default = dict(zip(args[-len(defaults):], defaults))
except TypeError: # No parents at all.
parents_default = {}
name = capitalize(name)
# Build docstring from distribution
parents_str = ''
if parent_names:
parents_str = ', '.join(parent_names) + ', '
docstr = name[0] + ' = ' + name + \
'(name, ' + parents_str + 'value=None, observed=False,'
if not mv:
docstr += ' size=1,'
docstr += ' trace=True, rseed=True, doc=None, verbose=-1, debug=False)\n\n'
docstr += 'Stochastic variable with ' + name + \
' distribution.\nParents are: ' + ', '.join(parent_names) + '.\n\n'
docstr += 'Docstring of log-probability function:\n'
try:
docstr += logp.__doc__
except TypeError:
pass # This will happen when logp doesn't have a docstring
logp = valuewrapper(logp)
distribution_arguments = logp.__dict__
wrapped_logp_partial_gradients = {}
for parameter, func in six.iteritems(logp_partial_gradients):
wrapped_logp_partial_gradients[parameter] = valuewrapper(
logp_partial_gradients[parameter],
arguments=distribution_arguments)
return new_dist_class(dtype, name, parent_names, parents_default, docstr,
logp, random, mv, wrapped_logp_partial_gradients)
def stochastic_from_data(name, data, lower=-np.inf, upper=np.inf,
value=None, observed=False, trace=True, verbose=-1, debug=False):
"""
Return a Stochastic subclass made from arbitrary data.
The histogram for the data is fitted with Kernel Density Estimation.
:Parameters:
- `data` : An array with samples (e.g. trace[:])
- `lower` : Lower bound on possible outcomes
- `upper` : Upper bound on possible outcomes
:Example:
>>> from pymc import stochastic_from_data
>>> pos = stochastic_from_data('posterior', posterior_samples)
>>> prior = pos # update the prior with arbitrary distributions
:Alias:
Histogram
"""
pdf = gaussian_kde(data) # automatic bandwidth selection
# account for tail contribution
lower_tail = upper_tail = 0.
if lower > -np.inf:
lower_tail = pdf.integrate_box(-np.inf, lower)
if upper < np.inf:
upper_tail = pdf.integrate_box(upper, np.inf)
factor = 1. / (1. - (lower_tail + upper_tail))
def logp(value):
prob = factor * pdf(value)
if value < lower or value > upper:
return -np.inf
elif prob <= 0.:
return -np.inf
else:
return np.log(prob)
def random():
res = pdf.resample(1)[0][0]
while res < lower or res > upper:
res = pdf.resample(1)[0][0]
return res
if value is None:
value = random()
return Stochastic(logp=logp,
doc='Non-parametric density with Gaussian Kernels.',
name=name,
parents={},
random=random,
trace=trace,
value=value,
dtype=float,
observed=observed,
verbose=verbose)
# Alias following Stochastics naming convention
Histogram = stochastic_from_data
#-------------------------------------------------------------
# Light decorators
#-------------------------------------------------------------
def randomwrap(func):
"""
Decorator for random value generators
Allows passing of sequence of parameters, as well as a size argument.
Convention:
- If size=1 and the parameters are all scalars, return a scalar.
- If size=1, the random variates are 1D.
- If the parameters are scalars and size > 1, the random variates are 1D.
- If size > 1 and the parameters are sequences, the random variates are
aligned as (size, max(length)), where length is the parameters size.
:Example:
>>> rbernoulli(.1)
0
>>> rbernoulli([.1,.9])
np.asarray([0, 1])
>>> rbernoulli(.9, size=2)
np.asarray([1, 1])
>>> rbernoulli([.1,.9], 2)
np.asarray([[0, 1],
[0, 1]])
"""
# Find the order of the arguments.
refargs, varargs, varkw, defaults = inspect.getargspec(func)
# vfunc = np.vectorize(self.func)
npos = len(refargs) - len(defaults) # Number of pos. arg.
nkwds = len(defaults) # Number of kwds args.
mv = func.__name__[
1:] in mv_continuous_distributions + mv_discrete_distributions
# Use the NumPy random function directly if this is not a multivariate
# distribution
if not mv:
return func
def wrapper(*args, **kwds):
# First transform keyword arguments into positional arguments.
n = len(args)
if nkwds > 0:
args = list(args)
for i, k in enumerate(refargs[n:]):
if k in kwds.keys():
args.append(kwds[k])
else:
args.append(defaults[n - npos + i])
r = []
s = []
largs = []
nr = args[-1]
length = [np.atleast_1d(a).shape[0] for a in args]
dimension = [np.atleast_1d(a).ndim for a in args]
N = max(length)
if len(set(dimension)) > 2:
            raise ValueError('Dimensions do not agree.')
# Make sure all elements are iterable and have consistent lengths, ie
# 1 or n, but not m and n.
for arg, s in zip(args, length):
t = type(arg)
            arr = np.empty(N, dtype=t)
if s == 1:
arr.fill(arg)
elif s == N:
arr = np.asarray(arg)
else:
raise RuntimeError('Arguments size not allowed: %s.' % s)
largs.append(arr)
if mv and N > 1 and max(dimension) > 1 and nr > 1:
raise ValueError(
'Multivariate distributions cannot take s>1 and multiple values.')
if mv:
for i, arg in enumerate(largs[:-1]):
largs[0] = np.atleast_2d(arg)
for arg in zip(*largs):
r.append(func(*arg))
size = arg[-1]
vec_stochastics = len(r) > 1
if mv:
if nr == 1:
return r[0]
else:
return np.vstack(r)
else:
if size > 1 and vec_stochastics:
return np.atleast_2d(r).T
elif vec_stochastics or size > 1:
return np.concatenate(r)
else: # Scalar case
return r[0][0]
wrapper.__doc__ = func.__doc__
wrapper.__name__ = func.__name__
return wrapper
def debug_wrapper(func, name='function'):
# Wrapper to debug distributions
import pdb
def wrapper(*args, **kwargs):
print_('Debugging inside %s:' % name)
print_('\tPress \'s\' to step into function for debugging')
print_('\tCall \'args\' to list function arguments')
# Set debugging trace
pdb.set_trace()
# Call function
return func(*args, **kwargs)
return wrapper
#-------------------------------------------------------------
# Utility functions
#-------------------------------------------------------------
def constrain(value, lower=-np.Inf, upper=np.Inf, allow_equal=False):
"""
Apply interval constraint on stochastic value.
"""
ok = flib.constrain(value, lower, upper, allow_equal)
if ok == 0:
raise ZeroProbability
def standardize(x, loc=0, scale=1):
"""
Standardize x
Return (x-loc)/scale
"""
return flib.standardize(x, loc, scale)
# ==================================
# = vectorize causes memory leaks. =
# ==================================
# @Vectorize
def gammaln(x):
"""
Logarithm of the Gamma function
"""
return flib.gamfun(x)
def expand_triangular(X, k):
"""
Expand flattened triangular matrix.
"""
X = X.tolist()
# Unflatten matrix
Y = np.asarray(
        [[0] * i + X[i * k - (i * (i - 1)) // 2: i * k + (k - i)] for i in range(k)])
# Loop over rows
for i in range(k):
# Loop over columns
for j in range(k):
Y[j, i] = Y[i, j]
return Y
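
# Worked example (illustrative, not in the original source):
# expand_triangular(np.array([1, 2, 3, 4, 5, 6]), 3) gives the symmetric matrix
# [[1, 2, 3],
#  [2, 4, 5],
#  [3, 5, 6]]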
# Loss functions
absolute_loss = lambda o, e: np.abs(o - e)
squared_loss = lambda o, e: (o - e) ** 2
chi_square_loss = lambda o, e: (1. * (o - e) ** 2) / e
loss_functions = {
'absolute': absolute_loss,
'squared': squared_loss,
'chi_square': chi_square_loss}
def GOFpoints(x, y, expval, loss):
# Return pairs of points for GOF calculation
return np.sum(np.transpose([loss(x, expval), loss(y, expval)]), 0)
def gofwrapper(f, loss_function='squared'):
"""
Goodness-of-fit decorator function for likelihoods
==================================================
Generates goodness-of-fit points for data likelihoods.
Wrap function f(*args, **kwds) where f is a likelihood.
Assume args = (x, parameter1, parameter2, ...)
Before passing the arguments to the function, the wrapper makes sure that
the parameters have the same shape as x.
"""
name = f.__name__[:-5]
# Take a snapshot of the main namespace.
# Find the functions needed to compute the gof points.
expval_func = eval(name + '_expval')
random_func = eval('r' + name)
def wrapper(*args, **kwds):
"""
This wraps a likelihood.
"""
"""Return gof points."""
# Calculate loss
loss = kwds.pop('gof', loss_functions[loss_function])
# Expected value, given parameters
expval = expval_func(*args[1:], **kwds)
y = random_func(size=len(args[0]), *args[1:], **kwds)
f.gof_points = GOFpoints(args[0], y, expval, loss)
"""Return likelihood."""
return f(*args, **kwds)
# Assign function attributes to wrapper.
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
wrapper.name = name
return wrapper
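
# Hedged usage sketch (the names below are module-level functions defined further down in this
# file): wrapping gamma_like picks up gamma_expval and rgamma by name and records GOF points on
# the wrapped likelihood each time it is evaluated.
# gamma_like_gof = gofwrapper(gamma_like, loss_function='squared')
# ll = gamma_like_gof([1.2, 0.7, 2.4], 2.0, 1.0)
# gamma_like.gof_points    # pairs of observed/simulated losses against the expected value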
#--------------------------------------------------------
# Statistical distributions
# random generator, expval, log-likelihood
#--------------------------------------------------------
# Autoregressive lognormal
def rarlognormal(a, sigma, rho, size=1):
R"""
Autoregressive normal random variates.
If a is a scalar, generates one series of length size.
If a is a sequence, generates size series of the same length
as a.
"""
f = utils.ar1
if np.isscalar(a):
r = f(rho, 0, sigma, size)
else:
n = len(a)
r = [f(rho, 0, sigma, n) for i in range(size)]
if size == 1:
r = r[0]
return a * np.exp(r)
def arlognormal_like(x, a, sigma, rho):
R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`.
"""
return flib.arlognormal(x, np.log(a), sigma, rho, beta=1)
# Bernoulli----------------------------------------------
@randomwrap
def rbernoulli(p, size=None):
"""
Random Bernoulli variates.
"""
return np.random.random(size) < p
def bernoulli_expval(p):
"""
Expected value of bernoulli distribution.
"""
return p
def bernoulli_like(x, p):
R"""Bernoulli log-likelihood
The Bernoulli distribution describes the probability of successes (x=1) and
failures (x=0).
.. math:: f(x \mid p) = p^{x} (1-p)^{1-x}
:Parameters:
- `x` : Series of successes (1) and failures (0). :math:`x=0,1`
- `p` : Probability of success. :math:`0 < p < 1`.
:Example:
>>> from pymc import bernoulli_like
>>> bernoulli_like([0,1,0,1], .4)
-2.854232711280291
.. note::
- :math:`E(x)= p`
- :math:`Var(x)= p(1-p)`
"""
return flib.bernoulli(x, p)
bernoulli_grad_like = {'p': flib.bern_grad_p}
# Beta----------------------------------------------
@randomwrap
def rbeta(alpha, beta, size=None):
"""
Random beta variates.
"""
from scipy.stats.distributions import beta as sbeta
return sbeta.ppf(np.random.random(size), alpha, beta)
# return np.random.beta(alpha, beta, size)
def beta_expval(alpha, beta):
"""
Expected value of beta distribution.
"""
return 1.0 * alpha / (alpha + beta)
def beta_like(x, alpha, beta):
R"""
Beta log-likelihood. The conjugate prior for the parameter
:math:`p` of the binomial distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} x^{\alpha - 1} (1 - x)^{\beta - 1}
:Parameters:
- `x` : 0 < x < 1
- `alpha` : alpha > 0
- `beta` : beta > 0
:Example:
>>> from pymc import beta_like
>>> beta_like(.4,1,2)
0.182321556793954
.. note::
- :math:`E(X)=\frac{\alpha}{\alpha+\beta}`
- :math:`Var(X)=\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
"""
# try:
# constrain(alpha, lower=0, allow_equal=True)
# constrain(beta, lower=0, allow_equal=True)
# constrain(x, 0, 1, allow_equal=True)
# except ZeroProbability:
# return -np.Inf
return flib.beta_like(x, alpha, beta)
beta_grad_like = {'value': flib.beta_grad_x,
'alpha': flib.beta_grad_a,
'beta': flib.beta_grad_b}
# Binomial----------------------------------------------
@randomwrap
def rbinomial(n, p, size=None):
"""
Random binomial variates.
"""
# return np.random.binomial(n,p,size)
return np.random.binomial(np.ravel(n), np.ravel(p), size)
def binomial_expval(n, p):
"""
Expected value of binomial distribution.
"""
return p * n
def binomial_like(x, n, p):
R"""
Binomial log-likelihood. The discrete probability distribution of the
number of successes in a sequence of n independent yes/no experiments,
each of which yields success with probability p.
.. math::
f(x \mid n, p) = \frac{n!}{x!(n-x)!} p^x (1-p)^{n-x}
:Parameters:
- `x` : [int] Number of successes, > 0.
- `n` : [int] Number of Bernoulli trials, > x.
- `p` : Probability of success in each trial, :math:`p \in [0,1]`.
.. note::
- :math:`E(X)=np`
- :math:`Var(X)=np(1-p)`
"""
# Temporary hack to avoid issue #614
return flib.binomial(x, n, p)
binomial_grad_like = {'p': flib.binomial_gp}
# Beta----------------------------------------------
@randomwrap
def rbetabin(alpha, beta, n, size=None):
"""
Random beta-binomial variates.
"""
phi = np.random.beta(alpha, beta, size)
return np.random.binomial(n, phi)
def betabin_expval(alpha, beta, n):
"""
Expected value of beta-binomial distribution.
"""
return n * alpha / (alpha + beta)
def betabin_like(x, alpha, beta, n):
R"""
Beta-binomial log-likelihood. Equivalent to binomial random
variables with probabilities drawn from a
:math:`\texttt{Beta}(\alpha,\beta)` distribution.
.. math::
f(x \mid \alpha, \beta, n) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)} \frac{\Gamma(n+1)}{\Gamma(x+1)\Gamma(n-x+1)} \frac{\Gamma(\alpha + x)\Gamma(n+\beta-x)}{\Gamma(\alpha+\beta+n)}
:Parameters:
- `x` : x=0,1,\ldots,n
- `alpha` : alpha > 0
- `beta` : beta > 0
- `n` : n=x,x+1,\ldots
:Example:
>>> betabin_like(3,1,1,10)
-2.3978952727989
.. note::
- :math:`E(X)=n\frac{\alpha}{\alpha+\beta}`
- :math:`Var(X)=n\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
"""
return flib.betabin_like(x, alpha, beta, n)
betabin_grad_like = {'alpha': flib.betabin_ga,
'beta': flib.betabin_gb}
# Categorical----------------------------------------------
# Note that because categorical elements are not ordinal, there
# is no expected value.
#@randomwrap
def rcategorical(p, size=None):
"""
Categorical random variates.
"""
out = flib.rcat(p, np.random.random(size=size))
if sum(out.shape) == 1:
return out.squeeze()
else:
return out
def categorical_like(x, p):
R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1`
"""
p = np.atleast_2d(p)
if np.any(abs(np.sum(p, 1) - 1) > 0.0001):
print_("Probabilities in categorical_like sum to", np.sum(p, 1))
return flib.categorical(np.array(x).astype(int), p)
# Cauchy----------------------------------------------
@randomwrap
def rcauchy(alpha, beta, size=None):
"""
Returns Cauchy random variates.
"""
return alpha + beta * np.tan(pi * random_number(size) - pi / 2.0)
def cauchy_expval(alpha, beta):
"""
Expected value of cauchy distribution.
"""
return alpha
# In wikipedia, the arguments name are k, x0.
def cauchy_like(x, alpha, beta):
R"""
Cauchy log-likelihood. The Cauchy distribution is also known as the
Lorentz or the Breit-Wigner distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{1}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
:Parameters:
- `alpha` : Location parameter.
- `beta` : Scale parameter > 0.
.. note::
- Mode and median are at alpha.
"""
return flib.cauchy(x, alpha, beta)
cauchy_grad_like = {'value': flib.cauchy_grad_x,
'alpha': flib.cauchy_grad_a,
'beta': flib.cauchy_grad_b}
# Chi square----------------------------------------------
@randomwrap
def rchi2(nu, size=None):
"""
Random :math:`\chi^2` variates.
"""
return np.random.chisquare(nu, size)
def chi2_expval(nu):
"""
Expected value of Chi-squared distribution.
"""
return nu
def chi2_like(x, nu):
R"""
Chi-squared :math:`\chi^2` log-likelihood.
.. math::
f(x \mid \nu) = \frac{x^{(\nu-2)/2}e^{-x/2}}{2^{\nu/2}\Gamma(\nu/2)}
:Parameters:
- `x` : > 0
- `nu` : [int] Degrees of freedom ( nu > 0 )
.. note::
- :math:`E(X)=\nu`
- :math:`Var(X)=2\nu`
"""
return flib.gamma(x, 0.5 * nu, 1. / 2)
chi2_grad_like = {'value': lambda x, nu: flib.gamma_grad_x(x, 0.5 * nu, 1. / 2),
'nu': lambda x, nu: flib.gamma_grad_alpha(x, 0.5 * nu, 1. / 2) * .5}
# chi2_grad_like = {'x' : lambda x, nu : (nu / 2 - 1) / x -.5,
# 'nu' : flib.chi2_grad_nu }
# Degenerate---------------------------------------------
@randomwrap
def rdegenerate(k, size=1):
"""
Random degenerate variates.
"""
return np.ones(size) * k
def degenerate_expval(k):
"""
Expected value of degenerate distribution.
"""
return k
def degenerate_like(x, k):
R"""
Degenerate log-likelihood.
.. math::
f(x \mid k) = \left\{ \begin{matrix} 1 \text{ if } x = k \\ 0 \text{ if } x \ne k\end{matrix} \right.
:Parameters:
- `x` : Input value.
- `k` : Degenerate value.
"""
x = np.atleast_1d(x)
return sum(np.log([i == k for i in x]))
# def degenerate_grad_like(x, k):
# R"""
# degenerate_grad_like(x, k)
#
# Degenerate gradient log-likelihood.
#
# .. math::
# f(x \mid k) = \left\{ \begin{matrix} 1 \text{ if } x = k \\ 0 \text{ if } x \ne k\end{matrix} \right.
#
# :Parameters:
# - `x` : Input value.
# - `k` : Degenerate value.
# """
# return np.zeros(np.size(x))*k
# Dirichlet----------------------------------------------
@randomwrap
def rdirichlet(theta, size=1):
"""
Dirichlet random variates.
"""
gammas = np.vstack([rgamma(theta, 1) for i in xrange(size)])
if size > 1 and np.size(theta) > 1:
return (gammas.T / gammas.sum(1))[:-1].T
elif np.size(theta) > 1:
return (gammas[0] / gammas[0].sum())[:-1]
else:
return 1.
def dirichlet_expval(theta):
"""
Expected value of Dirichlet distribution.
"""
return theta / np.sum(theta).astype(float)
def dirichlet_like(x, theta):
R"""
Dirichlet log-likelihood.
This is a multivariate continuous distribution.
.. math::
f(\mathbf{x}) = \frac{\Gamma(\sum_{i=1}^k \theta_i)}{\prod \Gamma(\theta_i)}\prod_{i=1}^{k-1} x_i^{\theta_i - 1}
\cdot\left(1-\sum_{i=1}^{k-1}x_i\right)^\theta_k
:Parameters:
x : (n, k-1) array
Array of shape (n, k-1) where `n` is the number of samples
and `k` the dimension.
:math:`0 < x_i < 1`, :math:`\sum_{i=1}^{k-1} x_i < 1`
theta : array
An (n,k) or (1,k) array > 0.
.. note::
Only the first `k-1` elements of `x` are expected. Can be used
as a parent of Multinomial and Categorical nevertheless.
"""
x = np.atleast_2d(x)
theta = np.atleast_2d(theta)
if (np.shape(x)[-1] + 1) != np.shape(theta)[-1]:
raise ValueError('The dimension of x in dirichlet_like must be k-1.')
return flib.dirichlet(x, theta)
# Exponential----------------------------------------------
@randomwrap
def rexponential(beta, size=None):
"""
Exponential random variates.
"""
return np.random.exponential(1. / beta, size)
def exponential_expval(beta):
"""
Expected value of exponential distribution.
"""
return 1. / beta
def exponential_like(x, beta):
R"""
Exponential log-likelihood.
The exponential distribution is a special case of the gamma distribution
with alpha=1. It often describes the time until an event.
.. math:: f(x \mid \beta) = \beta e^{-\beta x}
:Parameters:
- `x` : x > 0
- `beta` : Rate parameter (beta > 0).
.. note::
- :math:`E(X) = 1/\beta`
- :math:`Var(X) = 1/\beta^2`
- PyMC's beta is named 'lambda' by Wikipedia, SciPy, Wolfram MathWorld and other sources.
"""
return flib.gamma(x, 1, beta)
exponential_grad_like = {'value': lambda x, beta: flib.gamma_grad_x(x, 1.0, beta),
'beta': lambda x, beta: flib.gamma_grad_beta(x, 1.0, beta)}
# Exponentiated Weibull-----------------------------------
@randomwrap
def rexponweib(alpha, k, loc=0, scale=1, size=None):
"""
Random exponentiated Weibull variates.
"""
q = np.random.uniform(size=size)
r = flib.exponweib_ppf(q, alpha, k)
return loc + r * scale
def exponweib_expval(alpha, k, loc, scale):
# Not sure how we can do this, since the first moment is only
# tractable at particular values of k
raise NotImplementedError('exponweib_expval has not been implemented yet.')
def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale)
"""
commented out because tests fail
exponweib_grad_like = {'value' : flib.exponweib_gx,
'alpha' : flib.exponweib_ga,
'k' : flib.exponweib_gk,
'loc' : flib.exponweib_gl,
'scale' : flib.exponweib_gs}
"""
# Gamma----------------------------------------------
@randomwrap
def rgamma(alpha, beta, size=None):
"""
Random gamma variates.
"""
return np.random.gamma(shape=alpha, scale=1. / beta, size=size)
def gamma_expval(alpha, beta):
"""
Expected value of gamma distribution.
"""
return 1. * np.asarray(alpha) / beta
def gamma_like(x, alpha, beta):
R"""
Gamma log-likelihood.
Represents the sum of alpha exponentially distributed random variables, each
of which has rate parameter beta.
.. math::
f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}
:Parameters:
- `x` : math:`x \ge 0`
- `alpha` : Shape parameter (alpha > 0).
- `beta` : Rate parameter (beta > 0).
.. note::
- :math:`E(X) = \frac{\alpha}{\beta}`
- :math:`Var(X) = \frac{\alpha}{\beta^2}`
"""
return flib.gamma(x, alpha, beta)
gamma_grad_like = {'value': flib.gamma_grad_x,
'alpha': flib.gamma_grad_alpha,
'beta': flib.gamma_grad_beta}
# GEV Generalized Extreme Value ------------------------
# Modify parameterization -> Hosking (kappa, xi, alpha)
@randomwrap
def rgev(xi, mu=0, sigma=1, size=None):
"""
Random generalized extreme value (GEV) variates.
"""
q = np.random.uniform(size=size)
z = flib.gev_ppf(q, xi)
return z * sigma + mu
def gev_expval(xi, mu=0, sigma=1):
"""
Expected value of generalized extreme value distribution.
"""
return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi)
def gev_like(x, xi, mu=0, sigma=1):
R"""
Generalized Extreme Value log-likelihood
.. math::
pdf(x \mid \xi,\mu,\sigma) = \frac{1}{\sigma}(1 + \xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi-1}\exp{-(1+\xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi}}
.. math::
\sigma & > 0,\\
x & > \mu-\sigma/\xi \text{ if } \xi > 0,\\
x & < \mu-\sigma/\xi \text{ if } \xi < 0\\
x & \in [-\infty,\infty] \text{ if } \xi = 0
"""
return flib.gev(x, xi, mu, sigma)
# Geometric----------------------------------------------
# Changed the return value
@randomwrap
def rgeometric(p, size=None):
"""
Random geometric variates.
"""
return np.random.geometric(p, size)
def geometric_expval(p):
"""
Expected value of geometric distribution.
"""
return 1. / p
def geometric_like(x, p):
R"""
Geometric log-likelihood. The probability that the first success in a
sequence of Bernoulli trials occurs on the x'th trial.
.. math::
f(x \mid p) = p(1-p)^{x-1}
:Parameters:
- `x` : [int] Number of trials before first success (x > 0).
- `p` : Probability of success on an individual trial, :math:`p \in [0,1]`
.. note::
- :math:`E(X)=1/p`
- :math:`Var(X)=\frac{1-p}{p^2}`
"""
return flib.geometric(x, p)
geometric_grad_like = {'p': flib.geometric_gp}
# Half Cauchy----------------------------------------------
@randomwrap
def rhalf_cauchy(alpha, beta, size=None):
"""
Returns half-Cauchy random variates.
"""
return abs(alpha + beta * np.tan(pi * random_number(size) - pi / 2.0))
def half_cauchy_expval(alpha, beta):
"""
Expected value of cauchy distribution is undefined.
"""
return inf
# In wikipedia, the arguments name are k, x0.
def half_cauchy_like(x, alpha, beta):
R"""
Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.
.. math::
f(x \mid \alpha, \beta) = \frac{2}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
:Parameters:
- `alpha` : Location parameter.
- `beta` : Scale parameter (beta > 0).
.. note::
- x must be non-negative.
"""
x = np.atleast_1d(x)
if sum(x.ravel() < 0):
return -inf
return flib.cauchy(x, alpha, beta) + len(x) * np.log(2)
# Half-normal----------------------------------------------
@randomwrap
def rhalf_normal(tau, size=None):
"""
Random half-normal variates.
"""
return abs(np.random.normal(0, np.sqrt(1 / tau), size))
def half_normal_expval(tau):
"""
Expected value of half normal distribution.
"""
return np.sqrt(2. * pi / np.asarray(tau))
def half_normal_like(x, tau):
R"""
Half-normal log-likelihood, a normal distribution with mean 0 limited
to the domain :math:`x \in [0, \infty)`.
.. math::
f(x \mid \tau) = \sqrt{\frac{2\tau}{\pi}}\exp\left\{ {\frac{-x^2 \tau}{2}}\right\}
:Parameters:
- `x` : :math:`x \ge 0`
- `tau` : tau > 0
"""
return flib.hnormal(x, tau)
half_normal_grad_like = {'value': flib.hnormal_gradx,
'tau': flib.hnormal_gradtau}
# Hypergeometric----------------------------------------------
def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size)
def hypergeometric_expval(n, m, N):
"""
Expected value of hypergeometric distribution.
"""
return 1. * n * m / N
def hypergeometric_like(x, n, m, N):
R"""
Hypergeometric log-likelihood.
Discrete probability distribution that describes the number of successes in
a sequence of draws from a finite population without replacement.
.. math::
f(x \mid n, m, N) = \frac{\left({ \begin{array}{c} {m} \\ {x} \\
\end{array} }\right)\left({ \begin{array}{c} {N-m} \\ {n-x} \\
\end{array}}\right)}{\left({ \begin{array}{c} {N} \\ {n} \\
\end{array}}\right)}
:Parameters:
- `x` : [int] Number of successes in a sample drawn from a population.
- `n` : [int] Size of sample drawn from the population.
- `m` : [int] Number of successes in the population.
- `N` : [int] Total number of units in the population.
.. note::
        :math:`E(X) = \frac{n m}{N}`
"""
return flib.hyperg(x, n, m, N)
# Inverse gamma----------------------------------------------
@randomwrap
def rinverse_gamma(alpha, beta, size=None):
"""
Random inverse gamma variates.
"""
return 1. / np.random.gamma(shape=alpha, scale=1. / beta, size=size)
def inverse_gamma_expval(alpha, beta):
"""
Expected value of inverse gamma distribution.
"""
return 1. * np.asarray(beta) / (alpha - 1.)
def inverse_gamma_like(x, alpha, beta):
R"""
Inverse gamma log-likelihood, the reciprocal of the gamma distribution.
.. math::
f(x \mid \alpha, \beta) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{-\alpha - 1} \exp\left(\frac{-\beta}{x}\right)
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter (alpha > 0).
- `beta` : Scale parameter (beta > 0).
.. note::
:math:`E(X)=\frac{\beta}{\alpha-1}` for :math:`\alpha > 1`
:math:`Var(X)=\frac{\beta^2}{(\alpha-1)^2(\alpha-2)}` for :math:`\alpha > 2`
"""
return flib.igamma(x, alpha, beta)
inverse_gamma_grad_like = {'value': flib.igamma_grad_x,
'alpha': flib.igamma_grad_alpha,
'beta': flib.igamma_grad_beta}
# Inverse Wishart---------------------------------------------------
# def rinverse_wishart(n, C):
# """
# Return an inverse Wishart random matrix.
#
# :Parameters:
# - `n` : [int] Degrees of freedom (n > 0).
# - `C` : Symmetric and positive definite scale matrix
# """
# wi = rwishart(n, np.asmatrix(C).I).I
# flib.symmetrize(wi)
# return wi
#
# def inverse_wishart_expval(n, C):
# """
# Expected value of inverse Wishart distribution.
#
# :Parameters:
# - `n` : [int] Degrees of freedom (n > 0).
# - `C` : Symmetric and positive definite scale matrix
#
# """
# return np.asarray(C)/(n-len(C)-1)
#
# def inverse_wishart_like(X, n, C):
# R"""
# Inverse Wishart log-likelihood. The inverse Wishart distribution
# is the conjugate prior for the covariance matrix of a multivariate
# normal distribution.
#
# .. math::
# f(X \mid n, T) = \frac{{\mid T \mid}^{n/2}{\mid X
# \mid}^{-(n+k+1)/2} \exp\left\{ -\frac{1}{2} Tr(TX^{-1})
# \right\}}{2^{nk/2} \Gamma_p(n/2)}
#
# where :math:`k` is the rank of X.
#
# :Parameters:
# - `X` : Symmetric, positive definite matrix.
# - `n` : [int] Degrees of freedom (n > 0).
# - `C` : Symmetric and positive definite scale matrix
#
# .. note::
# Step method MatrixMetropolis will preserve the symmetry of
# Wishart variables.
#
# """
# return flib.blas_wishart(X.I, n, C.I)
# Double exponential (Laplace)--------------------------------------------
@randomwrap
def rlaplace(mu, tau, size=None):
"""
Laplace (double exponential) random variates.
"""
u = np.random.uniform(-0.5, 0.5, size)
return mu - np.sign(u) * np.log(1 - 2 * np.abs(u)) / tau
rdexponential = rlaplace
def laplace_expval(mu, tau):
"""
Expected value of Laplace (double exponential) distribution.
"""
return mu
dexponential_expval = laplace_expval
def laplace_like(x, mu, tau):
R"""
Laplace (double exponential) log-likelihood.
The Laplace (or double exponential) distribution describes the
difference between two independent, identically distributed exponential
events. It is often used as a heavier-tailed alternative to the normal.
.. math::
f(x \mid \mu, \tau) = \frac{\tau}{2}e^{-\tau |x-\mu|}
:Parameters:
- `x` : :math:`-\infty < x < \infty`
- `mu` : Location parameter :math:`-\infty < mu < \infty`
- `tau` : Scale parameter :math:`\tau > 0`
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = \frac{2}{\tau^2}`
"""
return flib.gamma(np.abs(np.array(x) - mu), 1, tau) - \
np.size(x) * np.log(2)
laplace_grad_like = {'value': lambda x, mu, tau: flib.gamma_grad_x(np.abs(x - mu), 1, tau) * np.sign(x - mu),
'mu': lambda x, mu, tau: -flib.gamma_grad_x(np.abs(x - mu), 1, tau) * np.sign(x - mu),
'tau': lambda x, mu, tau: flib.gamma_grad_beta(np.abs(x - mu), 1, tau)}
dexponential_like = laplace_like
dexponential_grad_like = laplace_grad_like
# Logistic-----------------------------------
@randomwrap
def rlogistic(mu, tau, size=None):
"""
Logistic random variates.
"""
u = np.random.random(size)
return mu + np.log(u / (1 - u)) / tau
def logistic_expval(mu, tau):
"""
Expected value of logistic distribution.
"""
return mu
def logistic_like(x, mu, tau):
R"""
Logistic log-likelihood.
The logistic distribution is often used as a growth model; for example,
populations, markets. Resembles a heavy-tailed normal distribution.
.. math::
        f(x \mid \mu, \tau) = \frac{\tau \exp(-\tau[x-\mu])}{[1 + \exp(-\tau[x-\mu])]^2}
:Parameters:
- `x` : :math:`-\infty < x < \infty`
- `mu` : Location parameter :math:`-\infty < mu < \infty`
- `tau` : Scale parameter (tau > 0)
.. note::
- :math:`E(X) = \mu`
- :math:`Var(X) = \frac{\pi^2}{3\tau^2}`
"""
return flib.logistic(x, mu, tau)
# Lognormal----------------------------------------------
@randomwrap
def rlognormal(mu, tau, size=None):
"""
Return random lognormal variates.
"""
return np.random.lognormal(mu, np.sqrt(1. / tau), size)
def lognormal_expval(mu, tau):
"""
Expected value of log-normal distribution.
"""
return np.exp(mu + 1. / 2 / tau)
def lognormal_like(x, mu, tau):
R"""
Log-normal log-likelihood.
Distribution of any random variable whose logarithm is normally
distributed. A variable might be modeled as log-normal if it can be thought
of as the multiplicative product of many small independent factors.
.. math::
f(x \mid \mu, \tau) = \sqrt{\frac{\tau}{2\pi}}\frac{
\exp\left\{ -\frac{\tau}{2} (\ln(x)-\mu)^2 \right\}}{x}
:Parameters:
- `x` : x > 0
- `mu` : Location parameter.
- `tau` : Scale parameter (tau > 0).
.. note::
:math:`E(X)=e^{\mu+\frac{1}{2\tau}}`
:math:`Var(X)=(e^{1/\tau}-1)e^{2\mu+\frac{1}{\tau}}`
"""
return flib.lognormal(x, mu, tau)
lognormal_grad_like = {'value': flib.lognormal_gradx,
'mu': flib.lognormal_gradmu,
'tau': flib.lognormal_gradtau}
# Multinomial----------------------------------------------
#@randomwrap
def rmultinomial(n, p, size=None):
"""
Random multinomial variates.
"""
# Leaving size=None as the default means return value is 1d array
# if not specified-- nicer.
# Single value for p:
if len(np.shape(p)) == 1:
return np.random.multinomial(n, p, size)
# Multiple values for p:
if np.isscalar(n):
n = n * np.ones(np.shape(p)[0], dtype=np.int)
out = np.empty(np.shape(p))
for i in xrange(np.shape(p)[0]):
out[i, :] = np.random.multinomial(n[i], p[i,:], size)
return out
def multinomial_expval(n, p):
"""
Expected value of multinomial distribution.
"""
return np.asarray([pr * n for pr in p])
def multinomial_like(x, n, p):
R"""
Multinomial log-likelihood.
Generalization of the binomial
distribution, but instead of each trial resulting in "success" or
"failure", each one results in exactly one of some fixed finite number k
of possible outcomes over n independent trials. 'x[i]' indicates the number
of times outcome number i was observed over the n trials.
.. math::
f(x \mid n, p) = \frac{n!}{\prod_{i=1}^k x_i!} \prod_{i=1}^k p_i^{x_i}
:Parameters:
x : (ns, k) int
Random variable indicating the number of time outcome i is
observed. :math:`\sum_{i=1}^k x_i=n`, :math:`x_i \ge 0`.
n : int
Number of trials.
p : (k,)
Probability of each one of the different outcomes.
        :math:`\sum_{i=1}^k p_i = 1`, :math:`p_i \ge 0`.
.. note::
- :math:`E(X_i)=n p_i`
- :math:`Var(X_i)=n p_i(1-p_i)`
- :math:`Cov(X_i,X_j) = -n p_i p_j`
- If :math:`\sum_i p_i < 0.999999` a log-likelihood value of -inf
will be returned.
"""
# flib expects 2d arguments. Do we still want to support multiple p
# values along realizations ?
x = np.atleast_2d(x)
p = np.atleast_2d(p)
return flib.multinomial(x, n, p)
# Multivariate hypergeometric------------------------------
def rmultivariate_hypergeometric(n, m, size=None):
"""
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
    - `m` : Number of items in each category.
"""
N = len(m)
urn = np.repeat(np.arange(N), m)
if size:
draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]]
for j in range(size)])
r = [[np.sum(draw[j] == i) for i in range(len(m))]
for j in range(size)]
else:
draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]])
r = [np.sum(draw == i) for i in range(len(m))]
return np.asarray(r)
def multivariate_hypergeometric_expval(n, m):
"""
Expected value of multivariate hypergeometric distribution.
Parameters:
- `n` : Number of draws.
    - `m` : Number of items in each category.
"""
m = np.asarray(m, float)
return n * (m / m.sum())
def multivariate_hypergeometric_like(x, m):
R"""
Multivariate hypergeometric log-likelihood
Describes the probability of drawing x[i] elements of the ith category,
when the number of items in each category is given by m.
.. math::
\frac{\prod_i \left({ \begin{array}{c} {m_i} \\ {x_i} \\
\end{array}}\right)}{\left({ \begin{array}{c} {N} \\ {n} \\
\end{array}}\right)}
where :math:`N = \sum_i m_i` and :math:`n = \sum_i x_i`.
:Parameters:
- `x` : [int sequence] Number of draws from each category, (x < m).
    - `m` : [int sequence] Number of items in each category.
"""
return flib.mvhyperg(x, m)
# Multivariate normal--------------------------------------
def rmv_normal(mu, tau, size=1):
"""
Random multivariate normal variates.
"""
sig = np.linalg.cholesky(tau)
mu_size = np.shape(mu)
if size == 1:
out = np.random.normal(size=mu_size)
try:
flib.dtrsm_wrap(sig, out, 'L', 'T', 'L', 1.)
except:
out = np.linalg.solve(sig, out)
out += mu
return out
else:
if not hasattr(size, '__iter__'):
size = (size,)
tot_size = np.prod(size)
out = np.random.normal(size=(tot_size,) + mu_size)
for i in xrange(tot_size):
try:
flib.dtrsm_wrap(sig, out[i, :], 'L', 'T', 'L', 1.)
except:
out[i, :] = np.linalg.solve(sig, out[i,:])
out[i, :] += mu
return out.reshape(size + mu_size)
def mv_normal_expval(mu, tau):
"""
Expected value of multivariate normal distribution.
"""
return mu
def mv_normal_like(x, mu, tau):
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])
else:
return flib.prec_mvnorm(x, mu, tau)
# Multivariate normal, parametrized with covariance---------------------------
def rmv_normal_cov(mu, C, size=1):
"""
Random multivariate normal variates.
"""
mu_size = np.shape(mu)
if size == 1:
return np.random.multivariate_normal(mu, C, size).reshape(mu_size)
else:
return np.random.multivariate_normal(
mu, C, size).reshape((size,) + mu_size)
def mv_normal_cov_expval(mu, C):
"""
Expected value of multivariate normal distribution.
"""
return mu
def mv_normal_cov_like(x, mu, C):
R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.cov_mvnorm(r, mu, C) for r in x])
else:
return flib.cov_mvnorm(x, mu, C)
# Multivariate normal, parametrized with Cholesky factorization.----------
def rmv_normal_chol(mu, sig, size=1):
"""
Random multivariate normal variates.
"""
mu_size = np.shape(mu)
if size == 1:
out = np.random.normal(size=mu_size)
try:
flib.dtrmm_wrap(sig, out, 'L', 'N', 'L', 1.)
except:
out = np.dot(sig, out)
out += mu
return out
else:
if not hasattr(size, '__iter__'):
size = (size,)
tot_size = np.prod(size)
out = np.random.normal(size=(tot_size,) + mu_size)
for i in xrange(tot_size):
try:
flib.dtrmm_wrap(sig, out[i, :], 'L', 'N', 'L', 1.)
except:
out[i, :] = np.dot(sig, out[i,:])
out[i, :] += mu
return out.reshape(size + mu_size)
def mv_normal_chol_expval(mu, sig):
"""
Expected value of multivariate normal distribution.
"""
return mu
def mv_normal_chol_like(x, mu, sig):
R"""
Multivariate normal log-likelihood.
.. math::
f(x \mid \pi, \sigma) = \frac{1}{(2\pi)^{1/2}|\sigma|)} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}(\sigma \sigma^{\prime})^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `sigma` : (k,k) Lower triangular matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
    if len(np.shape(x)) > 1:
import numpy as np
import pandas as pd
import math
import datetime
import matplotlib.pyplot as plt
import os
import shutil
num_templates = 4 # number of genuine template samples per user
num_remaining = 25 - num_templates
num_training_samples = 80 #70 users in training
batch_size = 2*num_remaining + num_templates # remaining genuine samples, an equal number of forgeries, plus the templates of one user
look_back = 1200 #length of the sequence
win_length = 3 # length of the capturing window in the feature extractor
feat_ext = '_clt_indp_400_len'
def linear_interp(seq,look_back):
[len_x,len_y] = seq.shape
num_times = int(look_back/len_x)
new_len_x = num_times * len_x
if(look_back <= len_x):
print(".......No need of Interpolation......")
return seq
#print("........Duplicating.......")
col_diffs = np.zeros((len_x,len_y)) # Assuming the 0th element is 0 Hence the diff between the 1st sample point from 0th point is 0
col_diffs[0,:] = seq[0,:]/num_times
for lv11 in range(1,len_x):
col_diffs[lv11,:] = (seq[lv11,:] - seq[lv11-1,:])/num_times #caclulating the diffs
new_seq = np.zeros((new_len_x,len_y))
for lv12 in range(num_times):
new_seq[lv12,:] = (lv12+1)*col_diffs[0,:] # interpolating the 1st element from 0 to the 1st element. Assuming that the 0th element is 0
for lv10 in range(1,len_x):
for lv11 in range(num_times):
new_seq[lv10*num_times + lv11,:] = seq[lv10,:] + (lv11+1)*col_diffs[lv10,:]
return new_seq
def averaging_filter(seq,winlen):
print("***********Averaging***********")
#print(seq.shape)
[num_feat,len_seq] = seq.shape
new_len = len_seq/winlen
new_len = (int)(new_len)
new_seq = np.zeros((num_feat,new_len))
for lv10 in range(new_len):
subseq = seq[:,lv10*winlen:lv10*winlen+winlen]
new_seq[:,lv10] = np.transpose(subseq.sum(axis=1))
new_seq[:,lv10] = new_seq[:,lv10]/((float)(winlen))
#print(new_seq.shape)
return new_seq
def preprocess(seq):
[len_seq_x,len_seq_y] = seq.shape
for lv9 in range(0,len_seq_y):
for lv8 in range(0,len_seq_x-1):
seq[lv8,lv9] = seq[lv8+1,lv9] - seq[lv8,lv9]
seq = seq[:len_seq_x-1,:]
#print(seq.shape)
[len_seq_x,len_seq_y] = seq.shape
seq_normalized = np.zeros((len_seq_x,len_seq_y))
for lv6 in range(0,len_seq_y):
mean_col_lv6 = np.mean(seq[:,lv6])
std_col_lv6 = np.std(seq[:,lv6])
for lv7 in range(0,len_seq_x):
seq_normalized[lv7,lv6] = (seq[lv7,lv6]-mean_col_lv6)/std_col_lv6
return seq_normalized
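
# Hedged end-to-end sketch (not part of the original script): `raw_seq` stands in for one
# captured signature, time-major with e.g. x, y and pressure columns. It chains the helpers
# above: difference/normalise, stretch towards look_back samples, then smooth with the
# capture window used by the feature extractor.
def prepare_signature_demo(raw_seq):
    seq = preprocess(raw_seq) # first differences + per-column z-score
    seq = linear_interp(seq, look_back) # duplicate/interpolate towards look_back rows
    seq = averaging_filter(seq.T, win_length).T # averaging_filter expects features as rows
    return seq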
def twed_dist(seq_1,seq_2):
[len_seq_1,feat_size] = seq_1.shape
[len_seq_2,feat_size] = seq_2.shape
twed_param_lambda = 0.1
twed_mat = np.zeros((len_seq_1+1,len_seq_2+1))
for lv4 in range(1,len_seq_1+1):
twed_mat[lv4][0] = 9999999
for lv4 in range(1,len_seq_2+1):
twed_mat[0][lv4] = 9999999
for lv4 in range(1,len_seq_1+1):
for lv5 in range(1,len_seq_2+1):
seq1_curr = seq_1[lv4-1,:]
seq2_curr = seq_2[lv5-1,:]
seq1_prev = seq_1[lv4-2,:]
seq2_prev = seq_2[lv5-2,:]
twed_val1 = twed_mat[lv4-1,lv5] + twed_param_lambda*np.linalg.norm(seq1_curr-seq1_prev)
twed_val2 = twed_mat[lv4-1,lv5-1] + twed_param_lambda*np.linalg.norm(seq1_curr-seq2_curr) + twed_param_lambda*np.linalg.norm(seq1_prev-seq2_prev)
twed_val3 = twed_mat[lv4,lv5-1] + twed_param_lambda*np.linalg.norm(seq2_prev-seq2_curr)
twed_mat[lv4,lv5] = min(twed_val1,twed_val2)
twed_mat[lv4,lv5] = min(twed_mat[lv4,lv5],twed_val3)
score = twed_mat[len_seq_1][len_seq_2] #not normalized TWED score
len_warping_path = 1
    wp_mat = np.zeros((len_seq_1+1,len_seq_2+1))
# %% codecell
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
ruta = r".\data\sim_from_2_to_10_sepqubits-entbases_from_3_to_5_bases.csv"
data = np.loadtxt(ruta, delimiter=",")
data = data.reshape(9, 3, 100)
media = np.mean(data, axis=2)
mediana = np.quantile(data, 0.5, axis=2)
cuantil_25 = np.quantile(data, 0.25, axis=2)
cuantil_75 = np.quantile(data, 0.75, axis=2)
qubits = np.array(["2", "3", "4", "5", "6", "7", "8", "9", "10"])
color = [np.array([0.500, 0.497, 0.791]),
         np.array([0.979, 0.744, 0.175])]
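
# %% codecell
# Hedged plotting sketch (not part of the original notebook): median of the simulated metric
# per qubit count with its interquartile band, one curve per assumed number of bases (3 to 5,
# taken from the file name above).
xs = np.arange(len(qubits))
for i, n_bases in enumerate(("3", "4", "5")):
    c = color[i % len(color)]
    plt.plot(xs, mediana[:, i], color=c, label=n_bases + " bases")
    plt.fill_between(xs, cuantil_25[:, i], cuantil_75[:, i], color=c, alpha=0.3)
plt.xticks(xs, qubits)
plt.xlabel("qubits")
plt.legend()
plt.show()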
# -*- coding: UTF-8 -*-
'''
Created on 4 nov. 2014
@author: <NAME>
Written By:
<NAME>
@Email: < robert [--DOT--] pastor0691 (--AT--) orange [--DOT--] fr >
@http://trajectoire-predict.monsite-orange.fr/
@copyright: Copyright 2015 <NAME>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
density at mean sea level = 1.225 kg / cubic meters
'''
import unittest
import numpy
import xlsxwriter
import os
import math
MeterPerSecond2Knots = 1.94384449
Knots2MeterPerSecond = 0.514444444
class Atmosphere():
'''
The standard sea level conditions are as follows:
    Temperature (T0) = 288.15 K = 15 °C
Pressure (p0) = 101325 N/m2 = 760 mm of Hg
'''
SeaLevelTemperatureDegrees = 15.0
SeaLevelPressureNewtonsSquareMeters = 101325.0
''' MSL Mean Sea Level '''
StandardAtmosphericTemperatureMslKelvins = 288.15 # kelvins
StandardAtmosphericPressureMslPascal = 101325 # pascals
StandardAtmosphericDensityMslKgCubicMeters = 1.225 # [kg/m3]
SpeedOfSoundMslMetersSeconds = 340.294 # at mean sea level [m/s]
'''ISA temperature gradient with altitude below the tropopause :
betaT = - 0.0065 [°K/m]
'''
betaT = - 0.0065 # [°K/m]
'''
Tropopause
Tropopause is the separation between two different layers: the troposphere, which stands
below it, and the stratosphere, which is placed above. Its altitude HP,trop is constant when
expressed in terms of geopotential pressure altitude:
H p,trop = 11000 [m]
'''
TropopauseGeoPotentialPressureAltitude = 11000.0 # meters
className = ''
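
    # A minimal sketch (assumed helper, not part of the original class) of the ISA relation
    # described in the docstrings above: below the tropopause the temperature decreases
    # linearly with the gradient betaT; above it this sketch simply holds it constant.
    def sketchTemperatureKelvins(self, altitudeMeters):
        cappedAltitude = min(altitudeMeters, self.TropopauseGeoPotentialPressureAltitude)
        return self.StandardAtmosphericTemperatureMslKelvins + self.betaT * cappedAltitude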
# altitude in Meters
AltitudeMeters = numpy.array( [-2000,
0, 2000, 4000, 6000, 8000, 10000,
12000, 14000, 16000, 18000, 20000,
22000, 24000, 26000, 28000, 30000,
32000, 34000, 36000, 38000, 40000,
42000, 44000, 46000, 48000, 50000,
52000, 54000, 56000, 58000, 60000,
62000, 64000, 66000, 68000, 70000,
72000, 74000, 76000, 78000, 80000,
82000, 84000, 86000 ] )
'''
alt-km sigma delta theta temp-Kelvin
pressure-N-sq-m dens-kg-cu-m a-sound-m-s viscosity-kg-m-s k-visc-sq-m-s
    In this table, altitude runs from -2 to 86 km in 2 km intervals.
alt is altitude in meters.
sigma is density divided by sea-level density.
delta is pressure divided by sea-level pressure.
theta is temperature divided by sea-level temperature.
temp is temperature in kelvins.
press is pressure in newtons per square meter.
dens is density in kilograms per cubic meter.
a is the speed of sound in meters per second.
visc is viscosity in 10**(-6) kilograms per meter-second.
k.visc is kinematic viscosity in square meters per second.
'''
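    # Worked reading of the table above (clarifying note, not original code):
    # the sea-level row has sigma = 1.0 and delta = 1.0, so its density is
    # sigma * 1.225 = 1.225 kg/m3 and its pressure is delta * 101325 = 101325 Pa,
    # matching the MSL constants defined earlier in this class.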
AtmosphereTemperatureKelvins = None
AirDensityKilogramsCubicMeters = None
SpeedOfSoundMetersPerSecond = None
TabularAtmosphere = numpy.array(
(
# sigma delta theta temp press density a visc k.visc
numpy.array([ '1.21E+00','1.26E+00','1.0451','301.2','1.28E+05','1.48E+00','347.9','18.51','1.25E-05' ]),
numpy.array([ '1.0' ,'1.0' ,'1.0' ,'288.1','1.01E+05','1.23E+00','340.3','17.89','1.46E-05' ] ),
numpy.array([ '8.22E-01','7.85E-01','0.9549','275.2','7.95E+04','1.01E+00','332.5','17.26','1.71E-05' ]),
numpy.array([ '6.69E-01','6.09E-01','0.9098','262.2','6.17E+04','8.19E-01','324.6','16.61','2.03E-05' ]),
numpy.array([ '5.39E-01','4.66E-01','0.8648','249.2','4.72E+04','6.60E-01','316.5','15.95','2.42E-05' ]),
numpy.array([ '4.29E-01','3.52E-01','0.8198','236.2','3.57E+04','5.26E-01','308.1','15.27','2.90E-05' ]),
numpy.array([ '3.38E-01','2.62E-01','0.7748','223.3','2.65E+04','4.14E-01','299.5','14.58','3.53E-05' ]),
numpy.array([ '2.55E-01','1.91E-01','0.7519','216.6','1.94E+04','3.12E-01','295.1','14.22','4.56E-05' ]),
numpy.array([ '1.86E-01','1.40E-01','0.7519','216.6','1.42E+04','2.28E-01','295.1','14.22','6.24E-05' ]),
numpy.array([ '1.36E-01','1.02E-01','0.7519','216.6','1.04E+04','1.67E-01','295.1','14.22','8.54E-05' ]),
numpy.array([ '9.93E-02','7.47E-02','0.7519','216.6','7.57E+03','1.22E-01','295.1','14.22','1.17E-04' ]),
numpy.array([ '7.26E-02','5.46E-02','0.7519','216.6','5.53E+03','8.89E-02','295.1','14.22','1.60E-04' ]),
numpy.array([ '5.27E-02','3.99E-02','0.7585','218.6','4.05E+03','6.45E-02','296.4','14.32','2.22E-04' ]),
numpy.array([ '3.83E-02','2.93E-02','0.7654','220.6','2.97E+03','4.69E-02','297.7','14.43','3.07E-04' ]),
numpy.array([ '2.80E-02','2.16E-02','0.7723','222.5','2.19E+03','3.43E-02','299.1','14.54','4.24E-04' ]),
numpy.array([ '2.05E-02','1.60E-02','0.7792','224.5','1.62E+03','2.51E-02','300.4','14.65','5.84E-04' ]),
numpy.array([ '1.50E-02','1.18E-02','0.7861','226.5','1.20E+03','1.84E-02','301.7','14.75','8.01E-04' ]),
numpy.array([ '1.11E-02','8.77E-03','0.793' ,'228.5','8.89E+02','1.36E-02','303.0','14.86','1.10E-03' ]),
numpy.array([ '8.07E-03','6.55E-03','0.8112','233.7','6.63E+02','9.89E-03','306.5','15.14','1.53E-03' ]),
numpy.array([ '5.92E-03','4.92E-03','0.8304','239.3','4.99E+02','7.26E-03','310.1','15.43','2.13E-03' ]),
numpy.array([ '4.38E-03','3.72E-03','0.8496','244.8','3.77E+02','5.37E-03','313.7','15.72','2.93E-03' ]),
numpy.array([ '3.26E-03','2.83E-03','0.8688','250.4','2.87E+02','4.00E-03','317.2','16.01','4.01E-03' ]),
numpy.array([ '2.44E-03','2.17E-03','0.888' ,'255.9','2.20E+02','3.00E-03','320.7','16.29','5.44E-03' ]),
numpy.array([ '1.84E-03','1.67E-03','0.9072','261.4','1.70E+02','2.26E-03','324.1','16.57','7.34E-03' ]),
numpy.array([ '1.40E-03','1.30E-03','0.9263','266.9','1.31E+02','1.71E-03','327.5','16.85','9.83E-03' ]),
numpy.array([ '1.07E-03','1.01E-03','0.9393','270.6','1.02E+02','1.32E-03','329.8','17.04','1.29E-02' ]),
numpy.array([ '8.38E-04','7.87E-04','0.9393','270.6','7.98E+01','1.03E-03','329.8','17.04','1.66E-02' ]),
numpy.array([ '6.58E-04','6.14E-04','0.9336','269.0','6.22E+01','8.06E-04','328.8','16.96','2.10E-02' ]),
numpy.array([ '5.22E-04','4.77E-04','0.9145','263.5','4.83E+01','6.39E-04','325.4','16.68','2.61E-02' ]),
numpy.array([ '4.12E-04','3.69E-04','0.8954','258.0','3.74E+01','5.04E-04','322.0','16.40','3.25E-02' ]),
numpy.array([ '3.23E-04','2.83E-04','0.8763','252.5','2.87E+01','3.96E-04','318.6','16.12','4.07E-02' ]),
numpy.array([ '2.53E-04','2.17E-04','0.8573','247.0','2.20E+01','3.10E-04','315.1','15.84','5.11E-02' ]),
numpy.array([ '1.96E-04','1.65E-04','0.8382','241.5','1.67E+01','2.41E-04','311.5','15.55','6.46E-02' ]),
numpy.array([ '1.52E-04','1.24E-04','0.8191','236.0','1.26E+01','1.86E-04','308.0','15.26','8.20E-02' ]),
numpy.array([ '1.17E-04','9.34E-05','0.8001','230.5','9.46E+00','1.43E-04','304.4','14.97','1.05E-01' ]),
numpy.array([ '8.91E-05','6.96E-05','0.7811','225.1','7.05E+00','1.09E-04','300.7','14.67','1.34E-01' ]),
numpy.array([ '6.76E-05','5.15E-05','0.7620','219.6','5.22E+00','8.28E-05','297.1','14.38','1.74E-01' ]),
numpy.array([ '5.09E-05','3.79E-05','0.7436','214.3','3.84E+00','6.24E-05','293.4','14.08','2.26E-01' ]),
numpy.array([ '3.79E-05','2.76E-05','0.7300','210.3','2.80E+00','4.64E-05','290.7','13.87','2.99E-01' ]),
numpy.array([ '2.80E-05','2.01E-05','0.7164','206.4','2.03E+00','3.43E-05','288.0','13.65','3.98E-01' ]),
numpy.array([ '2.06E-05','1.45E-05','0.7029','202.5','1.47E+00','2.52E-05','285.3','13.43','5.32E-01' ]),
numpy.array([ '1.51E-05','1.04E-05','0.6893','198.6','1.05E+00','1.85E-05','282.5','13.21','7.16E-01' ]),
numpy.array([ '1.10E-05','7.40E-06','0.6758','194.7','7.50E-01','1.34E-05','279.7','12.98','9.68E-01' ]),
numpy.array([ '7.91E-06','5.24E-06','0.6623','190.8','5.31E-01','9.69E-06','276.9','12.76','1.32E+00' ]),
numpy.array([ '5.68E-06','3.68E-06','0.6488','186.9','3.73E-01','6.96E-06','274.1','12.53','1.80E+00' ]) ) )
def __init__(self):
self.className = self.__class__.__name__
''' convert array of strings into floats '''
#print self.className, 'array shape= ', self.TabularAtmosphere.shape[0]
self.AtmosphereTemperatureKelvins = numpy.empty(self.TabularAtmosphere.shape[0])
self.AirDensityKilogramsCubicMeters = numpy.empty(self.TabularAtmosphere.shape[0])
self.SpeedOfSoundMetersPerSecond = numpy.empty(self.TabularAtmosphere.shape[0])
self.PressurePascals = numpy.empty(self.TabularAtmosphere.shape[0])
indexI = 0
for row in self.TabularAtmosphere:
index = 0
for item in row:
if index == 1:
self.PressurePascals[indexI] = item
elif index == 3:
self.AtmosphereTemperatureKelvins[indexI] = item
elif index == 5:
self.AirDensityKilogramsCubicMeters[indexI] = item
elif index == 6:
self.SpeedOfSoundMetersPerSecond[indexI] = item
index += 1
indexI += 1
#print self.className, "============="
#print self.AtmosphereTemperatureKelvins
'''
Does not check that the x-coordinate sequence xp is increasing.
If xp is not increasing, the results are nonsense. A simple check for increasing is:
'''
if numpy.all( | numpy.diff(self.AltitudeMeters) | numpy.diff |
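    # The arrays built above are meant to be interpolated over altitude. A
    # minimal sketch of a lookup, assuming a hypothetical method name that is
    # not part of the original class:
    #
    #     def getTemperatureKelvins(self, altitudeMeters):
    #         return numpy.interp(altitudeMeters,
    #                             self.AltitudeMeters,
    #                             self.AtmosphereTemperatureKelvins)
    #
    # e.g. an altitude of 0 m returns the tabulated 288.1 K.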
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
#
# SCRIPT : compute_averaged image.py
# POURPOSE : Compute image average
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
#
# V1.0 : XX/XX/XXXX [<NAME>]
#
#
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import os
import argparse
from glob import glob
from natsort import natsorted
import numpy as np
from PIL import Image
import multiprocessing
def average_worker(imlist, output_file_name):
"""Average a sequence of images using numpy and PIL."""
images = np.array([np.array(Image.open(fname)) for fname in imlist])
arr = np.array(np.mean(images, axis=(0)), dtype=np.uint8)
out = Image.fromarray(arr)
out.save(output_file_name)
def main():
"""Call the main program."""
# verify if the input path exists,
# if it does, then get the frame names
inp = args.input[0]
if os.path.isdir(inp):
frames = natsorted(glob(inp + "/*"))
else:
raise IOError("No such file or directory \"{}\"".format(inp))
# create the output path, if not present
outpath = os.path.abspath(args.output[0])
os.makedirs(outpath, exist_ok=True)
# get number of frames to use for averaging
    nframes = int(args.nframes[0])
# get number of cores to use
    nproc = int(args.nproc[0])
# split the list of input frames into N lists with nframes per list
    length = int(np.floor(len(frames) / nframes))
    frame_chunks = np.array_split(frames, length)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.calibration import calibration_curve
import pickle
def reliability(y, y_prob):
nbin = 10
# class0
y_true = y.copy()
y_true[y_true == 0] = 4
y_true[y_true != 4] = 0
y_true[y_true == 4] = 1
select = y_prob[:, 0]
print(select)
x0, y0 = calibration_curve(y_true, select, n_bins=nbin)
# class 1
y_true = y.copy()
y_true[y_true != 1] = 0
select = y_prob[:, 1]
x1, y1 = calibration_curve(y_true, select, n_bins=nbin)
    # class 2
    y_true = y.copy()
    y_true[y_true != 2] = 0
    y_true[y_true == 2] = 1
    select = y_prob[:, 2]
    x2, y2 = calibration_curve(y_true, select, n_bins=nbin)
    # class 3
    y_true = y.copy()
    y_true[y_true != 3] = 0
    y_true[y_true == 3] = 1
    select = y_prob[:, 3]
    x3, y3 = calibration_curve(y_true, select, n_bins=nbin)
x = | np.linspace(0, 1, 101) | numpy.linspace |
#!/usr/bin/env python3
"""
The second-level axes subclass used for all proplot figures.
Implements plotting method overrides.
"""
import inspect
import itertools
import re
import sys
from numbers import Integral
import matplotlib.axes as maxes
import matplotlib.cbook as cbook
import matplotlib.cm as mcm
import matplotlib.collections as mcollections
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
import numpy as np
import numpy.ma as ma
from .. import colors as pcolors
from .. import constructor, utils
from ..config import rc
from ..internals import ic # noqa: F401
from ..internals import (
_get_aliases,
_not_none,
_pop_kwargs,
_pop_params,
_pop_props,
context,
data,
docstring,
guides,
warnings,
)
from . import base
try:
from cartopy.crs import PlateCarree
except ModuleNotFoundError:
PlateCarree = object
__all__ = ['PlotAxes']
# Constants
# NOTE: Increased from native linewidth of 0.25 matplotlib uses for grid box edges.
# This is half of rc['patch.linewidth'] of 0.6. Half seems like a nice default.
EDGEWIDTH = 0.3
# Data argument docstrings
_args_1d_docstring = """
*args : {y} or {x}, {y}
The data passed as positional or keyword arguments. Interpreted as follows:
* If only `{y}` coordinates are passed, try to infer the `{x}` coordinates
from the `~pandas.Series` or `~pandas.DataFrame` indices or the
`~xarray.DataArray` coordinates. Otherwise, the `{x}` coordinates
are ``np.arange(0, {y}.shape[0])``.
* If the `{y}` coordinates are a 2D array, plot each column of data in succession
(except where each column of data represents a statistical distribution, as with
``boxplot``, ``violinplot``, or when using ``means=True`` or ``medians=True``).
* If any arguments are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`.
A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
_args_1d_multi_docstring = """
*args : {y}2 or {x}, {y}2, or {x}, {y}1, {y}2
The data passed as positional or keyword arguments. Interpreted as follows:
* If only `{y}` coordinates are passed, try to infer the `{x}` coordinates from
the `~pandas.Series` or `~pandas.DataFrame` indices or the `~xarray.DataArray`
coordinates. Otherwise, the `{x}` coordinates are ``np.arange(0, {y}2.shape[0])``.
* If only `{x}` and `{y}2` coordinates are passed, set the `{y}1` coordinates
to zero. This draws elements originating from the zero line.
* If both `{y}1` and `{y}2` are provided, draw elements between these points. If
either are 2D, draw elements by iterating over each column.
* If any arguments are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`.
A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
_args_2d_docstring = """
*args : {z} or x, y, {z}
The data passed as positional or keyword arguments. Interpreted as follows:
* If only {zvar} coordinates are passed, try to infer the `x` and `y` coordinates
from the `~pandas.DataFrame` indices and columns or the `~xarray.DataArray`
coordinates. Otherwise, the `y` coordinates are ``np.arange(0, y.shape[0])``
and the `x` coordinates are ``np.arange(0, y.shape[1])``.
* For ``pcolor`` and ``pcolormesh``, calculate coordinate *edges* using
`~proplot.utils.edges` or `~proplot.utils.edges2d` if *centers* were provided.
For all other methods, calculate coordinate *centers* if *edges* were provided.
* If the `x` or `y` coordinates are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`. If the
{zvar} coordinates are `pint.Quantity`, pass the magnitude to the plotting
command. A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
docstring._snippet_manager['plot.args_1d_y'] = _args_1d_docstring.format(x='x', y='y')
docstring._snippet_manager['plot.args_1d_x'] = _args_1d_docstring.format(x='y', y='x')
docstring._snippet_manager['plot.args_1d_multiy'] = _args_1d_multi_docstring.format(x='x', y='y') # noqa: E501
docstring._snippet_manager['plot.args_1d_multix'] = _args_1d_multi_docstring.format(x='y', y='x') # noqa: E501
docstring._snippet_manager['plot.args_2d'] = _args_2d_docstring.format(z='z', zvar='`z`') # noqa: E501
docstring._snippet_manager['plot.args_2d_flow'] = _args_2d_docstring.format(z='u, v', zvar='`u` and `v`') # noqa: E501
# Shared docstrings
_args_1d_shared_docstring = """
data : dict-like, optional
A dict-like dataset container (e.g., `~pandas.DataFrame` or
`~xarray.DataArray`). If passed, positional arguments can optionally
be string `data` keys and the arrays used for plotting are retrieved
with ``data[key]``. This is a `native matplotlib feature
<https://matplotlib.org/stable/gallery/misc/keyword_plotting.html>`__.
autoformat : bool, optional
Whether the `x` axis labels, `y` axis labels, axis formatters, axes titles,
legend titles, and colorbar labels are automatically configured when a
`~pandas.Series`, `~pandas.DataFrame`, `~xarray.DataArray`, or `~pint.Quantity`
is passed to the plotting command. Default is :rc:`autoformat`. Formatting
of `pint.Quantity` unit strings is controlled by :rc:`unitformat`.
"""
_args_2d_shared_docstring = """
%(plot.args_1d_shared)s
transpose : bool, optional
Whether to transpose the input data. This should be used when
passing datasets with column-major dimension order ``(x, y)``.
Otherwise row-major dimension order ``(y, x)`` is expected.
order : {'C', 'F'}, optional
    Alternative to `transpose`. ``'C'`` corresponds to the default C-style
row-major ordering (equivalent to ``transpose=False``). ``'F'`` corresponds
to Fortran-style column-major ordering (equivalent to ``transpose=True``).
globe : bool, optional
For `proplot.axes.GeoAxes` only. Whether to enforce global coverage.
Default is ``False``. When set to ``True`` this does the following:
#. Interpolates input data to the North and South poles by setting the data
values at the poles to the mean from latitudes nearest each pole.
#. Makes meridional coverage "circular", i.e. the last longitude coordinate
equals the first longitude coordinate plus 360\N{DEGREE SIGN}.
#. When basemap is the backend, cycles 1D longitude vectors to fit within
the map edges. For example, if the central longitude is 90\N{DEGREE SIGN},
the data is shifted so that it spans -90\N{DEGREE SIGN} to 270\N{DEGREE SIGN}.
"""
docstring._snippet_manager['plot.args_1d_shared'] = _args_1d_shared_docstring
docstring._snippet_manager['plot.args_2d_shared'] = _args_2d_shared_docstring
# Auto colorbar and legend docstring
_guide_docstring = """
colorbar : bool, int, or str, optional
If not ``None``, this is a location specifying where to draw an
*inner* or *outer* colorbar from the resulting object(s). If ``True``,
the default :rc:`colorbar.loc` is used. If the same location is
used in successive plotting calls, object(s) will be added to the
existing colorbar in that location (valid for colorbars built from lists
    of artists). Valid locations are shown in `~proplot.axes.Axes.colorbar`.
colorbar_kw : dict-like, optional
Extra keyword args for the call to `~proplot.axes.Axes.colorbar`.
legend : bool, int, or str, optional
Location specifying where to draw an *inner* or *outer* legend from the
resulting object(s). If ``True``, the default :rc:`legend.loc` is used.
If the same location is used in successive plotting calls, object(s)
will be added to existing legend in that location. Valid locations
are shown in `~proplot.axes.Axes.legend`.
legend_kw : dict-like, optional
Extra keyword args for the call to `~proplot.axes.Axes.legend`.
"""
docstring._snippet_manager['plot.guide'] = _guide_docstring
# Misc shared 1D plotting docstrings
_inbounds_docstring = """
inbounds : bool, optional
Whether to restrict the default `y` (`x`) axis limits to account for only
in-bounds data when the `x` (`y`) axis limits have been locked. Default
is :rc:`axes.inbounds`. See also :rcraw:`cmap.inbounds`.
"""
_error_means_docstring = """
mean, means : bool, optional
Whether to plot the means of each column for 2D `{y}` coordinates. Means
are calculated with `numpy.nanmean`. If no other arguments are specified,
this also sets ``barstd=True`` (and ``boxstd=True`` for violin plots).
median, medians : bool, optional
Whether to plot the medians of each column for 2D `{y}` coordinates. Medians
    are calculated with `numpy.nanmedian`. If no other arguments are
specified, this also sets ``barstd=True`` (and ``boxstd=True`` for violin plots).
"""
_error_bars_docstring = """
barstd, barstds : bool, float, or 2-tuple of float, optional
Valid only if `mean` or `median` is ``True``. Standard deviation multiples for
*thin error bars* with optional whiskers (i.e., caps). If scalar, then +/- that
multiple is used. If ``True``, the default standard deviation range of +/-3 is used.
barpctile, barpctiles : bool, float, or 2-tuple of float, optional
Valid only if `mean` or `median` is ``True``. As with `barstd`, but instead
using percentiles for the error bars. If scalar, that percentile range is
used (e.g., ``90`` shows the 5th to 95th percentiles). If ``True``, the default
percentile range of 0 to 100 is used.
bardata : array-like, optional
Valid only if `mean` and `median` are ``False``. If shape is 2 x N, these
are the lower and upper bounds for the thin error bars. If shape is N, these
are the absolute, symmetric deviations from the central points.
boxstd, boxstds, boxpctile, boxpctiles, boxdata : optional
As with `barstd`, `barpctile`, and `bardata`, but for *thicker error bars*
representing a smaller interval than the thin error bars. If `boxstds` is
``True``, the default standard deviation range of +/-1 is used. If `boxpctiles`
is ``True``, the default percentile range of 25 to 75 is used (i.e., the
interquartile range). When "boxes" and "bars" are combined, this has the
effect of drawing miniature box-and-whisker plots.
capsize : float, optional
The cap size for thin error bars in points. Default is :rc:`errorbar.capsize`.
barz, barzorder, boxz, boxzorder : float, optional
The "zorder" for the thin and thick error bars. Default is ``2.5``.
barc, barcolor, boxc, boxcolor : color-spec, optional
Colors for the thin and thick error bars. Default is
:rc:`boxplot.whiskerprops.color`.
barlw, barlinewidth, boxlw, boxlinewidth : float, optional
    Line widths for the thin and thick error bars, in points. The defaults are
:rc:`boxplot.whiskerprops.linewidth` (bars) and four times that value (boxes).
boxm, boxmarker : bool or marker-spec, optional
Whether to draw a small marker in the middle of the box denoting the mean or
median position. Ignored if `boxes` is ``False``. Default is ``'o'``.
boxms, boxmarkersize : size-spec, optional
The marker size for the `boxmarker` marker in points ** 2. Default size
is equal to ``(2 * boxlinewidth) ** 2``.
boxmc, boxmarkercolor, boxmec, boxmarkeredgecolor : color-spec, optional
Color, face color, and edge color for the `boxmarker` marker. Default color
and edge color are ``'w'``.
"""
_error_shading_docstring = """
shadestd, shadestds, shadepctile, shadepctiles, shadedata : optional
As with `barstd`, `barpctile`, and `bardata`, but using *shading* to indicate
the error range. If `shadestds` is ``True``, the default standard deviation
range of +/-2 is used. If `shadepctiles` is ``True``, the default
percentile range of 10 to 90 is used.
fadestd, fadestds, fadepctile, fadepctiles, fadedata : optional
As with `shadestd`, `shadepctile`, and `shadedata`, but for an additional,
more faded, *secondary* shaded region. If `fadestds` is ``True``, the default
standard deviation range of +/-3 is used. If `fadepctiles` is ``True``,
the default percentile range of 0 to 100 is used.
shadec, shadecolor, fadec, fadecolor : color-spec, optional
Colors for the different shaded regions. Default is to inherit the parent color.
shadez, shadezorder, fadez, fadezorder : float, optional
The "zorder" for the different shaded regions. Default is ``1.5``.
shadea, shadealpha, fadea, fadealpha : float, optional
The opacity for the different shaded regions. Defaults are ``0.4`` and ``0.2``.
shadelw, shadelinewidth, fadelw, fadelinewidth : float, optional
The edge line width for the shading patches. Default is :rc:`patch.linewidth`.
shadeec, shadeedgecolor, fadeec, fadeedgecolor : color-spec, optional
The edge color for the shading patches. Default is ``'none'``.
shadelabel, fadelabel : bool or str, optional
Labels for the shaded regions to be used as separate legend entries. To toggle
labels "on" and apply a *default* label, use e.g. ``shadelabel=True``. To apply
a *custom* label, use e.g. ``shadelabel='label'``. Otherwise, the shading is
drawn underneath the line and/or marker in the legend entry.
"""
docstring._snippet_manager['plot.inbounds'] = _inbounds_docstring
docstring._snippet_manager['plot.error_means_y'] = _error_means_docstring.format(y='y')
docstring._snippet_manager['plot.error_means_x'] = _error_means_docstring.format(y='x')
docstring._snippet_manager['plot.error_bars'] = _error_bars_docstring
docstring._snippet_manager['plot.error_shading'] = _error_shading_docstring
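# Usage illustration of the distribution keywords documented above (assumed
# example, not part of this module); `x` and `data` are placeholder arrays and
# the 2D `data` is reduced as described in the `means` entry:
#
#     fig, ax = pplt.subplots()
#     ax.plot(x, data, means=True, barstd=True, shadepctiles=(10, 90))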
# Color docstrings
_cycle_docstring = """
cycle : cycle-spec, optional
    The cycle specifier, passed to the `~proplot.constructor.Cycle` constructor.
If the returned cycler is unchanged from the current cycler, the axes
cycler will not be reset to its first position. To disable property cycling
and just use black for the default color, use ``cycle=False``, ``cycle='none'``,
or ``cycle=()`` (analogous to disabling ticks with e.g. ``xformatter='none'``).
To restore the default property cycler, use ``cycle=True``.
cycle_kw : dict-like, optional
Passed to `~proplot.constructor.Cycle`.
"""
_cmap_norm_docstring = """
cmap : colormap-spec, optional
    The colormap specifier, passed to the `~proplot.constructor.Colormap`
constructor function.
cmap_kw : dict-like, optional
Passed to `~proplot.constructor.Colormap`.
c, color, colors : color-spec or sequence of color-spec, optional
The color(s) used to create a `~proplot.colors.DiscreteColormap`.
If not passed, `cmap` is used.
norm : norm-spec, optional
The data value normalizer, passed to the `~proplot.constructor.Norm`
constructor function. If `discrete` is ``True`` then 1) this affects the default
level-generation algorithm (e.g. ``norm='log'`` builds levels in log-space) and
2) this is passed to `~proplot.colors.DiscreteNorm` to scale the colors before they
are discretized (if `norm` is not already a `~proplot.colors.DiscreteNorm`).
norm_kw : dict-like, optional
Passed to `~proplot.constructor.Norm`.
extend : {'neither', 'both', 'min', 'max'}, optional
Direction for drawing colorbar "extensions" (i.e. color keys for out-of-bounds
data on the end of the colorbar). Default is ``'neither'``.
discrete : bool, optional
If ``False``, then `~proplot.colors.DiscreteNorm` is not applied to the
colormap. Instead, for non-contour plots, the number of levels will be
roughly controlled by :rcraw:`cmap.lut`. This has a similar effect to
using `levels=large_number` but it may improve rendering speed. Default
is ``True`` for only contour-plotting commands like `~proplot.axes.Axes.contourf`
and pseudocolor-plotting commands like `~proplot.axes.Axes.pcolor`.
sequential, diverging, cyclic, qualitative : bool, optional
Boolean arguments used if `cmap` is not passed. Set these to ``True``
to use the default :rcraw:`cmap.sequential`, :rcraw:`cmap.diverging`,
:rcraw:`cmap.cyclic`, and :rcraw:`cmap.qualitative` colormaps.
The `diverging` option also applies `~proplot.colors.DivergingNorm`
as the default continuous normalizer.
"""
docstring._snippet_manager['plot.cycle'] = _cycle_docstring
docstring._snippet_manager['plot.cmap_norm'] = _cmap_norm_docstring
# Levels docstrings
# NOTE: In some functions we only need some components
_vmin_vmax_docstring = """
vmin, vmax : float, optional
The minimum and maximum color scale values used with the `norm` normalizer.
If `discrete` is ``False`` these are the absolute limits, and if `discrete`
is ``True`` these are the approximate limits used to automatically determine
`levels` or `values` lists at "nice" intervals. If `levels` or `values` were
already passed as lists, the default `vmin` and `vmax` are the minimum and
maximum of the lists. If `robust` was passed, the default `vmin` and `vmax`
are some percentile range of the data values. Otherwise, the default `vmin`
and `vmax` are the minimum and maximum of the data values.
"""
_manual_levels_docstring = """
N
Shorthand for `levels`.
levels : int or sequence of float, optional
The number of level edges or a sequence of level edges. If the former, `locator`
is used to generate this many level edges at "nice" intervals. If the latter,
the levels should be monotonically increasing or decreasing (note decreasing
levels fail with ``contour`` plots). Default is :rc:`cmap.levels`.
values : int or sequence of float, optional
The number of level centers or a sequence of level centers. If the former,
`locator` is used to generate this many level centers at "nice" intervals.
If the latter, levels are inferred using `~proplot.utils.edges`.
This will override any `levels` input.
"""
_auto_levels_docstring = """
robust : bool, float, or 2-tuple, optional
If ``True`` and `vmin` or `vmax` were not provided, they are
determined from the 2nd and 98th data percentiles rather than the
minimum and maximum. If float, this percentile range is used (for example,
``90`` corresponds to the 5th to 95th percentiles). If 2-tuple of float,
these specific percentiles should be used. This feature is useful
when your data has large outliers. Default is :rc:`cmap.robust`.
inbounds : bool, optional
If ``True`` and `vmin` or `vmax` were not provided, when axis limits
have been explicitly restricted with `~matplotlib.axes.Axes.set_xlim`
or `~matplotlib.axes.Axes.set_ylim`, out-of-bounds data is ignored.
Default is :rc:`cmap.inbounds`. See also :rcraw:`axes.inbounds`.
locator : locator-spec, optional
The locator used to determine level locations if `levels` or `values` were not
already passed as lists. Passed to the `~proplot.constructor.Locator` constructor.
Default is `~matplotlib.ticker.MaxNLocator` with ``levels`` integer levels.
locator_kw : dict-like, optional
Passed to `~proplot.constructor.Locator`.
symmetric : bool, optional
If ``True``, automatically generated levels are symmetric about zero.
Default is always ``False``.
positive : bool, optional
If ``True``, automatically generated levels are positive with a minimum at zero.
Default is always ``False``.
negative : bool, optional
If ``True``, automatically generated levels are negative with a maximum at zero.
Default is always ``False``.
nozero : bool, optional
If ``True``, ``0`` is removed from the level list. This is mainly useful for
single-color `~matplotlib.axes.Axes.contour` plots.
"""
docstring._snippet_manager['plot.vmin_vmax'] = _vmin_vmax_docstring
docstring._snippet_manager['plot.levels_manual'] = _manual_levels_docstring
docstring._snippet_manager['plot.levels_auto'] = _auto_levels_docstring
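# Usage illustration of the level keywords documented above (assumed example,
# not part of this module); `data2d` is a placeholder 2D array:
#
#     fig, ax = pplt.subplots()
#     m = ax.pcolormesh(data2d, levels=10, robust=True, extend='both')
#     ax.colorbar(m, loc='r')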
# Labels docstrings
_label_docstring = """
label, value : float or str, optional
The single legend label or colorbar coordinate to be used for
this plotted element. Can be numeric or string. This is generally
used with 1D positional arguments.
"""
_labels_1d_docstring = """
%(plot.label)s
labels, values : sequence of float or sequence of str, optional
The legend labels or colorbar coordinates used for each plotted element.
Can be numeric or string, and must match the number of plotted elements.
This is generally used with 2D positional arguments.
"""
_labels_2d_docstring = """
label : str, optional
The legend label to be used for this object. In the case of
    contours, this is paired with the central artist in the artist
list returned by `matplotlib.contour.ContourSet.legend_elements`.
labels : bool, optional
Whether to apply labels to contours and grid boxes. The text will be
white when the luminance of the underlying filled contour or grid box
is less than 50 and black otherwise.
labels_kw : dict-like, optional
Ignored if `labels` is ``False``. Extra keyword args for the labels.
For contour plots, this is passed to `~matplotlib.axes.Axes.clabel`.
Otherwise, this is passed to `~matplotlib.axes.Axes.text`.
fmt : format-spec, optional
The `~matplotlib.ticker.Formatter` used to format number labels.
Passed to the `~proplot.constructor.Formatter` constructor.
precision : int, optional
The maximum number of decimal places for number labels generated
    with the default formatter `~proplot.ticker.SimpleFormatter`.
"""
docstring._snippet_manager['plot.label'] = _label_docstring
docstring._snippet_manager['plot.labels_1d'] = _labels_1d_docstring
docstring._snippet_manager['plot.labels_2d'] = _labels_2d_docstring
# Negative-positive colors
_negpos_docstring = """
negpos : bool, optional
Whether to shade {objects} where ``{pos}`` with `poscolor`
and where ``{neg}`` with `negcolor`. Default is ``False``. If
``True`` this function will return a 2-tuple of values.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive {objects}. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
"""
docstring._snippet_manager['plot.negpos_fill'] = _negpos_docstring.format(
objects='patches', neg='y2 < y1', pos='y2 >= y1'
)
docstring._snippet_manager['plot.negpos_lines'] = _negpos_docstring.format(
objects='lines', neg='ymax < ymin', pos='ymax >= ymin'
)
docstring._snippet_manager['plot.negpos_bar'] = _negpos_docstring.format(
objects='bars', neg='height < 0', pos='height >= 0'
)
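# Usage illustration of the negpos keywords documented above (assumed example,
# not part of this module); `x` and `heights` are placeholder arrays:
#
#     fig, ax = pplt.subplots()
#     ax.bar(x, heights, negpos=True, negcolor='blue', poscolor='red')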
# Plot docstring
_plot_docstring = """
Plot standard lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.line)s
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.error_shading)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.plot`.
See also
--------
PlotAxes.plot
PlotAxes.plotx
matplotlib.axes.Axes.plot
"""
docstring._snippet_manager['plot.plot'] = _plot_docstring.format(y='y')
docstring._snippet_manager['plot.plotx'] = _plot_docstring.format(y='x')
# Step docstring
# NOTE: Internally matplotlib implements step with thin wrapper of plot
_step_docstring = """
Plot step lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.line)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.step`.
See also
--------
PlotAxes.step
PlotAxes.stepx
matplotlib.axes.Axes.step
"""
docstring._snippet_manager['plot.step'] = _step_docstring.format(y='y')
docstring._snippet_manager['plot.stepx'] = _step_docstring.format(y='x')
# Stem docstring
_stem_docstring = """
Plot stem lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(plot.inbounds)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.stem`.
"""
docstring._snippet_manager['plot.stem'] = _stem_docstring.format(y='y')
docstring._snippet_manager['plot.stemx'] = _stem_docstring.format(y='x')
# Lines docstrings
_lines_docstring = """
Plot {orientation} lines.
Parameters
----------
%(plot.args_1d_multi{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
stack, stacked : bool, optional
Whether to "stack" lines from successive columns of {y} data
or plot lines on top of each other. Default is ``False``.
%(plot.cycle)s
%(artist.line)s
%(plot.negpos_lines)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.{prefix}lines`.
See also
--------
PlotAxes.vlines
PlotAxes.hlines
matplotlib.axes.Axes.vlines
matplotlib.axes.Axes.hlines
"""
docstring._snippet_manager['plot.vlines'] = _lines_docstring.format(
y='y', prefix='v', orientation='vertical'
)
docstring._snippet_manager['plot.hlines'] = _lines_docstring.format(
y='x', prefix='h', orientation='horizontal'
)
# Scatter docstring
_parametric_docstring = """
Plot a parametric line.
Parameters
----------
%(plot.args_1d_y)s
c, color, colors, values, labels : sequence of float, str, or color-spec, optional
The parametric coordinate(s). These can be passed as a third positional
argument or as a keyword argument. If they are float, the colors will be
determined from `norm` and `cmap`. If they are strings, the color values
will be ``np.arange(len(colors))`` and eventual colorbar ticks will
be labeled with the strings. If they are colors, they are used for the
line segments and `cmap` is ignored -- for example, ``colors='blue'``
makes a monochromatic "parametric" line.
interp : int, optional
Interpolate to this many additional points between the parametric
coordinates. Default is ``0``. This can be increased to make the color
gradations between a small number of coordinates appear "smooth".
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.vmin_vmax)s
%(plot.inbounds)s
scalex, scaley : bool, optional
Whether the view limits are adapted to the data limits. The values are
passed on to `~matplotlib.axes.Axes.autoscale_view`.
%(plot.label)s
%(plot.guide)s
**kwargs
Valid `~matplotlib.collections.LineCollection` properties.
Returns
-------
`~matplotlib.collections.LineCollection`
The parametric line. See `this matplotlib example \
<https://matplotlib.org/stable/gallery/lines_bars_and_markers/multicolored_line>`__.
See also
--------
PlotAxes.plot
PlotAxes.plotx
matplotlib.collections.LineCollection
"""
docstring._snippet_manager['plot.parametric'] = _parametric_docstring
# Scatter function docstring
_scatter_docstring = """
Plot markers with flexible keyword arguments.
Parameters
----------
%(plot.args_1d_{y})s
s, size, ms, markersize : float or sequence of float or unit-spec, optional
The marker area(s). If this is an array matching the shape of `x` and `y`, the
units are scaled by `smin` and `smax`. If this contains unit string(s), it is
processed by `~proplot.utils.units` and represents the width rather than area.
c, color, colors, mc, markercolor, markercolors, fc, facecolor, facecolors \
: array-like or color-spec, optional
The marker color(s). If this is an array matching the shape of `x` and `y`,
the colors are generated using `cmap`, `norm`, `vmin`, and `vmax`. Otherwise,
this should be a valid matplotlib color.
smin, smax : float, optional
The minimum and maximum marker size area in units ``points ** 2``. Ignored
if `absolute_size` is ``True``. Default value for `smin` is ``1`` and for
`smax` is the square of :rc:`lines.markersize`.
absolute_size : bool, optional
Whether the marker sizes should be taken to be in physical units or
scaled by `smin` and `smax`. Default is ``True`` if `s` is scalar
and ``False`` if `s` is an array.
%(plot.vmin_vmax)s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_auto)s
%(plot.cycle)s
lw, linewidth, linewidths, mew, markeredgewidth, markeredgewidths \
: float or sequence, optional
The marker edge width(s).
edgecolors, markeredgecolor, markeredgecolors \
: color-spec or sequence, optional
The marker edge color(s).
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.error_shading)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.scatter`.
See also
--------
PlotAxes.scatter
PlotAxes.scatterx
matplotlib.axes.Axes.scatter
"""
docstring._snippet_manager['plot.scatter'] = _scatter_docstring.format(y='y')
docstring._snippet_manager['plot.scatterx'] = _scatter_docstring.format(y='x')
# Bar function docstring
_bar_docstring = """
Plot individual, grouped, or stacked bars.
Parameters
----------
%(plot.args_1d_{y})s
width : float or array-like, optional
The width(s) of the bars relative to the {x} coordinate step size.
Can be passed as a third positional argument.
{bottom} : float or array-like, optional
The coordinate(s) of the {bottom} edge of the bars. Default is
    ``0``. Can be passed as a fourth positional argument.
absolute_width : bool, optional
Whether to make the `width` units *absolute*. If ``True``, this
restores the default matplotlib behavior. Default is ``False``.
stack, stacked : bool, optional
Whether to "stack" bars from successive columns of {y} data
or plot bars side-by-side in groups. Default is ``False``.
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.patch)s
%(plot.negpos_bar)s
%(axes.edgefix)s
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.bar{suffix}`.
See also
--------
PlotAxes.bar
PlotAxes.barh
matplotlib.axes.Axes.bar
matplotlib.axes.Axes.barh
"""
docstring._snippet_manager['plot.bar'] = _bar_docstring.format(
x='x', y='y', bottom='bottom', suffix=''
)
docstring._snippet_manager['plot.barh'] = _bar_docstring.format(
x='y', y='x', bottom='left', suffix='h'
)
# Area plot docstring
_fill_docstring = """
Plot individual, grouped, or overlaid shading patches.
Parameters
----------
%(plot.args_1d_multi{y})s
stack, stacked : bool, optional
Whether to "stack" area patches from successive columns of {y} data
or plot area patches on top of each other. Default is ``False``.
%(plot.args_1d_shared)s
Other parameters
----------------
where : ndarray, optional
A boolean mask for the points that should be shaded.
See `this matplotlib example \
<https://matplotlib.org/stable/gallery/pyplots/whats_new_98_4_fill_between.html>`__.
%(plot.cycle)s
%(artist.patch)s
%(plot.negpos_fill)s
%(axes.edgefix)s
%(plot.inbounds)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.fill_between{suffix}`.
See also
--------
PlotAxes.area
PlotAxes.areax
PlotAxes.fill_between
PlotAxes.fill_betweenx
matplotlib.axes.Axes.fill_between
matplotlib.axes.Axes.fill_betweenx
"""
docstring._snippet_manager['plot.fill_between'] = _fill_docstring.format(
x='x', y='y', suffix=''
)
docstring._snippet_manager['plot.fill_betweenx'] = _fill_docstring.format(
x='y', y='x', suffix='x'
)
# Box plot docstrings
_boxplot_docstring = """
Plot {orientation} boxes and whiskers with a nice default style.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
fill : bool, optional
Whether to fill the box with a color. Default is ``True``.
mean, means : bool, optional
If ``True``, this passes ``showmeans=True`` and ``meanline=True``
to `~matplotlib.axes.Axes.boxplot`.
%(plot.cycle)s
%(artist.patch_black)s
m, marker, ms, markersize : float or str, optional
Marker style and size for the 'fliers', i.e. outliers. Default is
determined by :rcraw:`boxplot.flierprops`.
meanls, medianls, meanlinestyle, medianlinestyle, meanlinestyles, medianlinestyles \
: line style-spec, optional
The line style for the mean and median lines drawn horizontally
across the box.
boxc, capc, whiskerc, flierc, meanc, medianc, \
boxcolor, capcolor, whiskercolor, fliercolor, meancolor, mediancolor, \
boxcolors, capcolors, whiskercolors, fliercolors, meancolors, mediancolors \
: color-spec or sequence, optional
The color of various boxplot components. If a sequence, should be the
same length as the number of boxes. These are shorthands so you don't
have to pass e.g. a ``boxprops`` dictionary.
boxlw, caplw, whiskerlw, flierlw, meanlw, medianlw, boxlinewidth, caplinewidth, \
meanlinewidth, medianlinewidth, whiskerlinewidth, flierlinewidth, boxlinewidths, \
caplinewidths, meanlinewidths, medianlinewidths, whiskerlinewidths, flierlinewidths \
: float, optional
The line width of various boxplot components. These are shorthands so
you don't have to pass e.g. a ``boxprops`` dictionary.
%(plot.labels_1d)s
**kwargs
Passed to `matplotlib.axes.Axes.boxplot`.
See also
--------
PlotAxes.boxes
PlotAxes.boxesh
PlotAxes.boxplot
PlotAxes.boxploth
matplotlib.axes.Axes.boxplot
"""
docstring._snippet_manager['plot.boxplot'] = _boxplot_docstring.format(
y='y', orientation='vertical'
)
docstring._snippet_manager['plot.boxploth'] = _boxplot_docstring.format(
y='x', orientation='horizontal'
)
# Violin plot docstrings
_violinplot_docstring = """
Plot {orientation} violins with a nice default style matching
`this matplotlib example \
<https://matplotlib.org/stable/gallery/statistics/customized_violin.html>`__.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.patch_black)s
%(plot.labels_1d)s
%(plot.error_bars)s
**kwargs
Passed to `matplotlib.axes.Axes.violinplot`.
Note
----
It is no longer possible to show minima and maxima with whiskers --
while this is useful for `~matplotlib.axes.Axes.boxplot`\\ s it is
redundant for `~matplotlib.axes.Axes.violinplot`\\ s.
See also
--------
PlotAxes.violins
PlotAxes.violinsh
PlotAxes.violinplot
PlotAxes.violinploth
matplotlib.axes.Axes.violinplot
"""
docstring._snippet_manager['plot.violinplot'] = _violinplot_docstring.format(
y='y', orientation='vertical'
)
docstring._snippet_manager['plot.violinploth'] = _violinplot_docstring.format(
y='x', orientation='horizontal'
)
# 1D histogram docstrings
_hist_docstring = """
Plot {orientation} histograms.
Parameters
----------
%(plot.args_1d_{y})s
bins : int or sequence of float, optional
The bin count or exact bin edges.
%(plot.weights)s
histtype : {{'bar', 'barstacked', 'step', 'stepfilled'}}, optional
The histogram type. See `matplotlib.axes.Axes.hist` for details.
width, rwidth : float, optional
The bar width(s) for bar-type histograms relative to the bin size. Default
is ``0.8`` for multiple columns of unstacked data and ``1`` otherwise.
stack, stacked : bool, optional
Whether to "stack" successive columns of {y} data for bar-type histograms
or show side-by-side in groups. Setting this to ``False`` is equivalent to
``histtype='bar'`` and to ``True`` is equivalent to ``histtype='barstacked'``.
fill, filled : bool, optional
Whether to "fill" step-type histograms or just plot the edges. Setting
this to ``False`` is equivalent to ``histtype='step'`` and to ``True``
is equivalent to ``histtype='stepfilled'``.
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.patch)s
%(axes.edgefix)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.hist`.
See also
--------
PlotAxes.hist
PlotAxes.histh
matplotlib.axes.Axes.hist
"""
_weights_docstring = """
weights : array-like, optional
The weights associated with each point. If string this
can be retrieved from `data` (see below).
"""
docstring._snippet_manager['plot.weights'] = _weights_docstring
docstring._snippet_manager['plot.hist'] = _hist_docstring.format(
y='x', orientation='vertical'
)
docstring._snippet_manager['plot.histh'] = _hist_docstring.format(
    y='y', orientation='horizontal'
)
# 2D histogram docstrings
_hist2d_docstring = """
Plot a {descrip}.
Parameters
----------
%(plot.args_1d_y)s{bins}
%(plot.weights)s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.vmin_vmax)s
%(plot.levels_auto)s
%(plot.labels_2d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.{command}`.
See also
--------
PlotAxes.hist2d
PlotAxes.hexbin
matplotlib.axes.Axes.{command}
"""
_bins_docstring = """
bins : int or 2-tuple of int, or array-like or 2-tuple of array-like, optional
The bin count or exact bin edges for each dimension or both dimensions.
""".rstrip()
docstring._snippet_manager['plot.hist2d'] = _hist2d_docstring.format(
command='hist2d', descrip='standard 2D histogram', bins=_bins_docstring
)
docstring._snippet_manager['plot.hexbin'] = _hist2d_docstring.format(
command='hexbin', descrip='2D hexagonally binned histogram', bins=''
)
# Pie chart docstring
_pie_docstring = """
Plot a pie chart.
Parameters
----------
%(plot.args_1d_y)s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(artist.patch)s
%(axes.edgefix)s
%(plot.labels_1d)s
labelpad, labeldistance : float, optional
The distance at which labels are drawn in radial coordinates.
See also
--------
matplotlib.axes.Axes.pie
"""
docstring._snippet_manager['plot.pie'] = _pie_docstring
# Contour docstrings
_contour_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d)s
%(plot.args_2d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.vmin_vmax)s
%(plot.levels_auto)s
%(artist.collection_contour)s{edgefix}
%(plot.labels_2d)s
%(plot.guide)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
PlotAxes.contour
PlotAxes.contourf
PlotAxes.tricontour
PlotAxes.tricontourf
matplotlib.axes.Axes.{command}
"""
docstring._snippet_manager['plot.contour'] = _contour_docstring.format(
descrip='contour lines', command='contour', edgefix=''
)
docstring._snippet_manager['plot.contourf'] = _contour_docstring.format(
descrip='filled contours', command='contourf', edgefix='%(axes.edgefix)s\n',
)
docstring._snippet_manager['plot.tricontour'] = _contour_docstring.format(
descrip='contour lines on a triangular grid', command='tricontour', edgefix=''
)
docstring._snippet_manager['plot.tricontourf'] = _contour_docstring.format(
descrip='filled contours on a triangular grid', command='tricontourf', edgefix='\n%(axes.edgefix)s' # noqa: E501
)
# Pcolor docstring
_pcolor_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d)s
%(plot.args_2d_shared)s{aspect}
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.vmin_vmax)s
%(plot.levels_auto)s
%(artist.collection_pcolor)s
%(axes.edgefix)s
%(plot.labels_2d)s
%(plot.guide)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
PlotAxes.pcolor
PlotAxes.pcolormesh
PlotAxes.pcolorfast
PlotAxes.heatmap
PlotAxes.tripcolor
matplotlib.axes.Axes.{command}
"""
_heatmap_descrip = """
grid boxes with formatting suitable for heatmaps. Ensures square grid
boxes, adds major ticks to the center of each grid box, disables minor
ticks and gridlines, and sets :rcraw:`cmap.discrete` to ``False`` by default
""".strip()
_heatmap_aspect = """
aspect : {'equal', 'auto'} or float, optional
Modify the axes aspect ratio. The aspect ratio is of particular
relevance for heatmaps since it may lead to non-square grid boxes.
    This parameter is a shortcut for calling `~matplotlib.axes.Axes.set_aspect`.
Default is :rc:`image.aspect`. The options are as follows:
* Number: The data aspect ratio.
* ``'equal'``: A data aspect ratio of 1.
* ``'auto'``: Allows the data aspect ratio to change depending on
the layout. In general this results in non-square grid boxes.
""".rstrip()
docstring._snippet_manager['plot.pcolor'] = _pcolor_docstring.format(
descrip='irregular grid boxes', command='pcolor', aspect=''
)
docstring._snippet_manager['plot.pcolormesh'] = _pcolor_docstring.format(
descrip='regular grid boxes', command='pcolormesh', aspect=''
)
docstring._snippet_manager['plot.pcolorfast'] = _pcolor_docstring.format(
descrip='grid boxes quickly', command='pcolorfast', aspect=''
)
docstring._snippet_manager['plot.tripcolor'] = _pcolor_docstring.format(
descrip='triangular grid boxes', command='tripcolor', aspect=''
)
docstring._snippet_manager['plot.heatmap'] = _pcolor_docstring.format(
descrip=_heatmap_descrip, command='pcolormesh', aspect=_heatmap_aspect
)
# Image docstring
_show_docstring = """
Plot {descrip}.
Parameters
----------
z : array-like
The data passed as a positional argument or keyword argument.
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.vmin_vmax)s
%(plot.levels_auto)s
%(plot.guide)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
proplot.axes.PlotAxes
matplotlib.axes.Axes.{command}
"""
docstring._snippet_manager['plot.imshow'] = _show_docstring.format(
descrip='an image', command='imshow'
)
docstring._snippet_manager['plot.matshow'] = _show_docstring.format(
descrip='a matrix', command='matshow'
)
docstring._snippet_manager['plot.spy'] = _show_docstring.format(
    descrip='a sparsity pattern', command='spy'
)
# Flow function docstring
_flow_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d_flow)s
c, color, colors : array-like or color-spec, optional
The colors of the {descrip} passed as either a keyword argument
or a fifth positional argument. This can be a single color or
a color array to be scaled by `cmap` and `norm`.
%(plot.args_2d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.vmin_vmax)s
%(plot.levels_auto)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`
See also
--------
PlotAxes.barbs
PlotAxes.quiver
PlotAxes.stream
PlotAxes.streamplot
matplotlib.axes.Axes.{command}
"""
docstring._snippet_manager['plot.barbs'] = _flow_docstring.format(
descrip='wind barbs', command='barbs'
)
docstring._snippet_manager['plot.quiver'] = _flow_docstring.format(
descrip='quiver arrows', command='quiver'
)
docstring._snippet_manager['plot.stream'] = _flow_docstring.format(
descrip='streamlines', command='streamplot'
)
def _get_vert(vert=None, orientation=None, **kwargs):
"""
Get the orientation specified as either `vert` or `orientation`. This is
used internally by various helper functions.
"""
if vert is not None:
return kwargs, vert
elif orientation is not None:
return kwargs, orientation != 'horizontal' # should already be validated
else:
return kwargs, True # fallback
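# For reference, _get_vert collapses the two conventions onto a single boolean:
# _get_vert(vert=False) -> ({}, False)
# _get_vert(orientation='horizontal') -> ({}, False)
# _get_vert() -> ({}, True), i.e. the vertical fallback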
def _parse_vert(
vert=None, orientation=None, default_vert=None, default_orientation=None,
**kwargs
):
"""
Interpret both 'vert' and 'orientation' and add to outgoing keyword args
if a default is provided.
"""
# NOTE: Users should only pass these to hist, boxplot, or violinplot. To change
# the plot, scatter, area, or bar orientation users should use the differently
# named functions. Internally, however, they use these keyword args.
if default_vert is not None:
kwargs['vert'] = _not_none(
vert=vert,
orientation=None if orientation is None else orientation == 'vertical',
default=default_vert,
)
if default_orientation is not None:
kwargs['orientation'] = _not_none(
orientation=orientation,
vert=None if vert is None else 'vertical' if vert else 'horizontal',
default=default_orientation,
)
if kwargs.get('orientation', None) not in (None, 'horizontal', 'vertical'):
raise ValueError("Orientation must be either 'horizontal' or 'vertical'.")
return kwargs
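# For reference, _parse_vert translates between the two keyword conventions:
# _parse_vert(vert=False, default_vert=True) -> {'vert': False}
# _parse_vert(orientation='horizontal', default_orientation='vertical')
#     -> {'orientation': 'horizontal'}
# _parse_vert(vert=False, default_orientation='vertical')
#     -> {'orientation': 'horizontal'}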
class PlotAxes(base.Axes):
"""
The second lowest-level `~matplotlib.axes.Axes` subclass used by proplot.
Implements all plotting overrides.
"""
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args, **kwargs
Passed to `proplot.axes.Axes`.
See also
--------
matplotlib.axes.Axes
proplot.axes.Axes
proplot.axes.CartesianAxes
proplot.axes.PolarAxes
proplot.axes.GeoAxes
"""
super().__init__(*args, **kwargs)
def _plot_native(self, name, *args, **kwargs):
"""
Call the plotting method and use context object to redirect internal
calls to native methods. Finally add attributes to outgoing methods.
"""
# NOTE: Previously allowed internal matplotlib plotting function calls to run
# through proplot overrides then avoided awkward conflicts in piecemeal fashion.
# Now prevent internal calls from running through overrides using preprocessor
kwargs.pop('distribution', None) # remove stat distributions
with context._state_context(self, _internal_call=True):
if self._name == 'basemap':
obj = getattr(self.projection, name)(*args, ax=self, **kwargs)
else:
obj = getattr(super(), name)(*args, **kwargs)
return obj
def _plot_contour_edge(self, method, *args, **kwargs):
"""
Call the contour method to add "edges" to filled contours.
"""
# NOTE: This is used to provide an object that can be used by 'clabel' for
# auto-labels. Filled contours create strange artifacts.
# NOTE: Make the default 'line width' identical to one used for pcolor plots
# rather than rc['contour.linewidth']. See mpl pcolor() source code
if not any(key in kwargs for key in ('linewidths', 'linestyles', 'edgecolors')):
kwargs['linewidths'] = 0 # for clabel
kwargs.setdefault('linewidths', EDGEWIDTH)
kwargs.pop('cmap', None)
kwargs['colors'] = kwargs.pop('edgecolors', 'k')
return self._plot_native(method, *args, **kwargs)
def _plot_negpos_objs(
self, name, x, *ys, negcolor=None, poscolor=None, colorkey='facecolor',
use_where=False, use_zero=False, **kwargs
):
"""
Call the plot method separately for "negative" and "positive" data.
"""
if use_where:
kwargs.setdefault('interpolate', True) # see fill_between docs
for key in ('color', 'colors', 'facecolor', 'facecolors', 'where'):
value = kwargs.pop(key, None)
if value is not None:
warnings._warn_proplot(
f'{name}() argument {key}={value!r} is incompatible with negpos=True. Ignoring.' # noqa: E501
)
# Negative component
yneg = list(ys) # copy
if use_zero: # filter bar heights
yneg[0] = data._safe_mask(ys[0] < 0, ys[0])
elif use_where: # apply fill_between mask
kwargs['where'] = ys[1] < ys[0]
else:
yneg = data._safe_mask(ys[1] < ys[0], *ys)
kwargs[colorkey] = _not_none(negcolor, rc['negcolor'])
negobj = self._plot_native(name, x, *yneg, **kwargs)
# Positive component
ypos = list(ys) # copy
if use_zero: # filter bar heights
ypos[0] = data._safe_mask(ys[0] >= 0, ys[0])
elif use_where: # apply fill_between mask
kwargs['where'] = ys[1] >= ys[0]
else:
ypos = data._safe_mask(ys[1] >= ys[0], *ys)
kwargs[colorkey] = _not_none(poscolor, rc['poscolor'])
posobj = self._plot_native(name, x, *ypos, **kwargs)
return cbook.silent_list(type(negobj).__name__, (negobj, posobj))
def _plot_errorbars(
self, x, y, *_, distribution=None,
default_bars=True, default_boxes=False,
barstd=None, barstds=None, barpctile=None, barpctiles=None, bardata=None,
boxstd=None, boxstds=None, boxpctile=None, boxpctiles=None, boxdata=None,
capsize=None, **kwargs,
):
"""
Add up to 2 error indicators: thick "boxes" and thin "bars".
"""
# Parse input args
# NOTE: Want to keep _plot_errorbars() and _plot_errorshading() separate.
# But also want default behavior where some default error indicator is shown
# if user requests means/medians only. Result is the below kludge.
kwargs, vert = _get_vert(**kwargs)
barstds = _not_none(barstd=barstd, barstds=barstds)
boxstds = _not_none(boxstd=boxstd, boxstds=boxstds)
barpctiles = _not_none(barpctile=barpctile, barpctiles=barpctiles)
boxpctiles = _not_none(boxpctile=boxpctile, boxpctiles=boxpctiles)
bars = any(_ is not None for _ in (bardata, barstds, barpctiles))
boxes = any(_ is not None for _ in (boxdata, boxstds, boxpctiles))
shade = any( # annoying kludge
prefix + suffix in key for key in kwargs
for prefix in ('shade', 'fade') for suffix in ('std', 'pctile', 'data')
)
if distribution is not None and not shade:
if not bars:
barstds = bars = default_bars
if not boxes:
boxstds = boxes = default_boxes
# Error bar properties
edgecolor = kwargs.get('edgecolor', rc['boxplot.whiskerprops.color'])
barprops = _pop_props(kwargs, 'line', ignore='marker', prefix='bar')
barprops['capsize'] = _not_none(capsize, rc['errorbar.capsize'])
barprops['linestyle'] = 'none'
barprops.setdefault('color', edgecolor)
barprops.setdefault('zorder', 2.5)
barprops.setdefault('linewidth', rc['boxplot.whiskerprops.linewidth'])
# Error box properties
# NOTE: Includes 'markerfacecolor' and 'markeredgecolor' props
boxprops = _pop_props(kwargs, 'line', prefix='box')
boxprops['capsize'] = 0
boxprops['linestyle'] = 'none'
boxprops.setdefault('color', barprops['color'])
boxprops.setdefault('zorder', barprops['zorder'])
boxprops.setdefault('linewidth', 4 * barprops['linewidth'])
# Box marker properties
boxmarker = {key: boxprops.pop(key) for key in tuple(boxprops) if 'marker' in key} # noqa: E501
boxmarker['c'] = _not_none(boxmarker.pop('markerfacecolor', None), 'white')
boxmarker['s'] = _not_none(boxmarker.pop('markersize', None), boxprops['linewidth'] ** 0.5) # noqa: E501
boxmarker['zorder'] = boxprops['zorder']
boxmarker['edgecolor'] = boxmarker.pop('markeredgecolor', None)
boxmarker['linewidth'] = boxmarker.pop('markerlinewidth', None)
if boxmarker.get('marker') is True:
boxmarker['marker'] = 'o'
elif default_boxes: # enable by default
boxmarker.setdefault('marker', 'o')
# Draw thin or thick error bars from distributions or explicit errdata
sy = 'y' if vert else 'x' # yerr
ex, ey = (x, y) if vert else (y, x)
eobjs = []
if bars: # now impossible to make thin bar width different from cap width!
edata, _ = data._dist_range(
y, distribution,
stds=barstds, pctiles=barpctiles, errdata=bardata,
stds_default=(-3, 3), pctiles_default=(0, 100),
)
obj = self.errorbar(ex, ey, **barprops, **{sy + 'err': edata})
eobjs.append(obj)
if boxes: # must go after so scatter point can go on top
edata, _ = data._dist_range(
y, distribution,
stds=boxstds, pctiles=boxpctiles, errdata=boxdata,
stds_default=(-1, 1), pctiles_default=(25, 75),
)
obj = self.errorbar(ex, ey, **boxprops, **{sy + 'err': edata})
if boxmarker.get('marker', None):
self.scatter(ex, ey, **boxmarker)
eobjs.append(obj)
kwargs['distribution'] = distribution
return (*eobjs, kwargs)
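        # Hedged usage sketch (illustrative, not part of this module): with a 2D
        # distribution passed to a 1D command, e.g.
        #   >>> ax.line(x, y2d, means=True, barstd=True, boxstd=True)
        # the thin bars span the stds_default=(-3, 3) range and the thick boxes span
        # stds_default=(-1, 1), as wired up above; 'ax' is assumed to be a PlotAxes.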
def _plot_errorshading(
self, x, y, *_, distribution=None, color_key='color',
shadestd=None, shadestds=None, shadepctile=None, shadepctiles=None, shadedata=None, # noqa: E501
fadestd=None, fadestds=None, fadepctile=None, fadepctiles=None, fadedata=None,
shadelabel=False, fadelabel=False, **kwargs
):
"""
Add up to 2 error indicators: more opaque "shading" and less opaque "fading".
"""
kwargs, vert = _get_vert(**kwargs)
shadestds = _not_none(shadestd=shadestd, shadestds=shadestds)
fadestds = _not_none(fadestd=fadestd, fadestds=fadestds)
shadepctiles = _not_none(shadepctile=shadepctile, shadepctiles=shadepctiles)
fadepctiles = _not_none(fadepctile=fadepctile, fadepctiles=fadepctiles)
shade = any(_ is not None for _ in (shadedata, shadestds, shadepctiles))
fade = any(_ is not None for _ in (fadedata, fadestds, fadepctiles))
# Shading properties
shadeprops = _pop_props(kwargs, 'patch', prefix='shade')
shadeprops.setdefault('alpha', 0.4)
shadeprops.setdefault('zorder', 1.5)
shadeprops.setdefault('linewidth', rc['patch.linewidth'])
shadeprops.setdefault('edgecolor', 'none')
# Fading properties
fadeprops = _pop_props(kwargs, 'patch', prefix='fade')
fadeprops.setdefault('zorder', shadeprops['zorder'])
fadeprops.setdefault('alpha', 0.5 * shadeprops['alpha'])
fadeprops.setdefault('linewidth', shadeprops['linewidth'])
fadeprops.setdefault('edgecolor', 'none')
# Get default color then apply to outgoing keyword args so
# that plotting function will not advance to next cycler color.
# TODO: More robust treatment of 'color' vs. 'facecolor'
if (
shade and shadeprops.get('facecolor', None) is None
or fade and fadeprops.get('facecolor', None) is None
):
color = kwargs.get(color_key, None)
if color is None: # add to outgoing
color = kwargs[color_key] = self._get_lines.get_next_color()
shadeprops.setdefault('facecolor', color)
fadeprops.setdefault('facecolor', color)
# Draw dark and light shading from distributions or explicit errdata
eobjs = []
fill = self.fill_between if vert else self.fill_betweenx
if fade:
edata, label = data._dist_range(
y, distribution,
stds=fadestds, pctiles=fadepctiles, errdata=fadedata,
stds_default=(-3, 3), pctiles_default=(0, 100),
label=fadelabel, absolute=True,
)
eobj = fill(x, *edata, label=label, **fadeprops)
eobjs.append(eobj)
if shade:
edata, label = data._dist_range(
y, distribution,
stds=shadestds, pctiles=shadepctiles, errdata=shadedata,
stds_default=(-2, 2), pctiles_default=(10, 90),
label=shadelabel, absolute=True,
)
eobj = fill(x, *edata, label=label, **shadeprops)
eobjs.append(eobj)
kwargs['distribution'] = distribution
return (*eobjs, kwargs)
def _add_sticky_edges(self, objs, axis, *args, only=None):
"""
Add sticky edges to the input artists using the minimum and maximum of the
input coordinates. This is used to copy `bar` behavior to `area` and `lines`.
"""
for sides in args:
sides = np.atleast_1d(sides)
if not sides.size:
continue
min_, max_ = data._safe_range(sides)
if min_ is None or max_ is None:
continue
for obj in guides._iter_iterables(objs):
if only and not isinstance(obj, only):
continue # e.g. ignore error bars
convert = getattr(self, 'convert_' + axis + 'units')
edges = getattr(obj.sticky_edges, axis)
edges.extend(convert((min_, max_)))
def _add_contour_labels(
self, obj, cobj, fmt, *, c=None, color=None, colors=None,
size=None, fontsize=None, inline_spacing=None, **kwargs
):
"""
Add labels to contours with support for shade-dependent filled contour labels.
Text color is inferred from filled contour object and labels are always drawn
on unfilled contour object (otherwise errors crop up).
"""
# Parse input args
zorder = max((h.get_zorder() for h in obj.collections), default=3)
zorder = max(3, zorder + 1)
kwargs.setdefault('zorder', zorder)
colors = _not_none(c=c, color=color, colors=colors)
fontsize = _not_none(size=size, fontsize=fontsize, default=rc['font.smallsize'])
inline_spacing = _not_none(inline_spacing, 2.5)
# Separate clabel args from text Artist args
text_kw = {}
clabel_keys = ('levels', 'inline', 'manual', 'rightside_up', 'use_clabeltext')
for key in tuple(kwargs): # allow dict to change size
if key not in clabel_keys:
text_kw[key] = kwargs.pop(key)
# Draw hidden additional contour for filled contour labels
cobj = _not_none(cobj, obj)
if obj.filled and colors is None:
colors = []
for level in obj.levels:
_, _, lum = utils.to_xyz(obj.cmap(obj.norm(level)))
colors.append('w' if lum < 50 else 'k')
# Draw the labels
labs = cobj.clabel(
fmt=fmt, colors=colors, fontsize=fontsize,
inline_spacing=inline_spacing, **kwargs
)
if labs is not None: # returns None if no contours
for lab in labs:
lab.update(text_kw)
return labs
def _add_gridbox_labels(
self, obj, fmt, *, c=None, color=None, colors=None,
size=None, fontsize=None, **kwargs
):
"""
Add labels to pcolor boxes with support for shade-dependent text colors.
Values are inferred from the unnormalized grid box color.
"""
# Parse input args
# NOTE: This function also hides grid boxes filled with NaNs to avoid ugly
# issue where edge colors surround NaNs. Should maybe move this somewhere else.
obj.update_scalarmappable() # update 'edgecolors' list
color = _not_none(c=c, color=color, colors=colors)
fontsize = _not_none(size=size, fontsize=fontsize, default=rc['font.smallsize'])
kwargs.setdefault('ha', 'center')
kwargs.setdefault('va', 'center')
# Apply colors and hide edge colors for empty grids
# NOTE: Could also
labs = []
array = obj.get_array()
paths = obj.get_paths()
edgecolors = data._to_numpy_array(obj.get_edgecolors())
if len(edgecolors) == 1:
edgecolors = np.repeat(edgecolors, len(array), axis=0)
for i, (path, value) in enumerate(zip(paths, array)):
# Round to the number corresponding to the *color* rather than
# the exact data value. Similar to contour label numbering.
if value is ma.masked or not np.isfinite(value):
edgecolors[i, :] = 0
continue
if isinstance(obj.norm, pcolors.DiscreteNorm):
value = obj.norm._norm.inverse(obj.norm(value))
icolor = color
if color is None:
_, _, lum = utils.to_xyz(obj.cmap(obj.norm(value)), 'hcl')
icolor = 'w' if lum < 50 else 'k'
bbox = path.get_extents()
x = (bbox.xmin + bbox.xmax) / 2
y = (bbox.ymin + bbox.ymax) / 2
lab = self.text(x, y, fmt(value), color=icolor, size=fontsize, **kwargs)
labs.append(lab)
obj.set_edgecolors(edgecolors)
return labs
def _add_auto_labels(
self, obj, cobj=None, labels=False, labels_kw=None,
fmt=None, formatter=None, formatter_kw=None, precision=None,
):
"""
Add number labels. Default formatter is `~proplot.ticker.SimpleFormatter`
with a default maximum precision of ``3`` decimal places.
"""
# TODO: Add quiverkey to this!
if not labels:
return
labels_kw = labels_kw or {}
formatter_kw = formatter_kw or {}
formatter = _not_none(
fmt_labels_kw=labels_kw.pop('fmt', None),
formatter_labels_kw=labels_kw.pop('formatter', None),
fmt=fmt,
formatter=formatter,
default='simple'
)
precision = _not_none(
formatter_kw_precision=formatter_kw.pop('precision', None),
precision=precision,
default=3, # should be lower than the default intended for tick labels
)
formatter = constructor.Formatter(formatter, precision=precision, **formatter_kw) # noqa: E501
if isinstance(obj, mcontour.ContourSet):
self._add_contour_labels(obj, cobj, formatter, **labels_kw)
elif isinstance(obj, mcollections.Collection):
self._add_gridbox_labels(obj, formatter, **labels_kw)
else:
raise RuntimeError(f'Not possible to add labels to object {obj!r}.')
def _iter_arg_pairs(self, *args):
"""
Iterate over ``[x1,] y1, [fmt1,] [x2,] y2, [fmt2,] ...`` input.
"""
# NOTE: This is copied from _process_plot_var_args.__call__ to avoid relying
# on private API. We emulate this input style with successive plot() calls.
args = list(args)
while args: # this permits empty input
x, y, *args = args
if args and isinstance(args[0], str): # format string detected!
fmt, *args = args
elif isinstance(y, str): # omits some of matplotlib's rigor but whatevs
x, y, fmt = None, x, y
else:
fmt = None
yield x, y, fmt
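        # Hedged usage sketch (illustrative): the iterator emulates matplotlib's
        # plot() argument grammar, e.g.
        #   >>> list(ax._iter_arg_pairs(x1, y1, 'r--', y2, 'o'))
        #   [(x1, y1, 'r--'), (None, y2, 'o')]
        # where x1, y1, y2 are assumed to be arrays and 'ax' a PlotAxes instance.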
def _iter_arg_cols(self, *args, label=None, labels=None, values=None, **kwargs):
"""
Iterate over columns of positional arguments and add successive ``'label'``
keyword arguments using the input label-list ``'labels'``.
"""
# Handle cycle args and label lists
# NOTE: Arrays here should have had metadata stripped by _parse_plot1d
# but could still be pint quantities that get processed by axis converter.
n = max(
1 if not data._is_array(a) or a.ndim < 2 else a.shape[-1]
for a in args
)
labels = _not_none(label=label, values=values, labels=labels)
if not np.iterable(labels) or isinstance(labels, str):
labels = n * [labels]
if len(labels) != n:
raise ValueError(f'Array has {n} columns but got {len(labels)} labels.')
if labels is not None:
labels = [
str(_not_none(label, ''))
for label in data._to_numpy_array(labels)
]
else:
labels = n * [None]
# Yield successive columns
for i in range(n):
kw = kwargs.copy()
kw['label'] = labels[i] or None
a = tuple(
a if not data._is_array(a) or a.ndim < 2 else a[..., i] for a in args
)
yield (i, n, *a, kw)
def _inbounds_vlim(self, x, y, z, *, to_centers=False):
"""
Restrict the sample data used for automatic `vmin` and `vmax` selection
based on the existing x and y axis limits.
"""
# Get masks
# WARNING: Experimental, seems robust but this is not mission-critical so
# keep this in a try-except clause for now. However *internally* we should
# not reach this block unless everything is an array so raise that error.
xmask = ymask = None
if self._name != 'cartesian':
return z # TODO: support geographic projections when input is PlateCarree()
if not all(getattr(a, 'ndim', None) in (1, 2) for a in (x, y, z)):
raise ValueError('Invalid input coordinates. Must be 1D or 2D arrays.')
try:
# Get centers and masks
if to_centers and z.ndim == 2:
x, y = data._to_centers(x, y, z)
if not self.get_autoscalex_on():
xlim = self.get_xlim()
xmask = (x >= min(xlim)) & (x <= max(xlim))
if not self.get_autoscaley_on():
ylim = self.get_ylim()
ymask = (y >= min(ylim)) & (y <= max(ylim))
# Get subsample
if xmask is not None and ymask is not None:
z = z[np.ix_(ymask, xmask)] if z.ndim == 2 and xmask.ndim == 1 else z[ymask & xmask] # noqa: E501
elif xmask is not None:
z = z[:, xmask] if z.ndim == 2 and xmask.ndim == 1 else z[xmask]
elif ymask is not None:
z = z[ymask, :] if z.ndim == 2 and ymask.ndim == 1 else z[ymask]
return z
except Exception as err:
warnings._warn_proplot(
'Failed to restrict automatic colormap normalization '
f'to in-bounds data only. Error message: {err}'
)
return z
def _inbounds_xylim(self, extents, x, y, **kwargs):
"""
Restrict the `dataLim` to exclude out-of-bounds data when x (y) limits
are fixed and we are determining default y (x) limits. This modifies
the mutable input `extents` to support iteration over columns.
"""
# WARNING: This feature is still experimental. But seems obvious. Matplotlib
# updates data limits in ad hoc fashion differently for each plotting command
# but since proplot standardizes inputs we can easily use them for dataLim.
if extents is None:
return
if self._name != 'cartesian':
return
if not x.size or not y.size:
return
kwargs, vert = _get_vert(**kwargs)
if not vert:
x, y = y, x
trans = self.dataLim
autox, autoy = self.get_autoscalex_on(), self.get_autoscaley_on()
try:
if autoy and not autox and x.shape == y.shape:
# Reset the y data limits
xmin, xmax = sorted(self.get_xlim())
mask = (x >= xmin) & (x <= xmax)
ymin, ymax = data._safe_range(data._safe_mask(mask, y)) # in-bounds y
convert = self.convert_yunits # handle datetime, pint units
if ymin is not None:
trans.y0 = extents[1] = min(convert(ymin), extents[1])
if ymax is not None:
trans.y1 = extents[3] = max(convert(ymax), extents[3])
self._request_autoscale_view()
if autox and not autoy and y.shape == x.shape:
# Reset the x data limits
ymin, ymax = sorted(self.get_ylim())
mask = (y >= ymin) & (y <= ymax)
xmin, xmax = data._safe_range(data._safe_mask(mask, x)) # in-bounds x
convert = self.convert_xunits # handle datetime, pint units
if xmin is not None:
trans.x0 = extents[0] = min(convert(xmin), extents[0])
if xmax is not None:
trans.x1 = extents[2] = max(convert(xmax), extents[2])
self._request_autoscale_view()
except Exception as err:
warnings._warn_proplot(
'Failed to restrict automatic y (x) axis limit algorithm to '
f'data within locked x (y) limits only. Error message: {err}'
)
def _update_guide(
self, objs, colorbar=None, colorbar_kw=None, queue_colorbar=True,
legend=None, legend_kw=None,
):
"""
Update the queued artists for an on-the-fly legends and colorbars or track
the input keyword arguments on the artists for retrieval later on. The
`queue` argument indicates whether to draw colorbars immediately.
"""
# TODO: Support auto-splitting artists passed to legend into
# their legend elements. Play with this.
# WARNING: This should generally be last in the pipeline before calling
# the plot function or looping over data columns. The colormap parser
# and standardize functions both modify colorbar_kw and legend_kw.
if colorbar:
colorbar_kw = colorbar_kw or {}
colorbar_kw.setdefault('queue', queue_colorbar)
self.colorbar(objs, loc=colorbar, **colorbar_kw)
else:
guides._guide_kw_to_obj(objs, 'colorbar', colorbar_kw) # save for later
if legend:
legend_kw = legend_kw or {}
self.legend(objs, loc=legend, queue=True, **legend_kw)
else:
guides._guide_kw_to_obj(objs, 'legend', legend_kw) # save for later
def _parse_format1d(
self, x, *ys, zerox=False, autox=True, autoy=True, autoformat=None,
autoreverse=True, autolabels=True, autovalues=False, autoguide=True,
label=None, labels=None, value=None, values=None, **kwargs
):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also update the keyword arguments.
"""
# Parse input
y = max(ys, key=lambda y: y.size) # find a non-scalar y for inferring metadata
autox = autox and not zerox # so far just relevant for hist()
autoformat = _not_none(autoformat, rc['autoformat'])
kwargs, vert = _get_vert(**kwargs)
labels = _not_none(
label=label,
labels=labels,
value=value,
values=values,
legend_kw_labels=kwargs.get('legend_kw', {}).pop('labels', None),
colorbar_kw_values=kwargs.get('colorbar_kw', {}).pop('values', None),
)
# Retrieve the x coords
# NOTE: Where columns represent distributions, like for box and violinplot or
# where we use 'means' or 'medians', columns coords (axis 1) are 'x' coords.
# Otherwise, columns represent e.g. lines and row coords (axis 0) are 'x'
# coords. Exception is passing "ragged arrays" to boxplot and violinplot.
dists = any(kwargs.get(s) for s in ('mean', 'means', 'median', 'medians'))
raggd = any(getattr(y, 'dtype', None) == 'object' for y in ys)
xaxis = 0 if raggd else 1 if dists or not autoy else 0
if autox and x is None:
x = data._meta_labels(y, axis=xaxis) # use the first one
# Retrieve the labels. We only want default legend labels if this is an
# object with 'title' metadata and/or the coords are string.
# WARNING: Confusing terminology differences here -- for box and violin plots
# labels refer to indices along x axis.
if autolabels and labels is None:
laxis = 0 if not autox and not autoy else xaxis if not autoy else xaxis + 1
if laxis >= y.ndim:
labels = data._meta_title(y)
else:
labels = data._meta_labels(y, axis=laxis, always=False)
notitle = not data._meta_title(labels)
if labels is None:
pass
elif notitle and not any(isinstance(_, str) for _ in labels):
labels = None
# Apply the labels or values
if labels is not None:
if autovalues:
kwargs['values'] = data._to_numpy_array(labels)
elif autolabels:
kwargs['labels'] = data._to_numpy_array(labels)
# Apply title for legend or colorbar that uses the labels or values
if autoguide and autoformat:
title = data._meta_title(labels)
if title: # safely update legend_kw and colorbar_kw
guides._guide_kw_to_arg('legend', kwargs, title=title)
guides._guide_kw_to_arg('colorbar', kwargs, label=title)
# Apply the basic x and y settings
autox = autox and self._name == 'cartesian'
autoy = autoy and self._name == 'cartesian'
sx, sy = 'xy' if vert else 'yx'
kw_format = {}
if autox and autoformat: # 'x' axis
title = data._meta_title(x)
if title:
axis = getattr(self, sx + 'axis')
if axis.isDefault_label:
kw_format[sx + 'label'] = title
if autoy and autoformat: # 'y' axis
sy = sx if zerox else sy # hist() 'y' values are along 'x' axis
title = data._meta_title(y)
if title:
axis = getattr(self, sy + 'axis')
if axis.isDefault_label:
kw_format[sy + 'label'] = title
# Convert string-type coordinates
# NOTE: This should even allow qualitative string input to hist()
if autox:
x, kw_format = data._meta_coords(x, which=sx, **kw_format)
if autoy:
*ys, kw_format = data._meta_coords(*ys, which=sy, **kw_format)
if autox and autoreverse and x.ndim == 1 and x.size > 1 and x[1] < x[0]:
kw_format[sx + 'reverse'] = True
# Apply formatting
if kw_format:
self.format(**kw_format)
# Finally strip metadata
# WARNING: Most methods that accept 2D arrays use columns of data, but when
# pandas DataFrame specifically is passed to hist, boxplot, or violinplot, rows
# of data assumed! Converting to ndarray necessary.
ys = tuple(map(data._to_numpy_array, ys))
if x is not None: # pie() and hist()
x = data._to_numpy_array(x)
return (x, *ys, kwargs)
def _parse_plot1d(self, x, *ys, **kwargs):
"""
Interpret positional arguments for all "1D" plotting commands.
"""
# Standardize values
zerox = not ys
if zerox or all(y is None for y in ys): # pad with remaining Nones
x, *ys = None, x, *ys[1:]
if len(ys) == 2: # 'lines' or 'fill_between'
if ys[1] is None:
ys = (np.array([0.0]), ys[0]) # user input 1 or 2 positional args
elif ys[0] is None:
ys = (np.array([0.0]), ys[1]) # user input keyword 'y2' but no y1
if any(y is None for y in ys):
raise ValueError('Missing required data array argument.')
ys = tuple(map(data._to_duck_array, ys))
if x is not None:
x = data._to_duck_array(x)
x, *ys, kwargs = self._parse_format1d(x, *ys, zerox=zerox, **kwargs)
# Geographic corrections
if self._name == 'cartopy' and isinstance(kwargs.get('transform'), PlateCarree): # noqa: E501
x, *ys = data._geo_cartopy_1d(x, *ys)
elif self._name == 'basemap' and kwargs.get('latlon', None):
xmin, xmax = self._lonaxis.get_view_interval()
x, *ys = data._geo_basemap_1d(x, *ys, xmin=xmin, xmax=xmax)
return (x, *ys, kwargs)
def _parse_format2d(self, x, y, *zs, autoformat=None, autoguide=True, **kwargs):
"""
Try to retrieve default coordinates from array-like objects and apply default
formatting. Also apply optional transpose and update the keyword arguments.
"""
# Retrieve coordinates
autoformat = _not_none(autoformat, rc['autoformat'])
if x is None and y is None:
z = zs[0]
if z.ndim == 1:
x = data._meta_labels(z, axis=0)
y = np.zeros(z.shape) # default barb() and quiver() behavior in mpl
else:
x = data._meta_labels(z, axis=1)
y = data._meta_labels(z, axis=0)
# Apply labels and XY axis settings
if self._name == 'cartesian':
# Apply labels
# NOTE: Do not overwrite existing labels!
kw_format = {}
if autoformat:
for s, d in zip('xy', (x, y)):
title = data._meta_title(d)
if title:
axis = getattr(self, s + 'axis')
if axis.isDefault_label:
kw_format[s + 'label'] = title
# Handle string-type coordinates
x, kw_format = data._meta_coords(x, which='x', **kw_format)
y, kw_format = data._meta_coords(y, which='y', **kw_format)
for s, d in zip('xy', (x, y)):
if (
d.size > 1
and d.ndim == 1
and data._to_numpy_array(d)[1] < data._to_numpy_array(d)[0]
):
kw_format[s + 'reverse'] = True
# Apply formatting
if kw_format:
self.format(**kw_format)
# Apply title for legend or colorbar
if autoguide and autoformat:
title = data._meta_title(zs[0])
if title: # safely update legend_kw and colorbar_kw
guides._guide_kw_to_arg('legend', kwargs, title=title)
guides._guide_kw_to_arg('colorbar', kwargs, label=title)
# Finally strip metadata
x = data._to_numpy_array(x)
y = data._to_numpy_array(y)
zs = tuple(map(data._to_numpy_array, zs))
return (x, y, *zs, kwargs)
def _parse_plot2d(
self, x, y, *zs, globe=False, edges=False, allow1d=False,
transpose=None, order=None, **kwargs
):
"""
Interpret positional arguments for all "2D" plotting commands.
"""
# Standardize values
# NOTE: Functions pass two 'zs' at most right now
if all(z is None for z in zs):
x, y, zs = None, None, (x, y)[:len(zs)]
if any(z is None for z in zs):
raise ValueError('Missing required data array argument(s).')
zs = tuple(data._to_duck_array(z, strip_units=True) for z in zs)
if x is not None:
x = data._to_duck_array(x)
if y is not None:
y = data._to_duck_array(y)
if order is not None:
if not isinstance(order, str) or order not in 'CF':
raise ValueError(f"Invalid order={order!r}. Options are 'C' or 'F'.")
transpose = _not_none(
transpose=transpose, transpose_order=bool('CF'.index(order))
)
if transpose:
zs = tuple(z.T for z in zs)
if x is not None:
x = x.T
if y is not None:
y = y.T
x, y, *zs, kwargs = self._parse_format2d(x, y, *zs, **kwargs)
if edges:
            # NOTE: These functions quietly pass through 1D inputs, e.g. barb data
x, y = data._to_edges(x, y, zs[0])
else:
x, y = data._to_centers(x, y, zs[0])
# Geographic corrections
if allow1d:
pass
elif self._name == 'cartopy' and isinstance(kwargs.get('transform'), PlateCarree): # noqa: E501
x, y, *zs = data._geo_cartopy_2d(x, y, *zs, globe=globe)
elif self._name == 'basemap' and kwargs.get('latlon', None):
xmin, xmax = self._lonaxis.get_view_interval()
x, y, *zs = data._geo_basemap_2d(x, y, *zs, xmin=xmin, xmax=xmax, globe=globe) # noqa: E501
x, y = np.meshgrid(x, y) # WARNING: required always
return (x, y, *zs, kwargs)
def _parse_inbounds(self, *, inbounds=None, **kwargs):
"""
Capture the `inbounds` keyword arg and return data limit
extents if it is ``True``. Otherwise return ``None``. When
``_inbounds_xylim`` gets ``None`` it will silently exit.
"""
extents = None
inbounds = _not_none(inbounds, rc['axes.inbounds'])
if inbounds:
extents = list(self.dataLim.extents) # ensure modifiable
return kwargs, extents
def _parse_color(self, x, y, c, *, apply_cycle=True, infer_rgb=False, **kwargs):
"""
Parse either a colormap or color cycler. Colormap will be discrete and fade
to subwhite luminance by default. Returns a HEX string if needed so we don't
get ambiguous color warnings. Used with scatter, streamplot, quiver, barbs.
"""
# NOTE: This function is positioned above the _parse_cmap and _parse_cycle
# functions and helper functions.
methods = (
self._parse_cmap, self._parse_levels, self._parse_autolev, self._parse_vlim
)
if c is None or mcolors.is_color_like(c):
if infer_rgb and c is not None:
c = pcolors.to_hex(c) # avoid scatter() ambiguous color warning
if apply_cycle: # False for scatter() so we can wait to get correct 'N'
kwargs = self._parse_cycle(**kwargs)
else:
c = np.atleast_1d(c) # should only have effect on 'scatter' input
if infer_rgb and c.ndim == 2 and c.shape[1] in (3, 4):
c = list(map(pcolors.to_hex, c)) # avoid iterating over columns
else:
kwargs = self._parse_cmap(
x, y, c, plot_lines=True, default_discrete=False, **kwargs
)
methods = (self._parse_cycle,)
pop = _pop_params(kwargs, *methods, ignore_internal=True)
if pop:
warnings._warn_proplot(f'Ignoring unused keyword arg(s): {pop}')
return (c, kwargs)
def _parse_vlim(
self, *args,
vmin=None, vmax=None, to_centers=False,
robust=None, inbounds=None, **kwargs,
):
"""
Return a suitable vmin and vmax based on the input data.
Parameters
----------
*args
The sample data.
vmin, vmax : float, optional
The user input minimum and maximum.
robust : bool, optional
Whether to limit the default range to exclude outliers.
inbounds : bool, optional
Whether to filter to in-bounds data.
to_centers : bool, optional
Whether to convert coordinates to 'centers'.
Returns
-------
vmin, vmax : float
The minimum and maximum.
kwargs
            Unused arguments.
"""
# Parse vmin and vmax
automin = vmin is None
automax = vmax is None
if not automin and not automax:
return vmin, vmax, kwargs
# Parse input args
inbounds = _not_none(inbounds, rc['cmap.inbounds'])
robust = _not_none(robust, rc['cmap.robust'], False)
robust = 96 if robust is True else 100 if robust is False else robust
robust = np.atleast_1d(robust)
if robust.size == 1:
pmin, pmax = 50 + 0.5 * np.array([-robust.item(), robust.item()])
elif robust.size == 2:
pmin, pmax = robust.flat # pull out of array
else:
raise ValueError(f'Unexpected robust={robust!r}. Must be bool, float, or 2-tuple.') # noqa: E501
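        # Worked example (illustrative): robust=True maps to 96, so the percentile
        # bounds become pmin, pmax = 50 + 0.5 * (-96, 96) = (2, 98); robust=80 gives
        # (10, 90); a 2-tuple such as robust=(5, 95) is used directly.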
# Get sample data
# NOTE: Critical to use _to_duck_array here because some commands
# are unstandardized.
# NOTE: Try to get reasonable *count* levels for hexbin/hist2d, but in general
# have no way to select nice ones a priori (why we disable discretenorm).
# NOTE: Currently we only ever use this function with *single* array input
# but in future could make this public as a way for users (me) to get
# automatic synced contours for a bunch of arrays in a grid.
vmins, vmaxs = [], []
if len(args) > 2:
x, y, *zs = args
else:
x, y, *zs = None, None, *args
for z in zs:
if z is None: # e.g. empty scatter color
continue
if z.ndim > 2: # e.g. imshow data
continue
z = data._to_numpy_array(z)
if inbounds and x is not None and y is not None: # ignore if None coords
z = self._inbounds_vlim(x, y, z, to_centers=to_centers)
imin, imax = data._safe_range(z, pmin, pmax)
if automin and imin is not None:
vmins.append(imin)
if automax and imax is not None:
vmaxs.append(imax)
if automin:
vmin = min(vmins, default=0)
if automax:
vmax = max(vmaxs, default=1)
return vmin, vmax, kwargs
def _parse_autolev(
self, *args, levels=None,
extend=None, norm=None, norm_kw=None, vmin=None, vmax=None,
locator=None, locator_kw=None, symmetric=None, **kwargs
):
"""
Return a suitable level list given the input data, normalizer,
locator, and vmin and vmax.
Parameters
----------
*args
The sample data. Passed to `_parse_vlim`.
levels : int
The approximate number of levels.
vmin, vmax : float, optional
The approximate minimum and maximum level edges. Passed to the locator.
diverging : bool, optional
Whether the resulting levels are intended for a diverging normalizer.
symmetric : bool, optional
Whether the resulting levels should be symmetric about zero.
norm, norm_kw : optional
Passed to `~proplot.constructor.Norm`. Used to change the default
`locator` (e.g., a `~matplotlib.colors.LogNorm` normalizer will use
a `~matplotlib.ticker.LogLocator` to generate levels).
        Returns
        -------
levels : list of float
The level edges.
kwargs
Unused arguments.
"""
# Input args
# NOTE: Some of this is adapted from the hidden contour.ContourSet._autolev
# NOTE: We use 'symmetric' with MaxNLocator to ensure boundaries include a
# zero level but may trim many of these below.
norm_kw = norm_kw or {}
locator_kw = locator_kw or {}
extend = _not_none(extend, 'neither')
levels = _not_none(levels, rc['cmap.levels'])
vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))
vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))
norm = constructor.Norm(norm or 'linear', **norm_kw)
symmetric = _not_none(
symmetric=symmetric,
locator_kw_symmetric=locator_kw.pop('symmetric', None),
default=False,
)
# Get default locator from input norm
# NOTE: This normalizer is only temporary for inferring level locs
norm = constructor.Norm(norm or 'linear', **norm_kw)
if locator is not None:
locator = constructor.Locator(locator, **locator_kw)
elif isinstance(norm, mcolors.LogNorm):
locator = mticker.LogLocator(**locator_kw)
elif isinstance(norm, mcolors.SymLogNorm):
for key, default in (('base', 10), ('linthresh', 1)):
val = _not_none(getattr(norm, key, None), getattr(norm, '_' + key, None), default) # noqa: E501
locator_kw.setdefault(key, val)
locator = mticker.SymmetricalLogLocator(**locator_kw)
else:
locator_kw['symmetric'] = symmetric
locator = mticker.MaxNLocator(levels, min_n_ticks=1, **locator_kw)
# Get default level locations
nlevs = levels
automin = vmin is None
automax = vmax is None
vmin, vmax, kwargs = self._parse_vlim(*args, vmin=vmin, vmax=vmax, **kwargs)
try:
levels = locator.tick_values(vmin, vmax)
except RuntimeError: # too-many-ticks error
levels = np.linspace(vmin, vmax, levels) # TODO: _autolev used N+1
# Possibly trim levels far outside of 'vmin' and 'vmax'
# NOTE: This part is mostly copied from matplotlib _autolev
if not symmetric:
i0, i1 = 0, len(levels) # defaults
under, = np.where(levels < vmin)
if len(under):
i0 = under[-1]
if not automin or extend in ('min', 'both'):
i0 += 1 # permit out-of-bounds data
over, = np.where(levels > vmax)
if len(over):
i1 = over[0] + 1 if len(over) else len(levels)
if not automax or extend in ('max', 'both'):
i1 -= 1 # permit out-of-bounds data
if i1 - i0 < 3:
i0, i1 = 0, len(levels) # revert
levels = levels[i0:i1]
# Compare the no. of levels we got (levels) to what we wanted (nlevs)
# If we wanted more than 2 times the result, then add nn - 1 extra
# levels in-between the returned levels in normalized space (e.g. LogNorm).
nn = nlevs // len(levels)
if nn >= 2:
olevels = norm(levels)
nlevels = []
for i in range(len(levels) - 1):
l1, l2 = olevels[i], olevels[i + 1]
nlevels.extend(np.linspace(l1, l2, nn + 1)[:-1])
nlevels.append(olevels[-1])
levels = norm.inverse(nlevels)
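        # Worked example (illustrative): if 10 levels were requested but the locator
        # returned only 3, then nn = 10 // 3 = 3, so each of the 2 gaps receives
        # np.linspace(l1, l2, nn + 1)[:-1] (3 points) in normalized space, giving
        # 2 * 3 + 1 = 7 levels after appending the final edge.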
return levels, kwargs
def _parse_levels(
self, *args, N=None, levels=None, values=None, extend=None,
positive=False, negative=False, nozero=False, norm=None, norm_kw=None,
vmin=None, vmax=None, skip_autolev=False, min_levels=None, **kwargs,
):
"""
Return levels resulting from a wide variety of keyword options.
Parameters
----------
*args
The sample data. Passed to `_parse_vlim`.
N
Shorthand for `levels`.
levels : int or sequence of float, optional
The levels list or (approximate) number of levels to create.
values : int or sequence of float, optional
The level center list or (approximate) number of level centers to create.
positive, negative, nozero : bool, optional
Whether to remove out non-positive, non-negative, and zero-valued
levels. The latter is useful for single-color contour plots.
norm, norm_kw : optional
            Passed to `Norm`. Used to possibly infer levels or to convert values.
skip_autolev : bool, optional
Whether to skip autolev parsing.
min_levels : int, optional
The minimum number of levels allowed.
Returns
-------
levels : list of float
The level edges.
kwargs
Unused arguments.
"""
# Rigorously check user input levels and values
# NOTE: Include special case where color levels are referenced by string labels
levels = _not_none(N=N, levels=levels, norm_kw_levs=norm_kw.pop('levels', None))
        min_levels = _not_none(min_levels, 2)  # 1 for contour plots
if positive and negative:
negative = False
warnings._warn_proplot(
'Incompatible args positive=True and negative=True. Using former.'
)
if levels is not None and values is not None:
warnings._warn_proplot(
f'Incompatible args levels={levels!r} and values={values!r}. Using former.' # noqa: E501
)
for key, points in (('levels', levels), ('values', values)):
if points is None:
continue
if isinstance(norm, (mcolors.BoundaryNorm, pcolors.SegmentedNorm)):
warnings._warn_proplot(
f'Ignoring {key}={points}. Instead using norm={norm!r} boundaries.'
)
if not np.iterable(points):
continue
if len(points) < min_levels:
raise ValueError(
f'Invalid {key}={points}. Must be at least length {min_levels}.'
)
if isinstance(norm, (mcolors.BoundaryNorm, pcolors.SegmentedNorm)):
levels, values = norm.boundaries, None
else:
levels = _not_none(levels, rc['cmap.levels'])
# Infer level edges from level centers if possible
# NOTE: The only way for user to manually impose BoundaryNorm is by
# passing one -- users cannot create one using Norm constructor key.
if isinstance(values, Integral):
levels = values + 1
elif values is None:
pass
elif not np.iterable(values):
raise ValueError(f'Invalid values={values!r}.')
elif len(values) == 0:
levels = [] # weird but why not
elif len(values) == 1:
levels = [values[0] - 1, values[0] + 1] # weird but why not
elif norm is not None and norm not in ('segments', 'segmented'):
# Generate levels by finding in-between points in the
# normalized numeric space, e.g. LogNorm space.
norm_kw = norm_kw or {}
convert = constructor.Norm(norm, **norm_kw)
levels = convert.inverse(utils.edges(convert(values)))
else:
# Try to generate levels so SegmentedNorm will place 'values' ticks at the
# center of each segment. edges() gives wrong result unless spacing is even.
# Solve: (x1 + x2) / 2 = y --> x2 = 2 * y - x1 with arbitrary starting x1.
descending = values[1] < values[0]
if descending: # e.g. [100, 50, 20, 10, 5, 2, 1] successful if reversed
values = values[::-1]
levels = [1.5 * values[0] - 0.5 * values[1]] # arbitrary starting point
for value in values:
levels.append(2 * value - levels[-1])
if np.any(np.diff(levels) < 0):
levels = utils.edges(values)
if descending: # then revert back below
levels = levels[::-1]
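            # Worked example (illustrative): values=[1, 2, 4] gives a starting edge of
            # 1.5 * 1 - 0.5 * 2 = 0.5, then successive edges 2 * 1 - 0.5 = 1.5,
            # 2 * 2 - 1.5 = 2.5 and 2 * 4 - 2.5 = 5.5, so levels=[0.5, 1.5, 2.5, 5.5]
            # whose segment midpoints recover 1, 2 and 4 exactly.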
# Process level edges and infer defaults
# NOTE: Matplotlib colorbar algorithm *cannot* handle descending levels so
# this function reverses them and adds special attribute to the normalizer.
# Then colorbar() reads this attr and flips the axis and the colormap direction
if np.iterable(levels):
pop = _pop_params(kwargs, self._parse_autolev, ignore_internal=True)
if pop:
warnings._warn_proplot(f'Ignoring unused keyword arg(s): {pop}')
elif not skip_autolev:
levels, kwargs = self._parse_autolev(
*args, levels=levels, norm=norm, norm_kw=norm_kw, extend=extend, **kwargs # noqa: E501
)
ticks = values if np.iterable(values) else levels
if ticks is not None and np.iterable(ticks):
guides._guide_kw_to_arg('colorbar', kwargs, locator=ticks)
# Filter the level boundaries
if levels is not None and np.iterable(levels):
if nozero:
levels = levels[levels != 0]
if positive:
levels = levels[levels >= 0]
if negative:
levels = levels[levels <= 0]
return levels, kwargs
def _parse_discrete(
self, levels, norm, cmap, *, extend=None, min_levels=None, **kwargs,
):
"""
Create a `~proplot.colors.DiscreteNorm` or `~proplot.colors.BoundaryNorm`
from the input colormap and normalizer.
Parameters
----------
levels : sequence of float
The level boundaries.
norm : `~matplotlib.colors.Normalize`
The continuous normalizer.
cmap : `~matplotlib.colors.Colormap`
The colormap.
extend : str, optional
The extend setting.
min_levels : int, optional
The minimum number of levels.
Returns
-------
norm : `~proplot.colors.DiscreteNorm`
The discrete normalizer.
cmap : `~matplotlib.colors.Colormap`
The possibly-modified colormap.
kwargs
Unused arguments.
"""
# Reverse the colormap if input levels or values were descending
# See _parse_levels for details
min_levels = _not_none(min_levels, 2) # 1 for contour plots
unique = extend = _not_none(extend, 'neither')
under = cmap._rgba_under
over = cmap._rgba_over
cyclic = getattr(cmap, '_cyclic', None)
qualitative = isinstance(cmap, pcolors.DiscreteColormap) # see _parse_cmap
if len(levels) < min_levels:
raise ValueError(
f'Invalid levels={levels!r}. Must be at least length {min_levels}.'
)
# Ensure end colors are unique by scaling colors as if extend='both'
# NOTE: Inside _parse_cmap should have enforced extend='neither'
if cyclic:
step = 0.5
unique = 'both'
# Ensure color list length matches level list length using rotation
# NOTE: No harm if not enough colors, we just end up with the same
# color for out-of-bounds extensions. This is a gentle failure
elif qualitative:
step = 0.5 # try to sample the central index for safety, but not important
unique = 'neither'
auto_under = under is None and extend in ('min', 'both')
auto_over = over is None and extend in ('max', 'both')
ncolors = len(levels) - min_levels + 1 + auto_under + auto_over
colors = list(itertools.islice(itertools.cycle(cmap.colors), ncolors))
if auto_under and len(colors) > 1:
under, *colors = colors
if auto_over and len(colors) > 1:
*colors, over = colors
cmap = cmap.copy(colors, N=len(colors))
if under is not None:
cmap.set_under(under)
if over is not None:
cmap.set_over(over)
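            # Worked example (illustrative): with 6 level edges, min_levels=2,
            # extend='both' and no preset extreme colors, ncolors = 6 - 2 + 1 + 1 + 1 = 7;
            # the cycled color list is split so its first entry becomes the under color,
            # its last becomes the over color, and 5 colors remain for the 5 interior bins.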
# Ensure middle colors sample full range when extreme colors are present
# by scaling colors as if extend='neither'
else:
step = 1.0
if over is not None and under is not None:
unique = 'neither'
elif over is not None: # turn off over-bounds unique bin
if extend == 'both':
unique = 'min'
elif extend == 'max':
unique = 'neither'
elif under is not None: # turn off under-bounds unique bin
if extend == 'both':
                    unique = 'max'
                elif extend == 'min':
unique = 'neither'
# Generate DiscreteNorm and update "child" norm with vmin and vmax from
# levels. This lets the colorbar set tick locations properly!
if not isinstance(norm, mcolors.BoundaryNorm) and len(levels) > 1:
norm = pcolors.DiscreteNorm(levels, norm=norm, unique=unique, step=step)
return norm, cmap, kwargs
@warnings._rename_kwargs('0.6', centers='values')
def _parse_cmap(
self, *args,
cmap=None, cmap_kw=None, c=None, color=None, colors=None, default_cmap=None,
norm=None, norm_kw=None, extend=None, vmin=None, vmax=None,
sequential=None, diverging=None, qualitative=None, cyclic=None,
discrete=None, default_discrete=True, skip_autolev=False,
plot_lines=False, plot_contours=False, min_levels=None, **kwargs
):
"""
Parse colormap and normalizer arguments.
Parameters
----------
c, color, colors : sequence of color-spec, optional
Build a `DiscreteColormap` from the input color(s).
sequential, diverging, qualitative, cyclic : bool, optional
Toggle various colormap types.
plot_lines : bool, optional
Whether these are lines. In that case the default maximum
luminance for monochromatic colormaps will be 90 instead of 100.
plot_contours : bool, optional
Whether these are contours. Determines whether 'discrete'
            is required and which keyword args are returned.
min_levels : int, optional
The minimum number of valid levels. This is 1 for line contour plots.
"""
# Parse keyword args
# NOTE: Always disable 'autodiverging' when an unknown colormap is passed to
# avoid awkwardly combining 'DivergingNorm' with sequential colormaps.
# However let people use diverging=False with diverging cmaps because
# some use them (wrongly IMO but nbd) for increased color contrast.
cmap_kw = cmap_kw or {}
norm_kw = norm_kw or {}
vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))
vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))
extend = _not_none(extend, 'neither')
colors = _not_none(c=c, color=color, colors=colors) # in case untranslated
autodiverging = rc['cmap.autodiverging']
name = getattr(cmap, 'name', cmap)
if isinstance(name, str) and name not in pcolors.CMAPS_DIVERGING:
autodiverging = False # avoid auto-truncation of sequential colormaps
# Build qualitative colormap using 'colors'
# NOTE: Try to match number of level centers with number of colors here
# WARNING: Previously 'colors' set the edgecolors. To avoid all-black
# colormap make sure to ignore 'colors' if 'cmap' was also passed.
# WARNING: Previously tried setting number of levels to len(colors) but
# this would make single-level contour plots and _parse_autolev is designed
# to only give approximate level count so failed anyway. Users should pass
# their own levels to avoid truncation/cycling in these very special cases.
if cmap is not None and colors is not None:
warnings._warn_proplot(
                f'You specified both cmap={cmap!r} and the qualitative-colormap '
f"colors={colors!r}. Ignoring 'colors'. If you meant to specify the "
f'edge color please use ec={colors!r} or edgecolor={colors!r} instead.'
)
colors = None
if colors is not None:
if mcolors.is_color_like(colors):
colors = [colors] # RGB[A] tuple possibly
cmap = colors = | np.atleast_1d(colors) | numpy.atleast_1d |
import mobula.layers as L
import numpy as np
def go_convt(stride, pad):
print ("test ConvT: ", stride, pad)
X = np.random.random((2, 4, 4, 4)) * 100
N, D, NH, NW = X.shape
K = 3
C = 1
FW = np.random.random((D, C, K * K)) * 10
F = FW.reshape((D, C, K, K))
data = L.Data(X)
convT = L.ConvT(data, kernel = K, pad = pad, stride = stride, dim_out = C)
pad_h = pad_w = pad
kernel_h = kernel_w = K
OH = (NH - 1) * stride + kernel_h - pad_h * 2
OW = (NW - 1) * stride + kernel_w - pad_w * 2
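    # Worked size example (illustrative): with NH = 4, stride = 2, K = 3 and pad = 1,
    # OH = (4 - 1) * 2 + 3 - 2 * 1 = 7; this inverts the forward-conv relation
    # NH = (OH + 2 * pad - K) // stride + 1, since (7 + 2 - 3) // 2 + 1 = 4.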
data.reshape()
convT.reshape()
convT.W = FW
convT.b = np.random.random(convT.b.shape) * 10
# Conv: (OH, OW) -> (NH, NW)
# ConvT: (NH. NW) -> (OH, OW)
influence = [[[None for _ in range(kernel_h * kernel_w)] for _ in range(OW)] for _ in range(OH)]
for h in range(NH):
for w in range(NW):
for fh in range(kernel_h):
for fw in range(kernel_w):
ph = h * stride + fh
pw = w * stride + fw
oh = ph - pad_h
ow = pw - pad_w
if oh >= 0 and ow >= 0 and oh < OH and ow < OW:
influence[oh][ow][fh * kernel_w + fw] = (h, w)
ty = np.zeros((N, C, OH, OW))
dW = np.zeros(convT.W.shape)
dX = np.zeros(convT.X.shape)
dY = np.random.random(convT.Y.shape) * 100
# F = FW.reshape((D, C, K, K))
# N, D, NH, NW = X.shape
for i in range(N):
for c in range(C):
for oh in range(OH):
for ow in range(OW):
il = influence[oh][ow]
for t, pos in enumerate(il):
if pos is not None:
h,w = pos
for d in range(D):
ty[i, c, oh, ow] += X[i, d, h, w] * FW[d, c].ravel()[t]
dW[d, c].ravel()[t] += dY[i, c, oh, ow] * X[i, d, h, w]
dX[i, d, h, w] += dY[i, c, oh, ow] * FW[d, c].ravel()[t]
ty += convT.b.reshape((1, -1, 1, 1))
db = np.sum(dY, (0, 2, 3)).reshape(convT.b.shape)
convT.forward()
assert np.allclose(convT.Y, ty)
# test backward
# db, dw, dx
convT.dY = dY
convT.backward()
assert np.allclose(convT.db, db)
assert | np.allclose(convT.dW, dW) | numpy.allclose |
import datetime
import json
import numpy as np
import time
from operator import itemgetter
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.conf import settings
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect, JsonResponse
)
from ..cart.utils import set_cart_cookie
from ..core.utils import serialize_decimal
from ..seo.schema.product import product_json_ld
from ..feature.models import ProductFeature, Feature
from .filters import ProductCategoryFilter, ProductBrandFilter, ProductCollectionFilter
from .models import Category, Collection, ProductRating, Brand, Product, MerchantLocation
from ..order.models import Order, OrderLine
from .utils import (
get_product_images, get_product_list_context, handle_cart_form,
products_for_cart, products_with_details)
from .utils.attributes import get_product_attributes_data
from django.db.models import Case, When
from .utils.availability import get_availability
from ..search.views import render_item, paginate_results, custom_query_validation
from .utils.variants_picker import get_variant_picker_data
from ..core.helper import create_navbar_tree
from .helper import (
get_filter_values, get_descendant, get_cross_section_order,
get_cross_section_rating, get_list_product_from_order, get_list_product_from_rating,
get_list_user_from_order, get_list_user_from_rating, get_all_user_rating, get_all_user_order_history,
get_product_order_history, get_user_order_history, get_product_rating_history, get_rating_relevant_item,
get_order_relevant_item, get_visit_relevant_item, get_all_rating_data, get_all_order_data)
from django.db.models import Avg
from joblib import (Parallel, delayed)
import psutil
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework import permissions
from math import log10
from django.db import connection,transaction
from .utils.availability import products_with_availability
import urllib
from django.forms.models import model_to_dict
from django.contrib.auth.models import AnonymousUser
from ..account.models import User
from ..track.models import VisitProduct, SearchHistory
APPROVED_FILTER = ['Brand','Jenis','Color','Gender']
# RECOMMENDATION MODULE PARAMETERS:
"""
EVALUATION:
A strict approach means the evaluation only compares the recommended items with the user's actual item data,
whilst the non-strict approach compares the recommended items with every item from the top 5 (default)
categories each user favoured. The non-strict approach is inspired by user behaviour: a user tends to like or need
only a small number of categories, so we can recommend every product from those categories to them.
COLLABORATIVE:
We use the Associative Retrieval Correlation (ARC) algorithm for collaborative filtering. You need to specify the maximum
ordinality used by ARC. By default the system breaks the iteration once every product has been matched to a user,
but to avoid a high number of iterations you can set ARC_ORDINALITY as the maximum ordinality the system will handle.
CONTENT BASE:
We use a smoothed TF-IDF approach to score the similarity between items. This method retrieves information from each item's
name, brand, category, description, information, service, location, and specification, so it is highly dependent
on how clearly each item is described. Luckily we use actual e-commerce data crawled from www.blibli.com (thanks for the data),
so we get satisfying results with this type of filtering. By default you will get all similar items, but you can specify a number
as a limit on how many similar items to retrieve.
"""
LIMIT_COLLABORATIVE = 15 #A REAL NUMBER RANGE FROM 0 TO ANY POSITIVE NUMBER, IF NOT 0 THEN USE THE LIMIT IF 0 THEN USE ALL
LIMIT_CONTENT_BASE = 30 #A REAL NUMBER RANGE FROM 1 TO ANY POSITIVE NUMBER, GET THE NUMBER OF SIMILAR ITEM(S)
EVALUATION_MODE = 0 #A NUMBER OF 0 OR 1, IF 0 THEN USE A NON-STRICT APPROACH IF 1 THEN USE A STRICT APPROACH
ARC_ORDINALITY = 9 #A POSITIVE ODD REAL NUMBER, IN RANGE OF 1 TO 13, IF 1 THEN RETURN THE USER'S ORIGINAL DATA
LIMIT_FEATURED = 12 #A POSITIVE REAL NUMBER STARTING FROM 1, TO LIMIT NUMBER OF FEATURED PRODUCT IN STOREFRONT
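# Hedged sketch (illustrative only; the ARC implementation itself is not shown in
# this module). The ordinality above can be read as repeated spreading activation
# over the user-item interaction matrix A (users x items): ordinality 1 is A itself
# (each user's original data), and every additional odd ordinality multiplies by
# A.T @ A once more, e.g.
#   W1 = A
#   W3 = A @ A.T @ A
#   W5 = A @ A.T @ A @ A.T @ A
# so ARC_ORDINALITY = 9 corresponds to four such hops before recommendations are
# read off each user's row of the resulting matrix.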
def product_details(request, slug, product_id, form=None):
"""Product details page.
The following variables are available to the template:
product:
The Product instance itself.
is_visible:
Whether the product is visible to regular users (for cases when an
admin is previewing a product before publishing).
form:
The add-to-cart form.
price_range:
The PriceRange for the product including all discounts.
undiscounted_price_range:
The PriceRange excluding all discounts.
discount:
Either a Price instance equal to the discount value or None if no
discount was available.
local_price_range:
The same PriceRange from price_range represented in user's local
currency. The value will be None if exchange rate is not available or
the local currency is the same as site's default currency.
"""
try:
product = Product.objects.get(id=product_id)
except Product.DoesNotExist:
        raise Http404('No %s matches the given query.' % Product._meta.object_name)
if product.get_slug() != slug:
return HttpResponsePermanentRedirect(product.get_absolute_url())
today = datetime.date.today()
is_visible = (
product.available_on is None or product.available_on <= today)
if form is None:
form = handle_cart_form(request, product, create_cart=False)[0]
availability = get_availability(product, discounts=request.discounts,
local_currency=request.currency)
product_images = get_product_images(product)
variant_picker_data = get_variant_picker_data(
product, request.discounts, request.currency)
product_attributes = get_product_attributes_data(product)
# show_variant_picker determines if variant picker is used or select input
show_variant_picker = all([v.attributes for v in product.variants.all()])
json_ld_data = product_json_ld(product, product_attributes)
rating = ProductRating.objects.filter(product_id=product).aggregate(value=Avg('value'))
rating['value'] = 0.0 if rating['value'] is None else rating['value']
brand = Brand.objects.get(id=product.brand_id_id)
tags = []
product_features = list(ProductFeature.objects.filter(product_id_id=product_id).values_list('feature_id_id', flat=True))
product_info = product.information
product_service = product.service
product_info = json.loads(product_info)
product_service = json.loads(product_service)
location = MerchantLocation.objects.get(id=product.location_id)
location_query = '+'.join(map(lambda e: e, str(location.location).split(' ')))
if product_features:
tags = Feature.objects.filter(id__in=product_features)
return TemplateResponse(
request, 'product/details.html', {
'is_visible': is_visible,
'form': form,
'availability': availability,
'rating' : rating,
'tags' : tags,
'service' : product_service,
'information' : product_info,
'brand' : brand,
'location':location,
'location_query':location_query,
'product': product,
'product_attributes': product_attributes,
'product_images': product_images,
'show_variant_picker': show_variant_picker,
'variant_picker_data': json.dumps(
variant_picker_data, default=serialize_decimal),
'json_ld_product_data': json.dumps(
json_ld_data, default=serialize_decimal)})
def product_add_to_cart(request, slug, product_id):
# types: (int, str, dict) -> None
if not request.method == 'POST':
return redirect(reverse(
'product:details',
kwargs={'product_id': product_id, 'slug': slug}))
products = products_for_cart(user=request.user)
product = get_object_or_404(products, pk=product_id)
form, cart = handle_cart_form(request, product, create_cart=True)
if form.is_valid():
form.save()
if request.is_ajax():
response = JsonResponse(
{'next': reverse('cart:index')}, status=200)
else:
response = redirect('cart:index')
else:
if request.is_ajax():
response = JsonResponse({'error': form.errors}, status=400)
else:
response = product_details(request, slug, product_id, form)
if not request.user.is_authenticated:
set_cart_cookie(cart, response)
return response
def category_index(request, path, category_id):
category = get_object_or_404(Category, id=category_id)
actual_path = category.get_full_path()
if actual_path != path:
return redirect('product:category', permanent=True, path=actual_path,
category_id=category_id)
# Check for subcategories
# categories = category.get_descendants(include_self=True)
categories = get_descendant(category_id,with_self=True)
products = products_with_details(user=request.user).filter(
category__in=categories).order_by('category_id','name')
approved_values = get_filter_values(categories, APPROVED_FILTER)
product_filter= ProductCategoryFilter(
request.GET, queryset=products, category=categories, attributes=APPROVED_FILTER, values=approved_values)
ctx = get_product_list_context(request, product_filter)
ctx.update({'object': category})
return TemplateResponse(request, 'category/index.html', ctx)
def brand_index(request, path, brand_id):
brand = get_object_or_404(Brand, id=brand_id)
actual_path = brand.get_full_path()
if actual_path != path:
return redirect('product:brand', permanent=True, path=actual_path,
brand_id=brand_id)
categories = Product.objects.values('category_id').distinct().filter(brand_id_id=brand_id)
products = products_with_details(user=request.user).filter(
brand_id_id=brand_id).order_by('name')
product_filter= ProductBrandFilter(
request.GET, queryset=products, category=categories, attributes=['Jenis','Color','Gender'])
ctx = get_product_list_context(request, product_filter)
ctx.update({'object': brand})
return TemplateResponse(request, 'brand/index.html', ctx)
def tags_index(request, path, tag_id):
request_page = int(request.GET.get('page','')) if request.GET.get('page','') else 1
tag = get_object_or_404(Feature, id=tag_id)
actual_path = tag.get_full_path()
if actual_path != path:
return redirect('product:tags', permanent=True, path=actual_path,
tag_id=tag_id)
ctx = {
'query': tag,
'query_string': '?page='+ str(request_page)
}
request.session['tag_query'] = tag_id
request.session['tag_page'] = request_page
response = TemplateResponse(request, 'tag/index.html', ctx)
return response
def tags_render(request):
ratings = list(ProductRating.objects.all().values('product_id').annotate(value=Avg('value')))
request_page = 1
if 'page' not in request.GET:
if 'tag_page' in request.session and request.session['tag_page']:
request_page = request.session['tag_page']
else:
request_page = int(request.GET.get('page')) if request.GET.get('page') else 1
tag = get_object_or_404(Feature, id=request.session['tag_query'])
results = []
start = (settings.PAGINATE_BY*(request_page-1))
end = start+(settings.PAGINATE_BY)
populate_product = list(ProductFeature.objects.filter(feature_id_id=tag.id).values_list('product_id_id', flat=True))
products = list(Product.objects.filter(id__in=populate_product[start:end]))
results = Parallel(n_jobs=psutil.cpu_count()*2,
verbose=50,
require='sharedmem',
backend="threading")(delayed(render_item)(item,request.discounts,request.currency,ratings) for item in products)
front = [i for i in range((start))]
results = front+results
for item in populate_product[end:]:
results.append(item)
page = paginate_results(list(results), request_page)
ctx = {
'query': tag,
'count_query' : len(results) if results else 0,
'results': page,
'query_string': '?page='+ str(request_page)}
response = TemplateResponse(request, 'tag/results.html', ctx)
return response
def collection_index(request, slug, pk):
collection = get_object_or_404(Collection, id=pk)
if collection.slug != slug:
return HttpResponsePermanentRedirect(collection.get_absolute_url())
products = products_with_details(user=request.user).filter(
collections__id=collection.id).order_by('name')
product_filter = ProductCollectionFilter(
request.GET, queryset=products, collection=collection)
ctx = get_product_list_context(request, product_filter)
ctx.update({'object': collection})
return TemplateResponse(request, 'collection/index.html', ctx)
def get_similar_product(product_id,limit=0):
start_time = time.time()
try:
product = Product.objects.get(id=product_id)
except Product.DoesNotExist:
        raise Http404('No %s matches the given query.' % Product._meta.object_name)
pivot_feature = list(ProductFeature.objects.filter(product_id_id=product_id).values_list('feature_id_id', flat=True))
in_clause = '('
for i,f in enumerate(pivot_feature):
in_clause += str(f)
if i < len(pivot_feature)-1:
in_clause += ', '
in_clause += ')'
query = """
SELECT fp.product_id_id AS id, f.id AS fid, f.count, fp.frequency
FROM feature_feature f JOIN feature_productfeature fp
ON fp.feature_id_id = f.id AND fp.feature_id_id IN """+in_clause+"""
ORDER BY id
"""
cursor = connection.cursor()
cursor.execute(query)
temp_product_features = np.array([item[:] for item in cursor.fetchall()] )
cursor.close()
total = len(list(Product.objects.all().values_list('id', flat=True)))
xs = temp_product_features[:,0]
ys = temp_product_features[:,1]
zs = temp_product_features[:,2]
fs = temp_product_features[:,3]
xs_val, xs_idx = np.unique(xs, return_inverse=True)
ys_val, ys_idx = np.unique(ys, return_inverse=True)
results_count = np.zeros(xs_val.shape+ys_val.shape)
results_freq = np.zeros(xs_val.shape+ys_val.shape)
results_count.fill(0)
results_freq.fill(0)
results_count[xs_idx,ys_idx] = zs
results_freq[xs_idx,ys_idx] = fs
pivot_idx = np.in1d(xs_val, float(product_id)).nonzero()[0]
pivot_freq = results_freq[pivot_idx]
results_freq = (1-(abs(results_freq-pivot_freq)/pivot_freq)) #normalize the frequency in order to make the pivot item always ranked first
temp_pivot = np.array(pivot_feature)
results_freq[results_freq[:,:]<0] = 0
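    # Worked example (illustrative): if the pivot product carries a feature with
    # frequency 4 and a candidate product carries it with frequency 3, the score is
    # 1 - |3 - 4| / 4 = 0.75; equal frequencies score 1.0, and scores driven below
    # zero are clipped to 0 by the line above.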
check_idx = np.in1d(ys_val, temp_pivot).nonzero()[0]
# results_count = total/results_count
# results_count[results_count[:,:]==float('inf')] = 0
# final_weight = np.log10(1+results_count)
final_weight = results_freq
mask = np.ones(len(ys_val), np.bool)
mask[check_idx] = 0
final_weight[:,mask] = 0
del temp_product_features
del results_count
del results_freq
del check_idx
del xs
del ys
del zs
del fs
target_feature = len(pivot_feature)
arr_sum = np.zeros((len(xs_val),2))
arr_sum[:,0] = xs_val
arr_sum[:,1] = (np.sum(final_weight, axis=1))/target_feature
arr_sum = arr_sum[arr_sum[:,1].argsort()[::-1]]
if limit > 0:
arr_sum = arr_sum[:limit]
del final_weight
arr_sum = arr_sum.tolist()
list_similar_product = [{'id':item[0],'similarity':item[1]} for item in arr_sum]
list_similar_product = list(filter(lambda e:e.get('similarity') > 0, list_similar_product))
list_similar_product = sorted(list_similar_product, key=itemgetter('similarity'), reverse=True)
print('done populating similar products in %s'%(time.time() - start_time))
return list_similar_product
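# Illustrative sketch (not part of the original views): the frequency-normalisation step
# used in get_similar_product() above, on made-up arrays whose columns are restricted to
# the pivot product's features. The pivot row scores 1.0 and rows whose feature
# frequencies differ from the pivot's are penalised, clipped at zero.
def _example_frequency_normalisation():
    import numpy as np
    results_freq = np.array([[2., 4.],   # pivot product
                             [1., 8.],
                             [2., 2.]])
    pivot_freq = results_freq[0]
    norm = 1. - np.abs(results_freq - pivot_freq) / pivot_freq
    norm[norm < 0] = 0
    return norm.sum(axis=1) / norm.shape[1]   # per-product similarity in [0, 1]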
def render_similar_product(request, product_id):
start_time = time.time()
list_similar_product = []
products = []
try:
product = Product.objects.get(id=product_id)
except Product.DoesNotExist:
return TemplateResponse(request, 'product/_small_items.html', {
'products': products})
status = False
check = []
if 'similar_product' in request.session and request.session['similar_product']:
check = list(filter(lambda e : e['id'] == product.id, request.session['similar_product']))
if check and check[0]['related']:
status = True
if status:
list_similar_product = check[0]['related']
else:
temp = {}
list_similar_product = get_similar_product(product_id)
if list_similar_product:
all_temp = []
temp['id'] = product.id
temp['related'] = list_similar_product
if 'similar_product' in request.session and request.session['similar_product']:
all_temp = request.session['similar_product']
all_temp.append(temp)
request.session['similar_product'] = all_temp
list_similarity = []
if list_similar_product:
preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate([d['id'] for d in list_similar_product[:12]])])
products = list(Product.objects.filter(id__in=[d['id'] for d in list_similar_product[:12]]).order_by(preserved))
products = products_with_availability(
products, discounts=request.discounts, local_currency=request.currency)
list_similarity = [round(d['similarity'],4) for d in list_similar_product[:12]]
response = TemplateResponse(
request, 'product/_small_items.html', {
'products': products, 'product_id':product_id, 'similarity':list_similarity})
print("\nWaktu eksekusi : --- %s detik ---" % (time.time() - start_time))
return response
def all_similar_product(request, product_id):
request_page = int(request.GET.get('page','')) if request.GET.get('page','') else 1
try:
product = Product.objects.get(id=product_id)
except Product.DoesNotExist:
raise Http404('No %s matches the given query.' % Product._meta.object_name)
ctx = {
'query': product,
'product_id' : product_id,
'query_string': '?page='+ str(request_page)
}
request.session['similar_page'] = request_page
response = TemplateResponse(request, 'product/all_similar.html', ctx)
return response
def render_all_similar_product(request, product_id):
start_time = time.time()
list_similar_product = []
products = []
request_page = int(request.GET.get('page')) if request.GET.get('page') else 1
try:
product = Product.objects.get(id=product_id)
except Product.DoesNotExist:
ctx = {
'query': Product._meta.object_name,
'count_query' : '-',
'results': [],
'query_string': '?page='+ str(request_page)}
return TemplateResponse(request, 'product/similar_results.html', ctx)
status = False
check = []
if 'similar_product' in request.session and request.session['similar_product']:
check = list(filter(lambda e : e['id'] == product.id, request.session['similar_product']))
if check and check[0]['related']:
status = True
if status:
list_similar_product = check[0]['related']
else:
temp = {}
list_similar_product = get_similar_product(product_id)
if list_similar_product:
all_temp = []
temp['id'] = product.id
temp['related'] = list_similar_product
if 'similar_product' in request.session and request.session['similar_product']:
all_temp = request.session['similar_product']
all_temp.append(temp)
request.session['similar_product'] = all_temp
if list_similar_product:
ratings = list(ProductRating.objects.all().values('product_id').annotate(value=Avg('value')))
if 'page' not in request.GET:
if 'similar_page' in request.session and request.session['similar_page']:
request_page = request.session['similar_page']
else:
results = []
start = (settings.PAGINATE_BY*(request_page-1))
end = start+(settings.PAGINATE_BY)
preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate([d['id'] for d in list_similar_product[start:end]])])
products = list(Product.objects.filter(id__in=[d['id'] for d in list_similar_product[start:end]]).order_by(preserved))
results = Parallel(n_jobs=psutil.cpu_count()*2,
verbose=50,
require='sharedmem',
backend="threading")(delayed(render_item)(item,request.discounts,request.currency,ratings) for item in products)
front = [i for i in range((start))]
results = front+results
for item in [d['id'] for d in list_similar_product[end:]]:
results.append(item)
page = paginate_results(list(results), request_page)
ctx = {
'query': product,
'count_query' : len(results) if results else 0,
'results': page,
'query_string': '?page='+ str(request_page)}
response = TemplateResponse(request, 'product/similar_results.html', ctx)
return response
else:
ctx = {
'query': Product._meta.object_name,
'count_query' : '-',
'results': [],
'query_string': '?page='+ str(request_page)}
return TemplateResponse(request, 'product/similar_results.html', ctx)
def get_all_discounted_product(request):
request_page = int(request.GET.get('page','')) if request.GET.get('page','') else 1
ctx = {
'query': '',
'query_string': '?page='+ str(request_page)
}
request.session['sale_page'] = request_page
response = TemplateResponse(request, 'sale/index.html', ctx)
return response
def render_discounted_product(request):
ratings = list(ProductRating.objects.all().values('product_id').annotate(value=Avg('value')))
request_page = 1
query = """
SELECT p.id AS id
FROM discount_sale_products d, product_product p, product_productvariant v
WHERE p.id = d.product_id AND p.is_published = True AND p.id = v.product_id AND v.quantity - v.quantity_allocated > 0
ORDER BY id;
"""
cursor = connection.cursor()
cursor.execute(query)
product_list = [item[0] for item in cursor.fetchall()]
cursor.close()
if 'page' not in request.GET:
if 'sale_page' in request.session and request.session['sale_page']:
request_page = request.session['sale_page']
else:
request_page = int(request.GET.get('page')) if request.GET.get('page') else 1
results = []
start = (settings.PAGINATE_BY*(request_page-1))
end = start+(settings.PAGINATE_BY)
products = list(Product.objects.filter(id__in=product_list[start:end]))
results = Parallel(n_jobs=psutil.cpu_count()*2,
verbose=50,
require='sharedmem',
backend="threading")(delayed(render_item)(item,request.discounts,request.currency,ratings) for item in products)
front = [i for i in range((start))]
results = front+results
for item in product_list[end:]:
results.append(item)
page = paginate_results(list(results), request_page)
ctx = {
'query': '',
'count_query' : len(results) if results else 0,
'results': page,
'query_string': '?page='+ str(request_page)}
response = TemplateResponse(request, 'sale/results.html', ctx)
return response
def get_data_order():
return get_cross_section_order()
def get_data_rating():
return get_cross_section_rating()
MODE_RECOMMENDER = ['verbose','quiet']
@csrf_exempt
@api_view(['GET'])
@permission_classes((permissions.AllowAny,))
def get_arc_recommendation(request, mode, limit):
start_time = time.time()
if request.method == 'GET':
if mode not in MODE_RECOMMENDER:
result = {'success':False,'recommendation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
else:
data = request.GET
if 'user' not in data:
result = {'success':False,'recommendation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
if data['user']:
status_source = False
source = ''
try:
source_data = Order.objects.filter(user_id=data['user'])
source = 'order'
status_source = True
except Order.DoesNotExist:
pass
if not status_source:
try:
source_data = ProductRating.objects.filter(user_id=data['user'])
source = 'rating'
status_source = True
except ProductRating.DoesNotExist:
result = {'success':False,'recommendation':'user has no data to process','process_time':(time.time() - start_time)}
return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
del source_data
if status_source:
if source == 'order':
data_input = get_data_order()
else:
data_input = get_data_rating()
print('done db queries in %s'%(time.time() - start_time))
cross_section, binary_cross_section, distinct_user, distinct_product = process_cross_section(data_input)
start_count = time.time()
user_similarity = collaborative_similarity(cross_section, len(distinct_user))
print('done processing collaborative similarity in %s'%(time.time() - start_count))
result_matrix = []
user_id = distinct_user.index(int(data['user']))
order = 1
results = {}
start_similarity = time.time()
while True:
if result_matrix:
if 0 not in result_matrix[-1][user_id] or order >= int(limit):
results['ordinality'] = order
results['score_for_user'] = result_matrix[-1][user_id]
break
if order == 1:
final_weight = cross_section
result_matrix.append(final_weight)
else:
check = result_matrix[-1]
final_weight = np.matmul((np.matmul(binary_cross_section,binary_cross_section.T))*(user_similarity),check)
result_matrix.append(final_weight)
order += 2
print('done processing similarity %s'%(time.time() - start_similarity))
all_info = []
user_info = []
if source == 'rating':
all_info = list(ProductRating.objects.all().values('product_id').annotate(value=Avg('value')))
user_info = get_all_user_rating(data['user'])
else:
all_info = get_product_order_history()
user_info = get_user_order_history(data['user'])
results['score_for_user'] = list(filter(lambda e : e > 0, results['score_for_user']))
products = [distinct_product[i] for i in range(0,len(results['score_for_user']))]
products = list(Product.objects.filter(id__in=products))
recommended_items = {}
all_products = []
process_result = zip(products, results['score_for_user'])
for item, score in process_result:
temp = {}
temp['id'] = item.id
temp['name'] = item.name
temp['confident'] = score
all_products.append(temp)
all_products = sorted(all_products, key=itemgetter('confident'), reverse=True)
if LIMIT_COLLABORATIVE > 0:
all_products = all_products[:LIMIT_COLLABORATIVE]
products = {}
for item in reversed(all_products):
products[item['id']] = {'name':item['name'],
'value':item['confident']}
if LIMIT_CONTENT_BASE > 1:
similar_product = get_similar_product(item['id'], LIMIT_CONTENT_BASE)
for sub_item in similar_product:
if sub_item['id'] in products:
new_val = item['confident']*sub_item['similarity']
if products[sub_item['id']]['value'] < new_val:
products[sub_item['id']] = {'name':item['name'],
'value':new_val}
else:
products[sub_item['id']] = {'name':item['name'],
'value':item['confident']*sub_item['similarity']}
final_product = []
for key, value in products.items():
element = {}
element['id'] = int(key)
element['name'] = value['name']
element['confident'] = round(value['value'],4)
check = list(filter(lambda e: e['product_id'] == int(key), all_info))
info = check[0] if check else {'product_id':int(key),'value':0.0}
if source == 'rating':
element['total_rating'] = info['value']
else:
element['total_order'] = info['value']
check = list(filter(lambda e: e['product_id'] == int(key), user_info))
info = check[0] if check else {'product_id':int(key),'value':0.0}
if source == 'rating':
element['user_rating'] = info['value']
else:
element['user_order'] = info['value']
final_product.append(element)
final_product = sorted(final_product, key=itemgetter('confident'), reverse=True)
del results['score_for_user']
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['source'] = source
results['process_time'] = (time.time() - start_time)
return JsonResponse(results)
else:
result = {'success':False,'recommendation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
print("\nWaktu eksekusi : --- %s detik ---" % (time.time() - start_time))
def get_recommendation(request):
start_time = time.time()
if '_auth_user_id' in request.session and request.session['_auth_user_id']:
user = request.session['_auth_user_id']
status_source = False
source = ''
source_order = get_user_order_history(user)
source_rating = get_all_user_rating(user)
if source_order and source_rating:
status_source = True
if len(source_rating) >= len(source_order):
source = 'rating'
else:
source = 'order'
elif source_order and not source_rating:
status_source = True
source = 'order'
elif source_rating and not source_order:
status_source = True
source = 'rating'
else:
results = {}
try:
source_data = list(VisitProduct.objects.filter(user_id=user).values('product_id_id','count'))
if source_data:
if source == 'order':
data_input = get_data_order()
else:
data_input = get_data_rating()
print('done db queries in %s'%(time.time() - start_time))
visited = np.array([[item.get('product_id_id'), item.get('count')] for item in source_data] )
results = {}
recommended_items = {}
cross_section, binary_cross_section, distinct_user, distinct_product = process_cross_section(data_input)
anon_user = int(np.max(distinct_user)) + 1
distinct_user.append(anon_user)
anon_record = np.zeros([1,len(distinct_product)])
ys = visited[:,0]
zs = visited[:,1]
ys_val, ys_idx = np.unique(distinct_product, return_inverse=True)
check_idx = np.in1d(ys_val, ys).nonzero()[0]
anon_record[:,check_idx] = zs
anon_record[anon_record>5] = 5
anon_binary = np.copy(anon_record)
anon_binary[anon_binary>1] = 1
cross_section = np.vstack((cross_section,anon_record))
binary_cross_section = np.vstack((binary_cross_section,anon_binary))
source = 'visit'
status_source = True
all_products, ordinality = collaborative_filtering(anon_user, cross_section, binary_cross_section, distinct_user, distinct_product)
if LIMIT_COLLABORATIVE > 0:
all_products = all_products[:LIMIT_COLLABORATIVE] #select number of recommended product from another user
products = {}
for item in reversed(all_products):
products[item['id']] = item['confidence']
if LIMIT_CONTENT_BASE>1:
similar_product = get_similar_product(item['id'], LIMIT_CONTENT_BASE) #select number of similar products on each recommended product
for sub_item in similar_product:
if sub_item['id'] in products:
new_val = item['confidence']*sub_item['similarity']
if new_val > products[sub_item['id']]:
products[sub_item['id']] = new_val
else:
products[sub_item['id']] = item['confidence']*sub_item['similarity']
final_product = []
for key, value in products.items():
element = {}
element['id'] = key
element['confidence'] = round(value,4)
final_product.append(element)
final_product = sorted(final_product, key=itemgetter('confidence'), reverse=True)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['ordinality'] = ordinality
results['evaluate'] = True
results['source'] = source
results['process_time'] = (time.time() - start_time)
return JsonResponse(results)
else:
status_source = False
except VisitProduct.DoesNotExist:
pass
if not status_source:
try:
source_data = list(SearchHistory.objects.filter(user_id=user).values_list('clean_query', flat=True))
if source_data:
source = 'search'
status_source = True
common_query = []
for query in source_data:
common_query += query.split(' ')
common_query = np.array(common_query)
unique, pos = np.unique(common_query, return_inverse=True)
counts = np.bincount(pos)
maxsort = counts.argsort()[::-1]
user_query = ' '.join(unique[maxsort][:3].tolist())
products = custom_query_validation(user_query)
final_product = []
for item in products:
element = {}
element['id'] = item.get('id')
element['confidence'] = round(item.get('similarity'),4)
final_product.append(element)
recommended_items = {}
final_product = sorted(final_product, key=itemgetter('confidence'), reverse=True)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['evaluate'] = False
results['source'] = source
results['process_time'] = (time.time() - start_time)
return JsonResponse(results)
else:
status_source = False
except SearchHistory.DoesNotExist:
pass
del source_data
if not status_source:
results = get_default_recommendation(request)
return JsonResponse(results)
if status_source:
if source == 'order':
data_input = get_data_order()
else:
data_input = get_data_rating()
print('done db queries in %s'%(time.time() - start_time))
results = {}
recommended_items = {}
cross_section, binary_cross_section, distinct_user, distinct_product = process_cross_section(data_input)
all_products, ordinality = collaborative_filtering(user, cross_section, binary_cross_section, distinct_user, distinct_product)
if LIMIT_COLLABORATIVE > 0:
all_products = all_products[:LIMIT_COLLABORATIVE] #select number of recommended product from another user
products = {}
for item in reversed(all_products):
products[item['id']] = item['confidence']
if LIMIT_CONTENT_BASE > 1:
similar_product = get_similar_product(item['id'], LIMIT_CONTENT_BASE) #select number of similar products on each recommended product
for sub_item in similar_product:
if sub_item['id'] in products:
new_val = item['confidence']*sub_item['similarity']
if new_val > products[sub_item['id']]:
products[sub_item['id']] = new_val
else:
products[sub_item['id']] = item['confidence']*sub_item['similarity']
final_product = []
for key, value in products.items():
element = {}
element['id'] = key
element['confidence'] = round(value,4)
final_product.append(element)
final_product = sorted(final_product, key=itemgetter('confidence'), reverse=True)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['ordinality'] = ordinality
results['success'] = True
results['evaluate'] = True
results['source'] = source
results['process_time'] = (time.time() - start_time)
return JsonResponse(results)
else:
results = get_default_recommendation(request)
return JsonResponse(results)
else:
results = get_default_recommendation(request)
return JsonResponse(results)
def collaborative_filtering(user, cross_section, binary_cross_section, distinct_user, distinct_product, limit=ARC_ORDINALITY):
start_count = time.time()
user_similarity = collaborative_similarity(cross_section, len(distinct_user))
print('done processing collaborative similarity in %s'%(time.time() - start_count))
result_matrix = []
user_id = distinct_user.index(int(user))
order = 1
results = []
start_similarity = time.time()
ordinality = 1
while True:
if order == 1:
final_weight = cross_section
result_matrix.append(final_weight)
else:
check = result_matrix[-1]
final_weight = np.dot((np.dot(binary_cross_section,binary_cross_section.T))*(user_similarity),check)
result_matrix.append(final_weight)
if result_matrix:
if 0 not in result_matrix[-1][user_id] and order >= 3 or order >= int(limit):
ordinality = order
results = result_matrix[-1][user_id]
break
order += 2
del result_matrix
print('done processing similarity %s'%(time.time() - start_similarity))
all_products = []
process_result = zip(distinct_product, results.tolist())
for item, score in process_result:
temp = {}
temp['id'] = item
temp['confidence'] = score
all_products.append(temp)
all_products = list(filter(lambda e: e.get('confidence') > 0, all_products))
all_products = sorted(all_products, key=itemgetter('confidence'), reverse=True)
return all_products, ordinality
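# Illustrative sketch (not from the original code base): a single propagation step of the
# loop in collaborative_filtering() above, on a toy 3-user x 4-item matrix. The all-ones
# similarity matrix is an assumption; the real one comes from collaborative_similarity().
def _example_arc_propagation_step():
    import numpy as np
    cross_section = np.array([[5., 0., 3., 0.],
                              [4., 2., 0., 0.],
                              [0., 4., 0., 1.]])
    binary = (cross_section > 0).astype(float)
    user_similarity = np.ones((3, 3))
    # (B . B^T) counts co-consumed items per user pair; weighting by similarity and
    # multiplying back into the ratings spreads scores onto items a user has not touched.
    return np.dot(np.dot(binary, binary.T) * user_similarity, cross_section)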
def get_default_recommendation(request):
start_time = time.time()
source = 'top'
results = {}
recommended_items = {}
products = []
if 'history' in request.session and request.session['history']:
if 'visit' in request.session['history'] and request.session['history']['visit']:
data_input = get_data_order()
if not data_input:
data_input = get_data_rating()
if not data_input:
products = get_product_order_history()
final_product = []
if not products:
products = get_product_rating_history()
for item in products:
element = {}
element['id'] = item['product_id']
element['confidence'] = item['value']
final_product.append(element)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['process_time'] = (time.time() - start_time)
results['evaluate'] = False
results['source'] = source
return results
print('done db queries in %s'%(time.time() - start_time))
source = "visit"
visited = np.array([[int(item),value] for item,value in request.session['history']['visit'].items()] )
results = {}
recommended_items = {}
cross_section, binary_cross_section, distinct_user, distinct_product = process_cross_section(data_input)
anon_user = int(np.max(distinct_user)) + 1
distinct_user.append(anon_user)
anon_record = np.zeros([1,len(distinct_product)])
ys = visited[:,0]
zs = visited[:,1]
ys_val, ys_idx = np.unique(distinct_product, return_inverse=True)
check_idx = np.in1d(ys_val, ys).nonzero()[0]
anon_record[:,check_idx] = zs
anon_record[anon_record>5] = 5
anon_binary = np.copy(anon_record)
anon_binary[anon_binary>1] = 1
cross_section = np.vstack((cross_section,anon_record))
binary_cross_section = np.vstack((binary_cross_section,anon_binary))
all_products, ordinality = collaborative_filtering(anon_user, cross_section, binary_cross_section, distinct_user, distinct_product)
if LIMIT_COLLABORATIVE > 0:
all_products = all_products[:LIMIT_COLLABORATIVE] #select number of recommended product from another user
products = {}
for item in reversed(all_products):
products[item['id']] = item['confidence']
if LIMIT_CONTENT_BASE > 1:
similar_product = get_similar_product(item['id'], LIMIT_CONTENT_BASE) #select number of similar products on each recommended product
for sub_item in similar_product:
if sub_item['id'] in products:
new_val = item['confidence']*sub_item['similarity']
if new_val > products[sub_item['id']]:
products[sub_item['id']] = new_val
else:
products[sub_item['id']] = item['confidence']*sub_item['similarity']
final_product = []
for key, value in products.items():
element = {}
element['id'] = key
element['confidence'] = round(value,4)
final_product.append(element)
final_product = sorted(final_product, key=itemgetter('confidence'), reverse=True)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['ordinality'] = ordinality
results['evaluate'] = False
results['source'] = source
results['process_time'] = (time.time() - start_time)
return results
elif 'search' in request.session['history'] and request.session['history']['search']:
source = "search"
common_query = []
for query in request.session['history']['search']:
common_query += query.get('clean').split(' ')
common_query = np.array(common_query)
unique, pos = np.unique(common_query, return_inverse=True)
counts = np.bincount(pos)
maxsort = counts.argsort()[::-1]
user_query = ' '.join(unique[maxsort][:3].tolist())
products = custom_query_validation(user_query)
final_product = []
for item in products:
element = {}
element['id'] = item.get('id')
element['confidence'] = round(item.get('similarity'),4)
final_product.append(element)
final_product = sorted(final_product, key=itemgetter('confidence'), reverse=True)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['evaluate'] = False
results['source'] = source
results['process_time'] = (time.time() - start_time)
return results
else:
products = get_product_order_history()
else:
products = get_product_order_history()
final_product = []
if not products:
products = get_product_rating_history()
for item in products:
element = {}
element['id'] = item['product_id']
element['confidence'] = item['value']
final_product.append(element)
recommended_items['products'] = final_product
recommended_items['total'] = len(final_product)
results['recommendation'] = recommended_items
results['success'] = True
results['process_time'] = (time.time() - start_time)
results['evaluate'] = False
results['source'] = source
return results
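# Illustrative sketch (not part of the original views): how an anonymous pseudo-user row
# is appended to the cross-section in get_recommendation()/get_default_recommendation().
# The product ids and visit counts below are invented.
def _example_append_anonymous_user():
    import numpy as np
    distinct_product = [10, 11, 12, 13]
    visited = np.array([[11, 7], [13, 2]])            # [product_id, visit_count]
    anon = np.zeros((1, len(distinct_product)))
    idx = np.in1d(np.unique(distinct_product), visited[:, 0]).nonzero()[0]
    anon[:, idx] = visited[:, 1]
    anon[anon > 5] = 5                                # cap visits at a rating-like 5
    anon_binary = (anon > 0).astype(float)            # implicit-feedback indicator row
    return anon, anon_binary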
@csrf_exempt
@api_view(['POST'])
@permission_classes((permissions.AllowAny,))
def render_recommendation(request):
allowed_source = ['visit','search','rating','order']
if request.method == 'POST':
data = request.data
list_confidence = []
list_product = json.loads(data.get('products'))
list_product = sorted(list_product, key=itemgetter('confidence'), reverse=True)
request.session['recommendation'] = list_product
request.session['source_recommendation'] = data.get('source')
list_product = list_product[:LIMIT_FEATURED]
products = []
preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate([d.get('id') for d in list_product])])
products = list(Product.objects.filter(id__in=[d.get('id') for d in list_product]).order_by(preserved))
products = products_with_availability(
products, discounts=request.discounts, local_currency=request.currency)
if data.get('source') in allowed_source:
list_confidence = [round(d.get('confidence'),4) for d in list_product]
response = TemplateResponse(
request, 'product/_items.html', {
'products': products, 'confidences':list_confidence})
return response
def all_recommendation(request):
request_page = int(request.GET.get('page','')) if request.GET.get('page','') else 1
ctx = {
'query_string': '?page='+ str(request_page)
}
request.session['recommendation_page'] = request_page
response = TemplateResponse(request, 'recommendation/index.html', ctx)
return response
def get_render_all_recommendation(request):
start_time = time.time()
allowed_source = ['visit','search','rating','order']
list_recommendation = []
products = []
source = ''
request_page = int(request.GET.get('page')) if request.GET.get('page') else 1
print(request_page)
if 'recommendation' in request.session and request.session['recommendation'] and 'source_recommendation' in request.session and request.session['source_recommendation']:
list_recommendation = request.session['recommendation']
source = request.session['source_recommendation']
else:
temp = json.loads(get_recommendation(request).content)
list_recommendation = temp['recommendation']['products']
source = temp['source']
if list_recommendation:
confidences = []
ratings = list(ProductRating.objects.all().values('product_id').annotate(value=Avg('value')))
if 'page' not in request.GET:
if 'recommendation_page' in request.session and request.session['recommendation_page']:
request_page = request.session['recommendation_page']
else:
results = []
start = (settings.PAGINATE_BY*(request_page-1))
end = start+(settings.PAGINATE_BY)
preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate([d['id'] for d in list_recommendation[start:end]])])
products = list(Product.objects.filter(id__in=[d['id'] for d in list_recommendation[start:end]]).order_by(preserved))
results = Parallel(n_jobs=psutil.cpu_count()*2,
verbose=50,
require='sharedmem',
backend="threading")(delayed(render_item)(item,request.discounts,request.currency,ratings) for item in products)
front = [i for i in range((start))]
results = front+results
for item in [d['id'] for d in list_recommendation[end:]]:
results.append(item)
if source in allowed_source:
confidences = [round(d.get('confidence'),4) for d in list_recommendation]
page = paginate_results(list(results), request_page)
ctx = {
'query': '',
'count_query' : len(results) if results else 0,
'results': page,
'confidences': confidences,
'query_string': '?page='+ str(request_page)}
response = TemplateResponse(request, 'recommendation/results.html', ctx)
return response
else:
ctx = {
'query': '',
'count_query' : '-',
'results': [],
'confidences': [],
'query_string': '?page='+ str(request_page)}
return TemplateResponse(request, 'recommendation/results.html', ctx)
@csrf_exempt
@api_view(['POST'])
@permission_classes((permissions.AllowAny,))
def evaluate_recommendation(request):
allowed_source = ['visit','rating','order']
start_time = time.time()
results = {}
if request.method == 'POST':
if '_auth_user_id' in request.session and request.session['_auth_user_id']:
user = request.session['_auth_user_id']
data = request.data
if 'source' in data and data.get('source') in allowed_source:
source = data.get('source')
actual = []
if source == 'rating':
all_data = get_all_rating_data()
all_data = [{'y':item[0], 'x':item[1]} for item in all_data]
if EVALUATION_MODE==0:
actual = get_rating_relevant_item(user)
else:
actual = get_all_user_rating(user)
actual = [item.get('product_id') for item in actual]
elif source == 'order':
all_data = get_all_order_data()
all_data = [{'y':item[0], 'x':item[1]} for item in all_data]
if EVALUATION_MODE==0:
actual = get_order_relevant_item(user)
else:
actual = get_user_order_history(user)
actual = [item.get('product_id') for item in actual]
else:
all_data = get_all_rating_data()
all_data = [{'y':item[0], 'x':item[1]} for item in all_data]
if EVALUATION_MODE==0:
actual = get_visit_relevant_item(user)
else:
actual = list(VisitProduct.objects.filter(user_id_id=user).values_list('product_id_id', flat=True))
if actual:
target = [{'y':user,'x':item} for item in actual]
total = len(list(Product.objects.all()))
if 'recommended' in data and data.get('recommended'):
products = json.loads(data.get('recommended'))
recommended = [item['id'] for item in products]
recommended_products = [{'y':user,'x':item['id']} for item in products]
tp = len(set(actual)&set(recommended))
fp = abs(len(recommended) - tp)
fn = abs(len(actual) - tp)
relevant = tp + fn
irrelevant = abs(total - len(actual))
tn = abs(irrelevant - fp)
current_user = User.objects.get(id=user)
score = {}
score['Method'] = 'Hybrid' if LIMIT_CONTENT_BASE > 1 else 'Collaborative'
score['Rule'] = 'Strict' if EVALUATION_MODE == 1 else 'Non-Strict'
score['Precision'] = round(tp/(tp+fp),4)
score['Recall'] = round(tp/(tp+fn),4)
score['Fallout'] = round(fp/(fp+tn),4)
score['Missrate'] = round(fn/(tp+fn),4)
score['F-one-score'] = round((2*score['Precision']*score['Recall'])/(score['Precision']+score['Recall']),4)
results['evaluation'] = score
results['user'] = {'id':user,'email':current_user.email}
results['data'] = {'tp':tp,
'fn':fn,
'tn':tn,
'fp':fp,
'total':total,
'relevant':relevant,
'irrelevant':irrelevant}
results['success'] = True
results['all_products'] = all_data
results['target'] = target
results['recommended_products'] = recommended_products
results['process_time'] = time.time() - start_time
return JsonResponse(results)
else:
result = {'success':False,'evaluation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result)
else:
result = {'success':False,'evaluation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result)
else:
result = {'success':False,'evaluation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result)
else:
result = {'success':False,'evaluation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result)
else:
result = {'success':False,'evaluation':None,'process_time':(time.time() - start_time)}
return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
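# Illustrative sketch (not part of the original views): the confusion-matrix arithmetic
# used in evaluate_recommendation() above, on made-up id sets and catalogue size.
def _example_evaluation_metrics():
    actual = {1, 2, 3, 4}            # items the user really interacted with
    recommended = {2, 3, 5}          # items returned by the recommender
    total = 10                       # assumed catalogue size
    tp = len(actual & recommended)   # 2
    fp = len(recommended) - tp       # 1
    fn = len(actual) - tp            # 2
    tn = (total - len(actual)) - fp  # 5
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1, tn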
def fake_user_data(data_input, distinct_user, distinct_product, fake_data):
new_user = int(np.max(distinct_user)) + 1   # next unused id, mirroring anon_user above
distinct_user.append(new_user)              # list.append mutates in place and returns None
new_data = np.vstack([data_input, fake_data])
return new_data, distinct_user, distinct_product
def collaborative_similarity(array_input, users):
list_similarity = []
max_range = np.max(array_input)
for i in range(0,users):
row = []
for j in range(0,users):
if i == j:
row.append(1.0)
else:
user_a = np.array(array_input[i])
user_b = | np.array(array_input[j]) | numpy.array |
import numpy as np
from .status import StatusGrid
from .links import link_is_active, find_active_links, LinkGrid
from .links import _split_link_ends
from .cells import CellGrid
from .nodes import NodeGrid
from landlab.utils.decorators import deprecated
def _default_axis_names(n_dims):
"""Returns a tuple of the default axis names."""
_DEFAULT_NAMES = ('z', 'y', 'x')
return _DEFAULT_NAMES[- n_dims:]
def _default_axis_units(n_dims):
"""Returns a tuple of the default axis units."""
return ('-', ) * n_dims
class BaseGrid(object):
"""__init__([coord0, coord1, ...], axis_name=None, axis_units=None)
Parameters
----------
coord0, coord1, ... : sequence of array-like
Coordinates of grid nodes
axis_name : sequence of strings, optional
Names of coordinate axes
axis_units : sequence of strings, optional
Units of coordinate axes
Returns
-------
BaseGrid :
A newly-created BaseGrid
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> ngrid.number_of_nodes
4
>>> ngrid.x_at_node
array([ 0., 1., 0., 1.])
>>> ngrid.x_at_node[2]
0.0
>>> ngrid.point_at_node[2]
array([ 1., 0.])
>>> ngrid.coord_at_node[:, [2, 3]]
array([[ 1., 1.],
[ 0., 1.]])
>>> cells = ([0, 1, 2, 1, 3, 2], [3, 3], [0, 1])
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), cells=cells)
>>> ngrid.number_of_cells
2
>>> ngrid.node_at_cell
array([0, 1])
>>> links = [(0, 2), (1, 3), (0, 1), (1, 2), (0, 3)]
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), links=zip(*links))
>>> ngrid.number_of_links
5
>>> ngrid.links_leaving_at_node(0)
array([0, 2, 4])
>>> len(ngrid.links_entering_at_node(0)) == 0
True
>>> tails, heads = zip(*links)
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]),
... node_status=[0, 0, 0, 4], links=[tails, heads])
>>> grid.status_at_node
array([0, 0, 0, 4])
>>> len(grid.active_links_entering_at_node(0)) == 0
True
>>> grid.active_links_leaving_at_node(0)
array([0, 2])
"""
def __init__(self, nodes, axis_name=None, axis_units=None,
node_status=None, links=None, cells=None):
"""__init__([coord0, coord1, ...], axis_name=None, axis_units=None)
Parameters
----------
coord0, coord1, ... : sequence of array-like
Coordinates of grid nodes
axis_name : sequence of strings, optional
Names of coordinate axes
axis_units : sequence of strings, optional
Units of coordinate axes
Returns
-------
BaseGrid :
A newly-created BaseGrid
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> ngrid.number_of_nodes
4
>>> ngrid.x_at_node
array([ 0., 1., 0., 1.])
>>> ngrid.x_at_node[2]
0.0
>>> ngrid.point_at_node[2]
array([ 1., 0.])
>>> ngrid.coord_at_node[:, [2, 3]]
array([[ 1., 1.],
[ 0., 1.]])
>>> cells = ([0, 1, 2, 1, 3, 2], [3, 3], [0, 1])
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), cells=cells)
>>> ngrid.number_of_cells
2
>>> ngrid.node_at_cell
array([0, 1])
>>> links = [(0, 2), (1, 3), (0, 1), (1, 2), (0, 3)]
>>> ngrid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]), links=zip(*links))
>>> ngrid.number_of_links
5
>>> ngrid.links_leaving_at_node(0)
array([0, 2, 4])
>>> len(ngrid.links_entering_at_node(0)) == 0
True
>>> tails, heads = zip(*links)
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]),
... node_status=[0, 0, 0, 4], links=[tails, heads])
>>> grid.status_at_node
array([0, 0, 0, 4])
>>> len(grid.active_links_entering_at_node(0)) == 0
True
>>> grid.active_links_leaving_at_node(0)
array([0, 2])
"""
self._node_grid = NodeGrid(nodes)
self._axis_name = tuple(axis_name or _default_axis_names(self.ndim))
self._axis_units = tuple(axis_units or _default_axis_units(self.ndim))
if cells is not None:
try:
self._cell_grid = CellGrid(*cells)
except TypeError:
self._cell_grid = cells
if links is not None:
try:
self._link_grid = LinkGrid(links, self.number_of_nodes)
except TypeError:
self._link_grid = links
if node_status is not None:
self._status_grid = StatusGrid(node_status)
if links is not None and node_status is not None:
links = _split_link_ends(links)
self._active_link_grid = BaseGrid.create_active_link_grid(
self.status_at_node, links, self.number_of_nodes)
@staticmethod
def create_active_link_grid(node_status, links, number_of_nodes):
active_link_ids = find_active_links(node_status, links)
return LinkGrid((links[0][active_link_ids], links[1][active_link_ids]),
number_of_nodes, link_ids=active_link_ids)
@property
def ndim(self):
return self._node_grid.ndim
@property
def axis_units(self):
"""Coordinate units of each axis.
Returns
-------
tuple of strings :
Coordinate units of each axis.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]))
>>> ngrid.axis_units
('-', '-')
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]),
... axis_units=['degrees_north', 'degrees_east'])
>>> ngrid.axis_units
('degrees_north', 'degrees_east')
"""
return self._axis_units
@property
def axis_name(self):
"""Name of each axis.
Returns
-------
tuple of strings :
Names of each axis.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]))
>>> ngrid.axis_name
('y', 'x')
>>> ngrid = BaseGrid(([0, 1, 0], [1, 1, 0]), axis_name=['lat', 'lon'])
>>> ngrid.axis_name
('lat', 'lon')
"""
return self._axis_name
@property
def number_of_links(self):
"""Number of links.
"""
return self._link_grid.number_of_links
@property
def number_of_cells(self):
"""Number of cells.
"""
return self._cell_grid.number_of_cells
@property
def number_of_nodes(self):
"""Number of nodes.
"""
return self._node_grid.number_of_nodes
@property
def coord_at_node(self):
return self._node_grid.coord
@property
def x_at_node(self):
return self._node_grid.x
@property
def y_at_node(self):
return self._node_grid.y
@property
def point_at_node(self):
return self._node_grid.point
def links_leaving_at_node(self, node):
return self._link_grid.out_link_at_node(node)
def links_entering_at_node(self, node):
return self._link_grid.in_link_at_node(node)
def active_links_leaving_at_node(self, node):
return self._active_link_grid.out_link_at_node(node)
def active_links_entering_at_node(self, node):
return self._active_link_grid.in_link_at_node(node)
@property
def node_at_link_start(self):
return self._link_grid.node_at_link_start
@property
def node_at_link_end(self):
return self._link_grid.node_at_link_end
@property
def node_at_cell(self):
return self._cell_grid.node_at_cell
@property
def cell_at_node(self):
return self._cell_grid.cell_at_node
def core_cells(self):
return self.cell_at_node[self.core_nodes]
@property
def status_at_node(self):
return self._status_grid.node_status
@status_at_node.setter
def status_at_node(self, status):
self._status_grid.node_status = status
self._active_link_grid = BaseGrid.create_active_link_grid(
self.status_at_node, (self.node_at_link_start,
self.node_at_link_end), self.number_of_nodes)
def active_nodes(self):
return self._status_grid.active_nodes()
def core_nodes(self):
return self._status_grid.core_nodes()
def boundary_nodes(self):
return self._status_grid.boundary_nodes()
def closed_boundary_nodes(self):
return self._status_grid.closed_boundary_nodes()
def fixed_gradient_boundary_nodes(self):
return self._status_grid.fixed_gradient_boundary_nodes()
def fixed_value_boundary_nodes(self):
return self._status_grid.fixed_value_boundary_nodes()
def active_links(self):
return self._active_link_grid.link_id
@deprecated(use='length_of_link', version=1.0)
def link_length(self, link=None):
return self.length_of_link(link=link)
def length_of_link(self, link=None):
"""Length of grid links.
Parameters
----------
link : array-like, optional
Link IDs
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> links = [(0, 2), (1, 3), (0, 1), (2, 3), (0, 3)]
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]), links=links)
>>> grid.length_of_link()
array([ 4., 4., 3., 3., 5.])
>>> grid.length_of_link(0)
array([ 4.])
>>> grid.length_of_link().min()
3.0
>>> grid.length_of_link().max()
5.0
"""
if link is None:
node0, node1 = (self.node_at_link_start, self.node_at_link_end)
else:
node0, node1 = (self.node_at_link_start[link],
self.node_at_link_end[link])
return self.node_to_node_distance(node0, node1)
def node_to_node_distance(self, node0, node1, out=None):
"""Distance between nodes.
Parameters
----------
node0 : array-like
Node ID of start
node1 : array-like
Node ID of end
Returns
-------
array :
Distances between nodes.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]))
>>> grid.node_to_node_distance(0, 3)
array([ 5.])
>>> grid.node_to_node_distance(0, [0, 1, 2, 3])
array([ 0., 3., 4., 5.])
"""
return point_to_point_distance(
self._get_coord_at_node(node0), self._get_coord_at_node(node1),
out=out)
node0, node1 = np.broadcast_arrays(node0, node1)
return np.sqrt(np.sum((self.coord_at_node[:, node1] -
self.coord_at_node[:, node0]) ** 2, axis=0))
def point_to_node_distance(self, point, node=None, out=None):
"""Distance from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Distances from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 4, 4], [0, 3, 0, 3]))
>>> grid.point_to_node_distance((0., 0.), [1, 2, 3])
array([ 3., 4., 5.])
>>> grid.point_to_node_distance((0., 0.))
array([ 0., 3., 4., 5.])
>>> out = np.empty(4)
>>> out is grid.point_to_node_distance((0., 0.), out=out)
True
>>> out
array([ 0., 3., 4., 5.])
"""
return point_to_point_distance(point, self._get_coord_at_node(node),
out=out)
def point_to_node_angle(self, point, node=None, out=None):
"""Angle from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Angles from point to node as radians.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_angle((0., 0.), [1, 2, 3]) / np.pi
array([ 0. , 0.5 , 0.25])
>>> grid.point_to_node_angle((0., 0.)) / np.pi
array([ 0. , 0. , 0.5 , 0.25])
>>> out = np.empty(4)
>>> out is grid.point_to_node_angle((0., 0.), out=out)
True
>>> out / np.pi
array([ 0. , 0. , 0.5 , 0.25])
"""
return point_to_point_angle(point, self._get_coord_at_node(node),
out=out)
def point_to_node_azimuth(self, point, node=None, out=None):
"""Azimuth from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Azimuths from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_azimuth((0., 0.), [1, 2, 3])
array([ 90., 0., 45.])
>>> grid.point_to_node_azimuth((0., 0.))
array([ 90., 90., 0., 45.])
>>> grid.point_to_node_azimuth((0., 0.), 1)
array([ 90.])
>>> out = np.empty(4)
>>> out is grid.point_to_node_azimuth((0., 0.), out=out)
True
>>> out
array([ 90., 90., 0., 45.])
"""
return point_to_point_azimuth(point, self._get_coord_at_node(node),
out=out)
def point_to_node_vector(self, point, node=None, out=None):
"""Azimuth from a point to a node.
Parameters
----------
point : tuple
Coordinates of point
node : array-like
Node IDs
Returns
-------
array :
Vector from point to node.
Examples
--------
>>> from landlab.grid.unstructured.base import BaseGrid
>>> grid = BaseGrid(([0, 0, 1, 1], [0, 1, 0, 1]))
>>> grid.point_to_node_vector((0., 0.), [1, 2, 3])
array([[ 0., 1., 1.],
[ 1., 0., 1.]])
>>> grid.point_to_node_vector((0., 0.))
array([[ 0., 0., 1., 1.],
[ 0., 1., 0., 1.]])
>>> grid.point_to_node_vector((0., 0.), 1)
array([[ 0.],
[ 1.]])
>>> out = np.empty((2, 1))
>>> out is grid.point_to_node_vector((0., 0.), 1, out=out)
True
>>> out
array([[ 0.],
[ 1.]])
"""
return point_to_point_vector(point, self._get_coord_at_node(node),
out=out)
def _get_coord_at_node(self, node=None):
if node is None:
return self.coord_at_node
else:
return self.coord_at_node[:, node].reshape((2, -1))
def point_to_point_distance(point0, point1, out=None):
"""Length of vector that joins two points.
Parameters
----------
(y0, x0) : tuple of array_like
(y1, x1) : tuple of array_like
out : array_like, optional
An array to store the output. Must be the same shape as the output
would have.
Returns
-------
l : array_like
Length of vector joining points; if *out* is provided, *v* will be
equal to *out*.
Examples
--------
>>> from landlab.grid.unstructured.base import point_to_point_distance
>>> point_to_point_distance((0, 0), (3, 4))
array([ 5.])
>>> point_to_point_distance((0, 0), ([3, 6], [4, 8]))
array([ 5., 10.])
"""
point0 = np.reshape(point0, (2, -1))
point1 = np.reshape(point1, (2, -1))
if out is None:
sum_of_squares = np.sum((point1 - point0) ** 2., axis=0)
return | np.sqrt(sum_of_squares) | numpy.sqrt |
"""
------------------------------------------------------------
Mask R-CNN for Object_RPE
------------------------------------------------------------
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
import cv2
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '../..'))
print("ROOT_DIR: ", ROOT_DIR)
# Path to trained weights file
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
import argparse
############################################################
# Parse command line arguments
############################################################
parser = argparse.ArgumentParser(description='Get Stats from Image Dataset')
parser.add_argument('--detect', required=False,
default='rgbd+',
type=str,
metavar="Train RGB or RGB+D")
parser.add_argument('--dataset', required=False,
default='/data/Akeaveny/Datasets/part-affordance_combined/real/',
# default='/data/Akeaveny/Datasets/part-affordance_combined/ndds4/',
type=str,
metavar="/path/to/Affordance/dataset/")
parser.add_argument('--dataset_type', required=False,
default='hammer',
type=str,
metavar='real or syn')
parser.add_argument('--dataset_split', required=False, default='test',
type=str,
metavar='test or val')
parser.add_argument('--weights', required=False, default='coco',
metavar="/path/to/weights.h5 or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/ or Logs and checkpoints directory (default=logs/)")
parser.add_argument('--show_plots', required=False, default=False,
type=bool,
metavar='show plots from matplotlib')
parser.add_argument('--save_output', required=False, default=False,
type=bool,
metavar='save terminal output to text file')
args = parser.parse_args()
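# Example invocation (illustrative; the script name and dataset path are placeholders,
# the flags mirror the argparse definitions above):
#   python <this_script>.py --detect rgbd+ --dataset /path/to/Affordance/dataset/ \
#       --dataset_type real --dataset_split test --weights coco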
############################################################
# REAL OR SYN
############################################################
# assert args.dataset_type == 'real' or args.dataset_type == 'syn' or args.dataset_type == 'syn1' or args.dataset_type == 'hammer'
if args.dataset_type == 'real':
import dataset_real as Affordance
save_to_folder = '/images/test_images_real/'
# MEAN_PIXEL_ = np.array([103.57, 103.38, 103.52]) ### REAL
MEAN_PIXEL_ = np.array([93.70, 92.43, 89.58]) ### TEST
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### config ###
MAX_GT_INSTANCES_ = 10
DETECTION_MAX_INSTANCES_ = 10
DETECTION_MIN_CONFIDENCE_ = 0.9 # 0.985
POST_NMS_ROIS_INFERENCE_ = 100
RPN_NMS_THRESHOLD_ = 0.8
DETECTION_NMS_THRESHOLD_ = 0.5
### crop ###
# CROP = True
# IMAGE_RESIZE_MODE_ = "crop"
# IMAGE_MIN_DIM_ = 384
# IMAGE_MAX_DIM_ = 384
### sqaure ###
CROP = False
IMAGE_RESIZE_MODE_ = "square"
IMAGE_MIN_DIM_ = 640
IMAGE_MAX_DIM_ = 640
elif args.dataset_type == 'syn':
import dataset_syn as Affordance
save_to_folder = '/images/test_images_syn/'
MEAN_PIXEL_ = np.array([91.13, 88.92, 98.65]) ### REAL RGB
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### config ###
MAX_GT_INSTANCES_ = 10
DETECTION_MAX_INSTANCES_ = 10
DETECTION_MIN_CONFIDENCE_ = 0.9 # 0.985
POST_NMS_ROIS_INFERENCE_ = 100
RPN_NMS_THRESHOLD_ = 0.8
DETECTION_NMS_THRESHOLD_ = 0.5
### crop ###
# CROP = True
# IMAGE_RESIZE_MODE_ = "crop"
# IMAGE_MIN_DIM_ = 384
# IMAGE_MAX_DIM_ = 384
### sqaure ###
CROP = False
IMAGE_RESIZE_MODE_ = "square"
IMAGE_MIN_DIM_ = 640
IMAGE_MAX_DIM_ = 640
elif args.dataset_type == 'syn1':
import dataset_syn1 as Affordance
save_to_folder = '/images/test_images_syn1/'
MEAN_PIXEL_ = np.array([91.13, 88.92, 98.65]) ### REAL RGB
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### config ###
MAX_GT_INSTANCES_ = 20 # 2
DETECTION_MAX_INSTANCES_ = 20 # 2
DETECTION_MIN_CONFIDENCE_ = 0.9 # 0.985
POST_NMS_ROIS_INFERENCE_ = 100
RPN_NMS_THRESHOLD_ = 0.8
DETECTION_NMS_THRESHOLD_ = 0.5
### crop ###
# CROP = True
# IMAGE_RESIZE_MODE_ = "crop"
# IMAGE_MIN_DIM_ = 384
# IMAGE_MAX_DIM_ = 384
### sqaure ###
CROP = False
IMAGE_RESIZE_MODE_ = "square"
IMAGE_MIN_DIM_ = 640
IMAGE_MAX_DIM_ = 640
elif args.dataset_type == 'hammer':
import objects.dataset_syn_hammer as Affordance
save_to_folder = '/images/objects/test_images_syn_hammer/'
MEAN_PIXEL_ = np.array([91.13, 88.92, 98.65]) ### REAL RGB
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### crop ###
MAX_GT_INSTANCES_ = 20
DETECTION_MAX_INSTANCES_ = 20
DETECTION_MIN_CONFIDENCE_ = 0.5
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
IMAGE_RESIZE_MODE_ = "crop"
IMAGE_MIN_DIM_ = 384
IMAGE_MAX_DIM_ = 384
### sqaure ###
# MAX_GT_INSTANCES_ = 3
# DETECTION_MAX_INSTANCES_ = 30
# DETECTION_MIN_CONFIDENCE_ = 0.5
# RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
# IMAGE_RESIZE_MODE_ = "square"
# IMAGE_MIN_DIM_ = 640
# IMAGE_MAX_DIM_ = 640
elif args.dataset_type == 'scissors':
import objects.dataset_syn_scissors as Affordance
save_to_folder = '/images/objects/test_images_syn_scissors/'
MEAN_PIXEL_ = np.array([91.13, 88.92, 98.65]) ### REAL RGB
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### crop ###
MAX_GT_INSTANCES_ = 2
DETECTION_MAX_INSTANCES_ = 2
DETECTION_MIN_CONFIDENCE_ = 0.5
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
IMAGE_RESIZE_MODE_ = "crop"
IMAGE_MIN_DIM_ = 384
IMAGE_MAX_DIM_ = 384
### sqaure ###
# MAX_GT_INSTANCES_ = 3
# DETECTION_MAX_INSTANCES_ = 30
# DETECTION_MIN_CONFIDENCE_ = 0.5
# RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
# IMAGE_RESIZE_MODE_ = "square"
# IMAGE_MIN_DIM_ = 640
# IMAGE_MAX_DIM_ = 640
elif args.dataset_type == 'scissors_20k':
import objects.dataset_syn_scissors_20k as Affordance
save_to_folder = '/images/objects/test_images_syn_scissors_20k/'
MEAN_PIXEL_ = np.array([91.13, 88.92, 98.65]) ### REAL RGB
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
### crop ###
MAX_GT_INSTANCES_ = 10
DETECTION_MAX_INSTANCES_ = 10
DETECTION_MIN_CONFIDENCE_ = 0.5
RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
IMAGE_RESIZE_MODE_ = "crop"
IMAGE_MIN_DIM_ = 384
IMAGE_MAX_DIM_ = 384
### sqaure ###
# MAX_GT_INSTANCES_ = 3
# DETECTION_MAX_INSTANCES_ = 30
# DETECTION_MIN_CONFIDENCE_ = 0.5
# RPN_ANCHOR_SCALES_ = (16, 32, 64, 128, 256)
# IMAGE_RESIZE_MODE_ = "square"
# IMAGE_MIN_DIM_ = 640
# IMAGE_MAX_DIM_ = 640
if not (os.path.exists(os.getcwd()+save_to_folder)):
os.makedirs(os.getcwd()+save_to_folder)
from mrcnn.config import Config
# from mrcnn import model as modellib, utils, visualize
from mrcnn.model import log
from mrcnn.visualize import display_images
import tensorflow as tf
if args.detect == 'rgb':
from mrcnn import model as modellib, utils, visualize
elif args.detect == 'rgbd':
from mrcnn import modeldepth as modellib, utils, visualize
elif args.detect == 'rgbd+':
from mrcnn import modeldepthv2 as modellib, utils, visualize
else:
print("*** No Model Selected ***")
exit(1)
###########################################################
# Test
###########################################################
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
def compute_batch_ap(dataset, image_ids, verbose=1):
APs = []
for image_id in image_ids:
# Load image
image, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_image_gt(dataset, config,
image_id, use_mini_mask=False)
# Run object detection
results = model.detect_molded(image[np.newaxis], image_meta[np.newaxis], verbose=0)
# Compute AP over range 0.5 to 0.95
r = results[0]
ap = utils.compute_ap_range(
gt_bbox, gt_class_id, gt_mask,
r['rois'], r['class_ids'], r['scores'], r['masks'],
verbose=0)
APs.append(ap)
if verbose:
info = dataset.image_info[image_id]
meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
print("{:3} {} AP: {:.2f}".format(
meta["image_id"][0], meta["original_image_shape"][0], ap))
return APs
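# Illustrative usage (assumption: `dataset`, `config` and `model` are prepared as in
# detect_and_get_masks() below); kept commented out so the module stays import-safe:
# APs = compute_batch_ap(dataset, dataset.image_ids)
# print("Mean AP over {} images: {:.4f}".format(len(APs), np.mean(APs)))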
def detect_and_get_masks(model, config, args):
np.random.seed(0)
if args.save_output:
sys.stdout = open(os.getcwd() + save_to_folder + 'output.txt', "w")
else:
pass
########################
# Load test images
########################
print("args.dataset_split", args.dataset_split)
dataset = Affordance.UMDDataset()
dataset.load_Affordance(args.dataset, args.dataset_split)
dataset.prepare()
#### print KERAS model
model.keras_model.summary()
config.display()
captions = np.array(dataset.class_names)
print("Num of Test Images: {}".format(len(dataset.image_ids)))
########################
# rgbd
########################
if args.detect == 'rgbd' or args.detect == 'rgbd+':
########################
# batch mAP
########################
# print('\n --------------- mAP ---------------')
#
# APs, verbose = [], True
# for image_id in dataset.image_ids:
# # Load image
# image, depthimage, image_meta, gt_class_id, gt_bbox, gt_mask = \
# modellib.load_images_gt(dataset, config, image_id, use_mini_mask=False)
#
# # Run object detection
# # results = model.detect_molded(image[np.newaxis], image_meta[np.newaxis], verbose=0)
# results = model.detect_molded(image[np.newaxis], depthimage[np.newaxis], image_meta[np.newaxis], verbose=0)
# # Compute AP over range 0.5 to 0.95
# r = results[0]
# ap = utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
# r['rois'], r['class_ids'], r['scores'], r['masks'],
# verbose=0)
# APs.append(ap)
# if verbose:
# info = dataset.image_info[image_id]
# meta = modellib.parse_image_meta(image_meta[np.newaxis, ...])
# print("{:3} {} AP: {:.2f}".format(meta["image_id"][0], meta["original_image_shape"][0], ap))
# print("Mean AP over {} test images: {:.4f}".format(len(APs), np.mean(APs)))
#################
# Activations
#################
print('\n --------------- Activations ---------------')
np.random.seed(0)
image_id = int(np.random.choice(len(dataset.image_ids), size=1)[0])
image, depthimage, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_images_gt(dataset, config, image_id, use_mini_mask=False)
# Get activations of a few sample layers
activations = model.run_graph([image], [depthimage], [
# images
("input_image", tf.identity(model.keras_model.get_layer("input_image").output)),
("input_depth_image", tf.identity(model.keras_model.get_layer("input_depth_image").output)),
# RESNET
("res2c_out", model.keras_model.get_layer("res2c_out").output),
("res2c_out_depth", model.keras_model.get_layer("res2c_out_depth").output),
("res3d_out", model.keras_model.get_layer("res3d_out").output),
("res3d_out_depth", model.keras_model.get_layer("res3d_out_depth").output),
("res4w_out", model.keras_model.get_layer("res4w_out").output),
("res4w_out_depth", model.keras_model.get_layer("res4w_out_depth").output),
("res5c_out", model.keras_model.get_layer("res5c_out").output),
("res5c_out_depth", model.keras_model.get_layer("res5c_out_depth").output),
# FPN
# ("fpn_p5", model.keras_model.get_layer("fpn_p5").output),
# ("fpn_p5_depth", model.keras_model.get_layer("fpn_p5_depth").output),
###################
("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
("roi", model.keras_model.get_layer("ROI").output),
###################
# ("activation_143", model.keras_model.get_layer("activation_143").output),
])
# Images
display_images(np.transpose(activations["input_image"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_input_image.png", bbox_inches='tight')
display_images(np.transpose(activations["input_depth_image"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_input_depth_image.png", bbox_inches='tight')
# Backbone feature map
display_images(np.transpose(activations["res2c_out"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res2c_out.png", bbox_inches='tight')
display_images(np.transpose(activations["res2c_out_depth"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res2c_out_depth.png", bbox_inches='tight')
display_images(np.transpose(activations["res3d_out"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res3d_out.png", bbox_inches='tight')
display_images(np.transpose(activations["res3d_out_depth"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res3d_out_depth.png", bbox_inches='tight')
display_images(np.transpose(activations["res4w_out"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res4w_out.png", bbox_inches='tight')
display_images(np.transpose(activations["res4w_out_depth"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res4w_out_depth.png", bbox_inches='tight')
display_images(np.transpose(activations["res5c_out"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res5c_out.png", bbox_inches='tight')
display_images(np.transpose(activations["res5c_out_depth"][0, :, :, :4], [2, 0, 1]), cols=4)
plt.savefig(os.getcwd() + save_to_folder + "activations/activations_res5c_out_depth.png", bbox_inches='tight')
### display_images(np.transpose(activations["fpn_p5"][0, :, :, :4], [2, 0, 1]), cols=4)
### plt.savefig(os.getcwd() + save_to_folder + "activations/activations_fpn_p5.png", bbox_inches='tight')
### display_images(np.transpose(activations["fpn_p5_depth"][0, :, :, :4], [2, 0, 1]), cols=4)
### plt.savefig(os.getcwd() + save_to_folder + "activations/activations_fpn_p5_depth.png", bbox_inches='tight')
### display_images(np.transpose(activations["activation_143"][0, :, :, :, 5], [2, 0, 1]), cols=4)
### plt.savefig(os.getcwd() + save_to_folder + "activations/activations_activation_143.png", bbox_inches='tight')
########################
# detect
########################
for idx_samples in range(4):
print('\n --------------- detect ---------------')
# for image_id in dataset.image_ids:
image_ids = np.random.choice(len(dataset.image_ids), size=16)
        # Load a few randomly chosen images and run detection on each
limit = 4
ax = get_ax(rows=int(np.sqrt(limit)), cols=int(np.sqrt(limit)))
for i in range(limit):
# load images
image_id = image_ids[i]
image, depthimage, image_meta, gt_class_id, gt_bbox, gt_mask = \
modellib.load_images_gt(dataset, config, image_id, use_mini_mask=False)
######################
# configure depth
######################
depthimage[np.isnan(depthimage)] = 0
depthimage[depthimage == -np.inf] = 0
depthimage[depthimage == np.inf] = 0
# convert to 8-bit image
# depthimage = depthimage * (2 ** 16 -1) / np.max(depthimage) ### 16 bit
depthimage = depthimage * (2 ** 8 - 1) / np.max(depthimage) ### 8 bit
depthimage = np.array(depthimage, dtype=np.uint8)
# print("depthimage min: ", np.min(np.array(depthimage)))
# print("depthimage max: ", np.max(np.array(depthimage)))
#
# print("depthimage type: ", depthimage.dtype)
# print("depthimage shape: ", depthimage.shape)
# run detect
results = model.detectWdepth([image], [depthimage], verbose=1)
r = results[0]
class_ids = r['class_ids'] - 1
# plot
visualize.display_instances(image, r['rois'], r['masks'], class_ids, dataset.class_names, r['scores'],
ax=ax[i // int(np.sqrt(limit)), i % int(np.sqrt(limit))],
title="Predictions", show_bbox=True, show_mask=True)
plt.savefig(os.getcwd() + save_to_folder + "gt_affordance_labels/gt_affordance_labels_" + np.str(idx_samples) + ".png", bbox_inches='tight')
########################
# RPN
########################
print('\n --------------- RPNs ---------------')
limit = 10
# Get anchors and convert to pixel coordinates
anchors = model.get_anchors(image.shape)
anchors = utils.denorm_boxes(anchors, image.shape[:2])
log("anchors", anchors)
        # Generate RPN training targets
# target_rpn_match is 1 for positive anchors, -1 for negative anchors
# and 0 for neutral anchors.
target_rpn_match, target_rpn_bbox = modellib.build_rpn_targets(
image.shape, anchors, gt_class_id, gt_bbox, model.config)
log("target_rpn_match", target_rpn_match)
log("target_rpn_bbox", target_rpn_bbox)
positive_anchor_ix = np.where(target_rpn_match[:] == 1)[0]
negative_anchor_ix = np.where(target_rpn_match[:] == -1)[0]
neutral_anchor_ix = np.where(target_rpn_match[:] == 0)[0]
positive_anchors = anchors[positive_anchor_ix]
negative_anchors = anchors[negative_anchor_ix]
neutral_anchors = anchors[neutral_anchor_ix]
log("positive_anchors", positive_anchors)
log("negative_anchors", negative_anchors)
log("neutral anchors", neutral_anchors)
# Apply refinement deltas to positive anchors
refined_anchors = utils.apply_box_deltas(
positive_anchors,
target_rpn_bbox[:positive_anchors.shape[0]] * model.config.RPN_BBOX_STD_DEV)
log("refined_anchors", refined_anchors, )
# Display positive anchors before refinement (dotted) and
# after refinement (solid).
visualize.draw_boxes(
image, ax=get_ax(),
boxes=positive_anchors,
refined_boxes=refined_anchors)
# plt.savefig(os.getcwd() + save_to_folder + "anchors_positive.png", bbox_inches='tight')
# Run RPN sub-graph
pillar = model.keras_model.get_layer("ROI").output # node to start searching from
# TF 1.4 and 1.9 introduce new versions of NMS. Search for all names to support TF 1.3~1.10
nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression:0")
if nms_node is None:
nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression/NonMaxSuppressionV2:0")
if nms_node is None: # TF 1.9-1.10
nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression/NonMaxSuppressionV3:0")
rpn = model.run_graph([image], [depthimage], [
("rpn_class", model.keras_model.get_layer("rpn_class").output),
("pre_nms_anchors", model.ancestor(pillar, "ROI/pre_nms_anchors:0")),
("refined_anchors", model.ancestor(pillar, "ROI/refined_anchors:0")),
("refined_anchors_clipped", model.ancestor(pillar, "ROI/refined_anchors_clipped:0")),
("post_nms_anchor_ix", nms_node),
("proposals", model.keras_model.get_layer("ROI").output),
], image_metas=image_meta[np.newaxis])
# Show top anchors by score (before refinement)
sorted_anchor_ids = np.argsort(rpn['rpn_class'][:, :, 1].flatten())[::-1]
visualize.draw_boxes(image, boxes=anchors[sorted_anchor_ids[:limit]], ax=get_ax())
# plt.savefig(os.getcwd() + save_to_folder + "anchors_top.png", bbox_inches='tight')
# Show top anchors with refinement. Then with clipping to image boundaries
ax = get_ax(1, 2)
pre_nms_anchors = utils.denorm_boxes(rpn["pre_nms_anchors"][0], image.shape[:2])
refined_anchors = utils.denorm_boxes(rpn["refined_anchors"][0], image.shape[:2])
refined_anchors_clipped = utils.denorm_boxes(rpn["refined_anchors_clipped"][0], image.shape[:2])
visualize.draw_boxes(image, boxes=pre_nms_anchors[:limit],
refined_boxes=refined_anchors[:limit], ax=ax[0])
visualize.draw_boxes(image, refined_boxes=refined_anchors_clipped[:limit], ax=ax[1])
# plt.savefig(os.getcwd() + save_to_folder + "anchors_refinement.png", bbox_inches='tight')
# Show final proposals
# These are the same as the previous step (refined anchors
# after NMS) but with coordinates normalized to [0, 1] range.
# Convert back to image coordinates for display
# h, w = config.IMAGE_SHAPE[:2]
# proposals = rpn['proposals'][0, :limit] * np.array([h, w, h, w])
visualize.draw_boxes(
image, ax=get_ax(),
refined_boxes=utils.denorm_boxes(rpn['proposals'][0, :limit], image.shape[:2]))
# plt.savefig(os.getcwd() + save_to_folder + "final_proposals.png", bbox_inches='tight')
#############################
# Proposal Classification
#############################
print('\n --------------- Proposal Classification ---------------')
# Get input and output to classifier and mask heads.
mrcnn = model.run_graph([image], [depthimage], [
("proposals", model.keras_model.get_layer("ROI").output),
("probs", model.keras_model.get_layer("mrcnn_class").output),
("deltas", model.keras_model.get_layer("mrcnn_bbox").output),
("masks", model.keras_model.get_layer("mrcnn_mask").output),
("detections", model.keras_model.get_layer("mrcnn_detection").output),
])
# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
print("det_class_ids: ", det_class_ids)
# det_count = np.where(det_class_ids != 0)[0][0]
det_count = len(np.where(det_class_ids != 0)[0])
det_class_ids = det_class_ids[:det_count]
detections = mrcnn['detections'][0, :det_count]
print("{} detections: {}".format(
det_count, np.array(dataset.class_names)[det_class_ids]))
captions = ["{} {:.3f}".format(dataset.class_names[int(c)], s) if c > 0 else ""
for c, s in zip(detections[:, 4], detections[:, 5])]
visualize.draw_boxes(
image,
refined_boxes=utils.denorm_boxes(detections[:, :4], image.shape[:2]),
visibilities=[2] * len(detections),
captions=captions, title="Detections",
ax=get_ax())
# Proposals are in normalized coordinates
proposals = mrcnn["proposals"][0]
# Class ID, score, and mask per proposal
roi_class_ids = np.argmax(mrcnn["probs"][0], axis=1)
roi_scores = mrcnn["probs"][0, np.arange(roi_class_ids.shape[0]), roi_class_ids]
roi_class_names = np.array(dataset.class_names)[roi_class_ids]
roi_positive_ixs = np.where(roi_class_ids > 0)[0]
# How many ROIs vs empty rows?
print("{} Valid proposals out of {}".format(np.sum(np.any(proposals, axis=1)), proposals.shape[0]))
print("{} Positive ROIs".format(len(roi_positive_ixs)))
# Class counts
print(list(zip(*np.unique(roi_class_names, return_counts=True))))
# Display a random sample of proposals.
# Proposals classified as background are dotted, and
# the rest show their class and confidence score.
limit = 200
ixs = np.random.randint(0, proposals.shape[0], limit)
captions = ["{} {:.3f}".format(dataset.class_names[c], s) if c > 0 else ""
for c, s in zip(roi_class_ids[ixs], roi_scores[ixs])]
visualize.draw_boxes(
image,
boxes=utils.denorm_boxes(proposals[ixs], image.shape[:2]),
visibilities=np.where(roi_class_ids[ixs] > 0, 2, 1),
captions=captions, title="ROIs Before Refinement",
ax=get_ax())
# plt.savefig(os.getcwd() + save_to_folder + "rois_before_refinement.png", bbox_inches='tight')
# Class-specific bounding box shifts.
roi_bbox_specific = mrcnn["deltas"][0, np.arange(proposals.shape[0]), roi_class_ids]
log("roi_bbox_specific", roi_bbox_specific)
# Apply bounding box transformations
# Shape: [N, (y1, x1, y2, x2)]
refined_proposals = utils.apply_box_deltas(
proposals, roi_bbox_specific * config.BBOX_STD_DEV)
log("refined_proposals", refined_proposals)
# Show positive proposals
# ids = np.arange(roi_boxes.shape[0]) # Display all
limit = 5
ids = np.random.randint(0, len(roi_positive_ixs), limit) # Display random sample
captions = ["{} {:.3f}".format(dataset.class_names[c], s) if c > 0 else ""
for c, s in zip(roi_class_ids[roi_positive_ixs][ids], roi_scores[roi_positive_ixs][ids])]
visualize.draw_boxes(
image, ax=get_ax(),
boxes=utils.denorm_boxes(proposals[roi_positive_ixs][ids], image.shape[:2]),
refined_boxes=utils.denorm_boxes(refined_proposals[roi_positive_ixs][ids], image.shape[:2]),
visibilities=np.where(roi_class_ids[roi_positive_ixs][ids] > 0, 1, 0),
captions=captions, title="ROIs After Refinement")
# plt.savefig(os.getcwd() + save_to_folder + "rois_after_refinement.png", bbox_inches='tight')
# Remove boxes classified as background
keep = np.where(roi_class_ids > 0)[0]
print("Keep {} detections:\n{}".format(keep.shape[0], keep))
# Remove low confidence detections
keep = np.intersect1d(keep, np.where(roi_scores >= config.DETECTION_MIN_CONFIDENCE)[0])
print("Remove boxes below {} confidence. Keep {}:\n{}".format(
config.DETECTION_MIN_CONFIDENCE, keep.shape[0], keep))
# Apply per-class non-max suppression
pre_nms_boxes = refined_proposals[keep]
pre_nms_scores = roi_scores[keep]
pre_nms_class_ids = roi_class_ids[keep]
nms_keep = []
for class_id in np.unique(pre_nms_class_ids):
# Pick detections of this class
ixs = np.where(pre_nms_class_ids == class_id)[0]
# Apply NMS
class_keep = utils.non_max_suppression(pre_nms_boxes[ixs],
pre_nms_scores[ixs],
config.DETECTION_NMS_THRESHOLD)
            # Map indices
class_keep = keep[ixs[class_keep]]
nms_keep = np.union1d(nms_keep, class_keep)
print("{:22}: {} -> {}".format(dataset.class_names[class_id][:20],
keep[ixs], class_keep))
keep = np.intersect1d(keep, nms_keep).astype(np.int32)
print("\nKept after per-class NMS: {}\n{}".format(keep.shape[0], keep))
# Show final detections
ixs = np.arange(len(keep)) # Display all
# ixs = np.random.randint(0, len(keep), 10) # Display random sample
captions = ["{} {:.3f}".format(dataset.class_names[c], s) if c > 0 else ""
for c, s in zip(roi_class_ids[keep][ixs], roi_scores[keep][ixs])]
visualize.draw_boxes(
image,
boxes=utils.denorm_boxes(proposals[keep][ixs], image.shape[:2]),
refined_boxes=utils.denorm_boxes(refined_proposals[keep][ixs], image.shape[:2]),
visibilities=np.where(roi_class_ids[keep][ixs] > 0, 1, 0),
captions=captions, title="Detections after NMS",
ax=get_ax())
plt.savefig(os.getcwd() + save_to_folder + "rois_after_nms/rois_after_nms_" + np.str(idx_samples) + ".png", bbox_inches='tight')
###############
# MASKS
###############
print('\n --------------- MASKS ---------------')
limit = 8
display_images(np.transpose(gt_mask[..., :limit], [2, 0, 1]), cmap="Blues")
# Get predictions of mask head
mrcnn = model.run_graph([image], [depthimage], [
("detections", model.keras_model.get_layer("mrcnn_detection").output),
("masks", model.keras_model.get_layer("mrcnn_mask").output),
])
# Get detection class IDs. Trim zero padding.
det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
# det_count = np.where(det_class_ids == 0)[0][0]
det_count = len(np.where(det_class_ids != 0)[0])
det_class_ids = det_class_ids[:det_count]
print("{} detections: {}".format(
det_count, | np.array(dataset.class_names) | numpy.array |
#!/usr/bin/env python3
'''
LSTM RNN Model Class
'''
import sys
import random
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras import layers
class Model(object):
'''
This portion is modeled from Chapter 8 (Text Generation with LSTM) in the book:
"Deep Learning with Python" - <NAME>
'''
def __init__(self, rnnSize, rnnLoss, rnnActivation, seqLen, vocabSize):
'''
Model Creation
- using keras sequential model
        - adds an LSTM layer with rnnSize (default is 128) and an input shape that is determined
by seqLen (default 40) and vocabSize (default from data is 27)
- adds a Dense layer with input size of vocabSize and uses 'softmax' activation
- optimizer uses RMSprop (root mean square propogation)
- compiles model using 'categorical crossentropy' loss function
'''
self.model = keras.models.Sequential()
self.model.add(layers.LSTM(rnnSize, input_shape=(seqLen, vocabSize)))
self.model.add(layers.Dense(vocabSize, activation=rnnActivation))
self.optimizer = keras.optimizers.RMSprop(lr=0.01)
self.model.compile(loss=rnnLoss, optimizer=self.optimizer)
def sample(self, pred, temperature=1.0):
'''
Sample Function
        - takes in the probability distribution from the model, reweights the distribution and
selects the next character index to use
'''
pred = | np.asarray(pred) | numpy.asarray |
import numpy as np
from envs.particle.core import World, Agent, Landmark
from envs.particle.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self, args):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = getattr(args, "num_agents", 3)
num_landmarks = getattr(args, "num_landmarks", 3)
world.collaborative = True
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.15
agent.dead = False
agent.view_radius = getattr(args, "agent_view_radius", -1)
print("AGENT VIEW RADIUS set to: {}".format(agent.view_radius))
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
# make initial conditions
self.reset_world(world)
return world
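    # Illustrative usage sketch (added; `args` is any object exposing the attributes
    # read above via getattr, e.g. an argparse.Namespace):
    #   from argparse import Namespace
    #   world = Scenario().make_world(Namespace(num_agents=3, num_landmarks=3, agent_view_radius=1.5))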
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
            dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
import h5py
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import keras
import numpy as np
from keras.layers import Input, Dense, Conv1D, MaxPooling2D, MaxPooling1D, BatchNormalization
from keras.layers.core import Dropout, Activation, Flatten
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras.utils import multi_gpu_model
from keras.regularizers import l1,l2, l1_l2
from keras.constraints import MaxNorm
from keras.optimizers import SGD
from keras.activations import relu
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
input_bp = 600
batch_size=128
seqInput = Input(shape=(8, 4), name='seqInput')
seq = Conv1D(3, 5)(seqInput)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(1, 2)(seq)
seq = Activation('sigmoid')(seq)
seq = Flatten()(seq)
model = Model(inputs = [seqInput], outputs = [seq])
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#from keras.optimizers import RMSprop
model.compile('adam', loss='binary_crossentropy', metrics=['accuracy'])
PWM0 = np.loadtxt('PWM0')
PWM1 = np.loadtxt('PWM1')
PWM = np.ones(PWM1.shape)*0.25
def pwm_to_sample(PWM, n = 1000):
PWM /= PWM.sum(axis=0)
PWM = PWM.T
PWM = PWM[::-1,:]
PWM = PWM[:,::-1]
sample = np.zeros((n,PWM.shape[0],PWM.shape[1]))
for i in range(n):
for j in range(sample.shape[1]):
sample[i,j,np.random.choice(4,1,p=PWM[j,:])] = 1
return sample
sp0 = pwm_to_sample(PWM0)
sp1 = pwm_to_sample(PWM1)
spn = pwm_to_sample(PWM,n=2000)
sp = np.concatenate([sp0,sp1,spn],axis=0)
label = np.r_[np.ones(2000), np.zeros(2000)]  # assumed split: motif-derived samples (sp0+sp1) positive, uniform background (spn) negative
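
# --- Illustrative training step (added sketch; the original script is cut off here,
# so the epoch count and validation split below are assumptions, not the author's values) ---
shuffle_idx = np.random.permutation(len(label))
model.fit(sp[shuffle_idx], label[shuffle_idx],
          batch_size=batch_size, epochs=10, validation_split=0.1)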
# -*- coding: utf-8 -*-
"""h4_2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1OM6DjnR0yQBTljAjxNZRpbzCU7tS6Pv6
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from numpy import shape
from numpy import mat
from numpy import arange
from matplotlib.colors import ListedColormap
"""Q1.Generate two clusters of data points with 100 points each, by sampling from Gaussian distributions centered at (0.5, 0.5) and (−0.5, −0.5)."""
cmap_bold = ListedColormap(['darkorange', 'c'])
covariance = [[0.1, 0], [0, 0.1]]  # covariance matrix of each cluster (not a standard deviation)
mean1 = [0.5, 0.5]
data1 = np.random.multivariate_normal(mean1, covariance, 100)
mean2 = [-0.5, -0.5]
data2 = np.random.multivariate_normal(mean2, covariance, 100)
plt.ylim(-1.0, 1.0)
plt.xlim(-1.0, 1.0)
plt.scatter(data1[:, 0], data1[:, 1], c='blue')
plt.scatter(data2[:, 0], data2[:, 1], c='red')
plt.show()
"""Q2.Implement the Perceptron algorithm as discussed in class. Choose the initial weights to be zero and the maximum number of epochs as T = 100, and the learning rate α = 1. How quickly does your implementation converge?"""
# add label to data1, data2
d1 = np.insert(data1, 2, values=1, axis=1)
d2 = np.insert(data2, 2, values=-1, axis=1)
data = np.vstack((d1, d2))
X = data[:, :-1] # except last col
Y = data[:, -1] # last col
# split data into 4 parts
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.30, random_state=43)
# print(X_test[0].shape)
# print(Y_test.shape)
# initial w to 1 row 2 cols [w1, w2]
W = np.zeros((1, 2))
def perceptron(W, X_train, X_test, Y_train, Y_test, epochs, learning_rate):
# loop and compare
for epoch in range(epochs):
for idx in range(len(X_train)):
# update weights if the prediction is wrong
            # W is (1, 2) and X_train[idx] is (2,); use np.dot for the matrix product rather than elementwise *
if (np.dot(np.sign(np.dot(W, X_train[idx])), Y_train[idx])) <= 0:
                W = W + np.dot(learning_rate, np.dot(Y_train[idx], X_train[idx].T))
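    # Hedged completion (assumption -- the original continuation of this function is not
    # shown here): return the learned weights so they can be evaluated on the test split.
    return W

# Hypothetical driver call illustrating Q2's setting (T = 100 epochs, learning rate alpha = 1):
W_trained = perceptron(W, X_train, X_test, Y_train, Y_test, epochs=100, learning_rate=1)
print("learned weights:", W_trained)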
"""
Forest of trees-based ensemble methods for Uplift modeling on Classification
Problem. Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``UpliftRandomForestClassifier`` base class implements different
variants of uplift models based on random forest, with 'fit' and 'predict'
method.
- The ``UpliftTreeClassifier`` base class implements the uplift trees (without
Bootstraping for random forest), this class is called within
``UpliftRandomForestClassifier`` for constructing random forest.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
from __future__ import print_function
from collections import defaultdict
import numpy as np
import scipy.stats as stats
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import clone
from sklearn.calibration import CalibratedClassifierCV
from sklearn.utils.testing import ignore_warnings
class DecisionTree:
""" Tree Node Class
Tree node class to contain all the statistics of the tree node.
Parameters
----------
col : int, optional (default = -1)
The column index for splitting the tree node to children nodes.
value : float, optional (default = None)
The value of the feature column to split the tree node to children nodes.
    trueBranch : object of DecisionTree
        The true branch tree node (feature >= value).
    falseBranch : object of DecisionTree
        The false branch tree node (feature < value).
results : dictionary
The classification probability Pr(1) for each experiment group in the tree node.
summary : dictionary
Summary statistics of the tree nodes, including impurity, sample size, uplift score, etc.
maxDiffTreatment : string
The treatment name generating the maximum difference between treatment and control group.
maxDiffSign : float
        The sign of the maximum difference (1. or -1.).
nodeSummary : dictionary
Summary statistics of the tree nodes {treatment: [y_mean, n]}, where y_mean stands for the target metric mean
and n is the sample size.
backupResults : dictionary
        The conversion probabilities for each treatment in the parent node {treatment: y_mean}. The parent node
        information serves as a backup for the child node: when no valid statistics can be calculated for the
        child node, the parent node information is used instead.
bestTreatment : string
The treatment name providing the best uplift (treatment effect).
upliftScore : list
        The uplift score of this node: [max_Diff, p_value], where max_Diff stands for the maximum treatment effect, and
p_value stands for the p_value of the treatment effect.
matchScore : float
The uplift score by filling a trained tree with validation dataset or testing dataset.
"""
def __init__(self, col=-1, value=None, trueBranch=None, falseBranch=None,
results=None, summary=None, maxDiffTreatment=None,
maxDiffSign=1., nodeSummary=None, backupResults=None,
bestTreatment=None, upliftScore=None, matchScore=None):
self.col = col
self.value = value
self.trueBranch = trueBranch
self.falseBranch = falseBranch
self.results = results # None for nodes, not None for leaves
self.summary = summary
# the treatment with max( |p(y|treatment) - p(y|control)| )
self.maxDiffTreatment = maxDiffTreatment
# the sign for p(y|maxDiffTreatment) - p(y|control)
self.maxDiffSign = maxDiffSign
self.nodeSummary = nodeSummary
self.backupResults = backupResults
self.bestTreatment = bestTreatment
self.upliftScore = upliftScore
# match actual treatment for validation and testing
self.matchScore = matchScore
# Uplift Tree Classifier
class UpliftTreeClassifier:
""" Uplift Tree Classifier for Classification Task.
    An uplift tree classifier estimates the individual treatment effect by modifying the loss function in the
    classification trees.
    The uplift tree classifier is used in uplift random forest to construct the trees in the forest.
Parameters
----------
evaluationFunction : string
Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.
    max_features: int, optional (default=None)
        The number of features to consider when looking for the best split.
    max_depth: int, optional (default=3)
        The maximum depth of the tree.
min_samples_leaf: int, optional (default=100)
The minimum number of samples required to be split at a leaf node.
min_samples_treatment: int, optional (default=10)
The minimum number of samples required of the experiment group to be split at a leaf node.
    n_reg: int, optional (default=100)
The regularization parameter defined in Rzepakowski et al. 2012, the weight (in terms of sample size) of the
parent node influence on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.
control_name: string
The name of the control group (other experiment groups will be regarded as treatment groups)
normalization: boolean, optional (default=True)
The normalization factor defined in Rzepakowski et al. 2012, correcting for tests with large number of splits
and imbalanced treatment and control splits
"""
def __init__(self, max_features=None, max_depth=3, min_samples_leaf=100,
min_samples_treatment=10, n_reg=100, evaluationFunction='KL',
control_name=None, normalization=True):
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_samples_treatment = min_samples_treatment
self.n_reg = n_reg
self.max_features = max_features
if evaluationFunction == 'KL':
self.evaluationFunction = self.evaluate_KL
elif evaluationFunction == 'ED':
self.evaluationFunction = self.evaluate_ED
elif evaluationFunction == 'Chi':
self.evaluationFunction = self.evaluate_Chi
else:
self.evaluationFunction = self.evaluate_CTS
self.fitted_uplift_tree = None
self.control_name = control_name
self.normalization = normalization
def fit(self, X, treatment, y):
""" Fit the uplift model.
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
treatment : array-like, shape = [num_samples]
An array containing the treatment group for each unit.
y : array-like, shape = [num_samples]
An array containing the outcome of interest for each unit.
Returns
-------
self : object
"""
assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'
rows = [list(X[i]) + [treatment[i]] + [y[i]] for i in range(len(X))]
resTree = self.growDecisionTreeFrom(
rows, evaluationFunction=self.evaluationFunction,
max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf,
depth=1, min_samples_treatment=self.min_samples_treatment,
n_reg=self.n_reg, parentNodeSummary=None
)
self.fitted_uplift_tree = resTree
return self
# Prune Trees
def prune(self, X, treatment, y, minGain=0.0001, rule='maxAbsDiff'):
""" Prune the uplift model.
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
treatment : array-like, shape = [num_samples]
An array containing the treatment group for each unit.
y : array-like, shape = [num_samples]
An array containing the outcome of interest for each unit.
minGain : float, optional (default = 0.0001)
The minimum gain required to make a tree node split. The children
tree branches are trimmed if the actual split gain is less than
the minimum gain.
rule : string, optional (default = 'maxAbsDiff')
The prune rules. Supported values are 'maxAbsDiff' for optimizing
the maximum absolute difference, and 'bestUplift' for optimizing
the node-size weighted treatment effect.
Returns
-------
self : object
"""
assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'
rows = [list(X[i]) + [treatment[i]] + [y[i]] for i in range(len(X))]
self.pruneTree(rows,
tree=self.fitted_uplift_tree,
rule=rule,
minGain=minGain,
evaluationFunction=self.evaluationFunction,
notify=False,
n_reg=self.n_reg,
parentNodeSummary=None)
return self
def pruneTree(self, rows, tree, rule='maxAbsDiff', minGain=0.,
evaluationFunction=None, notify=False, n_reg=0,
parentNodeSummary=None):
"""Prune one single tree node in the uplift model.
Args
----
        rows : list of list
            The internal data format for the data used to prune the tree (combining X, treatment and y).
        tree : object of DecisionTree
            The tree node to prune.
        rule : string, optional (default = 'maxAbsDiff')
            The prune rules. Supported values are 'maxAbsDiff' for optimizing the maximum absolute difference, and
            'bestUplift' for optimizing the node-size weighted treatment effect.
        minGain : float, optional (default = 0.)
            The minimum gain required to make a tree node split. The children tree branches are trimmed if the
            actual split gain is less than the minimum gain.
        evaluationFunction : function, optional (default = None)
            The split evaluation function.
        n_reg : int, optional (default = 0)
            The regularization parameter defined in Rzepakowski et al. 2012.
        parentNodeSummary : dictionary, optional (default = None)
            Node summary statistics of the parent tree node.
Returns
-------
self : object
"""
# Current Node Summary for Validation Data Set
currentNodeSummary = self.tree_node_summary(
rows, min_samples_treatment=self.min_samples_treatment,
n_reg=n_reg, parentNodeSummary=parentNodeSummary
)
tree.nodeSummary = currentNodeSummary
# Divide sets for child nodes
(set1, set2) = self.divideSet(rows, tree.col, tree.value)
# recursive call for each branch
if tree.trueBranch.results is None:
self.pruneTree(set1, tree.trueBranch, rule, minGain,
evaluationFunction, notify, n_reg,
parentNodeSummary=currentNodeSummary)
if tree.falseBranch.results is None:
self.pruneTree(set2, tree.falseBranch, rule, minGain,
evaluationFunction, notify, n_reg,
parentNodeSummary=currentNodeSummary)
        # merge leaves (potentially)
if (tree.trueBranch.results is not None and
tree.falseBranch.results is not None):
if rule == 'maxAbsDiff':
# Current D
if (tree.maxDiffTreatment in currentNodeSummary and
self.control_name in currentNodeSummary):
currentScoreD = tree.maxDiffSign * (currentNodeSummary[tree.maxDiffTreatment][0]
- currentNodeSummary[self.control_name][0])
else:
currentScoreD = 0
# trueBranch D
trueNodeSummary = self.tree_node_summary(
set1, min_samples_treatment=self.min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
if (tree.trueBranch.maxDiffTreatment in trueNodeSummary and
self.control_name in trueNodeSummary):
trueScoreD = tree.trueBranch.maxDiffSign * (trueNodeSummary[tree.trueBranch.maxDiffTreatment][0]
- trueNodeSummary[self.control_name][0])
trueScoreD = (
trueScoreD
* (trueNodeSummary[tree.trueBranch.maxDiffTreatment][1]
+ trueNodeSummary[self.control_name][1])
/ (currentNodeSummary[tree.trueBranch.maxDiffTreatment][1]
+ currentNodeSummary[self.control_name][1])
)
else:
trueScoreD = 0
# falseBranch D
falseNodeSummary = self.tree_node_summary(
set2, min_samples_treatment=self.min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
if (tree.falseBranch.maxDiffTreatment in falseNodeSummary and
self.control_name in falseNodeSummary):
falseScoreD = (
tree.falseBranch.maxDiffSign *
(falseNodeSummary[tree.falseBranch.maxDiffTreatment][0]
- falseNodeSummary[self.control_name][0])
)
falseScoreD = (
falseScoreD *
(falseNodeSummary[tree.falseBranch.maxDiffTreatment][1]
+ falseNodeSummary[self.control_name][1])
/ (currentNodeSummary[tree.falseBranch.maxDiffTreatment][1]
+ currentNodeSummary[self.control_name][1])
)
else:
falseScoreD = 0
if ((trueScoreD + falseScoreD) - currentScoreD <= minGain or
(trueScoreD + falseScoreD < 0.)):
tree.trueBranch, tree.falseBranch = None, None
tree.results = tree.backupResults
elif rule == 'bestUplift':
# Current D
if (tree.bestTreatment in currentNodeSummary and
self.control_name in currentNodeSummary):
currentScoreD = (
currentNodeSummary[tree.bestTreatment][0]
- currentNodeSummary[self.control_name][0]
)
else:
currentScoreD = 0
# trueBranch D
trueNodeSummary = self.tree_node_summary(
set1, min_samples_treatment=self.min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
if (tree.trueBranch.bestTreatment in trueNodeSummary and
self.control_name in trueNodeSummary):
trueScoreD = (
trueNodeSummary[tree.trueBranch.bestTreatment][0]
- trueNodeSummary[self.control_name][0]
)
else:
trueScoreD = 0
# falseBranch D
falseNodeSummary = self.tree_node_summary(
set2, min_samples_treatment=self.min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
if (tree.falseBranch.bestTreatment in falseNodeSummary and
self.control_name in falseNodeSummary):
falseScoreD = (
falseNodeSummary[tree.falseBranch.bestTreatment][0]
- falseNodeSummary[self.control_name][0]
)
else:
falseScoreD = 0
gain = ((1. * len(set1) / len(rows) * trueScoreD
+ 1. * len(set2) / len(rows) * falseScoreD)
- currentScoreD)
if gain <= minGain or (trueScoreD + falseScoreD < 0.):
tree.trueBranch, tree.falseBranch = None, None
tree.results = tree.backupResults
return self
def fill(self, X, treatment, y):
""" Fill the data into an existing tree.
This is a higher-level function to transform the original data inputs
into lower level data inputs (list of list and tree).
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
treatment : array-like, shape = [num_samples]
An array containing the treatment group for each unit.
y : array-like, shape = [num_samples]
An array containing the outcome of interest for each unit.
Returns
-------
self : object
"""
assert len(X) == len(y) and len(X) == len(treatment), 'Data length must be equal for X, treatment, and y.'
rows = [list(X[i]) + [treatment[i]] + [y[i]] for i in range(len(X))]
self.fillTree(rows, tree=self.fitted_uplift_tree)
return self
def fillTree(self, rows, tree):
""" Fill the data into an existing tree.
This is a lower-level function to execute on the tree filling task.
Args
----
rows : list of list
The internal data format for the training data (combining X, Y, treatment).
tree : object
object of DecisionTree class
Returns
-------
self : object
"""
# Current Node Summary for Validation Data Set
currentNodeSummary = self.tree_node_summary(rows, min_samples_treatment=0, n_reg=0, parentNodeSummary=None)
tree.nodeSummary = currentNodeSummary
# Divide sets for child nodes
(set1, set2) = self.divideSet(rows, tree.col, tree.value)
# recursive call for each branch
if tree.trueBranch is not None:
self.fillTree(set1, tree.trueBranch)
if tree.falseBranch is not None:
self.fillTree(set2, tree.falseBranch)
# Update Information
# matchScore
matchScore = (currentNodeSummary[tree.bestTreatment][0] - currentNodeSummary[self.control_name][0])
tree.matchScore = round(matchScore, 4)
tree.summary['matchScore'] = round(matchScore, 4)
# Samples, Group_size
tree.summary['samples'] = len(rows)
tree.summary['group_size'] = ''
for treatment_group in currentNodeSummary:
tree.summary['group_size'] += ' ' + treatment_group + ': ' + str(currentNodeSummary[treatment_group][1])
# classProb
if tree.results is not None:
tree.results = self.uplift_classification_results(rows)
return self
def predict(self, X, full_output=False):
'''
Returns the recommended treatment group and predicted optimal
probability conditional on using the recommended treatment group.
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
full_output : bool, optional (default=False)
Whether the UpliftTree algorithm returns upliftScores, pred_nodes
alongside the recommended treatment group and p_hat in the treatment group.
Returns
-------
df_res : DataFrame, shape = [num_samples, (num_treatments + 1)]
A DataFrame containing the predicted delta in each treatment group,
the best treatment group and the maximum delta.
'''
p_hat_optimal = []
treatment_optimal = []
pred_nodes = {}
upliftScores = []
for xi in range(len(X)):
pred_leaf, upliftScore = self.classify(X[xi], self.fitted_uplift_tree, dataMissing=False)
# Predict under uplift optimal treatment
opt_treat = max(pred_leaf, key=pred_leaf.get)
p_hat_optimal.append(pred_leaf[opt_treat])
treatment_optimal.append(opt_treat)
if full_output:
if xi == 0:
for key_i in pred_leaf:
pred_nodes[key_i] = [pred_leaf[key_i]]
else:
for key_i in pred_leaf:
pred_nodes[key_i].append(pred_leaf[key_i])
upliftScores.append(upliftScore)
if full_output:
return treatment_optimal, p_hat_optimal, upliftScores, pred_nodes
else:
return treatment_optimal, p_hat_optimal
def divideSet(self, rows, column, value):
'''
Tree node split.
Args
----
rows : list of list
The internal data format.
column : int
The column used to split the data.
value : float or int
The value in the column for splitting the data.
Returns
-------
(list1, list2) : list of list
The left node (list of data) and the right node (list of data).
'''
splittingFunction = None
# for int and float values
if isinstance(value, int) or isinstance(value, float):
splittingFunction = lambda row: row[column] >= value
else: # for strings
splittingFunction = lambda row: row[column] == value
list1 = [row for row in rows if splittingFunction(row)]
list2 = [row for row in rows if not splittingFunction(row)]
return (list1, list2)
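    # Illustrative example (added note): divideSet([[1, 'A', 0], [5, 'A', 1]], column=0, value=3)
    # returns ([[5, 'A', 1]], [[1, 'A', 0]]) -- numeric columns are split on row[column] >= value.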
def group_uniqueCounts(self, rows):
'''
Count sample size by experiment group.
Args
----
rows : list of list
The internal data format.
Returns
-------
results : dictionary
The control and treatment sample size.
'''
results = {}
for row in rows:
# treatment group in the 2nd last column
r = row[-2]
if r not in results:
results[r] = {0: 0, 1: 0}
results[r][row[-1]] += 1
return results
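    # Illustrative example (added note): rows ending in [..., 'control', 1], [..., 'control', 0] and
    # [..., 'treatment1', 1] yield {'control': {0: 1, 1: 1}, 'treatment1': {0: 0, 1: 1}}.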
@staticmethod
def kl_divergence(pk, qk):
'''
Calculate KL Divergence for binary classification.
sum(np.array(pk) * np.log(np.array(pk) / np.array(qk)))
Args
----
pk : float
The probability of 1 in one distribution.
qk : float
The probability of 1 in the other distribution.
Returns
-------
S : float
The KL divergence.
'''
if qk < 0.1**6:
qk = 0.1**6
elif qk > 1-0.1**6:
qk = 1-0.1**6
S = pk * np.log(pk / qk) + (1-pk) * np.log((1-pk) / (1-qk))
return S
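    # Illustrative numeric check (added note): kl_divergence(0.3, 0.2)
    # = 0.3*ln(0.3/0.2) + 0.7*ln(0.7/0.8) ~= 0.0282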
def evaluate_KL(self, nodeSummary, control_name):
'''
Calculate KL Divergence as split evaluation criterion for a given node.
Args
----
nodeSummary : dictionary
The tree node summary statistics, produced by tree_node_summary()
method.
control_name : string
The control group name.
Returns
-------
d_res : KL Divergence
'''
if control_name not in nodeSummary:
return 0
pc = nodeSummary[control_name][0]
d_res = 0
for treatment_group in nodeSummary:
if treatment_group != control_name:
d_res += self.kl_divergence(nodeSummary[treatment_group][0], pc)
return d_res
@staticmethod
def evaluate_ED(nodeSummary, control_name):
'''
Calculate Euclidean Distance as split evaluation criterion for a given node.
Args
----
nodeSummary : dictionary
The tree node summary statistics, produced by tree_node_summary()
method.
control_name : string
The control group name.
Returns
-------
d_res : Euclidean Distance
'''
if control_name not in nodeSummary:
return 0
pc = nodeSummary[control_name][0]
d_res = 0
for treatment_group in nodeSummary:
if treatment_group != control_name:
d_res += 2*(nodeSummary[treatment_group][0] - pc)**2
return d_res
@staticmethod
def evaluate_Chi(nodeSummary, control_name):
'''
Calculate Chi-Square statistic as split evaluation criterion for a given node.
Args
----
nodeSummary : dictionary
The tree node summary statistics, produced by tree_node_summary() method.
control_name : string
The control group name.
Returns
-------
d_res : Chi-Square
'''
if control_name not in nodeSummary:
return 0
pc = nodeSummary[control_name][0]
d_res = 0
for treatment_group in nodeSummary:
if treatment_group != control_name:
d_res += ((nodeSummary[treatment_group][0] - pc) ** 2 / max(0.1 ** 6, pc)
+ (nodeSummary[treatment_group][0] - pc) ** 2 / max(0.1 ** 6, 1 - pc))
return d_res
@staticmethod
def evaluate_CTS(currentNodeSummary):
'''
Calculate CTS (conditional treatment selection) as split evaluation criterion for a given node.
Args
----
        currentNodeSummary : dictionary
            The tree node summary statistics, produced by tree_node_summary() method.
        Returns
        -------
        d_res : CTS score (the negative of the largest treatment-group outcome mean)
'''
mu = 0.0
# iterate treatment group
for r in currentNodeSummary:
mu = max(mu, currentNodeSummary[r][0])
return -mu
@staticmethod
def entropyH(p, q=None):
'''
Entropy
Entropy calculation for normalization.
Args
----
p : float
The probability used in the entropy calculation.
q : float, optional, (default = None)
The second probability used in the entropy calculation.
Returns
-------
entropy : float
'''
if q is None and p > 0:
return -p * np.log(p)
elif q > 0:
return -p * np.log(q)
else:
return 0
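    # Illustrative numeric check (added note): entropyH(0.5) = -0.5*ln(0.5) ~= 0.3466,
    # and entropyH(0.3, 0.4) = -0.3*ln(0.4) ~= 0.2749.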
def normI(self, currentNodeSummary, leftNodeSummary, rightNodeSummary, control_name, alpha=0.9):
'''
Normalization factor.
Args
----
currentNodeSummary : dictionary
The summary statistics of the current tree node.
leftNodeSummary : dictionary
The summary statistics of the left tree node.
rightNodeSummary : dictionary
The summary statistics of the right tree node.
control_name : string
The control group name.
alpha : float
The weight used to balance different normalization parts.
Returns
-------
norm_res : float
Normalization factor.
'''
norm_res = 0
# n_t, n_c: sample size for all treatment, and control
# pt_a, pc_a: % of treatment is in left node, % of control is in left node
n_c = currentNodeSummary[control_name][1]
n_c_left = leftNodeSummary[control_name][1]
n_t = []
n_t_left = []
for treatment_group in currentNodeSummary:
if treatment_group != control_name:
n_t.append(currentNodeSummary[treatment_group][1])
if treatment_group in leftNodeSummary:
n_t_left.append(leftNodeSummary[treatment_group][1])
else:
n_t_left.append(0)
pt_a = 1. * np.sum(n_t_left) / (np.sum(n_t) + 0.1)
pc_a = 1. * n_c_left / (n_c + 0.1)
# Normalization Part 1
norm_res += (
alpha * self.entropyH(1. * np.sum(n_t) / (np.sum(n_t) + n_c), 1. * n_c / (np.sum(n_t) + n_c))
* self.kl_divergence(pt_a, pc_a)
)
# Normalization Part 2 & 3
for i in range(len(n_t)):
pt_a_i = 1. * n_t_left[i] / (n_t[i] + 0.1)
norm_res += (
(1 - alpha) * self.entropyH(1. * n_t[i] / (n_t[i] + n_c), 1. * n_c / (n_t[i] + n_c))
* self.kl_divergence(1. * pt_a_i, pc_a)
)
norm_res += (1. * n_t[i] / (np.sum(n_t) + n_c) * self.entropyH(pt_a_i))
# Normalization Part 4
norm_res += 1. * n_c/(np.sum(n_t) + n_c) * self.entropyH(pc_a)
# Normalization Part 5
norm_res += 0.5
return norm_res
def tree_node_summary(self, rows, min_samples_treatment=10, n_reg=100, parentNodeSummary=None):
'''
Tree node summary statistics.
Args
----
rows : list of list
The internal data format for the training data (combining X, Y, treatment).
min_samples_treatment: int, optional (default=10)
            The minimum number of samples required of the experiment group to be split at a leaf node.
        n_reg : int, optional (default=100)
The regularization parameter defined in Rzepakowski et al. 2012,
the weight (in terms of sample size) of the parent node influence
on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.
parentNodeSummary : dictionary
Node summary statistics of the parent tree node.
Returns
-------
nodeSummary : dictionary
The node summary of the current tree node.
'''
# returns {treatment_group: p(1)}
results = self.group_uniqueCounts(rows)
# node Summary: {treatment_group: [p(1), size]}
nodeSummary = {}
# iterate treatment group
for r in results:
n1 = results[r][1]
ntot = results[r][0] + results[r][1]
if parentNodeSummary is None:
y_mean = 1.*n1/ntot
elif ntot > min_samples_treatment:
y_mean = 1. * (results[r][1] + parentNodeSummary[r][0] * n_reg) / (ntot + n_reg)
else:
y_mean = parentNodeSummary[r][0]
nodeSummary[r] = [y_mean, ntot]
return nodeSummary
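    # Illustrative numeric check of the n_reg smoothing (added note): with
    # parentNodeSummary[r][0] = 0.5, results[r] = {0: 60, 1: 40} and n_reg = 100,
    # y_mean = (40 + 0.5*100) / (100 + 100) = 0.45.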
def uplift_classification_results(self, rows):
'''
Classification probability for each treatment in the tree node.
Args
----
rows : list of list
The internal data format for the training data (combining X, Y, treatment).
Returns
-------
res : dictionary
The probability of 1 in each treatment in the tree node.
'''
results = self.group_uniqueCounts(rows)
res = {}
for r in results:
p = float(results[r][1]) / (results[r][0] + results[r][1])
res[r] = round(p, 6)
return res
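    # Illustrative example (added note): a node whose control rows have 3 positives out of 10 and whose
    # treatment1 rows have 6 positives out of 10 yields {'control': 0.3, 'treatment1': 0.6}.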
def growDecisionTreeFrom(self, rows, evaluationFunction, max_depth=10,
min_samples_leaf=100, depth=1,
min_samples_treatment=10, n_reg=100,
parentNodeSummary=None):
'''
Train the uplift decision tree.
Args
----
rows : list of list
The internal data format for the training data (combining X, Y, treatment).
evaluationFunction : string
Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.
max_depth: int, optional (default=10)
The maximum depth of the tree.
min_samples_leaf: int, optional (default=100)
The minimum number of samples required to be split at a leaf node.
depth : int, optional (default = 1)
The current depth.
min_samples_treatment: int, optional (default=10)
The minimum number of samples required of the experiment group to be split at a leaf node.
        n_reg: int, optional (default=100)
The regularization parameter defined in Rzepakowski et al. 2012,
the weight (in terms of sample size) of the parent node influence
on the child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.
parentNodeSummary : dictionary, optional (default = None)
Node summary statistics of the parent tree node.
Returns
-------
object of DecisionTree class
'''
if len(rows) == 0:
return DecisionTree()
# Current Node Info and Summary
currentNodeSummary = self.tree_node_summary(
rows, min_samples_treatment=min_samples_treatment, n_reg=n_reg, parentNodeSummary=parentNodeSummary
)
if evaluationFunction == self.evaluate_CTS:
currentScore = evaluationFunction(currentNodeSummary)
else:
currentScore = evaluationFunction(currentNodeSummary, control_name=self.control_name)
# Prune Stats
maxAbsDiff = 0
maxDiff = -1.
bestTreatment = self.control_name
suboptTreatment = self.control_name
maxDiffTreatment = self.control_name
maxDiffSign = 0
for treatment_group in currentNodeSummary:
if treatment_group != self.control_name:
diff = (currentNodeSummary[treatment_group][0]
- currentNodeSummary[self.control_name][0])
if abs(diff) >= maxAbsDiff:
maxDiffTreatment = treatment_group
maxDiffSign = np.sign(diff)
maxAbsDiff = abs(diff)
if diff >= maxDiff:
maxDiff = diff
suboptTreatment = treatment_group
if diff > 0:
bestTreatment = treatment_group
if maxDiff > 0:
pt = currentNodeSummary[bestTreatment][0]
nt = currentNodeSummary[bestTreatment][1]
pc = currentNodeSummary[self.control_name][0]
nc = currentNodeSummary[self.control_name][1]
p_value = (1. - stats.norm.cdf((pt - pc) / np.sqrt(pt * (1 - pt) / nt + pc * (1 - pc) / nc))) * 2
else:
pt = currentNodeSummary[suboptTreatment][0]
nt = currentNodeSummary[suboptTreatment][1]
pc = currentNodeSummary[self.control_name][0]
nc = currentNodeSummary[self.control_name][1]
p_value = (1. - stats.norm.cdf((pc - pt) / np.sqrt(pt * (1 - pt) / nt + pc * (1 - pc) / nc))) * 2
upliftScore = [maxDiff, p_value]
bestGain = 0.0
bestAttribute = None
bestSets = None
# last column is the result/target column, 2nd to the last is the treatment group
columnCount = len(rows[0]) - 2
if (self.max_features and self.max_features > 0 and self.max_features <= columnCount):
max_features = self.max_features
else:
max_features = columnCount
for col in list(np.random.choice(a=range(columnCount), size=max_features, replace=False)):
columnValues = [row[col] for row in rows]
# unique values
lsUnique = list(set(columnValues))
if (isinstance(lsUnique[0], int) or
isinstance(lsUnique[0], float)):
if len(lsUnique) > 10:
lspercentile = np.percentile(columnValues, [3, 5, 10, 20, 30, 50, 70, 80, 90, 95, 97])
else:
lspercentile = np.percentile(lsUnique, [10, 50, 90])
lsUnique = list(set(lspercentile))
for value in lsUnique:
(set1, set2) = self.divideSet(rows, col, value)
                # check the split validity on min_samples_leaf
if (len(set1) < min_samples_leaf or len(set2) < min_samples_leaf):
continue
                # summarize nodes
# Gain -- Entropy or Gini
p = float(len(set1)) / len(rows)
leftNodeSummary = self.tree_node_summary(
set1, min_samples_treatment=min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
rightNodeSummary = self.tree_node_summary(
set2, min_samples_treatment=min_samples_treatment,
n_reg=n_reg, parentNodeSummary=parentNodeSummary
)
# check the split validity on min_samples_treatment
if set(leftNodeSummary.keys()) != set(rightNodeSummary.keys()):
continue
node_mst = 10**8
for ti in leftNodeSummary:
node_mst = np.min([node_mst, leftNodeSummary[ti][1]])
node_mst = np.min([node_mst, rightNodeSummary[ti][1]])
if node_mst < min_samples_treatment:
continue
# evaluate the split
if evaluationFunction == self.evaluate_CTS:
leftScore1 = evaluationFunction(leftNodeSummary)
rightScore2 = evaluationFunction(rightNodeSummary)
gain = (currentScore - p * leftScore1 - (1 - p) * rightScore2)
else:
if (self.control_name in leftNodeSummary and
self.control_name in rightNodeSummary):
leftScore1 = evaluationFunction(leftNodeSummary, control_name=self.control_name)
rightScore2 = evaluationFunction(rightNodeSummary, control_name=self.control_name)
gain = (p * leftScore1 + (1 - p) * rightScore2 - currentScore)
if self.normalization:
norm_factor = self.normI(currentNodeSummary,
leftNodeSummary,
rightNodeSummary,
self.control_name,
alpha=0.9)
else:
norm_factor = 1
gain = gain / norm_factor
else:
gain = 0
if (gain > bestGain and len(set1) > min_samples_leaf and
len(set2) > min_samples_leaf):
bestGain = gain
bestAttribute = (col, value)
bestSets = (set1, set2)
dcY = {'impurity': '%.3f' % currentScore, 'samples': '%d' % len(rows)}
# Add treatment size
dcY['group_size'] = ''
for treatment_group in currentNodeSummary:
dcY['group_size'] += ' ' + treatment_group + ': ' + str(currentNodeSummary[treatment_group][1])
dcY['upliftScore'] = [round(upliftScore[0], 4), round(upliftScore[1], 4)]
dcY['matchScore'] = round(upliftScore[0], 4)
if bestGain > 0 and depth < max_depth:
trueBranch = self.growDecisionTreeFrom(
bestSets[0], evaluationFunction, max_depth, min_samples_leaf,
depth + 1, min_samples_treatment=min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
falseBranch = self.growDecisionTreeFrom(
bestSets[1], evaluationFunction, max_depth, min_samples_leaf,
depth + 1, min_samples_treatment=min_samples_treatment,
n_reg=n_reg, parentNodeSummary=currentNodeSummary
)
return DecisionTree(
col=bestAttribute[0], value=bestAttribute[1],
trueBranch=trueBranch, falseBranch=falseBranch, summary=dcY,
maxDiffTreatment=maxDiffTreatment, maxDiffSign=maxDiffSign,
nodeSummary=currentNodeSummary,
backupResults=self.uplift_classification_results(rows),
bestTreatment=bestTreatment, upliftScore=upliftScore
)
else:
if evaluationFunction == self.evaluate_CTS:
return DecisionTree(
results=self.uplift_classification_results(rows),
summary=dcY, nodeSummary=currentNodeSummary,
bestTreatment=bestTreatment, upliftScore=upliftScore
)
else:
return DecisionTree(
results=self.uplift_classification_results(rows),
summary=dcY, maxDiffTreatment=maxDiffTreatment,
maxDiffSign=maxDiffSign, nodeSummary=currentNodeSummary,
bestTreatment=bestTreatment, upliftScore=upliftScore
)
def classify(self, observations, tree, dataMissing=False):
'''
        Classifies (predicts) the observations according to the tree.
        Args
        ----
        observations : list
            A single observation (a list of feature values) to classify.
        dataMissing: boolean, optional (default = False)
            An indicator for whether the observation may contain missing values.
Returns
-------
tree.results, tree.upliftScore :
The results in the leaf node.
'''
def classifyWithoutMissingData(observations, tree):
'''
            Classifies (predicts) the observations according to the tree, assuming no missing data.
            Args
            ----
            observations : list
                A single observation (a list of feature values) to classify.
Returns
-------
tree.results, tree.upliftScore :
The results in the leaf node.
'''
if tree.results is not None: # leaf
return tree.results, tree.upliftScore
else:
v = observations[tree.col]
branch = None
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.trueBranch
else:
branch = tree.falseBranch
else:
if v == tree.value:
branch = tree.trueBranch
else:
branch = tree.falseBranch
return classifyWithoutMissingData(observations, branch)
def classifyWithMissingData(observations, tree):
'''
            Classifies (predicts) the observations according to the tree, allowing for missing data.
            Args
            ----
            observations : list
                A single observation (a list of feature values) to classify.
Returns
-------
tree.results, tree.upliftScore :
The results in the leaf node.
'''
if tree.results is not None: # leaf
return tree.results
else:
v = observations[tree.col]
if v is None:
tr = classifyWithMissingData(observations, tree.trueBranch)
fr = classifyWithMissingData(observations, tree.falseBranch)
tcount = sum(tr.values())
fcount = sum(fr.values())
tw = float(tcount) / (tcount + fcount)
fw = float(fcount) / (tcount + fcount)
# Problem description: http://blog.ludovf.net/python-collections-defaultdict/
result = defaultdict(int)
for k, v in tr.items():
result[k] += v * tw
for k, v in fr.items():
result[k] += v * fw
return dict(result)
else:
branch = None
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.trueBranch
else:
branch = tree.falseBranch
else:
if v == tree.value:
branch = tree.trueBranch
else:
branch = tree.falseBranch
return classifyWithMissingData(observations, branch)
# function body
if dataMissing:
return classifyWithMissingData(observations, tree)
else:
return classifyWithoutMissingData(observations, tree)
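
# --- Illustrative usage sketch for UpliftTreeClassifier (added note; synthetic data,
# not part of the original module) ---
#   X = np.random.normal(size=(1000, 5))
#   treatment = np.random.choice(['control', 'treatment1'], size=1000)
#   y = np.random.binomial(1, 0.3, size=1000)
#   uplift_tree = UpliftTreeClassifier(max_depth=3, evaluationFunction='KL', control_name='control')
#   uplift_tree.fit(X, treatment, y)
#   best_group, p_hat = uplift_tree.predict(X)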
def cat_group(dfx, kpix, n_group=10):
'''
Category Reduction for Categorical Variables
Args
----
dfx : dataframe
The inputs data dataframe.
kpix : string
The column of the feature.
n_group : int, optional (default = 10)
        The number of top category values to keep; all other category values are grouped into "Other".
Returns
-------
The transformed categorical feature value list.
'''
if dfx[kpix].nunique() > n_group:
# get the top categories
top = dfx[kpix].isin(dfx[kpix].value_counts().index[:n_group])
dfx.loc[~top, kpix] = "Other"
return dfx[kpix].values
else:
return dfx[kpix].values
def cat_transform(dfx, kpix, kpi1):
'''
Encoding string features.
Args
----
dfx : dataframe
The inputs data dataframe.
kpix : string
The column of the feature.
kpi1 : list
The list of feature names.
Returns
-------
dfx : DataFrame
The updated dataframe containing the encoded data.
kpi1 : list
The updated feature names containing the new dummy feature names.
'''
df_dummy = pd.get_dummies(dfx[kpix].values)
new_col_names = ['%s_%s' % (kpix, x) for x in df_dummy.columns]
df_dummy.columns = new_col_names
dfx = pd.concat([dfx, df_dummy], axis=1)
for new_col in new_col_names:
if new_col not in kpi1:
kpi1.append(new_col)
if kpix in kpi1:
kpi1.remove(kpix)
return dfx, kpi1
def cv_fold_index(n, i, k, random_seed=2018):
'''
    Return the row indices that belong to fold i of a k-fold split.
    Args
    ----
    n : int
        The total number of rows.
    i : int
        The index of the fold to select.
    k : int
        The total number of folds.
    random_seed : int, optional (default = 2018)
        The random seed used for the fold assignment.
    Returns
    -------
    fold_i_index : ndarray
        The row indices assigned to fold i.
'''
np.random.seed(random_seed)
rlist = np.random.choice(a=range(k), size=n, replace=True)
fold_i_index = np.where(rlist == i)[0]
return fold_i_index
# Categorize continuous variable
def cat_continuous(x, granularity='Medium'):
'''
Categorize (bin) continuous variable based on percentile.
Args
----
x : list
Feature values.
granularity : string, optional, (default = 'Medium')
Control the granularity of the bins, optional values are: 'High', 'Medium', 'Low'.
Returns
-------
res : list
List of percentile bins for the feature value.
'''
if granularity == 'High':
lspercentile = [np.percentile(x, 5),
np.percentile(x, 10),
np.percentile(x, 15),
np.percentile(x, 20),
np.percentile(x, 25),
np.percentile(x, 30),
np.percentile(x, 35),
np.percentile(x, 40),
np.percentile(x, 45),
np.percentile(x, 50),
np.percentile(x, 55),
np.percentile(x, 60),
np.percentile(x, 65),
np.percentile(x, 70),
np.percentile(x, 75),
np.percentile(x, 80),
np.percentile(x, 85),
np.percentile(x, 90),
np.percentile(x, 95),
np.percentile(x, 99)
]
        # lspercentile holds p5, p10, ..., p95, p99; use the indices that actually
        # correspond to the decile labels below (1 -> p10, 3 -> p20, ..., 17 -> p90)
        res = ['> p90 (%s)' % (lspercentile[17]) if z > lspercentile[17] else
               '<= p10 (%s)' % (lspercentile[1]) if z <= lspercentile[1] else
               '<= p20 (%s)' % (lspercentile[3]) if z <= lspercentile[3] else
               '<= p30 (%s)' % (lspercentile[5]) if z <= lspercentile[5] else
               '<= p40 (%s)' % (lspercentile[7]) if z <= lspercentile[7] else
               '<= p50 (%s)' % (lspercentile[9]) if z <= lspercentile[9] else
               '<= p60 (%s)' % (lspercentile[11]) if z <= lspercentile[11] else
               '<= p70 (%s)' % (lspercentile[13]) if z <= lspercentile[13] else
               '<= p80 (%s)' % (lspercentile[15]) if z <= lspercentile[15] else
               '<= p90 (%s)' % (lspercentile[17]) if z <= lspercentile[17] else
               '> p90 (%s)' % (lspercentile[17]) for z in x]
elif granularity == 'Medium':
lspercentile = [np.percentile(x, 10),
np.percentile(x, 20),
np.percentile(x, 30),
np.percentile(x, 40),
np.percentile(x, 50),
np.percentile(x, 60),
np.percentile(x, 70),
np.percentile(x, 80),
np.percentile(x, 90)
]
res = ['<= p10 (%s)' % (lspercentile[0]) if z <= lspercentile[0] else
'<= p20 (%s)' % (lspercentile[1]) if z <= lspercentile[1] else
'<= p30 (%s)' % (lspercentile[2]) if z <= lspercentile[2] else
'<= p40 (%s)' % (lspercentile[3]) if z <= lspercentile[3] else
'<= p50 (%s)' % (lspercentile[4]) if z <= lspercentile[4] else
'<= p60 (%s)' % (lspercentile[5]) if z <= lspercentile[5] else
'<= p70 (%s)' % (lspercentile[6]) if z <= lspercentile[6] else
'<= p80 (%s)' % (lspercentile[7]) if z <= lspercentile[7] else
'<= p90 (%s)' % (lspercentile[8]) if z <= lspercentile[8] else
'> p90 (%s)' % (lspercentile[8]) for z in x]
else:
lspercentile = [np.percentile(x, 15), np.percentile(x, 50), np.percentile(x, 85)]
res = ['1-Very Low' if z < lspercentile[0] else
'2-Low' if z < lspercentile[1] else
'3-High' if z < lspercentile[2] else
'4-Very High' for z in x]
return res
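# Illustrative sketch: with granularity='Low' the cut points are p15 / p50 / p85, so the smallest
# values of a 0..99 range fall into the first bin.
# cat_continuous(list(range(100)), granularity='Low')[:3]   # -> ['1-Very Low', '1-Very Low', '1-Very Low']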
def kpi_transform(dfx, kpi_combo, kpi_combo_new):
'''
Feature transformation for a list of features: continuous features are binned and string features have their cardinality capped.
Args
----
dfx : DataFrame
DataFrame containing the features.
kpi_combo : list of string
List of feature names to be transformed
kpi_combo_new : list of string
List of new feature names to be assigned to the transformed features.
Returns
-------
dfx : DataFrame
Updated DataFrame containing the new features.
'''
for j in range(len(kpi_combo)):
if type(dfx[kpi_combo[j]].values[0]) == str:
dfx[kpi_combo_new[j]] = dfx[kpi_combo[j]].values
dfx[kpi_combo_new[j]] = cat_group(dfx=dfx, kpix=kpi_combo_new[j])
else:
if len(kpi_combo) > 1:
dfx[kpi_combo_new[j]] = cat_continuous(
dfx[kpi_combo[j]].values, granularity='Low'
)
else:
dfx[kpi_combo_new[j]] = cat_continuous(
dfx[kpi_combo[j]].values, granularity='High'
)
return dfx
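# Usage sketch (hypothetical column names): bin a numeric column and cap a string column in one call.
# df = kpi_transform(df, kpi_combo=['age', 'colour'], kpi_combo_new=['age_bin', 'colour_grp'])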
# Uplift Random Forests
class UpliftRandomForestClassifier:
""" Uplift Random Forest for Classification Task.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the uplift random forest.
evaluationFunction : string
Choose from one of the models: 'KL', 'ED', 'Chi', 'CTS'.
max_features: int, optional (default=10)
The number of features to consider when looking for the best split.
random_state: int, optional (default=2019)
The seed used by the random number generator.
max_depth: int, optional (default=5)
The maximum depth of the tree.
min_samples_leaf: int, optional (default=100)
The minimum number of samples required to be at a leaf node.
min_samples_treatment: int, optional (default=10)
The minimum number of treatment-group samples required to be at a leaf node.
n_reg: int, optional (default=10)
The regularization parameter defined in Rzepakowski et al. 2012, the
weight (in terms of sample size) of the parent node influence on the
child node, only effective for 'KL', 'ED', 'Chi', 'CTS' methods.
control_name: string
The name of the control group (other experiment groups will be regarded as treatment groups)
normalization: boolean, optional (default=True)
The normalization factor defined in Rzepakowski et al. 2012,
correcting for tests with a large number of splits and imbalanced
treatment and control splits.
Outputs
----------
df_res: pandas dataframe
A user-level results dataframe containing the estimated individual treatment effect.
"""
def __init__(self,
n_estimators=10,
max_features=10,
random_state=2019,
max_depth=5,
min_samples_leaf=100,
min_samples_treatment=10,
n_reg=10,
evaluationFunction=None,
control_name=None,
normalization=True):
"""
Initialize the UpliftRandomForestClassifier class.
"""
self.classes_ = {}
self.n_estimators = n_estimators
self.max_features = max_features
self.random_state = random_state
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_samples_treatment = min_samples_treatment
self.n_reg = n_reg
self.evaluationFunction = evaluationFunction
self.control_name = control_name
# Create forest
self.uplift_forest = []
for _ in range(n_estimators):
uplift_tree = UpliftTreeClassifier(
max_features=self.max_features, max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
min_samples_treatment=self.min_samples_treatment,
n_reg=self.n_reg,
evaluationFunction=self.evaluationFunction,
control_name=self.control_name,
normalization=normalization)
self.uplift_forest.append(uplift_tree)
def fit(self, X, treatment, y):
"""
Fit the UpliftRandomForestClassifier.
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
treatment : array-like, shape = [num_samples]
An array containing the treatment group for each unit.
y : array-like, shape = [num_samples]
An array containing the outcome of interest for each unit.
"""
np.random.seed(self.random_state)
# Get treatment group keys
treatment_group_keys = list(set(treatment))
treatment_group_keys.remove(self.control_name)
treatment_group_keys.sort()
self.classes_ = {}
for i, treatment_group_key in enumerate(treatment_group_keys):
self.classes_[treatment_group_key] = i
# Bootstrap
for tree_i in range(len(self.uplift_forest)):
bt_index = np.random.choice(len(X), len(X))
x_train_bt = X[bt_index]
y_train_bt = y[bt_index]
treatment_train_bt = treatment[bt_index]
self.uplift_forest[tree_i].fit(X=x_train_bt, treatment=treatment_train_bt, y=y_train_bt)
@ignore_warnings(category=FutureWarning)
def predict(self, X, full_output=False):
'''
Returns the recommended treatment group and predicted optimal
probability conditional on using the recommended treatment group.
Args
----
X : ndarray, shape = [num_samples, num_features]
An ndarray of the covariates used to train the uplift model.
full_output : bool, optional (default=False)
Whether the UpliftTree algorithm returns upliftScores, pred_nodes
alongside the recommended treatment group and p_hat in the treatment group.
Returns
-------
df_res : DataFrame, shape = [num_samples, (num_treatments + 1)]
A DataFrame containing the predicted delta in each treatment group,
the best treatment group and the maximum delta.
'''
df_res = pd.DataFrame()
y_pred_ensemble = dict()
y_pred_list = np.zeros((X.shape[0], len(self.classes_)))
# Make prediction by each tree
for tree_i in range(len(self.uplift_forest)):
_, _, _, y_pred_full = self.uplift_forest[tree_i].predict(X=X, full_output=True)
if tree_i == 0:
for treatment_group in y_pred_full:
y_pred_ensemble[treatment_group] = (
np.array(y_pred_full[treatment_group])
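# The predict() body is truncated above. A typical end-to-end call, sketched here under the
# assumptions in the class docstring (X, treatment, y arrays; evaluationFunction given as a string):
# clf = UpliftRandomForestClassifier(n_estimators=50, evaluationFunction='KL', control_name='control')
# clf.fit(X, treatment, y)
# df_res = clf.predict(X)   # per-treatment uplift estimates, see the class docstring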
import numpy
from radiomics import base, cMatrices
class RadiomicsGLRLM(base.RadiomicsFeaturesBase):
r"""
A Gray Level Run Length Matrix (GLRLM) quantifies gray level runs, which are defined as the length in number of
pixels, of consecutive pixels that have the same gray level value. In a gray level run length matrix
:math:`\textbf{P}(i,j|\theta)`, the :math:`(i,j)^{\text{th}}` element describes the number of runs with gray level
:math:`i` and length :math:`j` occur in the image (ROI) along angle :math:`\theta`.
As a two dimensional example, consider the following 5x5 image, with 5 discrete gray levels:
.. math::
\textbf{I} = \begin{bmatrix}
5 & 2 & 5 & 4 & 4\\
3 & 3 & 3 & 1 & 3\\
2 & 1 & 1 & 1 & 3\\
4 & 2 & 2 & 2 & 3\\
3 & 5 & 3 & 3 & 2 \end{bmatrix}
The GLRLM for :math:`\theta = 0`, where 0 degrees is the horizontal direction, then becomes:
.. math::
\textbf{P} = \begin{bmatrix}
1 & 0 & 1 & 0 & 0\\
3 & 0 & 1 & 0 & 0\\
4 & 1 & 1 & 0 & 0\\
1 & 1 & 0 & 0 & 0\\
3 & 0 & 0 & 0 & 0 \end{bmatrix}
Let:
- :math:`N_g` be the number of discrete intensity values in the image
- :math:`N_r` be the number of discrete run lengths in the image
- :math:`N_p` be the number of voxels in the image
- :math:`N_r(\theta)` be the number of runs in the image along angle :math:`\theta`, which is equal to
:math:`\sum^{N_g}_{i=1}\sum^{N_r}_{j=1}{\textbf{P}(i,j|\theta)}` and :math:`1 \leq N_r(\theta) \leq N_p`
- :math:`\textbf{P}(i,j|\theta)` be the run length matrix for an arbitrary direction :math:`\theta`
- :math:`p(i,j|\theta)` be the normalized run length matrix, defined as :math:`p(i,j|\theta) =
\frac{\textbf{P}(i,j|\theta)}{N_r(\theta)}`
By default, the value of a feature is calculated on the GLRLM for each angle separately, after which the mean of these
values is returned. If distance weighting is enabled, GLRLMs are weighted by the distance between neighbouring voxels
and then summed and normalised. Features are then calculated on the resultant matrix. The distance between
neighbouring voxels is calculated for each angle using the norm specified in 'weightingNorm'.
The following class specific settings are possible:
- weightingNorm [None]: string, indicates which norm should be used when applying distance weighting.
Enumerated setting, possible values:
- 'manhattan': first order norm
- 'euclidean': second order norm
- 'infinity': infinity norm.
- 'no_weighting': GLRLMs are weighted by factor 1 and summed
- None: Applies no weighting, mean of values calculated on separate matrices is returned.
In case of other values, a warning is logged and option 'no_weighting' is used.
References
- <NAME>. 1975. Texture analysis using gray level run lengths. Computer Graphics and Image Processing,
4(2):172-179.
- <NAME>., <NAME>., <NAME>. 1990. Use of gray value distribution of run length for texture analysis.
Pattern Recognition Letters, 11(6):415-419
- <NAME>., <NAME>., <NAME>., <NAME>. 2004. Run-Length Encoding For Volumetric Texture. International Conference on
Visualization, Imaging and Image Processing (VIIP), p. 452-458
- <NAME>. 1998. Texture information in run-length matrices. IEEE Transactions on Image Processing 7(11):1602-1609.
- `<NAME>., <NAME>. Run-Length Matrices For Texture Analysis. Insight Journal 2008 January - June.
<http://www.insight-journal.org/browse/publication/231>`_
"""
def __init__(self, inputImage, inputMask, **kwargs):
super(RadiomicsGLRLM, self).__init__(inputImage, inputMask, **kwargs)
self.weightingNorm = kwargs.get('weightingNorm', None) # manhattan, euclidean, infinity
self.P_glrlm = None
self.imageArray = self._applyBinning(self.imageArray)
def _initCalculation(self, voxelCoordinates=None):
self.P_glrlm = self._calculateMatrix(voxelCoordinates)
self._calculateCoefficients()
self.logger.debug('GLRLM feature class initialized, calculated GLRLM with shape %s', self.P_glrlm.shape)
def _calculateMatrix(self, voxelCoordinates=None):
self.logger.debug('Calculating GLRLM matrix in C')
Ng = self.coefficients['Ng']
Nr = numpy.max(self.imageArray.shape)
matrix_args = [
self.imageArray,
self.maskArray,
Ng,
Nr,
self.settings.get('force2D', False),
self.settings.get('force2Ddimension', 0)
]
if self.voxelBased:
matrix_args += [self.settings.get('kernelRadius', 1), voxelCoordinates]
P_glrlm, angles = cMatrices.calculate_glrlm(*matrix_args) # shape (Nvox, Ng, Nr, Na)
self.logger.debug('Process calculated matrix')
# Delete rows that specify gray levels not present in the ROI
NgVector = range(1, Ng + 1) # All possible gray values
GrayLevels = self.coefficients['grayLevels'] # Gray values present in ROI
emptyGrayLevels = numpy.array(list(set(NgVector) - set(GrayLevels)), dtype=int) # Gray values NOT present in ROI
P_glrlm = numpy.delete(P_glrlm, emptyGrayLevels - 1, 1)
# Optionally apply a weighting factor
if self.weightingNorm is not None:
self.logger.debug('Applying weighting (%s)', self.weightingNorm)
pixelSpacing = self.inputImage.GetSpacing()[::-1]
weights = numpy.empty(len(angles))
for a_idx, a in enumerate(angles):
if self.weightingNorm == 'infinity':
weights[a_idx] = max(numpy.abs(a) * pixelSpacing)
elif self.weightingNorm == 'euclidean':
weights[a_idx] = numpy.sqrt(numpy.sum((numpy.abs(a) * pixelSpacing) ** 2))
elif self.weightingNorm == 'manhattan':
weights[a_idx] = numpy.sum(numpy.abs(a) * pixelSpacing)
elif self.weightingNorm == 'no_weighting':
weights[a_idx] = 1
else:
self.logger.warning('weighting norm "%s" is unknown, weighting factor is set to 1', self.weightingNorm)
weights[a_idx] = 1
P_glrlm = numpy.sum(P_glrlm * weights[None, None, None, :], 3, keepdims=True)
Nr = numpy.sum(P_glrlm, (1, 2))
# Delete empty angles if no weighting is applied
if P_glrlm.shape[3] > 1:
emptyAngles = numpy.where(numpy.sum(Nr, 0) == 0)
if len(emptyAngles[0]) > 0: # One or more angles are 'empty'
self.logger.debug('Deleting %d empty angles:\n%s', len(emptyAngles[0]), angles[emptyAngles])
P_glrlm = numpy.delete(P_glrlm, emptyAngles, 3)
Nr = numpy.delete(Nr, emptyAngles, 1)
else:
self.logger.debug('No empty angles')
Nr[Nr == 0] = numpy.nan # set sum to numpy.spacing(1) if sum is 0?
self.coefficients['Nr'] = Nr
return P_glrlm
def _calculateCoefficients(self):
self.logger.debug('Calculating GLRLM coefficients')
pr = numpy.sum(self.P_glrlm, 1) # shape (Nvox, Nr, Na)
pg = numpy.sum(self.P_glrlm, 2) # shape (Nvox, Ng, Na)
ivector = self.coefficients['grayLevels'].astype(float) # shape (Ng,)
jvector = numpy.arange(1, self.P_glrlm.shape[2] + 1, dtype=numpy.float64) # shape (Nr,)
# Delete columns for run lengths not present in the ROI
emptyRunLengths = numpy.where(numpy.sum(pr, (0, 2)) == 0)
self.P_glrlm = numpy.delete(self.P_glrlm, emptyRunLengths, 2)
jvector = numpy.delete(jvector, emptyRunLengths)
pr = numpy.delete(pr, emptyRunLengths, 1)
self.coefficients['pr'] = pr
self.coefficients['pg'] = pg
self.coefficients['ivector'] = ivector
self.coefficients['jvector'] = jvector
def getShortRunEmphasisFeatureValue(self):
r"""
**1. Short Run Emphasis (SRE)**
.. math::
\textit{SRE} = \frac{\sum^{N_g}_{i=1}\sum^{N_r}_{j=1}{\frac{\textbf{P}(i,j|\theta)}{j^2}}}{N_r(\theta)}
SRE is a measure of the distribution of short run lengths, with a greater value indicative of shorter run lengths
and finer textural textures.
"""
pr = self.coefficients['pr']
jvector = self.coefficients['jvector']
Nr = self.coefficients['Nr']
sre = numpy.sum((pr / (jvector[None, :, None] ** 2)), 1) / Nr
return numpy.nanmean(sre, 1)
def getLongRunEmphasisFeatureValue(self):
r"""
**2. Long Run Emphasis (LRE)**
.. math::
\textit{LRE} = \frac{\sum^{N_g}_{i=1}\sum^{N_r}_{j=1}{\textbf{P}(i,j|\theta)j^2}}{N_r(\theta)}
LRE is a measure of the distribution of long run lengths, with a greater value indicative of longer run lengths and
coarser structural textures.
"""
pr = self.coefficients['pr']
jvector = self.coefficients['jvector']
Nr = self.coefficients['Nr']
lre = numpy.sum((pr * (jvector[None, :, None] ** 2)), 1) / Nr
return numpy.nanmean(lre, 1)
def getGrayLevelNonUniformityFeatureValue(self):
r"""
**3. Gray Level Non-Uniformity (GLN)**
.. math::
\textit{GLN} = \frac{\sum^{N_g}_{i=1}\left(\sum^{N_r}_{j=1}{\textbf{P}(i,j|\theta)}\right)^2}{N_r(\theta)}
GLN measures the similarity of gray-level intensity values in the image, where a lower GLN value correlates with a
greater similarity in intensity values.
"""
pg = self.coefficients['pg']
Nr = self.coefficients['Nr']
gln = numpy.sum((pg ** 2), 1) / Nr
return numpy.nanmean(gln, 1)
def getGrayLevelNonUniformityNormalizedFeatureValue(self):
r"""
**4. Gray Level Non-Uniformity Normalized (GLNN)**
.. math::
\textit{GLNN} = \frac{\sum^{N_g}_{i=1}\left(\sum^{N_r}_{j=1}{\textbf{P}(i,j|\theta)}\right)^2}{N_r(\theta)^2}
GLNN measures the similarity of gray-level intensity values in the image, where a lower GLNN value correlates with a
greater similarity in intensity values. This is the normalized version of the GLN formula.
"""
pg = self.coefficients['pg']
Nr = self.coefficients['Nr']
glnn = numpy.sum(pg ** 2, 1) / (Nr ** 2)
return numpy.nanmean(glnn, 1)
def getRunLengthNonUniformityFeatureValue(self):
r"""
**5. Run Length Non-Uniformity (RLN)**
.. math::
\textit{RLN} = \frac{\sum^{N_r}_{j=1}\left(\sum^{N_g}_{i=1}{\textbf{P}(i,j|\theta)}\right)^2}{N_r(\theta)}
RLN measures the similarity of run lengths throughout the image, with a lower value indicating more homogeneity
among run lengths in the image.
"""
pr = self.coefficients['pr']
Nr = self.coefficients['Nr']
rln = numpy.sum((pr ** 2), 1) / Nr
return numpy.nanmean(rln, 1)
def getRunLengthNonUniformityNormalizedFeatureValue(self):
r"""
**6. Run Length Non-Uniformity Normalized (RLNN)**
.. math::
\textit{RLNN} = \frac{\sum^{N_r}_{j=1}\left(\sum^{N_g}_{i=1}{\textbf{P}(i,j|\theta)}\right)^2}{N_r(\theta)^2}
RLNN measures the similarity of run lengths throughout the image, with a lower value indicating more homogeneity
among run lengths in the image. This is the normalized version of the RLN formula.
"""
pr = self.coefficients['pr']
Nr = self.coefficients['Nr']
rlnn = numpy.sum((pr ** 2), 1) / Nr ** 2
return numpy.nanmean(rlnn, 1)
def getRunPercentageFeatureValue(self):
r"""
**7. Run Percentage (RP)**
.. math::
\textit{RP} = {\frac{N_r(\theta)}{N_p}}
RP measures the coarseness of the texture by taking the ratio of number of runs and number of voxels in the ROI.
Values are in range :math:`\frac{1}{N_p} \leq RP \leq 1`, with higher values indicating a larger portion of the ROI
consists of short runs (indicating a finer texture).
.. note::
Note that when weighting is applied and matrices are merged before calculation, :math:`N_p` is multiplied by
:math:`n` number of matrices merged to ensure correct normalization (as each voxel is considered :math:`n` times)
"""
pr = self.coefficients['pr']
jvector = self.coefficients['jvector']
Nr = self.coefficients['Nr']
Np = numpy.sum(pr * jvector[None, :, None], 1) # shape (Nvox, Na)
rp = Nr / Np
return numpy.nanmean(rp, 1)
def getGrayLevelVarianceFeatureValue(self):
r"""
**8. Gray Level Variance (GLV)**
.. math::
\textit{GLV} = \displaystyle\sum^{N_g}_{i=1}\displaystyle\sum^{N_r}_{j=1}{p(i,j|\theta)(i - \mu)^2}
Here, :math:`\mu = \displaystyle\sum^{N_g}_{i=1}\displaystyle\sum^{N_r}_{j=1}{p(i,j|\theta)i}`
GLV measures the variance in gray level intensity for the runs.
"""
ivector = self.coefficients['ivector']
Nr = self.coefficients['Nr']
pg = self.coefficients['pg'] / Nr[:, None, :] # divide by Nr to get the normalized matrix
u_i = numpy.sum(pg * ivector[None, :, None], 1, keepdims=True)
glv = numpy.sum(pg * (ivector[None, :, None] - u_i) ** 2, 1)
return numpy.nanmean(glv, 1)
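# Usage sketch (hedged; assumes the pyradiomics base-class API with enableAllFeatures()/execute(),
# where image and mask are SimpleITK images):
# glrlmFeatures = RadiomicsGLRLM(image, mask, binWidth=25)
# glrlmFeatures.enableAllFeatures()
# results = glrlmFeatures.execute()   # mapping of feature name -> value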
import os
import tempfile
import unittest
import numpy as np
from keras_pos_embd.backend import keras
from keras_pos_embd import TrigPosEmbedding
class TestSinCosPosEmbd(unittest.TestCase):
def test_invalid_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=5,
)
def test_missing_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
)
def test_brute(self):
seq_len = np.random.randint(1, 10)
embd_dim = np.random.randint(1, 20) * 2
indices = np.expand_dims(np.arange(seq_len), 0)
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len,),
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=embd_dim,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(indices)[0].tolist()
for i in range(seq_len):
for j in range(embd_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embd_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embd_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embd_dim, i, j, expect, actual))
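# The reference values above implement the sinusoidal encoding from "Attention Is All You Need":
# PE(pos, 2i) = sin(pos / 10000^(2i / d)) and PE(pos, 2i + 1) = cos(pos / 10000^(2i / d)).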
def test_add(self):
seq_len = np.random.randint(1, 10)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, embed_dim))
'''
Code for the CRNN model and its training.
Also has the data_generator used. The necessary reshaping of the matrices, normalization, etc. is done here.
Take note of the paths to the data_generator.
Previous knowledge of generators and keras/tensorflow required to understand the code.
'''
import numpy as np
import librosa
import os
#import logging
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau,Callback
import keras as k
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU,Dropout
from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="1"
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = False # to log device placement (on which device the operation ran)
# (nothing gets printed in Jupyter, only if you run it standalone)
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
import h5py
import json
import os
#import csv
import sys
#import pandas as pd
#import mir_eval
import math
from sklearn.preprocessing import LabelBinarizer,normalize
def train_model(model):
'''
Compiles and trains the given neural network model with the settings defined below.
:param model: keras.Model - Constructed model
:return: model: the trained model
'''
# x_train, y_train, x_validation, y_validation = load_dataset_TD(dataset_number=args.dataset_number, args=args)
#
# dataset_train_size = x_train.shape[0] # First dimension gives the number of samples
# dataset_validation_size = x_validation.shape[0]
batch_size = 16
# Set the optimizers
opt_ADAM = Adam(clipnorm=1., clipvalue=0.5)
opt_SGD = SGD(lr=0.0005, decay=1e-4, momentum=0.9, nesterov=True)
# Compile the model
model.compile(loss='categorical_crossentropy', optimizer=opt_ADAM, metrics=['accuracy'])
# Use either a part of training set per epoch or all the set per epoch
# if args.use_part_of_training_set_per_epoch:
# number_of_batches_train = np.int(np.floor(args.training_amount_number_of_samples/args.batch_size))
# else:
# number_of_batches_train = np.max((np.floor((dataset_train_size) / args.batch_size), 1))
#
# number_of_batches_validation = np.max((np.floor(dataset_validation_size / args.batch_size), 1))
# if args.use_part_of_training_set:
# filename = 'model{0}_' \
# 'datasetNumber-{1}_' \
# 'augment-{2}_patchSize-{3}_' \
# 'numberOfPatches-{4}_' \
# 'batchSize-{5}_' \
# 'batchInOneEpoch-{6}_' \
# 'trainingAmountPercentage-{7}'.format(
# args.model_name, args.dataset_number, args.augment_data, args.patch_size, args.number_of_patches,
# args.batch_size, number_of_batches_train, np.int(args.training_amount_percentage))
# else:
# filename = 'model{0}_' \
# 'datasetNumber-{1}_' \
# 'augment-{2}_' \
# 'patchSize-{3}_' \
# 'numberOfPatches-{4}_' \
# 'batchSize-{5}_' \
# 'batchInOneEpoch-{6}'.format(
# args.model_name, args.dataset_number, args.augment_data, args.patch_size, args.number_of_patches,
# args.batch_size, number_of_batches_train)
cb = set_callbacks()
model.fit_generator(generator = generator(train_names),
steps_per_epoch = 85,
epochs = 100,
validation_data= generator(val_names),
validation_steps= 20,
callbacks= cb,
verbose= 1)
#model.load_weights('{0}/{1}.h5'.format(get_trained_model_save_path(dataset_name=args.dataset_name), filename))
return model
def sq(x):
from keras import backend as K
return K.squeeze(x, axis=4)
def construct_model():
'''
Constructs the CRNN model.
:return: model: Constructed Model object
'''
number_of_patches = 20
patch_size = 50
feature_size = 301
number_of_classes = 61
step_notes = 5
RNN = 'LSTM'
verbose = False
kernel_coeff = 0.00001
number_of_channels = 1
input_shape = (number_of_patches, patch_size, feature_size, number_of_channels)
inputs = Input(shape=input_shape)
zp = ZeroPadding3D(padding=(0, 0, 2))(inputs)
#### CNN LAYERS ####
cnn1 = TimeDistributed(Conv2D(64, (1, 5),
padding='valid',
activation='relu',
strides=(1, np.int(step_notes)),
kernel_regularizer=k.regularizers.l2(kernel_coeff),
data_format='channels_last', name='cnn1'))(inputs)
cnn1a = BatchNormalization()(cnn1)
zp = ZeroPadding3D(padding=(0, 1, 2))(cnn1a)
cnn2 = TimeDistributed(
Conv2D(64, (3, 5), padding='valid', activation='relu', data_format='channels_last', name='cnn2'))(zp)
cnn2a = BatchNormalization()(cnn2)
zp = ZeroPadding3D(padding=(0, 1, 1))(cnn2a)
cnn3 = TimeDistributed(
Conv2D(64, (3, 3), padding='valid', activation='relu', data_format='channels_last', name='cnn3'))(zp)
cnn3a = BatchNormalization()(cnn3)
zp = ZeroPadding3D(padding=(0, 1, 7))(cnn3a)
cnn4 = TimeDistributed(
Conv2D(16, (3, 15), padding='valid', activation='relu', data_format='channels_last', name='cnn4'))(zp)
cnn4a = BatchNormalization()(cnn4)
cnn5 = TimeDistributed(
Conv2D(1, (1, 1), padding='same', activation='relu', data_format='channels_last', name='cnn5'))(cnn4a)
#### RESHAPING LAYERS ####
cnn5a = Lambda(sq)(cnn5)
cnn5b = Reshape((number_of_patches * patch_size, -1), name='cnn5-reshape')(cnn5a)
#### BIDIRECTIONAL RNN LAYERS ####
# if RNN == 'LSTM':
# rnn1 = Bidirectional(LSTM(128,
# kernel_regularizer=k.regularizers.l1_l2(0.0001),
# return_sequences=True), name='rnn1')(cnn5b)
# elif RNN == 'GRU':
# rnn1 = Bidirectional(GRU(128,
# kernel_regularizer=k.regularizers.l1_l2(0.0001),
# return_sequences=True), name='rnn1')(cnn5b)
#### CLASSIFICATION (DENSE) LAYER ####
classifier = TimeDistributed(Dense(number_of_classes,
activation='softmax',
kernel_regularizer=k.regularizers.l2(0.00001),
bias_regularizer=k.regularizers.l2()), name='output')(cnn5b)
model = Model(inputs=inputs, outputs=classifier)
if verbose == True or 1:
model.summary()
print('{0} as RNN!'.format(RNN))
return model
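# Usage sketch (train_names, val_names and generator() are assumed to be defined elsewhere in the project):
# model = construct_model()
# model = train_model(model)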
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
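# Example: unit-length scaling; the zero vector is returned unchanged. Note that this local helper
# shadows the sklearn.preprocessing.normalize imported above.
# normalize(np.array([3.0, 4.0]))   # -> array([0.6, 0.8])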
def song(sp, gp):
HF0 = np.load(sp)
import numpy as np
import datetime
from collections import defaultdict
from argparse import Namespace
import json
import os
import copy
from shutil import copyfile
from pointcloud import translate_transform_to_new_center_of_rotation
def ns_to_dict(ns):
return {k: ns_to_dict(v) if type(v) == Namespace else v for k, v in ns.__dict__.items()}
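# Example: recursively convert nested argparse-style Namespaces into plain dictionaries.
# ns_to_dict(Namespace(a=1, b=Namespace(c=2)))   # -> {'a': 1, 'b': {'c': 2}}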
def eval_translation(t, gt_t):
levels = np.array([0, 0, 0])
level_thresholds = np.array([0.02, 0.1, 0.2])
dist = np.linalg.norm(t[:2] - gt_t[:2])
for idx, thresh in enumerate(level_thresholds):
if dist < thresh:
levels[idx] = 1
return dist, levels
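# Example: a 7 cm planar error misses the 0.02 m level but passes the 0.1 m and 0.2 m levels.
# eval_translation(np.array([0.07, 0.0, 0.0]), np.zeros(3))   # -> (~0.07, array([0, 1, 1]))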
def angle_diff(a, b):
d = b - a
return float((d + np.pi) % (np.pi * 2.0) - np.pi)
def eval_angle(a, gt_a, accept_inverted_angle):
levels = np.array([0, 0, 0])
level_thresholds = np.array([1., 5.0, 10.0])
dist = np.abs(angle_diff(a, gt_a)) / np.pi * 180.
if accept_inverted_angle:
dist = np.minimum(dist, np.abs(angle_diff(a + np.pi, gt_a)) / np.pi * 180.)
for idx, thresh in enumerate(level_thresholds):
if dist < thresh:
levels[idx] = 1
return dist, levels
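# Example: 179 deg vs -179 deg differ by only ~2 deg after wrapping, so the 5 and 10 deg levels pass.
# eval_angle(np.deg2rad(179.0), np.deg2rad(-179.0), accept_inverted_angle=False)   # -> (~2.0, array([0, 1, 1]))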
def eval_transform(t, gt_t, a, gt_a, accept_inverted_angle):
_, levels_translation = eval_translation(t, gt_t)
_, levels_angle = eval_angle(a, gt_a, accept_inverted_angle=accept_inverted_angle)
return np.minimum(levels_translation, levels_angle)
def evaluate_held(cfg, val_idxs, all_pred_translations, all_pred_angles, all_gt_translations, all_gt_angles, eval_dir=None, avg_window=5, mean_time=0):
tracks = defaultdict(dict)
for idx, file_idx in enumerate(val_idxs):
meta = json.load(open(f'{cfg.data.basepath}/meta/{str(file_idx).zfill(8)}.json', 'r'))
trackid = meta['trackid']
frame2 = meta['frames'][1]
timestamp1, timestamp2 = meta['timestamps']
pred_translation = all_pred_translations[idx]
time_passed = np.maximum(0.05, timestamp2 - timestamp1)
tracks[trackid][frame2] = (pred_translation, time_passed)
velocities = defaultdict(list)
for trackid, track in tracks.items():
track_translations = list(zip(*track.items()))[1]
track_translations = np.array(track_translations)
# print(track_translations.shape)
if eval_dir is not None:
with open(f'{eval_dir}/track{trackid}.txt', 'w') as file_handler:
for idx, (track_translation, time_passed) in enumerate(track_translations):
prev_translations = track_translations[max(0, idx - avg_window + 1):idx + avg_window + 1]
# print(trackid, idx, prev_translations.shape)
prev_velocities = prev_translations[:, 0] / prev_translations[:, 1]
mean_velocity = np.mean(prev_velocities, axis=0).copy()
# mean_translation[0][2] = 0.
# print(mean_translation)
mean_velocity_length = np.linalg.norm(mean_velocity[:2])
velocities[trackid].append(mean_velocity_length)
file_handler.write(f'{mean_velocity_length}\n')
return velocities, dict(mean_time=mean_time)
def process_velocities(tracks, eval_dir, avg_window):
if eval_dir is not None:
eval_dir = eval_dir + '/velocities'
os.makedirs(eval_dir, exist_ok=True)
else:
return
velocities = defaultdict(list)
for intermediate_trackid, traj in tracks.items():
max_frame = max(traj.keys())
start_frames = [idx for idx in range(max_frame + 1) if idx in traj.keys() and idx - 1 not in traj.keys()]
for start_frame in start_frames:
new_track_id = intermediate_trackid + start_frame - 1 # -1 because start frame is not actually the start frame, but the second after the initial pose (pc1)
track_translations = [(np.array([0., 0, 0]), 0.1)]
for curr_frame in range(start_frame, max_frame + 1):
track_translations.append(traj[curr_frame])
if curr_frame + 1 not in traj.keys():
break
# track_translations = list(zip(*track.items()))[1]
track_translations = np.array(track_translations)
# print(track_translations.shape)
if eval_dir is not None:
with open(f'{eval_dir}/track{new_track_id:09}.txt', 'w') as file_handler:
# velocities[new_track_id].append(0.)
# file_handler.write(f'{0.}\n')
for idx, (track_translation, time_passed) in enumerate(track_translations):
prev_translations = track_translations[max(0, idx - avg_window):idx + avg_window + 1]
prev_velocities = prev_translations[:, 0] / prev_translations[:, 1]
mean_velocity = np.mean(prev_velocities, axis=0).copy()
mean_velocity_length = np.linalg.norm(mean_velocity[:2])
velocities[new_track_id].append(mean_velocity_length)
file_handler.write(f'{mean_velocity_length}\n')
return velocities
def get_at_dist_measures(eval_measures, dist):
return Namespace(
corr_levels=eval_measures[dist]['corr_levels'].tolist(),
corr_levels_translation=eval_measures[dist]['corr_levels_translation'].tolist(),
mean_dist_translation=eval_measures[dist]['mean_dist_translation'],
mean_sq_dist_translation=eval_measures[dist]['mean_sq_dist_translation'],
corr_levels_angles=eval_measures[dist]['corr_levels_angles'].tolist(),
mean_dist_angle=eval_measures[dist]['mean_dist_angle'],
mean_sq_dist_angle=eval_measures[dist]['mean_sq_dist_angle'],
num=eval_measures[dist]['num'],
)
def evaluate(cfg, val_idxs, all_pred_translations, all_pred_angles, all_gt_translations, all_gt_angles, all_pred_centers, all_gt_pc1centers, eval_dir=None, accept_inverted_angle=False, detailed_eval=False, avg_window=5, mean_time=0):
new_all_pred_translations = translate_transform_to_new_center_of_rotation(all_pred_translations, all_pred_angles, all_pred_centers, all_gt_pc1centers)
np.set_printoptions(precision=3, suppress=True)
# print(np.concatenate([all_pred_translations, new_all_pred_translations, all_gt_translations, all_pred_angles, all_gt_angles], axis=1))
tracks = defaultdict(dict)
empty_dict = {'corr_levels_translation': np.array([0, 0, 0], dtype=float), 'corr_levels_angles': np.array([0, 0, 0], dtype=float)
import numpy as np
from ..resources import Buffer
from ._base import Geometry
from .utils import merge
def generate_torso(
radius_bottom,
radius_top,
height,
radial_segments,
height_segments,
theta_start,
theta_length,
):
# compute POSITIONS assuming x-y horizontal plane and z up axis
# radius for each vertex ring from bottom to top
n_rings = height_segments + 1
radii = np.linspace(radius_bottom, radius_top, num=n_rings, dtype=np.float32)
# height for each vertex ring from bottom to top
half_height = height / 2
heights = np.linspace(-half_height, half_height, num=n_rings, dtype=np.float32)
# to enable texture mapping to fully wrap around the cylinder,
# we can't close the geometry and need a degenerate vertex
n_vertices = radial_segments + 1
# xy coordinates on unit circle for a single vertex ring
theta = np.linspace(
theta_start, theta_start + theta_length, num=n_vertices, dtype=np.float32
)
ring_xy = np.column_stack([np.cos(theta), np.sin(theta)])
# put all the rings together
positions = np.empty((n_rings, n_vertices, 3), dtype=np.float32)
positions[..., :2] = ring_xy[None, ...] * radii[:, None, None]
positions[..., 2] = heights[:, None]
# the NORMALS are the same for every ring, so compute for only one ring
# and then repeat
slope = (radius_bottom - radius_top) / height
ring_normals = np.empty(positions.shape[1:], dtype=np.float32)
ring_normals[..., :2] = ring_xy
ring_normals[..., 2] = slope
ring_normals /= np.linalg.norm(ring_normals, axis=-1)[:, None]
normals = np.empty_like(positions)
normals[:] = ring_normals[None, ...]
# the TEXTURE COORDS
# u maps 0..1 to theta_start..theta_start+theta_length
# v maps 0..1 to -height/2..height/2
ring_u = (theta - theta_start) / theta_length
ring_v = (heights / height) + 0.5
texcoords = np.empty((n_rings, n_vertices, 2), dtype=np.float32)
texcoords[..., 0] = ring_u[None, :]
texcoords[..., 1] = ring_v[:, None]
# the face INDEX
# the amount of vertices
indices = np.arange(n_rings * n_vertices, dtype=np.uint32).reshape(
(n_rings, n_vertices)
)
# for every panel (height_segments, radial_segments) there is a quad (2, 3)
index = np.empty((height_segments, radial_segments, 2, 3), dtype=np.uint32)
# create a grid of initial indices for the panels
index[:, :, 0, 0] = indices[
np.arange(height_segments)[:, None], np.arange(radial_segments)
import numpy as np
from typing import List
from database import Graph, CelestialGraph, CelestialBody
def create_circle(amount: int, offset=(0, 0), radius=1) -> np.ndarray:
step = 2*np.pi / (amount)
points = np.array([(np.sin(step * i), np.cos(step * i)) for i in range(amount)])
points = points * radius
points = points + offset
return points
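# Example: four evenly spaced points on the unit circle, starting at the top and going clockwise.
# create_circle(4)   # -> approximately [[0, 1], [1, 0], [0, -1], [-1, 0]]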
def _create_circle_radial_points(radial_positions: List[float], offset=(0, 0), radius=1) -> np.ndarray:
points = np.array([(np.sin(radial), np.cos(radial)) for radial in radial_positions])
points = points * radius
points = points + offset
return points
def create_circle_graph(radial_positions: List[float], offset=(0, 0), radius=1, edges=None) -> Graph:
points = _create_circle_radial_points(
radial_positions=radial_positions,
offset=offset,
radius=radius)
if edges is None:
ad_matrix = np.ones((len(points), len(points)))
ad_matrix[np.diag_indices_from(ad_matrix)] = 0
return CelestialGraph(points, ad_matrix=ad_matrix)
return CelestialGraph(points, E=edges)
def create_random_circle(amount: int, offset=(0, 0), seed=None):
if isinstance(seed, np.random.Generator):
gen = seed
else:
gen = np.random.Generator(np.random.BitGenerator(seed))
r_pos = gen.random(amount) * 2*np.pi
points = np.array([(np.sin(r_pos[i])
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Counter
import numpy as np
from numpy.core.fromnumeric import prod
from functools import reduce
from graph.dim import Dim
from graph.types import ConstantInputParameters, NNEdge, ReshapeParameters
from importer.common.provisional_dim import ProvisionalDim
from importer.onnx.common import logger
from ..backend_handler import BackendHandler
from ..handler import onnx_op
from importer.common.constant_mixin import ConstantMixin
@onnx_op("Reshape")
class Reshape(ConstantMixin, BackendHandler):
@classmethod
def moves_unknown(cls, inp, shape):
if None not in inp or Counter(inp)[None] > 1:
return False
if -1 not in shape or None in shape:
return False
inp_wo_nones = [dim for dim in inp if dim != None]
shape_wo_all = [dim for dim in shape if dim != -1]
if np.prod(inp_wo_nones) != np.prod(shape_wo_all):
return False
none_idx = list(inp).index(None)
return none_idx >= len(shape_wo_all) or shape_wo_all[none_idx] != -1
@classmethod
def _common(cls, node, **kwargs):
all_nodes = kwargs['all_nodes']
G = kwargs['G']
valid_name = kwargs['valid_name']
inputs = [all_nodes[inp] for inp in node.input]
if cls.SINCE_VERSION == 1:
shape = np.array(node.attrs["shape"])
# Part of Spatial Math Toolbox for Python
# Copyright (c) 2000 <NAME>
# MIT Licence, see details in top-level file: LICENCE
"""
Classes to abstract 3D pose and orientation using matrices in SE(3) and SO(3)
To use::
from spatialmath.pose3d import *
T = SE3.Rx(0.3)
import spatialmath as sm
T = sm.SE3.Rx(0.3)
.. inheritance-diagram:: spatialmath.pose3d
:top-classes: collections.UserList
:parts: 1
.. image:: ../figs/pose-values.png
"""
# pylint: disable=invalid-name
import numpy as np
from spatialmath import base
from spatialmath.super_pose import SMPose
# ============================== SO3 =====================================#
class SO3(SMPose):
"""
SO(3) matrix class
This subclass represents rotations in 3D space. Internally it is a 3x3
orthogonal matrix belonging to the group SO(3).
.. inheritance-diagram:: spatialmath.pose3d.SO3
:top-classes: collections.UserList
:parts: 1
"""
def __init__(self, arg=None, *, check=True):
"""
Construct new SO(3) object
:rtype: SO3 instance
There are multiple call signatures:
- ``SO3()`` is an ``SO3`` instance with one value -- a 3x3 identity
matrix which corresponds to a null rotation
- ``SO3(R)`` is an ``SO3`` instance with the value ``R`` which is a
3x3 numpy array representing an SO(3) rotation matrix. If ``check``
is ``True`` check the matrix belongs to SO(3).
- ``SO3([R1, R2, ... RN])`` is an ``SO3`` instance with ``N`` values
given by the elements ``Ri`` each of which is a 3x3 NumPy array
representing an SO(3) matrix. If ``check`` is ``True`` check the
matrix belongs to SO(3).
- ``SO3([X1, X2, ... XN])`` is an ``SO3`` instance with ``N`` values
given by the elements ``Xi`` each of which is an SO3 instance.
:SymPy: supported
"""
super().__init__()
if not super().arghandler(arg, check=check):
raise ValueError('bad argument to constructor')
@staticmethod
def _identity():
return np.eye(3)
# ------------------------------------------------------------------------ #
@property
def shape(self):
"""
Shape of the object's internal matrix representation
:return: (3,3)
:rtype: tuple
Each value within the ``SO3`` instance is a NumPy array of this shape.
"""
return (3, 3)
@property
def R(self):
"""
SO(3) or SE(3) as rotation matrix
:return: rotational component
:rtype: numpy.ndarray, shape=(3,3)
``x.R`` is the rotation matrix component of ``x`` as an array with
shape (3,3). If ``len(x) > 1``, return an array with shape=(N,3,3).
.. warning:: The i'th rotation matrix is ``x[i,:,:]`` or simply
``x[i]``. This is different to the MATLAB version where the i'th
rotation matrix is ``x(:,:,i)``.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> x = SO3.Rx(0.3)
>>> x.R
:SymPy: supported
"""
if len(self) == 1:
return self.A[:3, :3]
else:
return np.array([x[:3, :3] for x in self.A])
@property
def n(self):
"""
Normal vector of SO(3) or SE(3)
:return: normal vector
:rtype: numpy.ndarray, shape=(3,)
This is the first column of the rotation submatrix, sometimes called the
*normal vector*. It is parallel to the x-axis of the frame defined by
this pose.
"""
return self.A[:3, 0]
@property
def o(self):
"""
Orientation vector of SO(3) or SE(3)
:return: orientation vector
:rtype: numpy.ndarray, shape=(3,)
This is the second column of the rotation submatrix, sometimes called
the *orientation vector*. It is parallel to the y-axis of the frame
defined by this pose.
"""
return self.A[:3, 1]
@property
def a(self):
"""
Approach vector of SO(3) or SE(3)
:return: approach vector
:rtype: numpy.ndarray, shape=(3,)
This is the third column of the rotation submatrix, sometimes called the
*approach vector*. It is parallel to the z-axis of the frame defined by
this pose.
"""
return self.A[:3, 2]
# ------------------------------------------------------------------------ #
def inv(self):
"""
Inverse of SO(3)
:return: inverse
:rtype: SO3 instance
Efficiently compute the inverse of each of the SO(3) values taking into
account the matrix structure. For an SO(3) matrix the inverse is the
transpose.
"""
if len(self) == 1:
return SO3(self.A.T, check=False)
else:
return SO3([x.T for x in self.A], check=False)
def eul(self, unit='rad', flip=False):
r"""
SO(3) or SE(3) as Euler angles
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: 3-vector of Euler angles
:rtype: ndarray(3,), ndarray(n,3)
``x.eul`` is the Euler angle representation of the rotation. Euler angles are
a 3-vector :math:`(\phi, \theta, \psi)` which correspond to consecutive
rotations about the Z, Y, Z axes respectively.
If ``len(x)`` is:
- 1, return an ndarray with shape=(3,)
- N>1, return ndarray with shape=(3,N)
:seealso: :func:`~spatialmath.pose3d.SE3.Eul`, :func:`~spatialmath.base.transforms3d.tr2eul`
:SymPy: not supported
"""
if len(self) == 1:
return base.tr2eul(self.A, unit=unit)
else:
return np.array([base.tr2eul(x, unit=unit) for x in self.A])
def rpy(self, unit='rad', order='zyx'):
"""
SO(3) or SE(3) as roll-pitch-yaw angles
:param order: angle sequence order, default to 'zyx'
:type order: str
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: 3-vector of roll-pitch-yaw angles
:rtype: ndarray(3,), ndarray(n,3)
``x.rpy`` is the roll-pitch-yaw angle representation of the rotation. The angles are
a 3-vector :math:`(r, p, y)` which correspond to successive rotations about the axes
specified by ``order``:
- ``'zyx'`` [default], rotate by yaw about the z-axis, then by pitch about the new y-axis,
then by roll about the new x-axis. Convention for a mobile robot with x-axis forward
and y-axis sideways.
- ``'xyz'``, rotate by yaw about the x-axis, then by pitch about the new y-axis,
then by roll about the new z-axis. Convention for a robot gripper with z-axis forward
and y-axis between the gripper fingers.
- ``'yxz'``, rotate by yaw about the y-axis, then by pitch about the new x-axis,
then by roll about the new z-axis. Convention for a camera with z-axis parallel
to the optic axis and x-axis parallel to the pixel rows.
If `len(x)` is:
- 1, return an ndarray with shape=(3,)
- N>1, return ndarray with shape=(3,N)
:seealso: :func:`~spatialmath.pose3d.SE3.RPY`, :func:`~spatialmath.base.transforms3d.tr2rpy`
:SymPy: not supported
"""
if len(self) == 1:
return base.tr2rpy(self.A, unit=unit, order=order)
else:
return np.array([base.tr2rpy(x, unit=unit, order=order) for x in self.A])
def angvec(self, unit='rad'):
r"""
SO(3) or SE(3) as angle and rotation vector
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:param check: check that rotation matrix is valid
:type check: bool
:return: :math:`(\theta, {\bf v})`
:rtype: float, numpy.ndarray, shape=(3,)
``x.angvec()`` is a tuple :math:`(\theta, v)` containing the rotation
angle and a rotation axis which is equivalent to the rotation of
the rotation matrix ``x``.
By default the angle is in radians but can be changed setting `unit='deg'`.
.. note::
- If the input is SE(3) the translation component is ignored.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> SO3.Rz(0.3).angvec()
:seealso: :func:`~spatialmath.quaternion.AngVec`, :func:`~angvec2r`
"""
return base.tr2angvec(self.R, unit=unit)
# ------------------------------------------------------------------------ #
@staticmethod
def isvalid(x, check=True):
"""
Test if matrix is valid SO(3)
:param x: matrix to test
:type x: numpy.ndarray
:return: ``True`` if the matrix is a valid element of SO(3), ie. it is a 3x3
orthonormal matrix with determinant of +1.
:rtype: bool
:seealso: :func:`~spatialmath.base.transform3d.isrot`
"""
return base.isrot(x, check=True)
# ---------------- variant constructors ---------------------------------- #
@classmethod
def Rx(cls, theta, unit='rad'):
"""
Construct a new SO(3) from X-axis rotation
:param θ: rotation angle about the X-axis
:type θ: float or array_like
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: SO(3) rotation
:rtype: SO3 instance
- ``SO3.Rx(θ)`` is an SO(3) rotation of ``θ`` radians about the x-axis
- ``SO3.Rx(θ, "deg")`` as above but ``θ`` is in degrees
If ``theta`` is an array then the result is a sequence of rotations defined by consecutive
elements.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> x = SO3.Rx(np.linspace(0, math.pi, 20))
>>> len(x)
>>> x[7]
"""
return cls([base.rotx(x, unit=unit) for x in base.getvector(theta)], check=False)
@classmethod
def Ry(cls, theta, unit='rad'):
"""
Construct a new SO(3) from Y-axis rotation
:param θ: rotation angle about Y-axis
:type θ: float or array_like
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: SO(3) rotation
:rtype: SO3 instance
- ``SO3.Ry(θ)`` is an SO(3) rotation of ``θ`` radians about the y-axis
- ``SO3.Ry(θ, "deg")`` as above but ``θ`` is in degrees
If ``θ`` is an array then the result is a sequence of rotations defined by consecutive
elements.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> x = SO3.Ry(np.linspace(0, math.pi, 20))
>>> len(x)
>>> x[7]
"""
return cls([base.roty(x, unit=unit) for x in base.getvector(theta)], check=False)
@classmethod
def Rz(cls, theta, unit='rad'):
"""
Construct a new SO(3) from Z-axis rotation
:param θ: rotation angle about Z-axis
:type θ: float or array_like
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: SO(3) rotation
:rtype: SO3 instance
- ``SO3.Rz(θ)`` is an SO(3) rotation of ``θ`` radians about the z-axis
- ``SO3.Rz(θ, "deg")`` as above but ``θ`` is in degrees
If ``θ`` is an array then the result is a sequence of rotations defined by consecutive
elements.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> x = SO3.Rz(np.linspace(0, math.pi, 20))
>>> len(x)
>>> x[7]
"""
return cls([base.rotz(x, unit=unit) for x in base.getvector(theta)], check=False)
@classmethod
def Rand(cls, N=1):
"""
Construct a new SO(3) from random rotation
:param N: number of random rotations
:type N: int
:return: SO(3) rotation matrix
:rtype: SO3 instance
- ``SO3.Rand()`` is a random SO(3) rotation.
- ``SO3.Rand(N)`` is a sequence of N random rotations.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> x = SO3.Rand()
>>> x
:seealso: :func:`spatialmath.quaternion.UnitQuaternion.Rand`
"""
return cls([base.q2r(base.rand()) for _ in range(0, N)], check=False)
@classmethod
def Eul(cls, *angles, unit='rad'):
r"""
Construct a new SO(3) from Euler angles
:param 𝚪: Euler angles
:type 𝚪: array_like or numpy.ndarray with shape=(N,3)
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:return: SO(3) rotation
:rtype: SO3 instance
``SO3.Eul(𝚪)`` is an SO(3) rotation defined by a 3-vector of Euler
angles :math:`\Gamma = (\phi, \theta, \psi)` which correspond to
consecutive rotations about the Z, Y, Z axes respectively. If ``𝚪``
is an Nx3 matrix then the result is a sequence of rotations each
defined by Euler angles corresponding to the rows of ``angles``.
``SO3.Eul(φ, θ, ψ)`` as above but the angles are provided as three
scalars.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> SO3.Eul(0.1, 0.2, 0.3)
>>> SO3.Eul([0.1, 0.2, 0.3])
>>> SO3.Eul(10, 20, 30, 'deg')
:seealso: :func:`~spatialmath.pose3d.SE3.eul`, :func:`~spatialmath.pose3d.SE3.Eul`, :func:`~spatialmath.base.transforms3d.eul2r`
"""
if len(angles) == 1:
angles = angles[0]
if base.isvector(angles, 3):
return cls(base.eul2r(angles, unit=unit), check=False)
else:
return cls([base.eul2r(a, unit=unit) for a in angles], check=False)
@classmethod
def RPY(cls, *angles, unit='rad', order='zyx', ):
r"""
Construct a new SO(3) from roll-pitch-yaw angles
:param angles: roll-pitch-yaw angles
:type angles: array_like(3), array_like(n,3)
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:param order: rotation order: 'zyx' [default], 'xyz', or 'yxz'
:type order: str
:return: SO(3) rotation
:rtype: SO3 instance
- ``SO3.RPY(angles)`` is an SO(3) rotation defined by a 3-vector of
roll, pitch, yaw angles :math:`(\alpha, \beta, \gamma)`. If ``angles``
is an Nx3 matrix then the result is a sequence of rotations each
defined by RPY angles corresponding to the rows of angles. The angles
correspond to successive rotations about the axes specified by
``order``:
- ``'zyx'`` [default], rotate by yaw about the z-axis, then by pitch about the new y-axis,
then by roll about the new x-axis. Convention for a mobile robot with x-axis forward
and y-axis sideways.
- ``'xyz'``, rotate by yaw about the x-axis, then by pitch about the new y-axis,
then by roll about the new z-axis. Convention for a robot gripper with z-axis forward
and y-axis between the gripper fingers.
- ``'yxz'``, rotate by yaw about the y-axis, then by pitch about the new x-axis,
then by roll about the new z-axis. Convention for a camera with z-axis parallel
to the optic axis and x-axis parallel to the pixel rows.
- ``SO3.RPY(⍺, β, 𝛾)`` as above but the angles are provided as three
scalars.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> SO3.RPY(0.1, 0.2, 0.3)
>>> SO3.RPY([0.1, 0.2, 0.3])
>>> SO3.RPY(0.1, 0.2, 0.3, order='xyz')
>>> SO3.RPY(10, 20, 30, 'deg')
:seealso: :func:`~spatialmath.pose3d.SE3.rpy`, :func:`~spatialmath.pose3d.SE3.RPY`, :func:`spatialmath.base.transforms3d.rpy2r`
"""
if len(angles) == 1:
angles = angles[0]
# angles = base.getmatrix(angles, (None, 3))
# return cls(base.rpy2r(angles, order=order, unit=unit), check=False)
if base.isvector(angles, 3):
return cls(base.rpy2r(angles, unit=unit, order=order), check=False)
else:
return cls([base.rpy2r(a, unit=unit, order=order) for a in angles], check=False)
@classmethod
def OA(cls, o, a):
"""
Construct a new SO(3) from two vectors
:param o: 3-vector parallel to Y- axis
:type o: array_like
:param a: 3-vector parallel to the Z-axis
:type o: array_like
:return: SO(3) rotation
:rtype: SO3 instance
``SO3.OA(O, A)`` is an SO(3) rotation defined in terms of
vectors parallel to the Y- and Z-axes of its reference frame. In robotics these axes are
respectively called the *orientation* and *approach* vectors defined such that
R = [N, O, A] and N = O x A.
.. note::
- Only the ``A`` vector is guaranteed to have the same direction in the resulting
rotation matrix
- ``O`` and ``A`` do not have to be unit-length, they are normalized
- ``O`` and ``A`` do not have to be orthogonal, so long as they are not parallel
:seealso: :func:`spatialmath.base.transforms3d.oa2r`
"""
return cls(base.oa2r(o, a), check=False)
@classmethod
def AngVec(cls, theta, v, *, unit='rad'):
r"""
Construct a new SO(3) rotation matrix from rotation angle and axis
:param theta: rotation
:type theta: float
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:param v: rotation axis, 3-vector
:type v: array_like
:return: SO(3) rotation
:rtype: SO3 instance
``SO3.AngVec(theta, V)`` is an SO(3) rotation defined by
a rotation of ``THETA`` about the vector ``V``.
.. note:: If :math:`\theta = 0` the result is an identity matrix, otherwise
``V`` must have a finite length, i.e. :math:`|V| > 0`.
:seealso: :func:`~spatialmath.pose3d.SE3.angvec`, :func:`spatialmath.base.transforms3d.angvec2r`
"""
return cls(base.angvec2r(theta, v, unit=unit), check=False)
@classmethod
def EulerVec(cls, w):
r"""
Construct a new SO(3) rotation matrix from an Euler rotation vector
:param ω: rotation axis
:type ω: 3-element array_like
:return: SO(3) rotation
:rtype: SO3 instance
``SO3.EulerVec(ω)`` is an SO(3) rotation that describes the 3D rotation
defined by a rotation of :math:`\theta = \lVert \omega \rVert` about the
unit 3-vector :math:`\omega / \lVert \omega \rVert`.
Example:
.. runblock:: pycon
>>> from spatialmath import SO3
>>> SO3.EulerVec([0.5,0,0])
.. note:: If :math:`\theta = 0` the result is an identity matrix, otherwise
``V`` must have a finite length, i.e. :math:`|V| > 0`.
:seealso: :func:`~spatialmath.pose3d.SE3.angvec`, :func:`~spatialmath.base.transforms3d.angvec2r`
"""
assert base.isvector(w, 3), 'w must be a 3-vector'
w = base.getvector(w)
theta = base.norm(w)
return cls(base.angvec2r(theta, w), check=False)
@classmethod
def Exp(cls, S, check=True, so3=True):
r"""
Create an SO(3) rotation matrix from so(3)
:param S: Lie algebra so(3)
:type S: numpy ndarray
:param check: check that passed matrix is valid so(3), default True
:type check: bool
:return: SO(3) rotation
:rtype: SO3 instance
- ``SO3.Exp(S)`` is an SO(3) rotation defined by its Lie algebra
which is a 3x3 so(3) matrix (skew symmetric)
- ``SO3.Exp(t)`` is an SO(3) rotation defined by a 3-element twist
vector (the unique elements of the so(3) skew-symmetric matrix)
- ``SO3.Exp(T)`` is a sequence of SO(3) rotations defined by an Nx3 matrix
of twist vectors, one per row.
Note:
- if :math:`\theta = 0` the result is an identity matrix
- an input 3x3 matrix is ambiguous, it could be the first or third case above. In this
case the parameter `so3` is the decider.
:seealso: :func:`spatialmath.base.transforms3d.trexp`, :func:`spatialmath.base.transformsNd.skew`
"""
if base.ismatrix(S, (-1, 3)) and not so3:
return cls([base.trexp(s, check=check) for s in S], check=False)
else:
return cls(base.trexp(S, check=check), check=False)
# ============================== SE3 =====================================#
class SE3(SO3):
"""
SE(3) matrix class
This subclass represents rigid-body motion in 3D space. Internally it is a
4x4 homogeneous transformation matrix belonging to the group SE(3).
.. inheritance-diagram:: spatialmath.pose3d.SE3
:top-classes: collections.UserList
:parts: 1
"""
def __init__(self, x=None, y=None, z=None, *, check=True):
"""
Construct new SE(3) object
:rtype: SE3 instance
There are multiple call signatures:
- ``SE3()`` is an ``SE3`` instance with one value -- a 4x4 identity
matrix which corresponds to a null motion.
- ``SE3(x, y, z)`` is a pure translation of (x,y,z)
- ``SE3(T)`` is an ``SE3`` instance with the value ``T`` which is a 4x4
numpy array representing an SE(3) matrix. If ``check`` is ``True``
check the matrix belongs to SE(3).
- ``SE3(X)`` is an ``SE3`` instance with the same value as ``X``, ie.
a copy.
- ``SE3([T1, T2, ... TN])`` is an ``SE3`` instance with ``N`` values
given by the elements ``Ti`` each of which is a 4x4 NumPy array
representing an SE(3) matrix. If ``check`` is ``True`` check the
matrix belongs to SE(3).
- ``SE3([X1, X2, ... XN])`` is an ``SE3`` instance with ``N`` values
given by the elements ``Xi`` each of which is an SE3 instance.
:SymPy: supported
"""
if y is None and z is None:
# just one argument passed
if super().arghandler(x, check=check):
return
elif base.isvector(x, 3):
# SE3( [x, y, z] )
self.data = [base.transl(x)]
elif isinstance(x, np.ndarray) and x.shape[1] == 3:
# SE3( Nx3 )
self.data = [base.transl(T) for T in x]
else:
raise ValueError('bad argument to constructor')
elif y is not None and z is not None:
# SE3(x, y, z)
self.data = [base.transl(x, y, z)]
@staticmethod
def _identity():
return np.eye(4)
# ------------------------------------------------------------------------ #
@property
def shape(self):
"""
Shape of the object's internal matrix representation
:return: (4,4)
:rtype: tuple
Each value within the ``SE3`` instance is a NumPy array of this shape.
"""
return (4, 4)
@property
def t(self):
"""
Translational component of SE(3)
:return: translational component of SE(3)
:rtype: numpy.ndarray
``x.t`` is the translational component of ``x`` as an array with
shape (3,). If ``len(x) > 1``, return an array with shape=(N,3).
.. runblock:: pycon
>>> from spatialmath import SE3
>>> x = SE3(1,2,3)
>>> x.t
array([1., 2., 3.])
>>> x = SE3([ SE3(1,2,3), SE3(4,5,6)])
>>> x.t
array([[1., 2., 3.],
[4., 5., 6.]])
:SymPy: supported
"""
if len(self) == 1:
return self.A[:3, 3]
else:
return np.array([x[:3, 3] for x in self.A])
import numpy as np
import matplotlib.pyplot as plt
from FUNCS import FNS
# variable class for body frame module
class MapVar:
def __init__(self, ax, limit, origin, ret_size):
self.ax = ax
self.origin = origin
self.center = origin
self.ret_size = ret_size
self.trk_change = 0
self.offset = 0
self.ax.set_xlim(0, limit[0])
self.ax.set_ylim(0, limit[1])
self.ax.set_zlim(0, limit[2])
# target variables
self.target = np.zeros(3)
self.estimate = | np.zeros(3) | numpy.zeros |
# pylint: disable=F841
"""
unit test for GAM
Author: <NAME>
Created on 08/07/2015
"""
import os
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from scipy.linalg import block_diag
import pytest
from statsmodels.tools.linalg import matrix_sqrt
from statsmodels.gam.smooth_basis import (
UnivariatePolynomialSmoother, PolynomialSmoother, BSplines,
GenericSmoothers, UnivariateCubicSplines, CyclicCubicSplines)
from statsmodels.gam.generalized_additive_model import (
GLMGam, LogitGam, make_augmented_matrix, penalized_wls)
from statsmodels.gam.gam_cross_validation.gam_cross_validation import (
MultivariateGAMCV, MultivariateGAMCVPath, _split_train_test_smoothers)
from statsmodels.gam.gam_penalties import (UnivariateGamPenalty,
MultivariateGamPenalty)
from statsmodels.gam.gam_cross_validation.cross_validators import KFold
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families.family import Gaussian
from statsmodels.genmod.generalized_linear_model import lm
sigmoid = np.vectorize(lambda x: 1.0 / (1.0 + np.exp(-x)))
def polynomial_sample_data():
"""A polynomial of degree 4
poly = ax^4 + bx^3 + cx^2 + dx + e
second der = 12ax^2 + 6bx + 2c
integral from -1 to 1 of second der^2 is
(288 a^2)/5 + 32 a c + 8 (3 b^2 + c^2)
the gradient of the integral is der
[576*a/5 + 32 * c, 48*b, 32*a + 16*c, 0, 0]
Returns
-------
poly : smoother instance
y : ndarray
generated function values, demeaned
"""
n = 10000
x = np.linspace(-1, 1, n)
y = 2 * x ** 3 - x
y -= y.mean()
degree = [4]
pol = PolynomialSmoother(x, degree)
return pol, y
def integral(params):
d, c, b, a = params
itg = (288 * a ** 2) / 5 + (32 * a * c) + 8 * (3 * b ** 2 + c ** 2)
itg /= 2
return itg
def grad(params):
d, c, b, a = params
grd = np.array([576 * a / 5 + 32 * c, 48 * b, 32 * a + 16 * c, 0])
grd = grd[::-1]
return grd / 2
def hessian(params):
hess = np.array([[576 / 5, 0, 32, 0],
[0, 48, 0, 0],
[32, 0, 16, 0],
[0, 0, 0, 0]
])
return hess / 2
def cost_function(params, pol, y, alpha):
# this should be the MSE or log likelihood value
lin_pred = np.dot(pol.basis, params)
gaussian = Gaussian()
expval = gaussian.link.inverse(lin_pred)
loglike = gaussian.loglike(y, expval)
# this is the value of the GAM penalty. For the example polynomial
itg = integral(params)
# return the cost function of the GAM for the given polynomial
return loglike - alpha * itg, loglike, itg
def test_gam_penalty():
"""
test the func method of the gam penalty
:return:
"""
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 4)
gp_score = gp.func(params)
itg = integral(params)
assert_allclose(gp_score, itg, atol=1.e-1)
def test_gam_gradient():
# test the gam gradient for the example polynomial
np.random.seed(1)
pol, y = polynomial_sample_data()
alpha = 1
smoother = pol.smoothers[0]
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=smoother)
for _ in range(10):
params = np.random.uniform(-2, 2, 4)
params = np.array([1, 1, 1, 1])
gam_grad = gp.deriv(params)
grd = grad(params)
assert_allclose(gam_grad, grd, rtol=1.e-2, atol=1.e-2)
def test_gam_hessian():
# test the deriv2 method of the gam penalty
np.random.seed(1)
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 5)
gam_der2 = gp.deriv2(params)
hess = hessian(params)
hess = np.flipud(hess)
hess = np.fliplr(hess)
assert_allclose(gam_der2, hess, atol=1.e-13, rtol=1.e-3)
def test_approximation():
np.random.seed(1)
poly, y = polynomial_sample_data()
alpha = 1
for _ in range(10):
params = np.random.uniform(-1, 1, 4)
cost, err, itg = cost_function(params, poly, y, alpha)
glm_gam = GLMGam(y, smoother=poly, alpha=alpha)
# TODO: why do we need pen_weight=1
gam_loglike = glm_gam.loglike(params, scale=1, pen_weight=1)
assert_allclose(err - itg, cost, rtol=1e-10)
assert_allclose(gam_loglike, cost, rtol=0.1)
def test_gam_glm():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
df = [10]
degree = [3]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = np.asarray(data_from_r.y_est)
alpha = 0.1 # chosen by trial and error
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='bfgs', max_start_irls=0,
disp=1, maxiter=10000)
y_gam0 = np.dot(bsplines.basis, res_glm_gam.params)
y_gam = np.asarray(res_glm_gam.fittedvalues)
assert_allclose(y_gam, y_gam0, rtol=1e-10)
# plt.plot(x, y_gam, '.', label='gam')
# plt.plot(x, y_mgcv, '.', label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, atol=1.e-2)
def test_gam_discrete():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.ybin.values
df = [10]
degree = [5]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = data_from_r.ybin_est
alpha = 0.00002
# gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=bsplines)
# lg_gam = LogitGam(y, bsplines.basis, penal=gp)
#
lg_gam = LogitGam(y, bsplines, alpha=alpha)
res_lg_gam = lg_gam.fit(maxiter=10000)
y_gam = np.dot(bsplines.basis, res_lg_gam.params)
y_gam = sigmoid(y_gam)
y_mgcv = sigmoid(y_mgcv)
# plt.plot(x, y_gam, label='gam')
# plt.plot(x, y_mgcv, label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.ylim(-0.4, 1.4)
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, rtol=1.e-10, atol=1.e-1)
def multivariate_sample_data(seed=1):
n = 1000
x1 = np.linspace(-1, 1, n)
x2 = np.linspace(-10, 10, n)
x = np.vstack([x1, x2]).T
np.random.seed(seed)
y = x1 * x1 * x1 + x2 + np.random.normal(0, 0.01, n)
degree1 = 4
degree2 = 3
degrees = [degree1, degree2]
pol = PolynomialSmoother(x, degrees)
return x, y, pol
def test_multivariate_penalty():
alphas = [1, 2]
weights = [1, 1]
np.random.seed(1)
x, y, pol = multivariate_sample_data()
univ_pol1 = UnivariatePolynomialSmoother(x[:, 0], degree=pol.degrees[0])
univ_pol2 = UnivariatePolynomialSmoother(x[:, 1], degree=pol.degrees[1])
gp1 = UnivariateGamPenalty(alpha=alphas[0], univariate_smoother=univ_pol1)
gp2 = UnivariateGamPenalty(alpha=alphas[1], univariate_smoother=univ_pol2)
with pytest.warns(UserWarning, match="weights is currently ignored"):
mgp = MultivariateGamPenalty(multivariate_smoother=pol, alpha=alphas,
weights=weights)
for i in range(10):
params1 = np.random.randint(-3, 3, pol.smoothers[0].dim_basis)
params2 = np.random.randint(-3, 3, pol.smoothers[1].dim_basis)
params = np.concatenate([params1, params2])
c1 = gp1.func(params1)
c2 = gp2.func(params2)
c = mgp.func(params)
assert_allclose(c, c1 + c2, atol=1.e-10, rtol=1.e-10)
d1 = gp1.deriv(params1)
d2 = gp2.deriv(params2)
d12 = np.concatenate([d1, d2])
d = mgp.deriv(params)
assert_allclose(d, d12)
h1 = gp1.deriv2(params1)
h2 = gp2.deriv2(params2)
h12 = block_diag(h1, h2)
h = mgp.deriv2(params)
assert_allclose(h, h12)
def test_generic_smoother():
x, y, poly = multivariate_sample_data()
alphas = [0.4, 0.7]
weights = [1, 1] # noqa: F841
gs = GenericSmoothers(poly.x, poly.smoothers)
gam_gs = GLMGam(y, smoother=gs, alpha=alphas)
gam_gs_res = gam_gs.fit()
gam_poly = GLMGam(y, smoother=poly, alpha=alphas)
gam_poly_res = gam_poly.fit()
assert_allclose(gam_gs_res.params, gam_poly_res.params)
def test_multivariate_gam_1d_data():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y
df = [10]
degree = [3]
bsplines = BSplines(x, degree=degree, df=df)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = data_from_r.y_est
# alpha is adjusted manually to reduce the discrepancy in fittedvalues
alpha = [0.0168 * 0.0251 / 2 * 500]
gp = MultivariateGamPenalty(bsplines, alpha=alpha) # noqa: F841
glm_gam = GLMGam(y, exog=np.ones((len(y), 1)), smoother=bsplines,
alpha=alpha)
# "nm" converges to a different params, "bfgs" params are close to pirls
# res_glm_gam = glm_gam.fit(method='nm', max_start_irls=0,
# disp=1, maxiter=10000, maxfun=5000)
res_glm_gam = glm_gam.fit(method='pirls', max_start_irls=0,
disp=1, maxiter=10000)
y_gam = res_glm_gam.fittedvalues
# plt.plot(x, y_gam, '.', label='gam')
# plt.plot(x, y_mgcv, '.', label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, atol=0.01)
def test_multivariate_gam_cv():
# SMOKE test
# no test is performed. It only checks that there is not any runtime error
def cost(x1, x2):
return np.linalg.norm(x1 - x2) / len(x1)
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
df = [10]
degree = [5]
bsplines = BSplines(x, degree=degree, df=df)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
alphas = [0.0251]
alphas = [2]
cv = KFold(3)
gp = MultivariateGamPenalty(bsplines, alpha=alphas) # noqa: F841
gam_cv = MultivariateGAMCV(smoother=bsplines, alphas=alphas, gam=GLMGam,
cost=cost, endog=y, exog=None, cv_iterator=cv)
gam_cv_res = gam_cv.fit() # noqa: F841
def test_multivariate_gam_cv_path():
def sample_metric(y1, y2):
return np.linalg.norm(y1 - y2) / len(y1)
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
se_from_mgcv = data_from_r.y_est_se # noqa: F841
y_mgcv = data_from_r.y_mgcv_gcv # noqa: F841
df = [10]
degree = [6]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
gam = GLMGam
alphas = [np.linspace(0, 2, 10)]
k = 3
cv = KFold(k_folds=k, shuffle=True)
# Note: kfold cv uses random shuffle
np.random.seed(123)
gam_cv = MultivariateGAMCVPath(smoother=bsplines, alphas=alphas, gam=gam,
cost=sample_metric, endog=y, exog=None,
cv_iterator=cv)
gam_cv_res = gam_cv.fit() # noqa: F841
glm_gam = GLMGam(y, smoother=bsplines, alpha=gam_cv.alpha_cv)
res_glm_gam = glm_gam.fit(method='irls', max_start_irls=0,
disp=1, maxiter=10000)
y_est = res_glm_gam.predict(bsplines.basis)
# plt.plot(x, y, '.', label='y')
# plt.plot(x, y_est, '.', label='y est')
# plt.plot(x, y_mgcv, '.', label='y mgcv')
# plt.legend()
# plt.show()
# The test compares to result obtained with GCV and not KFOLDS CV.
# This is because MGCV does not support KFOLD CV
assert_allclose(data_from_r.y_mgcv_gcv, y_est, atol=1.e-1, rtol=1.e-1)
# Note: kfold cv uses random shuffle
np.random.seed(123)
alpha_cv, res_cv = glm_gam.select_penweight_kfold(alphas=alphas, k_folds=3)
assert_allclose(alpha_cv, gam_cv.alpha_cv, rtol=1e-12)
def test_train_test_smoothers():
n = 6
x = np.zeros(shape=(n, 2))
x[:, 0] = range(6)
x[:, 1] = range(6, 12)
poly = PolynomialSmoother(x, degrees=[3, 3])
train_index = list(range(3))
test_index = list(range(3, 6))
train_smoother, test_smoother = _split_train_test_smoothers(poly.x, poly,
train_index,
test_index)
expected_train_basis = [[0., 0., 0., 6., 36., 216.],
[1., 1., 1., 7., 49., 343.],
[2., 4., 8., 8., 64., 512.]]
assert_allclose(train_smoother.basis, expected_train_basis)
expected_test_basis = [[3., 9., 27., 9., 81., 729.],
[4., 16., 64., 10., 100., 1000.],
[5., 25., 125., 11., 121., 1331.]]
assert_allclose(test_smoother.basis, expected_test_basis)
def test_get_sqrt():
n = 1000
np.random.seed(1)
x = np.random.normal(0, 1, (n, 3))
x2 = np.dot(x.T, x)
sqrt_x2 = matrix_sqrt(x2)
x2_reconstruction = np.dot(sqrt_x2.T, sqrt_x2)
assert_allclose(x2_reconstruction, x2)
def test_make_augmented_matrix():
np.random.seed(1)
n = 500
x = np.random.uniform(-1, 1, (n, 3))
s = np.dot(x.T, x)
y = np.array(list(range(n)))
w = np.random.uniform(0, 1, n)
nobs, n_columns = x.shape
# matrix_sqrt removes redundant rows,
# if alpha is zero, then no augmentation is needed
alpha = 0
aug_y, aug_x, aug_w = make_augmented_matrix(y, x, alpha * s, w)
expected_aug_x = x
assert_allclose(aug_x, expected_aug_x)
expected_aug_y = y
expected_aug_y[:nobs] = y
assert_allclose(aug_y, expected_aug_y)
expected_aug_w = w
assert_allclose(aug_w, expected_aug_w)
alpha = 1
aug_y, aug_x, aug_w = make_augmented_matrix(y, x, s, w)
rs = matrix_sqrt(alpha * s)
# alternative version to matrix_sqrt using cholesky is not available
# rs = sp.linalg.cholesky(alpha * s)
assert_allclose(np.dot(rs.T, rs), alpha * s)
expected_aug_x = np.vstack([x, rs])
assert_allclose(aug_x, expected_aug_x)
expected_aug_y = np.zeros(shape=(nobs + n_columns,))
expected_aug_y[:nobs] = y
assert_allclose(aug_y, expected_aug_y)
expected_aug_w = np.concatenate((w, [1] * n_columns), axis=0)
assert_allclose(aug_w, expected_aug_w)
def test_penalized_wls():
np.random.seed(1)
n = 20
p = 3
x = np.random.normal(0, 1, (n, 3))
y = x[:, 1] - x[:, 2] + np.random.normal(0, .1, n)
y -= y.mean()
weights = np.ones(shape=(n,))
s = np.random.normal(0, 1, (p, p))
pen_wls_res = penalized_wls(y, x, 0 * s, weights)
ls_res = lm.OLS(y, x).fit()
assert_allclose(ls_res.params, pen_wls_res.params)
def test_cyclic_cubic_splines():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results",
"cubic_cyclic_splines_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
x = data_from_r[['x0', 'x2']].values
y = data_from_r['y'].values
y_est_mgcv = data_from_r[['y_est']].values # noqa: F841
s_mgcv = data_from_r[['s(x0)', 's(x2)']].values
dfs = [10, 10]
ccs = CyclicCubicSplines(x, df=dfs)
alpha = [0.05 / 2, 0.0005 / 2]
# TODO: if alpha changes in pirls this should be updated
gam = GLMGam(y, smoother=ccs, alpha=alpha)
gam_res = gam.fit(method='pirls')
s0 = np.dot(ccs.basis[:, ccs.mask[0]],
gam_res.params[ccs.mask[0]])
# TODO: Mean has to be removed
# removing mean could be replaced by options for intercept handling
s0 -= s0.mean()
s1 = np.dot(ccs.basis[:, ccs.mask[1]],
gam_res.params[ccs.mask[1]])
s1 -= s1.mean() # TODO: Mean has to be removed
# plt.subplot(2, 1, 1)
# plt.plot(x[:, 0], s0, '.', label='s0')
# plt.plot(x[:, 0], s_mgcv[:, 0], '.', label='s0_mgcv')
# plt.legend(loc='best')
#
# plt.subplot(2, 1, 2)
# plt.plot(x[:, 1], s1, '.', label='s1_est')
# plt.plot(x[:, 1], s_mgcv[:, 1], '.', label='s1_mgcv')
# plt.legend(loc='best')
# plt.show()
assert_allclose(s0, s_mgcv[:, 0], atol=0.02)
| assert_allclose(s1, s_mgcv[:, 1], atol=0.33) | numpy.testing.assert_allclose |
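# --- Illustrative usage sketch (not part of the test module above) ---
# A minimal GLMGam fit on synthetic data, mirroring the pattern used in
# test_gam_glm above (BSplines smoother + GLMGam). The alpha value and the
# synthetic signal are arbitrary assumptions, not tuned like the tests' values.
import numpy as np
from statsmodels.gam.smooth_basis import BSplines
from statsmodels.gam.generalized_additive_model import GLMGam

x = np.linspace(-1, 1, 200)
y = np.sin(3 * x) + np.random.normal(0, 0.1, x.shape[0])
bs = BSplines(x, df=[10], degree=[3], include_intercept=True)
res = GLMGam(y, smoother=bs, alpha=0.1).fit()
print(res.fittedvalues[:5])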
"""
Author: <NAME>, <NAME>
Email: <EMAIL>, <EMAIL>
The code is adapted from
https://github.com/AtsushiSakai/PythonRobotics/tree/master/
PathTracking/model_predictive_speed_and_steer_control
"""
import numpy as np
import cvxpy
from cvxpy.expressions import constants
from pylot.control.mpc.utils import compute_curvature, Vehicle, Trajectory
class ModelPredictiveController:
def __init__(self, config):
self.reference = Trajectory(**config['reference'])
self.vehicle = Vehicle(config['vehicle'])
self.path_length = len(self.reference.s_list)
self.path_index = 0
self.t = 0.0 # [s]
initial_condition = {
't_list': [self.t], # Initial time [s]
's_list': self.reference.s_list[0:1], # Initial arc distance [m]
'x_list': self.reference.x_list[0:1], # Initial X coordinate [m]
'y_list': self.reference.y_list[0:1], # Initial Y coordinate [m]
'k_list': self.reference.k_list[0:1], # Initial curvature [1/m]
'vel_list': self.reference.vel_list[0:1], # Initial velocity [m/s]
'yaw_list': self.reference.yaw_list[0:1], # Initial yaw [rad]
'accel_list': np.asarray([]), # Initial acceleration [m/s2]
'steer_list': | np.asarray([]) | numpy.asarray |
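# --- Illustrative config sketch (not part of the snippet above) ---
# The controller above expects a config dict with 'reference' and 'vehicle'
# entries. The 'reference' keys below are inferred from the initial_condition
# dict in __init__ and are an assumption (additional keys such as accel_list /
# steer_list may be required); the 'vehicle' contents are also an assumption.
import numpy as np

n = 50
config = {
    'reference': {
        't_list': list(np.linspace(0.0, 5.0, n)),    # timestamps [s]
        's_list': list(np.linspace(0.0, 25.0, n)),   # arc distance [m]
        'x_list': list(np.linspace(0.0, 25.0, n)),   # X coordinates [m]
        'y_list': [0.0] * n,                         # Y coordinates [m]
        'k_list': [0.0] * n,                         # curvature [1/m]
        'vel_list': [5.0] * n,                       # velocity [m/s]
        'yaw_list': [0.0] * n,                       # yaw [rad]
    },
    'vehicle': {},  # vehicle parameters; exact fields depend on Vehicle
}
# controller = ModelPredictiveController(config)  # requires pylot to be installed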
import dataclasses
from functools import lru_cache
import jax.numpy as jnp
import numpy as np
import scipy.sparse as sp
from .typing import Size, Size3, Spacing, Optional, List, Union, Dict, Op, Tuple
from .utils import curl_fn, yee_avg, fix_dataclass_init_docs, Box
try:
DPHOX_IMPORTED = True
from dphox.pattern import Pattern
except ImportError:
DPHOX_IMPORTED = False
@fix_dataclass_init_docs
@dataclasses.dataclass
class Port:
"""Port to define where sources and measurements lie in photonic simulations.
A port defines the center and angle/orientation in a design.
Args:
x: x position of the port
y: y position of the port
a: angle (orientation) of the port (in degrees)
w: the width of the port (specified in design, mostly used for simulation)
z: z position of the port (not specified in design, mostly used for simulation)
h: the height of the port (not specified in design, mostly used for simulation)
"""
x: float
y: float
a: float = 0
w: float = 0
z: float = 0
h: float = 0
def __post_init__(self):
self.xy = (self.x, self.y)
self.xya = (self.x, self.y, self.a)
self.xyz = (self.x, self.y, self.z)
self.center = np.array(self.xyz)
@property
def size(self):
if np.mod(self.a, 90) != 0:
raise ValueError(f"Require angle to be a multiple a multiple of 90 but got {self.a}")
return np.array((self.w, 0, self.h)) if np.mod(self.a, 180) != 0 else np.array((0, self.w, self.h))
class Grid:
def __init__(self, size: Size, spacing: Spacing, eps: Union[float, np.ndarray] = 1.0):
"""Grid object accomodating any electromagnetic simulation (FDFD, FDTD, BPM, etc.)
Args:
size: Tuple of size 1, 2, or 3 representing the size of the grid
spacing: Spacing (microns) between each pixel along each axis (must be same dim as `grid_shape`)
eps: Relative permittivity
"""
self.size = np.asarray(size)
self.spacing = spacing * np.ones(len(size)) if isinstance(spacing, int) or isinstance(spacing, float) else np.asarray(spacing)
self.ndim = len(size)
if not self.ndim == self.spacing.size:
raise AttributeError(f'Require size.size == ndim == spacing.size but got '
f'{self.size.size} != {self.spacing.size}')
self.shape = np.around(self.size / self.spacing).astype(int)
self.shape3 = np.hstack((self.shape, np.ones((3 - self.ndim,), dtype=self.shape.dtype)))
self.spacing3 = np.hstack((self.spacing, np.ones((3 - self.ndim,), dtype=self.spacing.dtype) * np.inf))
self.size3 = np.hstack((self.size, np.zeros((3 - self.ndim,), dtype=self.size.dtype)))
self.center = self.size3 / 2
self.field_shape = (3, *self.shape3)
self.n = np.prod(self.shape)
self.eps: np.ndarray = np.ones(self.shape) * eps if not isinstance(eps, np.ndarray) else eps
if not tuple(self.shape) == self.eps.shape:
raise AttributeError(f'Require grid.shape == eps.shape but got '
f'{self.shape} != {self.eps.shape}')
self.cells = [(self.spacing[i] * np.ones((self.shape[i],)) if self.ndim > 1 else self.spacing * np.ones(self.shape))
if i < self.ndim else np.ones((1,)) for i in range(3)]
self.pos = [np.hstack((0, np.cumsum(dx))) if dx.size > 1 else np.asarray((0,)) for dx in self.cells]
self.components = []
# used to handle special functions of waveguide-based components
self.port: Dict[str, Port] = {}
def fill(self, height: float, eps: float) -> "Grid":
"""Fill grid up to `height`, typically used for substrate + cladding epsilon settings
Args:
height: Maximum final dimension of the fill operation (`y` if 2D, `z` if 3D).
eps: Relative permittivity to fill.
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
if height > 0:
self.eps[..., :int(height / self.spacing[-1])] = eps
else:
self.eps = np.ones_like(self.eps) * eps
return self
def add(self, component: "Pattern", eps: float, zmin: float = None, thickness: float = None) -> "Grid":
"""Add a component to the grid.
Args:
component: component to add
eps: permittivity of the component being added (isotropic only, for now)
zmin: minimum z extent of the component
thickness: component thickness (`zmax = zmin + thickness`)
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
b = component.bounds
if not (b[0] >= 0 and b[1] >= 0 and b[2] <= self.size[0] and b[3] <= self.size[1]):
raise ValueError('The pattern must have min x, y >= 0 and max x, y less than size.')
self.components.append(component)
mask = component.mask(self.shape[:2], self.spacing)[:self.eps.shape[0], :self.eps.shape[1]]
if self.ndim == 2:
self.eps[mask == 1] = eps
else:
zidx = (int(zmin / self.spacing[0]), int((zmin + thickness) / self.spacing[1]))
self.eps[mask == 1, zidx[0]:zidx[1]] = eps
self.port = {port_name: Port(*port.xya, port.w, zmin + thickness / 2, thickness)
for port_name, port in component.port.items()}
return self
def set_eps(self, center: Size3, size: Size3, eps: float):
"""Set the region specified by :code:`center`, :code:`size` (in grid units) to :code:`eps`.
Args:
center: Center of the region.
size: Size of the region.
eps: Epsilon (relative permittivity) to set.
Returns:
The modified :code:`Grid` for chaining (:code:`self`)
"""
s = self.slice(center, size, squeezed=True)
eps_3d = self.eps.reshape(self.shape3)
eps_3d[s] = eps
self.eps = eps_3d.squeeze()
return self
def mask(self, center: Size3, size: Size3):
"""Given a size and center, this function defines a mask which sets pixels in the region corresponding to
:code:`center` and :code:`size` to 1 and all other pixels to zero.
Args:
center: position of the mask in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
size: size of the mask box in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
Returns:
The mask array of size :code:`grid.shape`.
"""
s = self.slice(center, size, squeezed=True)
mask = np.zeros(self.shape3)
mask[s] = 1
return mask.squeeze()
def reshape(self, v: np.ndarray) -> np.ndarray:
"""A simple method to reshape flat 3d field array into the grid shape
Args:
v: vector of size :code:`3n` to rearrange into array of size :code:`(3, nx, ny, nz)`
Returns:
The reshaped array
"""
return v.reshape((3, *self.shape3))
def slice(self, center: Size3, size: Size3, squeezed: bool = True):
"""Pick a slide of this grid
Args:
center: center of the slice in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
size: size of the slice in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
squeezed: whether to squeeze the slice to the minimum dimension (the squeeze order is z, then y).
Returns:
The slices to access the array
"""
# if self.ndim == 1:
# raise ValueError(f"Simulation dimension ndim must be 2 or 3 but got {self.ndim}.")
if not len(size) == 3:
raise ValueError(f"For simulation that is 3d, must provide size arraylike of size 3 but got {size}")
if not len(center) == 3:
raise ValueError(f"For simulation that is 3d, must provide center arraylike of size 3 but got {center}")
c = np.around(np.asarray(center) / self.spacing3).astype(int) # assume isotropic for now...
shape = np.around(np.asarray(size) / self.spacing3).astype(int)
s0, s1, s2 = shape[0] // 2, shape[1] // 2, shape[2] // 2
c0 = c[0] if squeezed else slice(c[0], c[0] + 1)
c1 = c[1] if squeezed else slice(c[1], c[1] + 1)
c2 = c[2] if squeezed else slice(c[2], c[2] + 1)
# if s0 == s1 == s2 == 0:
# raise ValueError(f"Require the size result in a nonzero-sized shape, but got a single point in the grid"
# f"(i.e., the size {size} may be less than the spacing {self.spacing3})")
return (slice(c[0] - s0, c[0] - s0 + shape[0]) if shape[0] > 0 else c0,
slice(c[1] - s1, c[1] - s1 + shape[1]) if shape[1] > 0 else c1,
slice(c[2] - s2, c[2] - s2 + shape[2]) if shape[2] > 0 else c2)
def view_fn(self, center: Size3, size: Size3, use_jax: bool = True):
"""Return a function that views a field at specific region.
The view function is specified by center and size in the grid. This is typically used for
mode-based sources and measurements. Once a slice is found, the fields need to be reoriented
such that the field components point in the right direction despite a change in axis assignment.
This function will handle this logic automatically in 1d, 2d, and 3d cases.
Args:
center: Center of the region
size: Size of the region
use_jax: Use jax
Returns:
A view callable function that orients the field and finds the appropriate slice.
"""
if np.count_nonzero(size) == 3:
raise ValueError(f"At least one element of size must be zero, but got {size}")
s = self.slice(center, size, squeezed=False)
xp = jnp if use_jax else np
# Find the view axis (the poynting direction)
view_axis = 0
for i in range(self.ndim):
if size[i] == 0:
view_axis = i
# Find the reorientation of field axes based on view_axis
# 0 -> (1, 2, 0)
# 1 -> (0, 2, 1)
# 2 -> (0, 1, 2)
axes = [
np.asarray((1, 2, 0), dtype=int),
np.asarray((0, 2, 1), dtype=int),
np.asarray((0, 1, 2), dtype=int)
][view_axis]
def view(field):
oriented_field = xp.stack(
(field[axes[0]].reshape(self.shape3),
field[axes[1]].reshape(self.shape3),
field[axes[2]].reshape(self.shape3))
) # orient the field by axis (useful for mode calculation)
return oriented_field[:, s[0], s[1], s[2]].transpose((0, *tuple(1 + axes)))
return view
def mask_fn(self, size: Size3, center: Optional[Size3] = None):
"""Given a box with :code:`size` and :code:`center`, return a function that sets pixels in :code:`rho`,
where :code:`rho.shape == grid.eps.shape`, outside the box to :code:`eps`.
This is important in inverse design to avoid modifying the material region near the source and measurement
regions.
Args:
center: position of the mask in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
size: size of the mask box in (x, y, z) in the units of the simulation (note: NOT in terms of array index)
Returns:
The mask function
"""
rho_init = self.eps
center = self.center if center is None else center
mask = self.mask(center, size)
return lambda rho: jnp.array(rho_init) * (1 - mask) + rho * mask
def block_design(self, waveguide: Box, wg_height: Optional[float] = None, sub_eps: float = 1,
sub_height: float = 0, gap: float = 0, block: Optional[Box] = None, sep: Size = (0, 0),
vertical: bool = False, rib_y: float = 0):
"""A helper function for designing a useful port or cross section for a mode solver.
Args:
waveguide: The base waveguide material and size in the form of :code:`Box`.
wg_height: The waveguide height.
sub_eps: The substrate epsilon (defaults to air)
sub_height: The height of the substrate (or the min height of the waveguide built on top of it)
gap: If a positive coupling gap is specified, a pair of base waveguides is
created, separated by :code:`gap`.
block: Perturbing block that is to be aligned either vertically or horizontally with waveguide (MEMS).
sep: Separation of the block from the base waveguide layer.
vertical: Whether the perturbing block moves vertically, or laterally otherwise.
rib_y: Rib section.
Returns:
The resulting :code:`Grid` with the modified :code:`eps` property.
"""
if rib_y > 0:
self.fill(rib_y + sub_height, waveguide.eps)
self.fill(sub_height, sub_eps)
waveguide.align(self.center)
if wg_height:
waveguide.valign(wg_height)
else:
wg_height = waveguide.min[1]
sep = (sep, sep) if not isinstance(sep, Tuple) else sep
d = gap / 2 + waveguide.size[0] / 2 if gap > 0 else 0
waveguides = [waveguide.copy.translate(-d), waveguide.copy.translate(d)]
blocks = []
if vertical:
blocks = [block.copy.align(waveguides[0]).valign(waveguides[0]).translate(dy=sep[0]),
block.copy.align(waveguides[1]).valign(waveguides[1]).translate(dy=sep[1])]
elif block is not None:
blocks = [block.copy.valign(wg_height).halign(waveguides[0], left=False).translate(-sep[0]),
block.copy.valign(wg_height).halign(waveguides[1]).translate(sep[1])]
for wg in waveguides + blocks:
self.set_eps((wg.center[0], wg.center[1], 0), (wg.size[0], wg.size[1], 0), wg.eps)
return self
class YeeGrid(Grid):
def __init__(self, size: Size, spacing: Spacing, eps: Union[float, np.ndarray] = 1,
bloch_phase: Union[Size, float] = 0.0, pml: Optional[Size] = None, pml_sep: int = 5,
pml_params: Size3 = (4, -16, 1.0), name: str = 'simgrid'):
"""The base :code:`YeeGrid` class (adding things to :code:`Grid` like Yee grid support, Bloch phase,
PML shape, etc.).
Args:
size: Tuple of size 1, 2, or 3 representing the size of the grid
spacing: Spacing (microns) between each pixel along each axis (must be same dim as `grid_shape`)
eps: Relative permittivity :math:`\\epsilon_r`
bloch_phase: Bloch phase (generally useful for angled scattering sims)
pml: Perfectly matched layer (PML) of thickness on both sides of the form :code:`(x_pml, y_pml, z_pml)`
pml_sep: Specifies the number of pixels that any source must be placed away from a PML region.
pml_params: The parameters of the form :code:`(exp_scale, log_reflectivity, pml_eps)`.
"""
super(YeeGrid, self).__init__(size, spacing, eps)
self.pml = pml
self.pml_sep = pml_sep
self.pml_shape = pml if pml is None else (np.asarray(pml, dtype=float) / self.spacing).astype(int)
self.pml_params = pml_params
self.name = name
if self.pml_shape is not None:
if np.any(self.pml_shape <= 3) or | np.any(self.pml_shape >= self.shape // 2) | numpy.any |
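# --- Illustrative usage sketch (not part of the snippet above) ---
# A minimal Grid workflow using only the methods defined above (assumed to be
# in scope, e.g. appended to the same module): build a 2D grid, fill a
# substrate, set a rectangular region, and take a mask. Sizes and
# permittivities below are arbitrary example values.
import numpy as np

grid = Grid(size=(4, 3), spacing=0.05, eps=1.0)                 # 4 x 3 micron grid, 50 nm pixels
grid.fill(height=1.0, eps=2.1)                                  # substrate up to y = 1 micron
grid.set_eps(center=(2, 1.5, 0), size=(1, 0.3, 0), eps=12.0)    # rectangular core region
mask = grid.mask(center=(2, 1.5, 0), size=(1, 0.3, 0))
print(grid.eps.shape, mask.sum())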
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import itertools
import os
import logging
from collections import OrderedDict
import string
import numpy as np
import contextlib
import torch
logger = logging.getLogger(__name__)
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
encoders,
indexed_dataset,
CatLanguagePairDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
TokenBlockDataset,
RoundRobinZipDatasets,
LanguageClozeDataset,
CvtClozeDataset,
FewSubsampleDataset,
)
from fairseq.data.language_cvt_dataset import collate_dyn_src_tokens, collate_dyn_targets
from fairseq.models import FairseqMultiModel
from .translation import TranslationTask
from . import FairseqTask, register_task
from .translation_from_pretrained_bart import TranslationFromPretrainedBARTTask
from fairseq import options
from fairseq import utils
def load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False, append_source_id=False,
num_buckets=0, mono=None,
blocks=False,
categories=False,
cat_dict=None,
process_target=True,
args=None,
task_type=None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
end_token_src = None
end_token_tgt = None
if append_source_id:
if mono is not None:
end_token_tgt = tgt_dict.index('[{}]'.format(mono))
end_token_src = src_dict.index('[{}]'.format(mono))
else:
end_token_tgt = tgt_dict.index("[{}]".format(tgt))
end_token_src = src_dict.index('[{}]'.format(src))
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if blocks:
# create continuous blocks of tokens
src_dataset = TokenBlockDataset(
src_dataset,
src_dataset.sizes,
50000, # one less for <s>
pad=src_dict.pad(),
eos=end_token_src,
break_mode="complete_doc", document_sep_len=1,
)
reserved_on_truncation = 1
reserved_on_truncation += 1 if prepend_bos else 0
reserved_on_truncation += 1 if append_source_id else 0
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - reserved_on_truncation,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = None
if process_target:
tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
if blocks:
tgt_dataset = TokenBlockDataset(
tgt_dataset,
tgt_dataset.sizes,
10000, # one less for <s>
pad=tgt_dict.pad(),
eos=end_token_tgt,
break_mode="complete_doc", document_sep_len=1,
)
print(tgt_dataset)
print('blocks', blocks, len(src_datasets) ,len(tgt_datasets))
if tgt_dataset is not None:
#if truncate_source:
# tgt_dataset = AppendTokenDataset(
# TruncateDataset(
# StripTokenDataset(tgt_dataset, tgt_dict.eos()),
# max_target_positions - reserved_on_truncation,
# ),
# tgt_dict.eos(),
# )
tgt_datasets.append(tgt_dataset)
logger.info('{} {} {}-{} {} examples'.format(
data_path, split_k, src, tgt, len(src_datasets[-1])
))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
# haven't checked this (I won't use it), so flag if coming here
raise NotImplementedError
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
if mono is not None:
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(mono)))
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(mono)))
eos = tgt_dict.index('[{}]'.format(mono))
elif 'cvt' in task_type :
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
#eos = src_dict.index('[{}]'.format(src))
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
eos = tgt_dict.index('[{}]'.format(tgt))
else:
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
eos = tgt_dict.index('[{}]'.format(tgt))
print('*\t define eos: ', eos)
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
### do subsample and prepare few instances for few shot fine-tuning
if args.cvt_few > 0 :
assert args.cvt_few > 0, "For cvt few ratio should be given"
assert args.cvt_few_ratio <= 1
ratio = args.cvt_few_ratio if task_type else args.cvt_mono_ratio
few = (np.ceil(args.cvt_few / 3).astype(int) if split=='valid' \
else (np.ceil(args.cvt_few * (2/3))).astype(int)) \
if task_type else None
# assume that src/tgt must have same nb!:
if task_type=='cvt':
if split=='valid': #if valid total size and few inst should be same
actual_size = | np.ceil(args.cvt_few / 3) | numpy.ceil |
# -*- coding: utf-8 -*-
import numpy as np
def load_streamflow(path):
"""load streamflow into memory
Args:
path (str|DataFrame): path of streamflow csv file, or pandas DataFrame
Returns:
tuple: (date of np.datetime64, streamflow of float)
"""
if isinstance(path, str):
date, Q = np.loadtxt(
path,
delimiter=",",
skiprows=1,
unpack=True,
dtype=[("date", "datetime64[D]"), ("Q", float)],
converters={0: np.datetime64},
encoding="utf8",
)
year = date.astype("datetime64[Y]").astype(int) + int(
str(np.datetime64(0, "Y"))
)
month = date.astype("datetime64[M]").astype(int) % 12 + 1
day = (date - date.astype("datetime64[M]")).astype(int) + 1
date = np.rec.fromarrays(
[year, month, day], dtype=[("Y", "i4"), ("M", "i4"), ("D", "i4")]
)
else:
df_date = path.iloc[:, 0].astype("datetime64")
date = np.rec.fromarrays(
[df_date.dt.year, df_date.dt.month, df_date.dt.day],
dtype=[("Y", "i4"), ("M", "i4"), ("D", "i4")],
)
Q = path.iloc[:, 1].values.astype(float)
return clean_streamflow(date, Q)
def clean_streamflow(date, Q):
Q[np.isnan(Q)] = 0
Q = np.abs(Q)
year = date["Y"]
year_unique = np.unique(year)
year_delete = clean_streamflow_jit(year, year_unique, Q)
idx_delete = np.isin(year, year_delete)
return Q[~idx_delete], date[~idx_delete]
def clean_streamflow_jit(year, year_unique, Q):
year_delete = []
for y in year_unique:
if (Q[year == y] >= 0).sum() < 120:
year_delete.append(y)
return year_delete
def moving_average(x, w):
res = np.convolve(x, np.ones(w)) / w
return res[w - 1 : -w + 1]
def multi_arange_steps(starts, stops, steps):
pos = 0
cnt = np.sum((stops - starts + steps - np.sign(steps)) // steps, dtype=np.int64)
res = np.zeros((cnt,), dtype=np.int64)
for i in range(starts.size):
v, stop, step = starts[i], stops[i], steps[i]
if step > 0:
while v < stop:
res[pos] = v
pos += 1
v += step
elif step < 0:
while v > stop:
res[pos] = v
pos += 1
v += step
assert pos == cnt
return res
def multi_arange(starts, stops):
pos = 0
cnt = np.sum(stops - starts, dtype=np.int64)
res = np.zeros((cnt,), dtype=np.int64)
for i in range(starts.size):
num = stops[i] - starts[i]
res[pos : pos + num] = np.arange(starts[i], stops[i])
pos += num
return res
def NSE(Q_obs, Q_sim):
SS_res = np.sum( | np.square(Q_obs - Q_sim) | numpy.square |
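# --- Illustrative usage sketch (not part of the snippet above) ---
# Quick checks of the helper functions defined above on toy arrays; assumes
# moving_average and multi_arange from the module above are in scope.
import numpy as np

q = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
print(moving_average(q, 3))          # centred 3-point mean: [2.33.., 4.66.., 9.33..]
starts = np.array([0, 5])
stops = np.array([3, 8])
print(multi_arange(starts, stops))   # -> [0 1 2 5 6 7]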
import os
import gzip
import shutil
import cv2
import numpy as np
import SimpleITK
class ImageParser():
def __init__(self, path_utrech='../Utrecht/subjects',
path_singapore='../Singapore/subjects',
path_amsterdam='../GE3T/subjects'):
self.path_utrech = path_utrech
self.path_singapore = path_singapore
self.path_amsterdam = path_amsterdam
def get_all_image_paths(self):
paths = []
for root, dirs, files in os.walk('../'):
for file in files:
filepath = root + '/' + file
if file.endswith('.gz') and file[:-3] not in files:
with gzip.open(filepath, 'rb') as f_in:
with open(filepath[:-3], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if file.startswith('brain') and file.endswith('.nii'):
paths.append(filepath)
return paths
def get_all_images_and_labels(self):
utrech_dataset = self.get_images_and_labels(self.path_utrech)
singapore_dataset = self.get_images_and_labels(self.path_singapore)
amsterdam_dataset = self.get_images_and_labels(self.path_amsterdam)
return utrech_dataset, singapore_dataset, amsterdam_dataset
def get_images_and_labels(self, path):
full_dataset = []
data_and_labels = {}
package_limit = 8
for root, dirs, files in os.walk(path):
for file in files:
filepath = os.path.join(root, file)
key = self.get_key(file)
if file == 'wmh.nii.gz':
data_and_labels[key] = filepath
length = len(data_and_labels)
if '/pre/' in filepath and self.is_file_desired(file) and length < package_limit and length > 0:
data_and_labels[key] = filepath
if len(data_and_labels) == package_limit:
full_dataset.append(data_and_labels.copy())
print(data_and_labels)
data_and_labels.clear()
return full_dataset
def get_all_sets_paths(self, dataset_paths):
t1 = [row["t1_coreg_brain"] for row in dataset_paths]
flair = [row["new_flair_enhanced"] for row in dataset_paths]
labels = [row["label"] for row in dataset_paths]
common_mask = [row["common_mask"] for row in dataset_paths]
return t1, flair, labels, common_mask
def preprocess_dataset_t1(self, data_t1, slice_shape, masks, remove_pct_top, remove_pct_bot):
data_t1 = np.asanyarray(data_t1) * np.asanyarray(masks)
resized_t1 = self.resize_slices([data_t1], slice_shape)
resized_t1 = self.remove_top_bot_slices([ | np.asanyarray(resized_t1) | numpy.asanyarray |
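# --- Illustrative usage sketch (not part of the snippet above) ---
# How the parser above is meant to be driven. It assumes the dataset directory
# layout from __init__ ('../Utrecht', '../Singapore', '../GE3T') exists.
parser = ImageParser()
utrecht, singapore, amsterdam = parser.get_all_images_and_labels()
print(len(utrecht), len(singapore), len(amsterdam))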
# Library of routines for working with ASKAPsoft Self Calibration data, e.g. cont_gains_cal_SB10944_GASKAP_M344-11B_T0-0A.beam00.tab.
# These are mostly focussed around plotting the phase solutions and identifying jumps or failures in these solutions. Note that this module requires CASA support.
# The code is based on work by <NAME> and <NAME>.
# Author <NAME>
# Date 18 Oct 2020
import glob
import os
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from casacore.tables import *
import seaborn as sns
class SelfCalSolutions:
# phase is [time, beam, ant, pol]
def __init__(self):
"""Initialises parameters for reading a selfcal table
"""
self.nsol = None
self.nant = None
self.nbeam = 36
self.npol = None
# selfcal is an array in order [time, beam, ant, pol] of phase angle and amplitude value
self.selfcal = None
self.selfcal_times = None
self.selfcal_flags = None
self.field = None
def load(self, base_dir):
flist = glob.glob(base_dir + "/cont_gains*tab")
flist.sort()
filename = flist[0]
print (filename)
pos = filename.find("beam")
if pos == -1:
raise Exception("Can't find beam information in " + filename)
wildcard = filename[:pos+4] + "??" + filename[pos+6:]
flist = glob.glob(wildcard)
flist.sort()
first_beam = flist[0]
tb = table(first_beam, readonly=True, ack=False)
t_vals = tb.getcol("TIME")
sc_vals = tb.getcol("GAIN",1,1)
self.selfcal_times = t_vals[1:]
self.nsol = t_vals.shape[0] - 1
gain_shape = sc_vals.shape
self.npol = gain_shape[3]
self.nant = gain_shape[2]
tb.close()
self.selfcal = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=complex)
self.selfcal_flags = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=bool)
for beam in range(self.nbeam):
fname = wildcard.replace("??", "%02d" %(beam))
if os.path.exists(fname) == False:
continue
tb = table(fname, readonly=True, ack=False)
t_vals = tb.getcol("TIME", 1, self.nsol)
sc_vals = tb.getcol("GAIN", 1, self.nsol)
flag_vals = tb.getcol("GAIN_VALID", 1, self.nsol)
for index in range(self.nsol):
self.selfcal[index, beam] = sc_vals[index, 0, :, :]
self.selfcal_flags[index, beam] = np.invert(flag_vals[index, 0, :, :])
self.selfcal[np.where(self.selfcal_flags)] = np.nan
self.field = os.path.basename(base_dir)
print("Read %d solutions, %d antennas, %d beams, %d polarisations" %(self.nsol, self.nant, self.nbeam, self.npol))
def plotGains(self, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(self.selfcal)
phases = np.angle(self.selfcal, deg=True)
times = np.array(range(self.nsol))
plt.subplot(1, 1, 1)
if self.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
for beam in range(self.nbeam):
plt.plot(times, phases[:,beam,ant,0], marker=None, label="beam %d" %(beam))
# plt.plot(times, phases[:,ant,beam,1], marker=None, color="red")
plt.ylim(-200.0, 200.0)
#rms = np.sqrt(np.mean(np.square(phases[:,beam,ant,0])))
#print ("ant ak{:02d} beam {:02d} rms={:.2f}".format(ant+1, beam, rms))
plt.legend()
plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile)
plt.close()
def _plot_ant_phase(sc, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(sc.selfcal)
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
ax = plt.subplot(1, 1, 1)
if sc.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
low = | np.nanpercentile(phases[:,:,ant,0], 2.5, axis=(1)) | numpy.nanpercentile |
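# --- Illustrative usage sketch (not part of the snippet above) ---
# Load a directory of cont_gains_cal_*.tab selfcal tables and plot the phase
# solutions for the first antenna. The path is a placeholder (assumption) and
# the casacore tables module must be available, as noted above.
sc = SelfCalSolutions()
sc.load("/path/to/selfcal/field_dir")       # directory containing cont_gains*tab files
sc.plotGains(0, outFile="ak01_phases.png")  # antenna index 0 -> ak01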
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 10:08:14 2020
@author: dmattox
"""
import os, collections, glob, time
import numpy as np
import scipy.spatial
import Zernike
np.random.seed(27)
def getMol2Pnts(mol2FH):
''' Reads in the mol2 file specified by the argument and returns a list of the coordinates of all atoms within that mol2 file '''
out = []
with open(mol2FH, 'r') as inFH:
strt = False
for line in inFH:
if strt == False: # Stil looking for the start of the points
if line.strip() == '@<TRIPOS>ATOM':
strt = True
continue
else:
line = line.split('\t')
out.append([float(c) for c in line[2:5]])
return out
def getCentroid(atmArr):
'''Given an array of coordinates, returns the centroid of their coordinates as an array'''
out = np.array([0,0,0], dtype = 'float32') # Holds the centroid
for a in atmArr:
out += a
out = out/len(atmArr)
return out
def eucDist(coord1, coord2):
''' Calculate the euclidean distance between a pair of 3D coordinates in separate lists '''
return np.sqrt((coord1[0]-coord2[0])**2 + (coord1[1] - coord2[1])**2 + (coord1[2] - coord2[2])**2)
#################
res = 64
momNum = 5
clusterRun = False
if clusterRun:
path = os.getcwd()
if path[-1] != '/': path += '/'
pocketDir = '/dartfs-hpc/rc/home/y/f002tsy/cbklab/Mattox/glycans/unilec3d/structures/bSites/bsitePockets/'
outFile = path + '3DZD_' + str(momNum) + 'ord.csv'
momentDir = '/dartfs-hpc/rc/home/y/f002tsy/cbklab/Mattox/glycans/unilec3d/voxels/moments'+ str(momNum) +'/'
structDir = '/dartfs-hpc/rc/home/y/f002tsy/cbklab/Mattox/glycans/unilec3d/structures/'
else:
pocketDir = '/Users/dmattox/cbk/glycan_binding/data/unilectin/structures/bSites/bsitePockets/'
outFile = '/Users/dmattox/cbk/glycan_binding/analysis/prelim/prelim3/3DZD_test'+ str(momNum) +'.csv'
momentDir = '/Users/dmattox/cbk/glycan_binding/analysis/prelim/prelim3/zernikeMoments'+ str(momNum) +'/'
structDir = '/Users/dmattox/cbk/glycan_binding/data/unilectin/structures/'
if not os.path.exists(momentDir):
os.makedirs(momentDir)
#################
# pdb = '2CL8'
# bs = 'BGC:A:1247'
gridPnts = [] # initialize grid pnts and KDTree for grid
for x in xrange(res):
for y in xrange(res):
for z in xrange(res):
gridPnts.append([x,y,z])
gridTree = scipy.spatial.KDTree( | np.array(gridPnts) | numpy.array |
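# --- Illustrative usage sketch (not part of the script above) ---
# Quick check of the small geometry helpers defined above on toy coordinates;
# assumes getCentroid, eucDist, and numpy (np) from the script are in scope.
pnts = np.array([[0., 0., 0.], [2., 0., 0.], [0., 2., 0.], [0., 0., 2.]])
print(getCentroid(pnts))              # -> [0.5 0.5 0.5]
print(eucDist([0, 0, 0], [3, 4, 0]))  # -> 5.0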
import numpy as np
import pandas as pd
import tensorflow as tf
import scipy.misc
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Sequential
from keras.layers import Input, Dropout, Activation, LSTM, Conv2D, Conv2DTranspose, Dense, TimeDistributed, Flatten, Reshape, Cropping2D, GaussianNoise, Concatenate, BatchNormalization, SeparableConv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.losses import mean_squared_error
from keras.optimizers import Adadelta, RMSprop
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
#K.set_learning_phase(1) #set learning phase
sequences_per_batch = 1
epochs = 100
image_size = 240
sequence_length = 155
sequence_start = 0
train_seq = 1
train_cnt = int(sequence_length / train_seq)
file_list = 'val.txt'
input_mode = 'test'
input_data = 4
input_attention = 3
input_dimension = input_data + input_attention
output_dimension = 3
base = 42
folder = 'data'
# load data list
files = np.genfromtxt(file_list, dtype='str')
# define model
def conv_block(m, dim, acti, bn, res, do=0.2):
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(m)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
n = TimeDistributed(Dropout(do))(n) if do else n
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(n)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
return Concatenate()([m, n]) if res else n
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
if depth > 0:
n = conv_block(m, dim, acti, bn, res)
m = TimeDistributed(MaxPooling2D())(n) if mp else TimeDistributed(Conv2D(dim, 4, strides=2, padding='same'))(n)
print(n.shape)
print(m.shape)
m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)
if up:
m = TimeDistributed(UpSampling2D())(m)
m = TimeDistributed(Conv2D(dim, 4, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
else:
m = TimeDistributed(Conv2DTranspose(dim, 4, strides=2, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
n = Concatenate()([n, m])
m = conv_block(n, dim, acti, bn, res)
else:
m = conv_block(m, dim, acti, bn, res, do)
l = TimeDistributed(Flatten())(m)
#l = LSTM(4 * 4 * 128, stateful=True, return_sequences=True)(l)
l = LSTM(2048, stateful=True, return_sequences=True)(l)
l = TimeDistributed(Reshape((2, 2, 2048 // 4)))(l)
m = l
#m = Concatenate()([l, m])
m = conv_block(m, dim, acti, bn, res, do)
return m
def UNet(input_shape, out_ch=1, start_ch=64, depth=7, inc_rate=1.5, activation='relu',
dropout=0.4, batchnorm=True, maxpool=True, upconv=True, residual=False):
i = Input(batch_shape=input_shape)
o = TimeDistributed(ZeroPadding2D(padding=8))(i)
o = TimeDistributed(SeparableConv2D(start_ch, 7, padding='same'))(o)
o = level_block(o, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
o = TimeDistributed(Cropping2D(cropping=8))(o)
o = TimeDistributed(Conv2D(out_ch, 1, activation='tanh'))(o)
return Model(inputs=i, outputs=o)
model = UNet((sequences_per_batch, train_seq, image_size, image_size, input_dimension), out_ch=6, start_ch=base)
model.load_weights('v2.h5')
model.compile(loss='mean_squared_error', optimizer=RMSprop())
for k in model.layers:
print(k.output_shape)
plot_model(model, to_file='model.png')
def load_sequence(p, is_train=False):
pattern = p.decode("utf-8")
val = []
for s in xrange(sequence_length):
name = pattern.format('test', sequence_start + s, folder)
try:
input_img = scipy.misc.imread(name, mode='L').astype(np.float)
except:
val.append(np.zeros((1, image_size, image_size, input_dimension + output_dimension)))
continue
images = np.split(input_img, input_dimension + output_dimension, axis=1)
half_offset = 4
offset = half_offset * 2
hypersize = image_size + offset
fullsize = 256 + offset
h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
conv = []
for image in images:
top = int((fullsize - image.shape[1]) / 2)
bottom = fullsize - image.shape[1] - top
image = np.append( | np.zeros((image.shape[0], top)) | numpy.zeros |
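# --- Illustrative sketch (not part of the snippet above) ---
# The truncated load_sequence above centers each channel inside a
# (fullsize x fullsize) canvas before cropping. This standalone toy example
# reproduces that centering/padding arithmetic on one axis; the array sizes
# are arbitrary and the second append is an assumption about the elided code.
import numpy as np

image = np.ones((200, 220))
fullsize = 256 + 8
top = int((fullsize - image.shape[1]) / 2)
bottom = fullsize - image.shape[1] - top
image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
print(image.shape)   # -> (200, 264)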
"""Provides statistical utilities functions used by the simulator
"""
from __future__ import division
import math
import random
import collections
import numpy as np
import scipy.stats as ss
__all__ = [
'DiscreteDist',
'TruncatedZipfDist',
'means_confidence_interval',
'proportions_confidence_interval',
'cdf',
'pdf',
]
class DiscreteDist(object):
"""Implements a discrete distribution with finite population.
The support must be a finite discrete set of contiguous integers
{1, ..., N}; this is a narrower definition than a general discrete distribution.
"""
def __init__(self, pdf, seed=None):
"""
Constructor
Parameters
----------
pdf : array-like
The probability density function
seed : any hashable type (optional)
The seed to be used for random number generation
"""
if np.abs(sum(pdf) - 1.0) > 0.001:
raise ValueError('The sum of pdf values must be equal to 1')
random.seed(seed)
self._pdf = np.asarray(pdf)
self._cdf = np.cumsum(self._pdf)
# set last element of the CDF to 1.0 to avoid rounding errors
self._cdf[-1] = 1.0
def __len__(self):
"""Return the cardinality of the support
Returns
-------
len : int
The cardinality of the support
"""
return len(self._pdf)
@property
def pdf(self):
"""
Return the Probability Density Function (PDF)
Returns
-------
pdf : Numpy array
Array representing the probability density function of the
distribution
"""
return self._pdf
@property
def cdf(self):
"""
Return the Cumulative Density Function (CDF)
Returns
-------
cdf : Numpy array
Array representing cdf
"""
return self._cdf
def rv(self):
"""Get rand value from the distribution
"""
rv = random.random()
# This operation performs binary search over the CDF to return the
# random value. Worst case time complexity is O(log2(n))
return int(np.searchsorted(self._cdf, rv) + 1)
class TruncatedZipfDist(DiscreteDist):
"""Implements a truncated Zipf distribution, i.e. a Zipf distribution with
a finite population, which can hence take values of alpha > 0.
"""
def __init__(self, alpha=1.0, n=1000, seed=None):
"""Constructor
Parameters
----------
alpha : float
The value of the alpha parameter (it must be positive)
n : int
The size of population
seed : any hashable type, optional
The seed to be used for random number generation
"""
# Validate parameters
if alpha <= 0:
raise ValueError('alpha must be positive')
if n < 0:
raise ValueError('n must be positive')
# This is the PDF i. e. the array that contains the probability that
# content i + 1 is picked
pdf = | np.arange(1.0, n + 1.0) | numpy.arange |
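# --- Illustrative usage sketch (not part of the snippet above) ---
# Draw samples from a small DiscreteDist defined above (assumed in scope).
# The pdf values are arbitrary but must sum to 1, and rv() returns values
# from the support {1, ..., N}.
dist = DiscreteDist([0.5, 0.3, 0.2], seed=42)
samples = [dist.rv() for _ in range(5)]
print(samples)        # each value is 1, 2, or 3
print(dist.cdf)       # -> [0.5 0.8 1. ]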
# source contrast get averaged
# reset -f
import os
import numpy
import numpy as np
import mne
from mne.io import read_raw_fif
from scipy import stats as stats
from mne.stats import permutation_t_test
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from sklearn.base import clone
from mne.connectivity import spectral_connectivity, seed_target_indices
from operator import itemgetter
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
import re
from mne.connectivity import envelope_correlation
from mne.stats import permutation_cluster_1samp_test
# fs source space
src_fs = mne.read_source_spaces('/Users/boo/Desktop/MEG_data_script/PreProcessed_data/fsaverage-src.fif')
fsave_vertices = [s['vertno'] for s in src_fs]
stc_template = mne.read_source_estimate(
'/Users/boo/Desktop/MEG_data_script/analysis_source_result/stc_template-rh.stc')
stc_template.subject = 'fsaverage'
# label
label_name_list_mtl = ['Hippocampus', 'ParaHippocampal', 'Enterinal', 'Perirhinal']
hemi_pool = ['_lh', '_rh']
label_list_path = []
for r, d, f in os.walk('/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/'):
for ith_hemi in list(range(0, len(hemi_pool))):
for ith_label_path in list(range(0, len(label_name_list_mtl))):
for file in f:
if hemi_pool[ith_hemi] in file and label_name_list_mtl[ith_label_path] in file:
label_list_path.append(os.path.join(r, file))
label_list = []
label_parietal = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Parietal_lh.label')
label_precuneus = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/Precuneus_lh.label')
label_SMA = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/SMA_lh.label')
label_FEF = mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_rh.label') + mne.read_label(
'/Users/boo/Desktop/MEG_data_script/aal_51/labels_conn_each_band_fs/FEF_lh.label')
label_list.append(label_parietal)
label_list.append(label_precuneus)
label_list.append(label_SMA)
label_list.append(label_FEF)
for ith_label in list(range(0, len(label_list_path))):
label_list.append(mne.read_label(label_list_path[ith_label]))
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# band
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
method_pool = ['pli'] #'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
# the maximum point for b-lr is 0.28
# the maximum point for lr-b is 0.76
# 150 200 250 300 350 400
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #0.15, 0.2, 0.25, 0.3, 0.35, 0.4
tmin_pool = []
tmax_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tmin_pool.append(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tmax_pool.append(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
curr_tp = 0
for ith_tp in list(range(0, len(tmin_pool))):
curr_tmin = round(tmin_pool[ith_tp], 3)
curr_tmax = round(tmax_pool[ith_tp], 3)
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
vmin = curr_fre_info[1]
vmax = curr_fre_info[2]
for ith_condition in list(range(0, len(naming_list))):
curr_condition = naming_list[ith_condition]
index_sub = 0
output_array = np.zeros((len(list(range(2, 14))), len(label_list), len(label_list)))
for ith_sub in list(range(2, 14)):
stcs_epoch_morphed_nocrop = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_ego_epoch_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.npy', allow_pickle=True)
stcs_evoke_morphed_nocrop = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn/stc_sourceEstimate_ego_evoke_sub' +
str(ith_sub) + '_200hz_' + curr_condition +
'.npy', allow_pickle=True)
stcs_epoch_morphed_nocrop = stcs_epoch_morphed_nocrop.tolist()
stcs_evoke_morphed_nocrop = stcs_evoke_morphed_nocrop.tolist()
# crop time period
stcs_epoch_morphed = []
for ith_ele in list(range(0, len(stcs_epoch_morphed_nocrop))):
stcs_epoch_morphed.append(
stcs_epoch_morphed_nocrop[ith_ele].crop(tmin=curr_tmin, tmax=curr_tmax))
stcs_evoke_morphed = stcs_evoke_morphed_nocrop.crop(tmin=curr_tmin, tmax=curr_tmax)
seed_idx_pool = []
for ith_seed in list(range(0, len(yaxis_label_list))):
                        # find the vertex with the maximum source power
seed_pool_ts_evoke = stcs_evoke_morphed.in_label(label_list[ith_seed])
src_pow = np.sum(seed_pool_ts_evoke.data ** 2, axis=1)
total_seed_vertice_list = seed_pool_ts_evoke.vertices[0].tolist() + seed_pool_ts_evoke.vertices[
1].tolist()
seed_vertno = total_seed_vertice_list[np.argmax(src_pow)]
total_wb_vertice_list = stcs_evoke_morphed.vertices[0].tolist() + stcs_evoke_morphed.vertices[
1].tolist()
seed_idx_pool.append(np.searchsorted(total_wb_vertice_list, seed_vertno))
# create max epoch array for conn
conn_array = np.zeros((len(yaxis_label_list), len(yaxis_label_list), 1))
for ith_curr_seed in list(range(0, len(yaxis_label_list))):
max_epoch_array = np.zeros(
(np.shape(stcs_epoch_morphed)[0], 1, np.shape(stcs_evoke_morphed)[1]))
epoch_array = np.zeros(
(np.shape(stcs_epoch_morphed)[0], len(yaxis_label_list), np.shape(stcs_evoke_morphed)[1]))
for ith_epoch in list(range(0, np.shape(stcs_epoch_morphed)[0])):
max_epoch_array[ith_epoch, 0, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_curr_seed], ...]
for ith_other_seed in list(range(0, len(yaxis_label_list))):
epoch_array[ith_epoch, ith_other_seed, ...] = stcs_epoch_morphed[ith_epoch].data[
seed_idx_pool[ith_other_seed], ...]
# create indices
comb_ts = list(zip(max_epoch_array, epoch_array))
indices = seed_target_indices([0], np.arange(1, 13))
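                        # seed_target_indices pairs signal 0 (the seed's max-power vertex
                        # time course) with signals 1-12 (the twelve ROI time courses in
                        # comb_ts), yielding one connectivity value per ROI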
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
comb_ts, method=curr_method, sfreq=200, fmin=vmin, fmax=vmax, mode='fourier',
indices=indices, faverage=True) # fourier
conn_array[ith_curr_seed, ...] = con
output_array[index_sub, ...] = conn_array[..., 0]
index_sub = index_sub + 1
np.save('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + curr_condition + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy',
output_array)
curr_tp = curr_tp + 1
## watching
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
method_pool = ['pli'] #'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
iter_freqs = [
('Alpha', 8, 13),
('Beta', 13, 30),
('Low gamma', 30, 60),
('High gamma', 60, 99)
]
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
yaxis_label = ['Parietal-SMA', 'Parietal-FEF', 'Precuneus-SMA','Precuneus-FEF',
'ERC(R)-SMA', 'ERC(R)-FEF', 'ERC(R)-Parietal', 'ERC(R)-Precuneus']
fontsize = 7
time_seed_pool = [0.28, 0.76]
time_sep_pool = [0.375, 0.4, 0.5, 0.6, 0.7] #[0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
tmin_pool = []
tmax_pool = []
for ith_prep1 in list(range(0, len(time_seed_pool))):
for ith_prep2 in list(range(0, len(time_sep_pool))):
tmin_pool.append(time_seed_pool[ith_prep1] - time_sep_pool[ith_prep2] / 2)
tmax_pool.append(time_seed_pool[ith_prep1] + time_sep_pool[ith_prep2] / 2)
for ith_band in list(range(0, len(iter_freqs))):
curr_fre_info = iter_freqs[ith_band]
band_name = curr_fre_info[0]
plot_array = np.zeros((10, len(yaxis_label)))
title_array = np.array(range(10), dtype='<U20')
ith_position=0
for ith_method in list(range(0, len(method_pool))):
curr_method = method_pool[ith_method]
for ith_tp in list(range(0, len(tmin_pool))):
curr_tmin = round(tmin_pool[ith_tp], 3)
curr_tmax = round(tmax_pool[ith_tp], 3)
curr_array_b = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_b' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
curr_array_l = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_l' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
curr_array_r = np.load('/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' +
band_name + '_' + 't_r' + '_' + str(curr_tmin) + '_' + str(curr_tmax) + '.npy')
output_array_b_lr = curr_array_b - (curr_array_l + curr_array_r) / 2
statistic, pvalue = stats.ttest_1samp(output_array_b_lr, 0, axis=0)
plot_array[ith_position, ...] = np.array(
(statistic[0][2], statistic[0][3], statistic[1][2], statistic[1][3],
statistic[10][2], statistic[10][3], statistic[10][0], statistic[10][1]))
            title_array[ith_position] = str(curr_tmin) + '-' + str(curr_tmax) + 's(' + curr_method + ')'
ith_position = ith_position+1
fig, axes = plt.subplots(nrows=1, ncols=10, figsize=(30, 3)) # figsize=(16, 8.5)
ith_plot = 0
for ax in axes.flat:
        ax.set_xticks(np.arange(len(yaxis_label)))
        ax.set_xticklabels(yaxis_label, rotation=90, fontsize=fontsize)
ax.bar(yaxis_label, plot_array[ith_plot], width=0.6, color='0.5', edgecolor='black', linewidth=1, capsize=10)
ax.set_ylim([-3, 3])
ax.axhline(y=2.2, ls='--', linewidth=1, color='r')
ax.axhline(y=-2.2, ls='--', linewidth=1, color='r')
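        # the dashed lines at +/-2.2 approximate the two-tailed p < .05 critical
        # t value for 11 degrees of freedom (12 subjects)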
ax.set_title(title_array[ith_plot], fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ith_plot = ith_plot+1
plt.subplots_adjust(left=.03, right=.97, top=0.9, bottom=0.35, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/connectivity_' + band_name + '.png') # bbox_inches='tight'
plt.close()
## make figure horizontal bar
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
fontsize = 17
time_seed_pool = [0.28, 0.76]
band_name = 'Beta'
curr_method = 'pli'
tmin_t1 = round(time_seed_pool[0] - 0.2, 3)
tmax_t1 = round(time_seed_pool[0] + 0.2, 3)
tmin_t2 = round(time_seed_pool[1] - 0.2, 3)
tmax_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_l_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_r_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_b_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_l_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_r_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
output_array_b_lr_t1 = curr_array_b_t1 - (curr_array_l_t1 + curr_array_r_t1) / 2
output_array_b_lr_t2 = curr_array_b_t2 - (curr_array_l_t2 + curr_array_r_t2) / 2
statistic_t1, pvalue_t1 = stats.ttest_1samp(output_array_b_lr_t1, 0, axis=0)
statistic_t2, pvalue_t2 = stats.ttest_1samp(output_array_b_lr_t2, 0, axis=0)
mean_t1 = np.mean(output_array_b_lr_t1, axis=0)
mean_t2 = np.mean(output_array_b_lr_t2, axis=0)
se_t1 = np.std(output_array_b_lr_t1, axis=0)/ np.sqrt(12)
se_t2 = np.std(output_array_b_lr_t2, axis=0)/ np.sqrt(12)
# stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
stats.ttest_1samp(output_array_b_lr_t2[..., 3,0], 0)
# plot_array_t1 = [statistic_t1[3][0], statistic_t1[2][0], statistic_t1[8][0], statistic_t1[9][0], statistic_t1[11][0], statistic_t1[10][0]]
# plot_array_t2 = [statistic_t2[3][0], statistic_t2[2][0], statistic_t2[8][0], statistic_t2[9][0], statistic_t2[11][0], statistic_t2[10][0]]
t1_str = str(tmin_t1)+' ~ '+str(tmax_t1)+'s'
t2_str = str(tmin_t2)+' ~ '+str(tmax_t2)+'s'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)']
# yaxis_label = ['FEF-Parietal', 'SMA-Parietal', 'HPC(R)-Parietal', 'PHC(R)-Parietal', 'PRC(R)-Parietal',
# 'ERC(R)-Parietal']
yaxis_label = ['FEF-Precuneus', 'SMA-Precuneus', 'HPC(R)-Precuneus', 'PHC(R)-Precuneus', 'PRC(R)-Precuneus',
'ERC(R)-Precuneus']
ith_region = 1
dataFrame_mean = pd.DataFrame(data=[[mean_t1[3][ith_region], mean_t2[3][ith_region]], [mean_t1[2][ith_region], mean_t2[2][ith_region]], \
[mean_t1[8][ith_region], mean_t2[8][ith_region]], [mean_t1[9][ith_region], mean_t2[9][ith_region]], \
[mean_t1[11][ith_region], mean_t2[11][ith_region]], [mean_t1[10][ith_region], mean_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
dataFrame_se = pd.DataFrame(data=[[se_t1[3][ith_region], se_t2[3][ith_region]], [se_t1[2][ith_region], se_t2[2][ith_region]], \
[se_t1[8][ith_region], se_t2[8][ith_region]], [se_t1[9][ith_region], se_t2[9][ith_region]], \
[se_t1[11][ith_region], se_t2[11][ith_region]], [se_t1[10][ith_region], se_t2[10][ith_region]]],
index=yaxis_label,
columns=[t1_str, t2_str])
handle = dataFrame_mean.plot.barh(xerr=dataFrame_se, figsize=(6, 6), legend=False, color=['darkgreen', 'red'])
handle.spines['right'].set_visible(False)
handle.spines['top'].set_visible(False)
handle.set_yticklabels(yaxis_label, rotation=0, fontsize=fontsize)
handle.set_xticks([-0.15, 0, 0.1])
handle.set_xlabel('PLI (Back - Left/Right)', fontsize=fontsize)
handle.axvline(x=0, ls='-', linewidth=0.5, color='black')
handle.invert_yaxis() # labels read top-to-bottom
handle.tick_params(labelsize=fontsize)
handle.set_aspect('auto')
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.35, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_Precuneus_roi_' + band_name + '_' + '.png') # bbox_inches='tight'
plt.close()
## make figure vertical bar - old
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
for ith_time_p in list(range(0, len(time_seed_pool))):
band_name = band_list[ith_band]
tmin = round(time_seed_pool[ith_time_p] - 0.2, 3)
tmax = round(time_seed_pool[ith_time_p] + 0.2, 3)
curr_array_b = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
curr_array_l = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
curr_array_r = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin) + '_' + str(tmax) + '.npy')
if ith_time_p == 0:
# color = 'red'
output_array_contrast = curr_array_b - (curr_array_l + curr_array_r) / 2
if ith_time_p == 1:
# color = 'darkgreen'
output_array_contrast = (curr_array_l + curr_array_r) / 2 - curr_array_b
mean = np.mean(output_array_contrast, axis=0)
se = np.std(output_array_contrast, axis=0) / np.sqrt(12)
# statistic
statistic, pvalue = stats.ttest_1samp(output_array_contrast, 0, axis=0)
# stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
stat_fef, pval_fef = stats.ttest_1samp(output_array_contrast[..., 3, ith_region], 0)
stat_sma, pval_sma = stats.ttest_1samp(output_array_contrast[..., 2, ith_region], 0)
stat_hpc, pval_hpc = stats.ttest_1samp(output_array_contrast[..., 8, ith_region], 0)
stat_phc, pval_phc = stats.ttest_1samp(output_array_contrast[..., 9, ith_region], 0)
stat_prc, pval_prc = stats.ttest_1samp(output_array_contrast[..., 11, ith_region], 0)
stat_erc, pval_erc = stats.ttest_1samp(output_array_contrast[..., 10, ith_region], 0)
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
color = ['limegreen', 'limegreen', 'red', 'red', 'red', 'red']
value_y = [mean[3][ith_region], mean[2][ith_region],
mean[8][ith_region], mean[9][ith_region],
mean[11][ith_region], mean[10][ith_region]]
value_errorbar = [se[3][ith_region], se[2][ith_region],
se[8][ith_region], se[9][ith_region],
se[11][ith_region], se[10][ith_region]]
fig, ax = plt.subplots(figsize=(7, 5.5))
ax.bar([1, 2, 4, 5, 6, 7], value_y, width=0.5, yerr=value_errorbar, capsize=3, color=color) # (89/255, 88/255, 89/255)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 4, 5, 6, 7])
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
ax.set_yticks([-0.08, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('PLI', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.invert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '_' + str(time_seed_pool[ith_time_p]) + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - paired t test
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_early = round(time_seed_pool[0] - 0.2, 3)
tmax_early = round(time_seed_pool[0] + 0.2, 3)
tmin_late = round(time_seed_pool[1] - 0.2, 3)
tmax_late = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_l_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_r_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_b_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_l_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_r_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
output_array_contrast_early = curr_array_b_early - (curr_array_l_early + curr_array_r_early) / 2
output_array_contrast_late = curr_array_b_late - (curr_array_l_late + curr_array_r_late) / 2
mean_early = np.mean(output_array_contrast_early, axis=0)
mean_late = np.mean(output_array_contrast_late, axis=0)
se_early = np.std(output_array_contrast_early, axis=0) / np.sqrt(12)
se_late = np.std(output_array_contrast_late, axis=0) / np.sqrt(12)
# two sample t test
# statistic, pvalue = stats.ttest_1samp(output_array_contrast_early, 0, axis=0)
# # stats.ttest_rel(output_array_b_lr_t1[..., 10,0], output_array_b_lr_t2[..., 10,0])
# stat_fef, pval_fef = stats.ttest_1samp(, 0)
# stat_sma, pval_sma = stats.ttest_1samp(output_array_contrast_early[..., 2, ith_region], 0)
# stat_hpc, pval_hpc = stats.ttest_1samp(output_array_contrast_early[..., 8, ith_region], 0)
# stat_phc, pval_phc = stats.ttest_1samp(output_array_contrast_early[..., 9, ith_region], 0)
# stat_prc, pval_prc = stats.ttest_1samp(output_array_contrast_early[..., 11, ith_region], 0)
# stat_erc, pval_erc = stats.ttest_1samp(output_array_contrast_early[..., 10, ith_region], 0)
# paired t test
stat_fef, pval_fef = stats.ttest_rel(output_array_contrast_early[..., 3, ith_region], output_array_contrast_late[..., 3, ith_region])
stat_sma, pval_sma = stats.ttest_rel(output_array_contrast_early[..., 2, ith_region], output_array_contrast_late[..., 2, ith_region])
stat_hpc, pval_hpc = stats.ttest_rel(output_array_contrast_early[..., 8, ith_region], output_array_contrast_late[..., 8, ith_region])
stat_phc, pval_phc = stats.ttest_rel(output_array_contrast_early[..., 9, ith_region], output_array_contrast_late[..., 9, ith_region])
stat_erc, pval_erc = stats.ttest_rel(output_array_contrast_early[..., 10, ith_region], output_array_contrast_late[..., 10, ith_region])
stat_prc, pval_prc = stats.ttest_rel(output_array_contrast_early[..., 11, ith_region], output_array_contrast_late[..., 11, ith_region])
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' fef' + ' tval:' + str(stat_fef) + ' pval:' + str(pval_fef))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' sma' + ' tval:' + str(stat_sma) + ' pval:' + str(pval_sma))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' hpc' + ' tval:' + str(stat_hpc) + ' pval:' + str(pval_hpc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' phc' + ' tval:' + str(stat_phc) + ' pval:' + str(pval_phc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' prc' + ' tval:' + str(stat_prc) + ' pval:' + str(pval_prc))
print('seed:' + seed_pool[ith_region] + ' band:' + band_list[ith_band] + ' erc' + ' tval:' + str(stat_erc) + ' pval:' + str(pval_erc))
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# array
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color_early = ['skyblue', 'skyblue', 'skyblue', 'skyblue', 'gold', 'gold']
color_late = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y_early = [mean_early[8][ith_region], mean_early[9][ith_region], mean_early[11][ith_region], mean_early[10][ith_region],
mean_early[3][ith_region], mean_early[2][ith_region]]
value_y_late = [mean_late[8][ith_region], mean_late[9][ith_region], mean_late[11][ith_region], mean_late[10][ith_region],
mean_late[3][ith_region], mean_late[2][ith_region]]
value_errorbar_early = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region]]
value_errorbar_late = [se_late[8][ith_region], se_late[9][ith_region], se_late[11][ith_region], se_late[10][ith_region],
se_late[3][ith_region], se_late[2][ith_region]]
width = 0.25 # the width of the bars
ind = np.arange(len(value_y_early))
fig, ax = plt.subplots(figsize=(10, 4))
ax.bar(ind - width / 2, value_y_early, width, yerr=value_errorbar_early, capsize=3, color=color_early)
ax.bar(ind + width / 2, value_y_late, width, yerr=value_errorbar_late, capsize=3, color=color_late)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks(ind)
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
# ax.axvline(x=0, ls='-', linewidth=0.5, color='black')
# ax.invert_xaxis() # labels read top-to-bottom
# handle.legend(loc='upper right', prop={'size': fontsize})
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## make figure vertical bar - new - anova-like
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
fontsize = 29
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
for ith_region in list(range(0, 2)): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_early = round(time_seed_pool[0] - 0.2, 3)
tmax_early = round(time_seed_pool[0] + 0.2, 3)
tmin_late = round(time_seed_pool[1] - 0.2, 3)
tmax_late = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_l_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_r_early = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_early) + '_' + str(tmax_early) + '.npy')
curr_array_b_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_l_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
curr_array_r_late = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_late) + '_' + str(tmax_late) + '.npy')
output_array_contrast_early = curr_array_b_early - (curr_array_l_early + curr_array_r_early) / 2
output_array_contrast_late = curr_array_b_late - (curr_array_l_late + curr_array_r_late) / 2
mean_early = np.mean(output_array_contrast_early, axis=0)
mean_late = np.mean(output_array_contrast_late, axis=0)
se_early = np.std(output_array_contrast_early, axis=0) / np.sqrt(12)
se_late = np.std(output_array_contrast_late, axis=0) / np.sqrt(12)
# reference
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
# array
label_x = ['HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC', 'FEF', 'SMA']
color = ['blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod', 'blue', 'blue', 'blue', 'blue', 'darkgoldenrod', 'darkgoldenrod']
value_y = [mean_early[8][ith_region], mean_early[9][ith_region], mean_early[11][ith_region], mean_early[10][ith_region],
mean_early[3][ith_region], mean_early[2][ith_region], mean_late[8][ith_region], mean_late[9][ith_region],
mean_late[11][ith_region], mean_late[10][ith_region], mean_late[3][ith_region], mean_late[2][ith_region]]
value_errorbar = [se_early[8][ith_region], se_early[9][ith_region], se_early[11][ith_region], se_early[10][ith_region],
se_early[3][ith_region], se_early[2][ith_region], se_late[8][ith_region], se_late[9][ith_region],
se_late[11][ith_region], se_late[10][ith_region], se_late[3][ith_region], se_late[2][ith_region]]
width = 0.5 # the width of the bars
ind = np.arange(len(value_y))
fig, ax = plt.subplots(figsize=(12, 4))
ax.bar([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14], value_y, width, yerr=value_errorbar, capsize=3, color=color)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xticks([1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14])
if ith_band==0:
ax.set_xticklabels(label_x, rotation=45, fontsize=fontsize-3)
else:
ax.set_xticklabels([])
ax.set_yticks([-0.17, 0, 0.14])
ax.tick_params(labelsize=fontsize)
ax.set_aspect('auto')
ax.set_ylabel('Back - Left/Right', fontsize=fontsize)
plt.subplots_adjust(left=.25, right=.97, top=0.97, bottom=0.15, wspace=0.5, hspace=0)
plt.savefig(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/Fig_6_seed_' + seed_pool[ith_region] + '_band_' + band_name + '.png', bbox_inches='tight') # bbox_inches='tight'
plt.close()
## anova two way
import os
import numpy as np
from scipy import stats
import matplotlib.pylab as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import (pairwise_tukeyhsd, MultiComparison)
fontsize = 25
method_pool = ['pli'] # 'plv', 'coh', 'pli'
naming_list = ['t_b', 't_l', 't_r', 't_nc', 't_tpc', 't_fpc']
band_list = ['Alpha', 'Beta', 'Low gamma', 'High gamma']
seed_pool = ['Parietal', 'Precuneus']
time_seed_pool = [0.28, 0.76]
curr_method = 'pli'
yaxis_label_list = ['Parietal', 'Precuneus', 'SMA', 'FEF',
'HPC(L)', 'PHC(L)', 'ERC(L)', 'PRC(L)',
'HPC(R)', 'PHC(R)', 'ERC(R)', 'PRC(R)'] # for reference
label_x = ['FEF', 'SMA', 'HPC', 'PHC', 'PRC', 'ERC']
for ith_region in list(range(0, len(seed_pool))): # 1 for precuneus 0 for parietal cortex
for ith_band in list(range(0, len(band_list))):
band_name = band_list[ith_band]
tmin_t1 = round(time_seed_pool[0] - 0.2, 3)
tmax_t1 = round(time_seed_pool[0] + 0.2, 3)
tmin_t2 = round(time_seed_pool[1] - 0.2, 3)
tmax_t2 = round(time_seed_pool[1] + 0.2, 3)
curr_array_b_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_l_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_r_t1 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t1) + '_' + str(tmax_t1) + '.npy')
curr_array_b_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_b' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_l_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_l' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
curr_array_r_t2 = np.load(
'/Users/boo/Desktop/MEG_data_script/analysis_conn_figures/' + curr_method + '_' + band_name + '_' + 't_r' + '_' + str(
tmin_t2) + '_' + str(tmax_t2) + '.npy')
array_t1_fef = curr_array_b_t1[..., 3, ith_region] - (curr_array_l_t1[..., 3, ith_region] + curr_array_r_t1[..., 3, ith_region])/2
array_t1_sma = curr_array_b_t1[..., 2, ith_region] - (curr_array_l_t1[..., 2, ith_region] + curr_array_r_t1[..., 2, ith_region])/2
array_t1_hpc = curr_array_b_t1[..., 8, ith_region] - (curr_array_l_t1[..., 8, ith_region] + curr_array_r_t1[..., 8, ith_region])/2
array_t1_phc = curr_array_b_t1[..., 9, ith_region] - (curr_array_l_t1[..., 9, ith_region] + curr_array_r_t1[..., 9, ith_region])/2
array_t1_prc = curr_array_b_t1[..., 11, ith_region] - (curr_array_l_t1[..., 11, ith_region] + curr_array_r_t1[..., 11, ith_region])/2
array_t1_erc = curr_array_b_t1[..., 10, ith_region] - (curr_array_l_t1[..., 10, ith_region] + curr_array_r_t1[..., 10, ith_region])/2
array_t2_fef = curr_array_b_t2[..., 3, ith_region] - (curr_array_l_t2[..., 3, ith_region] + curr_array_r_t2[..., 3, ith_region])/2
array_t2_sma = curr_array_b_t2[..., 2, ith_region] - (curr_array_l_t2[..., 2, ith_region] + curr_array_r_t2[..., 2, ith_region])/2
array_t2_hpc = curr_array_b_t2[..., 8, ith_region] - (curr_array_l_t2[..., 8, ith_region] + curr_array_r_t2[..., 8, ith_region])/2
array_t2_phc = curr_array_b_t2[..., 9, ith_region] - (curr_array_l_t2[..., 9, ith_region] + curr_array_r_t2[..., 9, ith_region])/2
array_t2_prc = curr_array_b_t2[..., 11, ith_region] - (curr_array_l_t2[..., 11, ith_region] + curr_array_r_t2[..., 11, ith_region])/2
array_t2_erc = curr_array_b_t2[..., 10, ith_region] - (curr_array_l_t2[..., 10, ith_region] + curr_array_r_t2[..., 10, ith_region])/2
statistic, pvalue = stats.ttest_1samp(array_t2_sma, 0, axis=0)
create_array = {'value': np.concatenate((array_t1_fef, array_t1_sma, array_t1_hpc, array_t1_phc, array_t1_prc, array_t1_erc,
array_t2_fef, array_t2_sma, array_t2_hpc, array_t2_phc, array_t2_prc, array_t2_erc)),
                            'area': np.concatenate((np.repeat('fef', 12), np.repeat('sma', 12), np.repeat('hpc', 12), np.repeat('phc', 12), np.repeat('prc', 12), np.repeat('erc', 12)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os.path
import numpy as np
from numpy.testing import assert_allclose
from scipy import signal
import pytest
from pambox import utils
from pambox.utils import fftfilt
__DATA_ROOT__ = os.path.join(os.path.dirname(__file__), 'data')
@pytest.mark.parametrize('x, ac, offset, axis, target', [
([0], True, 0, -1, -np.inf),
([1], False, 0, -1, 0),
([1], False, 100, -1, 100),
([1], True, 0, -1, -np.inf),
([10], False, 0, -1, 20),
([10, 10], False, 0, -1, 20),
([10, 10], False, 0, 1, [20, 20]),
])
def test_dbspl(x, ac, offset, axis, target):
assert_allclose(utils.dbspl(x, ac=ac, offset=offset,
axis=axis), target)
@pytest.mark.parametrize('x, ac, axis, target', [
([0, 1, 2, 3, 4, 5, 6], True, -1, 2),
([[0, 1, 2, 3, 4, 5, 6]], True, 0, [0, 0, 0, 0, 0, 0, 0]),
([[0, 1, 2, 3, 4, 5, 6]], True, 1, 2),
([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], True, -1, [2, 2]),
([0, 1, 2, 3, 4, 5, 6], False, -1, 3.60555128),
([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]], False, -1,
[3.60555128, 3.60555128]),
])
def test_rms_do_ac(x, ac, axis, target):
out = utils.rms(x, ac=ac, axis=axis)
assert_allclose(out, target)
@pytest.mark.parametrize('x, ac, axis, target', [
([0], True, -1, 0),
([1], True, -1, 0),
([1], False, -1, 1),
([-1], False, -1, 1),
([-1], True, -1, 0),
([10, 10], False, -1, 10),
([10, 10], True, -1, 0),
([[0, 1], [0, 1]], True, -1, [0.5, 0.5]),
([[0, 1], [0, 1]], False, -1, [0.70710678, 0.70710678]),
([[0, 1], [0, 1]], True, 0, [0, 0]),
([[0, 1], [0, 1]], False, 0, [0, 1]),
([[0, 1], [0, 1]], True, 1, [0.5, 0.5]),
([[0, 1], [0, 1]], False, 1, [0.70710678, 0.70710678]),
])
def test_rms(x, ac, axis, target):
assert_allclose(utils.rms(x, ac=ac, axis=axis), target)
@pytest.mark.parametrize("x, level, offset, target", [
((0, 1), 65, 100, (0., 0.02514867)),
((0, 1), 65, 0, (0., 2514.86685937)),
((0, 1), 100, 100, (0., 1.41421356)),
])
def test_set_level(x, level, offset, target):
y = utils.setdbspl(x, level, offset=offset)
assert_allclose(y, target, atol=1e-4)
# Can't be done programmatically, because the exact third-octave spacing is not
# exactly the same as the one commonly used.
@pytest.mark.xfail(run=False, reason="Real 3rd-oct != common ones")
def test_third_oct_center_freq_bet_63_12500_hz():
"""Test returns correct center frequencies for third-octave filters
Between 63 and 12500 Hz.
"""
center_f = (63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000,
1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000)
assert utils.noctave_center_freq(63, 12500, width=3) == center_f
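# For reference (values computed here, not part of the original test): the exact
# base-ten third-octave centres 1000 * 10**(n/10) are 63.10, 79.43, 100.0,
# 125.89, ... Hz, slightly different from the nominal 63/80/100/125 Hz above,
# which is why this test is marked xfail.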
def test_find_calculate_srt_when_found():
x = np.arange(10)
y = 20 * x + 4
assert 2.3 == utils.int2srt(x, y, srt_at=50)
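# Sanity check of the expected value above: y = 20*x + 4 reaches 50 at
# x = (50 - 4) / 20 = 2.3, which is the SRT that int2srt should return.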
def test_find_calculate_srt_when_not_found():
x = np.arange(10)
y = 2 * x + 4
assert np.isnan(utils.int2srt(x, y, srt_at=50))
def test_find_srt_when_srt_at_index_zero():
x = [0, 1]
y = [50, 51]
assert 0 == utils.int2srt(x, y, srt_at=50)
@pytest.mark.parametrize("inputs, targets", [
(([1], [1, 1]), ([1, 0], [1, 1])),
(([1, 1], [1, 1]), ([1, 1], [1, 1])),
(([1, 1], [1]), ([1, 1], [1, 0])),
(([1], [1, 1], False), ([1], [1])),
])
def test_make_same_length_with_padding(inputs, targets):
assert_allclose(utils.make_same_length(*inputs), targets)
def test_psy_fn():
x = -3.0
mu = 0.
sigma = 1.0
target = 0.13498980316300957
y = utils.psy_fn(x, mu, sigma)
assert_allclose(y, target)
class _TestFFTFilt():
dt = None
def test_fftfilt(self):
dt = 1e-6
fs = 1/dt
u = np.random.rand(10**6)
f = 10**4
b = signal.firwin(50, f/fs)
u_lfilter = signal.lfilter(b, 1, u)
u_fftfilt = fftfilt(b, u)
assert_allclose(u_lfilter, u_fftfilt)
def test_rank1(self):
# pytest.mark.skipif(self.dt in [np.longdouble, np.longcomplex],
# reason="Type %s is not supported by fftpack" % self.dt)
# dec.knownfailureif(
# self.dt in [np.longdouble, np.longcomplex],
# "Type %s is not supported by fftpack" % self.dt)(lambda: None)()
x = np.arange(6).astype(self.dt)
# Test simple FIR
b = np.array([1, 1]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
assert_allclose(fftfilt(b, x), y_r, atol=1e-6)
# Test simple FIR with FFT length
b = np.array([1, 1]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
n = 12
assert_allclose(fftfilt(b, x, n), y_r, atol=1e-6)
# Test simple FIR with FFT length which is a power of 2
b = np.array([1, 1]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
n = 32
assert_allclose(fftfilt(b, x, n), y_r, atol=1e-6)
# Test simple FIR with FFT length
        b = np.array(np.ones(6))
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from imp import reload
import alexREPO.fitting as fitting
reload(fitting)
import alexREPO.circlefinder as circlefinder
def grayscale(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
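# Quick check of the luminance weights above: they sum to 0.2989 + 0.5870 +
# 0.1140 = 0.9999, so a pure-white RGB pixel (1.0, 1.0, 1.0) maps to ~1.0 gray.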
def cut_out(img,x,y,r):
"""
    Takes x, y coordinates in pixels and a radius in pixels.
    Returns a boolean array that acts as a circular cut-out mask on the image.
"""
[lenx,leny] = img.shape
xcoords = np.outer(np.array(range(lenx)),np.ones(leny))
ycoords = np.outer(np.ones(lenx),np.array(range(leny)))
distancetoXY = np.sqrt((xcoords-x)**2 + (ycoords-y)**2)
return distancetoXY < r
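# Minimal usage sketch (file name and coordinates are made up for illustration):
# the boolean mask from cut_out() keeps only the circular region around (x, y).
# img = grayscale(mpimg.imread('example_cells.png'))  # hypothetical input image
# mask = cut_out(img, x=120, y=95, r=40)              # True inside the circle
# region = img * mask                                 # pixels outside become 0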
def histogram(img,x,y,r):
#Plot Histogram of cut-out and calculate the area
image_2 = img*cut_out(img,x,y,r)
im = image_2.ravel()
img = im[np.nonzero(im)]
n,bins,patches = plt.hist(img,100, color='black')
return n,bins
def fit_histogram(x,n):
"""
    Takes an input array with the gray-scale histogram and fits a Gaussian.
    Returns a value that lies two standard deviations towards brighter values.
"""
print('give the following parameters')
    print(np.amax(n), x[np.argmax(n)])
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import numpy as np
from coco import COCO
import os
import cv2
import json
def load(url):
"""
    Given a URL of an image, downloads the image and
    returns it as a BGR NumPy array
"""
response = requests.get(url)
pil_image = Image.open(BytesIO(response.content)).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
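# Channel-order note: PIL returns RGB, while the OpenCV-style code below expects
# BGR, so [:, :, [2, 1, 0]] swaps the first and third channels; e.g. a pure-red
# RGB pixel (255, 0, 0) becomes (0, 0, 255) in BGR.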
def imshow(img):
plt.imshow(img[:, :, [2, 1, 0]])
plt.axis("off")
if __name__ == "__main__":
# this makes our figures bigger
pylab.rcParams['figure.figsize'] = 20, 12
config_file = "../configs/predict.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.85,
)
testPath = "../datasets/val"
coco=COCO("../datasets/annotations/val.json")
confusionMatrix = np.zeros((4,4))
gt_numbers = [0,0,0,0]
IoUThreshold = 0.3
locationError = 0
pred_numbers = np.zeros(4)
F1_avg = np.zeros(4) # for computing average of F1
prec_avg = np.zeros(4)
recall_avg = np.zeros(4)
# Loop all testing images
for image_name in os.listdir(testPath):
print(image_name)
correctList = np.zeros(4)
gt_numbers_singleImage = np.zeros(4) # record the number of gt defects in each image
pred_numbers_singleImage = np.zeros(4) # record the number of predicted defects in each image
gt_mask_list = [[],[],[],[]]
#print(img)
#image_name = "grid1_roi2_500kx_0p5nm_haadf1_0039.jpg"
image = cv2.imread("../datasets/val/" + image_name)
#imshow(image)
# prepare gt mask
catIds = coco.getCatIds()
imgIds = coco.getImgIds(catIds=catIds )
labels = list()
allgtBG = np.zeros((1024,1024))
allpredBG = np.zeros((1024,1024))
with open('../datasets/annotations/val.json') as json_data:
annotation = json.loads(json_data.read())
images = annotation['images']
imgId = 0
for i in range(len(images)):
if(images[i]["file_name"] == image_name):
imgId = images[i]["id"]
seg = annotation['annotations']
for i in range(len(seg)):
if seg[i]['image_id'] == imgId:
labels.append(seg[i]['category_id'])
img = coco.loadImgs(imgId)[0]
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
# get the mask for each class
for i in range(len(anns)):
gt_numbers_singleImage[labels[i] - 1] += 1
if labels[i] == 1:
gt_mask_list[0].append(coco.annToMask(anns[i]))
gt_numbers[0] += 1
if labels[i] == 2:
gt_mask_list[1].append(coco.annToMask(anns[i]))
gt_numbers[1] += 1
if labels[i] == 3:
gt_mask_list[2].append(coco.annToMask(anns[i]))
gt_numbers[2] += 1
if labels[i] == 4:
gt_mask_list[3].append(coco.annToMask(anns[i]))
gt_numbers[3] += 1
#plt.imshow(gt_allMask)
# begin predication
# compute predictions
predictions = coco_demo.run_on_opencv_image(image)
cv2.imwrite(image_name, predictions)
#imshow(predictions)
mask, labels = coco_demo.get_predicted_mask_labels(image)
#print(mask[0])
        # TODO: new_labels holds the predicted labels, using a new name to avoid
        # clashing with the ground-truth `labels` list built above
new_labels = np.zeros(len(labels))
for i in range(len(labels)):
new_labels[i] = labels[i].item()
#print(new_labels)
#pred_numbers = np.zeros(4)
for i in new_labels:
# print(type(i))
item = int(i)
pred_numbers[item-1] += 1
pred_numbers_singleImage[item - 1] += 1
# generate predict mask
for i in range(len(new_labels)):
maxIoU = 0
            maxlabel = 0
currentPredMask = mask[i][0]
allpredBG = allpredBG + currentPredMask
for j in range(len(gt_mask_list)):
for gtMask in gt_mask_list[j]:
union = np.count_nonzero(gtMask + currentPredMask)
intersection = np.count_nonzero((gtMask + currentPredMask) == 2)
tmpIoU = 1.0 * intersection / union
if tmpIoU > maxIoU:
maxIoU = tmpIoU
maxlabel = j + 1
# loop all gt masks
# check if location error
if maxIoU > IoUThreshold :
#print(new_labels[i] -1)
#print(maxlabel - 1)
if new_labels[i] == maxlabel:
correctList[maxlabel - 1] += 1
confusionMatrix[int(new_labels[i] -1) ][maxlabel - 1] += 1
else:
locationError += 1
for j in range(len(gt_mask_list)):
for gtMask in gt_mask_list[j]:
allgtBG = allgtBG + gtMask
addAllBG = allgtBG + allpredBG
BGIntersection = 1024*1024 - np.count_nonzero(addAllBG)
UnionHelperMat = np.zeros((1024,1024))
UnionHelperMat[np.where(allgtBG == 0)] = 1
UnionHelperMat[np.where(allpredBG == 0)] = 1
print("background intersection number:", BGIntersection)
print("background union number:", np.count_nonzero(UnionHelperMat))
print("gt non background:", | np.count_nonzero(allgtBG > 0) | numpy.count_nonzero |
# License: BSD 3 clause
import gc
import unittest
import weakref
import numpy as np
import scipy
from scipy.sparse import csr_matrix
from tick.array.build.array import tick_double_sparse2d_from_file
from tick.array.build.array import tick_double_sparse2d_to_file
from tick.array_test.build import array_test as test
class Test(unittest.TestCase):
def test_varray_smart_pointer_in_cpp(self):
"""...Test C++ reference counter
"""
vcc = test.VarrayContainer()
self.assertEqual(vcc.nRef(), 0)
vcc.initVarray()
self.assertEqual(vcc.nRef(), 1)
cu1 = test.VarrayUser()
cu1.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
cu1.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
cu2 = test.VarrayUser()
cu2.setArray(vcc)
self.assertEqual(vcc.nRef(), 3)
del cu1
self.assertEqual(vcc.nRef(), 2)
cu3 = test.VarrayUser()
cu3.setArray(vcc)
self.assertEqual(vcc.nRef(), 3)
del cu3, cu2
self.assertEqual(vcc.nRef(), 1)
# we cannot check it will go to 0 after vcc deletion in Python
cu4 = test.VarrayUser()
cu4.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
del vcc
self.assertEqual(cu4.nRef(), 1)
# we cannot check it will go to 0 after cu4 deletion in Python
del cu4
def test_varray_smart_pointer_deletion1(self):
"""...Test that varray is still alive after deletion in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
# Now mix with some Python
a = vcc.varrayPtr
# This does not increment C++ reference counter
self.assertEqual(vcc.nRef(), 1)
# Get a weak ref of the array
r = weakref.ref(a)
del a
np.testing.assert_array_almost_equal(r(), vcc.varrayPtr)
del vcc
self.assertIsNone(r())
def test_varray_smart_pointer_deletion2(self):
"""...Test that base is deleted after a double assignment in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
a = vcc.varrayPtr
b = vcc.varrayPtr
r = weakref.ref(b)
del a, vcc, b
self.assertIsNone(r())
def test_varray_smart_pointer_deletion3(self):
"""...Test that base is deleted after a double assignment in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
# Now mix with some Python
a = vcc.varrayPtr
a_sum = np.sum(a)
# This does not increment C++ reference counter
self.assertEqual(vcc.nRef(), 1)
# Get a weak ref of the array
r = weakref.ref(vcc.varrayPtr)
del vcc
np.testing.assert_array_almost_equal(a_sum, np.sum(a))
self.assertIsNone(r())
del a
def test_sarray_memory_leaks(self):
"""...Test brute force method in order to see if we have a memory leak
during typemap out
"""
import os
try:
import psutil
except ImportError:
print('Without psutils we cannot ensure we have no memory leaks')
return
def get_memory_used():
"""Returns memory used by current process
"""
process = psutil.Process(os.getpid())
return process.memory_info()[0]
initial_memory = get_memory_used()
size = int(1e6)
# The size in memory of an array of ``size`` doubles
bytes_size = size * 8
a = test.test_typemap_out_SArrayDoublePtr(size)
first_filled_memory = get_memory_used()
# Check that new memory is of the correct order (10%)
self.assertAlmostEqual(first_filled_memory - initial_memory,
bytes_size, delta=1.1 * bytes_size)
for _ in range(10):
del a
a = test.test_typemap_out_SArrayDoublePtr(size)
filled_memory = get_memory_used()
# Check memory is not increasing
self.assertAlmostEqual(first_filled_memory - initial_memory,
filled_memory - initial_memory,
delta=1.1 * bytes_size)
#print("\nfirst_filled_memory %.2g, filled_memory %.2g, initial_memory %.2g, array_bytes_size %.2g" % (first_filled_memory, filled_memory, initial_memory, bytes_size))
def test_sarray_memory_leaks2(self):
"""...Test brute force method in order to see if we have a memory leak
during typemap in or out
"""
import os
try:
import psutil
except ImportError:
print('Without psutils we cannot ensure we have no memory leaks')
return
def get_memory_used():
"""Returns memory used by current process
"""
process = psutil.Process(os.getpid())
return process.memory_info()[0]
size = int(1e6)
        a, b = np.ones(size), np.arange(size, dtype=float)
import unittest
import numpy as np
import transformations as trans
import open3d as o3
from probreg import filterreg
from probreg import transformation as tf
def estimate_normals(pcd, params):
pcd.estimate_normals(search_param=params)
pcd.orient_normals_to_align_with_direction()
class FilterRegTest(unittest.TestCase):
def setUp(self):
pcd = o3.io.read_point_cloud('data/horse.ply')
pcd = pcd.voxel_down_sample(voxel_size=0.01)
estimate_normals(pcd, o3.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=10))
        self._source = np.asarray(pcd.points)
import numpy as np
import pandas as pd
from specusticc.data_postprocessing.postprocessed_data import PostprocessedData
from specusticc.data_preprocessing.preprocessed_data import PreprocessedData
from specusticc.model_testing.prediction_results import PredictionResults
class DataPostprocessor:
def __init__(
self, preprocessed_data: PreprocessedData, test_results: PredictionResults
):
self.preprocessed_data = preprocessed_data
self.test_results: PredictionResults = test_results
self.postprocessed_data = PostprocessedData()
def get_data(self) -> PostprocessedData:
self._postprocess()
return self.postprocessed_data
def _postprocess(self):
self.reverse_train_detrend()
self.reverse_tests_detrend()
self._retrieve_train_dataframe()
self._retrieve_test_dataframes()
def reverse_train_detrend(self):
scaler = self.preprocessed_data.train_set.output_scaler
true_samples = self.preprocessed_data.train_set.output
predicted_samples = self.test_results.train_output
        reversed_true_samples = np.empty(true_samples.shape)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
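# Hypothetical request sketch (dates are example values): the dataset name uses
# the same '<site>/<node>/<sensor>/<method>/<stream>' form as in M2M_URLs below.
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')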
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
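# Example tag usage (the regex is illustrative, not from the original script):
# restrict the catalog listing to the instrument's own NetCDF files and skip any
# co-located data returned by the same request.
# files = list_files(url, tag=r'.*METBKA.*\.nc$')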
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
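# Conversion sketch for the timestamp handling above: a raw value of 86400
# seconds since 1900-01-01 becomes 86400/60/60/24 = 1.0 day, which
# pd.to_datetime(..., unit='D', origin=pd.Timestamp('1900-01-01')) maps to
# 1900-01-02 00:00:00.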
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
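# Quick illustration (not part of the original pipeline): indexing a structtype
# at its current length appends a fresh var(), mimicking MATLAB-style growth.
# s = structtype()
# s[0].name, s[0].units = 'time', 'seconds since 1900-01-01'
# s[1].name = 'sea_surface_temperature'
# len(s)  # -> 2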
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
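# Fluorometer (FLORT) blocks: each telemetered flort_sample stream carries the seawater
# scattering coefficient, fluorometric chlorophyll-a, fluorometric CDOM, total volume
# scattering coefficient and optical backscatter; the CE09OSPM profiler variant also
# returns the interpolated CTD pressure (dbar).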
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
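# Direct-covariance flux package (FDCHP): only the record time stamp is requested from
# this stream in the block below.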
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
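# Dissolved oxygen (DOSTA) blocks: fixed-depth moorings report dissolved_oxygen (umol/kg)
# plus the optode oxygen concentration (umol/L); the surface-mooring NSIF streams add the
# optode temperature and a temperature-corrected oxygen value, and the CE09OSPM profiler
# uses the DOFST-K stream with the raw sensor frequency (Hz) and CTD pressure (dbar).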
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
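# Acoustic Doppler current profiler (ADCP) blocks: earth-referenced velocity streams with
# bin depths (m), instrument attitude (heading/pitch/roll in deci-degrees) and eastward,
# northward and upward seawater velocity (m/s).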
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
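# Bio-acoustic sonar (ZPLSC) blocks: only the time stamp is requested; both telemetered
# and recovered_host versions of the zplsc_c_instrument stream are covered.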
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
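# Surface wave statistics (WAVSS) blocks: bulk wave parameters from the buoy-mounted wave
# sensor, including significant wave height and period, mean and peak periods, H1/10 and
# its period, Hmo, and mean wave direction and directional spread.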
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
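# Single-point velocity meter (VELPT) blocks: east/north/up velocity (m/s) with heading,
# pitch and roll (deci-degrees), temperature (0.01 degC) and pressure (0.001 dbar); the
# buoy (SBD) and NSIF (RID) nodes share the same velpt_ab_dcl_instrument stream.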
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
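# Seawater pCO2 (PCO2W) blocks: thermistor temperature (degC) and pCO2 of seawater (uatm).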
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
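# Seawater pH (PHSEN) blocks: thermistor temperature (degC) and pH (unitless) from the
# phsen_abcdef_dcl_instrument stream.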
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - Spectral Irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - Seafloor Pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D Single-Point Velocity Meter (seafloor MFN frames)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK - 3-D Velocity Meter on the coastal wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
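#CTDPFK - CTD on the coastal wire-following profiler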
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A - pCO2 Air-Sea
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD - Photosynthetically Available Radiation
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA - Absorption Spectrophotometer (time only)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - Nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered-host data streams
#MOPAK - 3-Axis Motion Package (time only)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK - Bulk Meteorological Instrument Package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT - 3-Wavelength Fluorometer
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP - Direct Covariance Flux (time only)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - Dissolved Oxygen (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP - Acoustic Doppler Current Profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
        var_list[2].data = np.array([])
import numpy as np
from scipy import sparse
"""
Dependency:
Scipy 0.10 or later for sparse matrix support
Original Author: <NAME>
Date: Feb-01-2019
"""
class TriaMesh:
"""A class representing a triangle mesh"""
def __init__(self, v, t, fsinfo=None):
"""
:param v - vertices List of lists of 3 float coordinates
t - triangles List of lists of 3 int of indices (>=0) into v array
Ordering is important: All triangles should be
oriented the same way (counter-clockwise, when
looking from above)
fsinfo optional, FreeSurfer Surface Header Info
"""
self.v = np.array(v)
self.t = np.array(t)
# transpose if necessary
if self.v.shape[0] < self.v.shape[1]:
self.v = self.v.T
if self.t.shape[0] < self.t.shape[1]:
self.t = self.t.T
# Check a few things
vnum = np.max(self.v.shape)
if np.max(self.t) >= vnum:
raise ValueError('Max index exceeds number of vertices')
if self.t.shape[1] != 3:
raise ValueError('Triangles should have 3 vertices')
if self.v.shape[1] != 3:
raise ValueError('Vertices should have 3 coordinates')
# Compute adjacency matrices
self.adj_sym = self._construct_adj_sym()
self.adj_dir = self._construct_adj_dir()
self.fsinfo = fsinfo # place for Freesurfer Header info
def _construct_adj_sym(self):
"""
Constructs symmetric adjacency matrix (edge graph) of triangle mesh t
Operates only on triangles.
:return: Sparse symmetric CSC matrix
The non-directed adjacency matrix
will be symmetric. Each inner edge (i,j) will have
the number of triangles that contain this edge.
Inner edges usually 2, boundary edges 1. Higher
numbers can occur when there are non-manifold triangles.
The sparse matrix can be binarized via:
adj.data = np.ones(adj.data.shape)
"""
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = np.column_stack((t0, t1, t1, t2, t2, t0)).reshape(-1)
j = np.column_stack((t1, t0, t2, t1, t0, t2)).reshape(-1)
dat = np.ones(i.shape)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def _construct_adj_dir(self):
"""
Constructs directed adjacency matrix (edge graph) of triangle mesh t
Operates only on triangles.
:return: Sparse CSC matrix
The directed adjacency matrix is not symmetric if
boundaries exist or if mesh is non-manifold.
For manifold meshes, there are only entries with
value 1. Symmetric entries are inner edges. Non-symmetric
are boundary edges. The direction prescribes a direction
on the boundary loops. Adding the matrix to its transpose
creates the non-directed version.
"""
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = np.column_stack((t0, t1, t2)).reshape(-1)
j = np.column_stack((t1, t2, t0)).reshape(-1)
dat = np.ones(i.shape)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def construct_adj_dir_tidx(self):
"""
Constructs directed adjacency matrix (edge graph) of triangle mesh t
containing the triangle indices (only for non-manifold meshes)
Operates only on triangles.
:return: Sparse CSC matrix
        Similar to adj_dir, but stores the tria idx+1 instead
of one in the matrix (allows lookup of vertex to tria).
"""
if not self.is_oriented():
            raise ValueError('Error: Can only compute tidx matrix for oriented triangle meshes!')
t0 = self.t[:, 0]
t1 = self.t[:, 1]
t2 = self.t[:, 2]
i = np.column_stack((t0, t1, t2)).reshape(-1)
j = np.column_stack((t1, t2, t0)).reshape(-1)
# store tria idx +1 (zero means no edge here)
dat = np.repeat(np.arange(1, self.t.shape[0] + 1), 3)
n = self.v.shape[0]
return sparse.csc_matrix((dat, (i, j)), shape=(n, n))
def is_closed(self):
"""
Check if triangle mesh is closed (no boundary edges)
Operates only on triangles
:return: closed bool True if no boundary edges in adj matrix
"""
return 1 not in self.adj_sym.data
def is_manifold(self):
"""
Check if triangle mesh is manifold (no edges with >2 triangles)
Operates only on triangles
        :return:   manifold       bool True if no edges with > 2 triangles
"""
return np.max(self.adj_sym.data) <= 2
def is_oriented(self):
"""
Check if triangle mesh is oriented. True if all triangles are oriented
counter-clockwise, when looking from above.
Operates only on triangles
:return: oriented bool True if max(adj_directed)=1
"""
return np.max(self.adj_dir.data) == 1
def euler(self):
"""
Computes the Euler Characteristic (=#V-#E+#T)
Operates only on triangles
:return: euler Euler Characteristic (2=sphere,0=torus)
"""
# v can contain unused vertices so we get vnum from trias
vnum = len(np.unique(self.t.reshape(-1)))
tnum = np.max(self.t.shape)
enum = int(self.adj_sym.nnz / 2)
return vnum - enum + tnum
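    # Example (illustrative, not from the original module): a closed tetrahedron has
    # 4 vertices, 6 edges and 4 triangles, so euler() returns 4 - 6 + 4 = 2.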
def tria_areas(self):
"""
Computes the area of triangles using Heron's formula
:return: areas ndarray with areas of each triangle
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
a = np.sqrt(np.sum(v1mv0 * v1mv0, axis=1))
b = np.sqrt(np.sum(v2mv1 * v2mv1, axis=1))
c = np.sqrt(np.sum(v0mv2 * v0mv2, axis=1))
ph = 0.5 * (a+b+c)
areas = np.sqrt(ph * (ph-a) * (ph-b) * (ph-c))
return areas
def area(self):
"""
Computes the total surface area of triangle mesh
:return: area Total surface area
"""
areas = self.tria_areas()
return np.sum(areas)
def volume(self):
"""
Computes the volume of closed triangle mesh, summing tetrahedra at origin
:return: volume Total enclosed volume
"""
if not self.is_closed():
return 0.0
if not self.is_oriented():
raise ValueError('Error: Can only compute volume for oriented triangle meshes!')
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
cr = np.cross(v1mv0, v2mv0)
spatvol = np.sum(v0 * cr, axis=1)
vol = np.sum(spatvol) / 6.0
return vol
def vertex_degrees(self):
"""
Computes the vertex degrees (number of edges at each vertex)
:return: vdeg Array of vertex degrees
"""
vdeg = np.bincount(self.t.reshape(-1))
return vdeg
def vertex_areas(self):
"""
Computes the area associated to each vertex (1/3 of one-ring trias)
:return: vareas Array of vertex areas
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
cr = np.cross(v1mv0, v2mv0)
area = 0.5 * np.sqrt(np.sum(cr * cr, axis=1))
area3 = np.repeat(area[:, np.newaxis], 3, 1)
# varea = accumarray(t(:),area3(:))./3;
vareas = np.bincount(self.t.reshape(-1), area3.reshape(-1)) / 3.0
return vareas
def avg_edge_length(self):
"""
Computes the average edge length of the mesh
:return: edgelength Avg. edge length
"""
# get only upper off-diag elements from symmetric adj matrix
triadj = sparse.triu(self.adj_sym, 1, format='coo')
edgelens = np.sqrt(((self.v[triadj.row, :] - self.v[triadj.col, :]) ** 2).sum(1))
return edgelens.mean()
def tria_normals(self):
"""
Computes triangle normals
        Ordering of trias is important: counterclockwise when looking from above
:return: n - normals (num triangles X 3 )
"""
import sys
        # Compute vertex coordinates and difference vectors for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv0 = v2 - v0
# Compute cross product
n = np.cross(v1mv0, v2mv0)
ln = np.sqrt(np.sum(n * n, axis=1))
ln[ln < sys.float_info.epsilon] = 1 # avoid division by zero
n = n / ln.reshape(-1, 1)
# lni = np.divide(1.0, ln)
# n[:, 0] *= lni
# n[:, 1] *= lni
# n[:, 2] *= lni
return n
def vertex_normals(self):
"""
get_vertex_normals(v,t) computes vertex normals
Triangle normals around each vertex are averaged, weighted
by the angle that they contribute.
Ordering is important: counterclockwise when looking
at the triangle from above.
:return: n - normals (num vertices X 3 )
"""
if not self.is_oriented():
raise ValueError('Error: Vertex normals are meaningless for un-oriented triangle meshes!')
import sys
# Compute vertex coordinates and a difference vector for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product at every vertex
# will all point in the same direction but have different lengths depending on spanned area
cr0 = np.cross(v1mv0, -v0mv2)
cr1 = np.cross(v2mv1, -v1mv0)
cr2 = np.cross(v0mv2, -v2mv1)
# Add normals at each vertex (there can be duplicate indices in t at vertex i)
n = np.zeros(self.v.shape)
np.add.at(n, self.t[:, 0], cr0)
np.add.at(n, self.t[:, 1], cr1)
np.add.at(n, self.t[:, 2], cr2)
# Normalize normals
ln = np.sqrt(np.sum(n * n, axis=1))
ln[ln < sys.float_info.epsilon] = 1 # avoid division by zero
n = n / ln.reshape(-1, 1)
# lni = np.divide(1.0, ln)
# n[:, 0] *= lni
# n[:, 1] *= lni
# n[:, 2] *= lni
return n
def has_free_vertices(self):
"""
Checks if the vertex list has more vertices than what is used in tria
:return: bool
"""
vnum = np.max(self.v.shape)
vnumt = len(np.unique(self.t.reshape(-1)))
return vnum != vnumt
def tria_qualities(self):
"""
Computes triangle quality for each triangle in mesh where
q = 4 sqrt(3) A / (e1^2 + e2^2 + e3^2 )
where A is the triangle area and ei the edge length of the three edges.
This measure is used by FEMLAB and can also be found in:
R.E. Bank, PLTMG ..., Frontiers in Appl. Math. (7), 1990.
Constants are chosen so that q=1 for the equilateral triangle.
:return: ndarray with triangle qualities
"""
        # Compute vertex coordinates and difference vectors for each triangle:
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v1mv0 = v1 - v0
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product
n = np.cross(v1mv0, -v0mv2)
# compute length (2*area)
ln = np.sqrt(np.sum(n * n, axis=1))
q = 2.0 * np.sqrt(3) * ln
es = (v1mv0 * v1mv0).sum(1) + (v2mv1 * v2mv1).sum(1) + (v0mv2 * v0mv2).sum(1)
return q / es
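    # Sanity check of the constant (illustrative, not from the original module): for an
    # equilateral triangle with side a, the area is A = sqrt(3)/4 * a**2 and
    # e1^2 + e2^2 + e3^2 = 3 * a**2, so q = 4 * sqrt(3) * A / (3 * a**2) = 1, as stated above.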
def boundary_loops(self):
"""
Computes a tuple of boundary loops. Meshes can have 0 or more boundary
loops, which are cycles in the directed adjacency graph of the boundary
edges.
Works on trias only. Could fail if loops are connected via a single
vertex (like a figure 8). That case needs debugging.
:return: loops List of lists with boundary loops
"""
if not self.is_manifold():
raise ValueError('Error: tria not manifold (edges with more than 2 triangles)!')
if self.is_closed():
return []
# get directed matrix of only boundary edges
inneredges = (self.adj_sym == 2)
if not self.is_oriented():
raise ValueError('Error: tria not oriented !')
adj = self.adj_dir.copy()
adj[inneredges] = 0
adj.eliminate_zeros()
# find loops
# get first column index with an entry:
firstcol = np.nonzero(adj.indptr)[0][0] - 1
loops = []
# loop while we have more first columns:
while not firstcol == []:
# start the new loop with this index
loop = [firstcol]
# delete this entry from matrix (visited)
adj.data[adj.indptr[firstcol]] = 0
# get the next column (=row index of the first entry (and only, hopefully)
ncol = adj.indices[adj.indptr[firstcol]]
# as long as loop is not closed walk through it
while not ncol == firstcol:
loop.append(ncol)
adj.data[adj.indptr[ncol]] = 0 # visited
ncol = adj.indices[adj.indptr[ncol]]
# get rid of the visited nodes, store loop and check for another one
adj.eliminate_zeros()
loops.append(loop)
nz = np.nonzero(adj.indptr)[0]
if len(nz) > 0:
firstcol = nz[0] - 1
else:
firstcol = []
return loops
def centroid(self):
"""
Computes centroid of triangle mesh as a weighted average of triangle
centers. The weight is determined by the triangle area.
(This could be done much faster if a FEM lumped mass matrix M is
already available where this would be M*v, because it is equivalent
        to averaging vertices weighted by vertex area)
:return: centroid The centroid of the mesh
totalarea The total area of the mesh
"""
v0 = self.v[self.t[:, 0], :]
v1 = self.v[self.t[:, 1], :]
v2 = self.v[self.t[:, 2], :]
v2mv1 = v2 - v1
v0mv2 = v0 - v2
# Compute cross product and area for each triangle:
cr = np.cross(v2mv1, v0mv2)
areas = 0.5 * np.sqrt(np.sum(cr * cr, axis=1))
totalarea = areas.sum()
areas = areas / totalarea
centers = (1.0 / 3.0) * (v0 + v1 + v2)
c = (centers * areas[:, np.newaxis])
return np.sum(c, axis=0), totalarea
def edges(self, with_boundary=False):
"""
Compute vertices and adjacent triangle ids for each edge
        :param   with_boundary  also return boundary half edges, default ignore
:return: vids 2 column array with starting and end vertex for each
unique inner edge
tids 2 column array with triangle containing the half edge
from vids[0,:] to vids [1,:] in first column and the
neighboring triangle in the second column
bdrvids if with_boundary is true: 2 column array with each
boundary half-edge
bdrtids if with_boundary is true: 1 column array with the
associated triangle to each boundary edge
"""
if not self.is_oriented():
raise ValueError('Error: Can only compute edge information for oriented meshes!')
adjtria = self.construct_adj_dir_tidx().tolil()
# for boundary edges, we can just remove those edges (implicitly a zero angle)
bdredges = []
bdrtrias = []
if 1 in self.adj_sym.data:
bdredges = (self.adj_sym == 1)
bdrtrias = adjtria[bdredges].toarray().ravel() - 1
adjtria[bdredges] = 0
# get transpose adjTria matrix and keep only upper triangular matrices
adjtria2 = adjtria.transpose()
adjtriu1 = sparse.triu(adjtria, 0, format='csr')
adjtriu2 = sparse.triu(adjtria2, 0, format='csr')
vids = np.array(np.nonzero(adjtriu1)).T
tids = np.empty(vids.shape, dtype=np.int32)
tids[:, 0] = adjtriu1.data - 1
tids[:, 1] = adjtriu2.data - 1
if not with_boundary or bdredges.size == 0:
return vids, tids
bdrv = np.array(np.nonzero(bdredges)).T
nzids = bdrtrias > -1
bdrv = bdrv[nzids, :]
bdrtrias = bdrtrias[nzids].reshape(-1, 1)
return vids, tids, bdrv, bdrtrias
def curvature(self, smoothit=3):
"""
Compute various curvature values at vertices.
For the algorithm see e.g.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Anisotropic Polygonal Remeshing.
ACM Transactions on Graphics, 2003.
:param smoothit smoothing iterations on vertex functions
:return: u_min minimal curvature directions (vnum x 3)
u_max maximal curvature directions (vnum x 3)
c_min minimal curvature
c_max maximal curvature
c_mean mean curvature: (c_min + c_max) / 2.0
c_gauss Gauss curvature: c_min * c_max
normals normals (vnum x 3)
"""
# import warnings
# warnings.filterwarnings('error')
import sys
# get edge information for inner edges (vertex ids and tria ids):
vids, tids = self.edges()
# compute normals for each tria
tnormals = self.tria_normals()
# compute dot product of normals at each edge
sprod = np.sum(tnormals[tids[:, 0], :] * tnormals[tids[:, 1], :], axis=1)
# compute unsigned angles (clamp to ensure range)
angle = np.maximum(sprod, -1)
angle = np.minimum(angle, 1)
angle = np.arccos(angle)
# compute edge vectors and lengths
edgevecs = self.v[vids[:, 1], :] - self.v[vids[:, 0], :]
edgelen = np.sqrt(np.sum(edgevecs**2, axis=1))
# get sign (if normals face towards each other or away, across each edge)
cp = np.cross(tnormals[tids[:, 0], :], tnormals[tids[:, 1], :])
si = -np.sign(np.sum(cp*edgevecs, axis=1))
angle = angle * si
# normalized edges
edgelen[edgelen < sys.float_info.epsilon] = 1 # avoid division by zero
edgevecs = edgevecs / edgelen.reshape(-1, 1)
# adjust edgelengths so that mean is 1 for numerics
edgelen = edgelen / np.mean(edgelen)
# symmetric edge matrix (3x3, upper triangular matrix entries):
ee = np.empty([edgelen.shape[0], 6])
ee[:, 0] = edgevecs[:, 0] * edgevecs[:, 0]
ee[:, 1] = edgevecs[:, 0] * edgevecs[:, 1]
ee[:, 2] = edgevecs[:, 0] * edgevecs[:, 2]
ee[:, 3] = edgevecs[:, 1] * edgevecs[:, 1]
ee[:, 4] = edgevecs[:, 1] * edgevecs[:, 2]
ee[:, 5] = edgevecs[:, 2] * edgevecs[:, 2]
# scale angle by edge lengths
angle = angle * edgelen
# multiply scaled angle with matrix entries
ee = ee * angle.reshape(-1, 1)
# map to vertices
vnum = self.v.shape[0]
vv = np.zeros([vnum, 6])
np.add.at(vv, vids[:, 0], ee)
np.add.at(vv, vids[:, 1], ee)
vdeg = np.zeros([vnum])
np.add.at(vdeg, vids[:, 0], 1)
np.add.at(vdeg, vids[:, 1], 1)
# divide by vertex degree (maybe better by edge length sum??)
vdeg[vdeg == 0] = 1
vv = vv / vdeg.reshape(-1, 1)
# smooth vertex functions
vv = self.smooth_vfunc(vv, smoothit)
# create vnum 3x3 symmetric matrices at each vertex
mats = np.empty([vnum, 3, 3])
mats[:, 0, :] = vv[:, [0, 1, 2]]
mats[:, [1, 2], 0] = vv[:, [1, 2]]
mats[:, 1, [1, 2]] = vv[:, [3, 4]]
mats[:, 2, 1] = vv[:, 4]
mats[:, 2, 2] = vv[:, 5]
# compute eigendecomposition (real for symmetric matrices)
evals, evecs = np.linalg.eig(mats)
evals = np.real(evals)
evecs = np.real(evecs)
# sort evals ascending
        # this is unstable in perfectly planar regions
# (normal can lie in tangential plane)
# i = np.argsort(np.abs(evals), axis=1)
# instead we find direction that aligns with vertex normals as first
# the other two will be sorted later anyway
vnormals = self.vertex_normals()
dprod = - np.abs(np.squeeze(np.sum(evecs * vnormals[:, :, np.newaxis], axis=1)))
i = np.argsort(dprod, axis=1)
evals = np.take_along_axis(evals, i, axis=1)
it = np.tile(i.reshape((vnum, 1, 3)), (1, 3, 1))
evecs = np.take_along_axis(evecs, it, axis=2)
# pull min and max curv. dirs
u_min = np.squeeze(evecs[:, :, 2])
u_max = np.squeeze(evecs[:, :, 1])
c_min = evals[:, 1]
c_max = evals[:, 2]
normals = np.squeeze(evecs[:, :, 0])
c_mean = (c_min + c_max) / 2.0
c_gauss = c_min * c_max
# enforce that min<max
i = np.squeeze(np.where(c_min > c_max))
c_min[i], c_max[i] = c_max[i], c_min[i]
u_min[i, :], u_max[i, :] = u_max[i, :], u_min[i, :]
# flip normals to point towards vertex normals
s = np.sign(np.sum(normals * vnormals, axis=1)).reshape(-1, 1)
normals = normals * s
# (here we could also project to tangent plane at vertex (using v_normals)
# as the normals above are not really good v_normals)
# flip u_max so that cross(u_min , u_max) aligns with normals
u_cross = np.cross(u_min, u_max)
d = np.sum(np.multiply(u_cross, normals), axis=1)
i = np.squeeze(np.where(d < 0))
u_max[i, :] = -u_max[i, :]
return u_min, u_max, c_min, c_max, c_mean, c_gauss, normals
def curvature_tria(self, smoothit=3):
"""
        Compute min and max curvature and directions (orthogonal and in tria plane)
        for each triangle. First we compute these values on vertices and then smooth
        them there. Finally they get mapped to the trias (averaging) and projected onto
        the triangle plane, and orthogonalized.
:param smoothit: number of smoothing iterations for curvature computation on vertices
:return: u_min : min curvature direction on triangles
u_max : max curvature direction on triangles
c_min : min curvature on triangles
c_max : max curvature on triangles
"""
u_min, u_max, c_min, c_max, c_mean, c_gauss, normals = self.curvature(smoothit)
# pool vertex functions (u_min and u_max) to triangles:
tumin = self.map_vfunc_to_tfunc(u_min)
# tumax = self.map_vfunc_to_tfunc(u_max)
tcmin = self.map_vfunc_to_tfunc(c_min)
tcmax = self.map_vfunc_to_tfunc(c_max)
# some Us are almost collinear, strange
# print(np.max(np.abs(np.sum(tumin * tumax, axis=1))))
# print(np.sum(tumin * tumax, axis=1))
# project onto triangle plane:
e0 = self.v[self.t[:, 1], :] - self.v[self.t[:, 0], :]
e1 = self.v[self.t[:, 2], :] - self.v[self.t[:, 0], :]
tn = np.cross(e0, e1)
tnl = np.sqrt(np.sum(tn * tn, axis=1)).reshape(-1, 1)
tn = tn / np.maximum(tnl, 1e-8)
# project tumin back to tria plane and normalize
tumin2 = tumin - tn * (np.sum(tn * tumin, axis=1)).reshape(-1, 1)
tuminl = np.sqrt(np.sum(tumin2 * tumin2, axis=1)).reshape(-1, 1)
tumin2 = tumin2 / np.maximum(tuminl, 1e-8)
# project tumax back to tria plane and normalize (will not be orthogonal to tumin)
# tumax1 = tumax - tn * (np.sum(tn * tumax, axis=1)).reshape(-1, 1)
# in a second step orthorgonalize to tumin
# tumax1 = tumax1 - tumin * (np.sum(tumin * tumax1, axis=1)).reshape(-1, 1)
# normalize
# tumax1l = np.sqrt(np.sum(tumax1 * tumax1, axis=1)).reshape(-1, 1)
# tumax1 = tumax1 / np.maximum(tumax1l, 1e-8)
# or simply create vector that is orthogonal to both normal and tumin
tumax2 = np.cross(tn, tumin2)
# if really necessary flip direction if that is true for inputs
# tumax3 = np.sign(np.sum(np.cross(tumin, tumax) * tn, axis=1)).reshape(-1, 1) * tumax2
        # I wonder how much changes, if we first map umax to tria and then find orthogonal umin next?
return tumin2, tumax2, tcmin, tcmax
def normalize_(self):
"""
Normalizes TriaMesh to unit surface area with a centroid at the origin.
Modifies the vertices.
"""
centroid, area = self.centroid()
self.v = (1.0 / np.sqrt(area)) * (self.v - centroid)
def rm_free_vertices_(self):
"""
Remove unused (free) vertices from v and t. These are vertices that are not
used in any triangle. They can produce problems when constructing, e.g.,
Laplace matrices.
Will update v and t in mesh.
:return: vkeep Indices (from original list) of kept vertices
vdel Indices of deleted (unused) vertices
"""
tflat = self.t.reshape(-1)
vnum = np.max(self.v.shape)
if np.max(tflat) >= vnum:
raise ValueError('Max index exceeds number of vertices')
# determine which vertices to keep
vkeep = np.full(vnum, False, dtype=bool)
vkeep[tflat] = True
# list of deleted vertices (old indices)
vdel = np.nonzero(~vkeep)[0]
# if nothing to delete return
if len(vdel) == 0:
return np.arange(vnum), []
# delete unused vertices
vnew = self.v[vkeep, :]
# create lookup table
tlookup = np.cumsum(vkeep) - 1
# reindex tria
tnew = tlookup[self.t]
# convert vkeep to index list
vkeep = np.nonzero(vkeep)[0]
# set new vertices and tria and re-init adj matrices
self.__init__(vnew, tnew)
return vkeep, vdel
def refine_(self, it=1):
"""
Refines the triangle mesh by placing new vertex on each edge midpoint
and thus creating 4 similar triangles from one parent triangle.
:param it : iterations (default 1)
:return: none, modifies mesh in place
"""
for x in range(it):
# make symmetric adj matrix to upper triangle
adjtriu = sparse.triu(self.adj_sym, 0, format='csr')
# create new vertex index for each edge
edgeno = adjtriu.data.shape[0]
vno = self.v.shape[0]
adjtriu.data = np.arange(vno, vno + edgeno)
# get vertices at edge midpoints:
rows, cols = adjtriu.nonzero()
vnew = 0.5 * (self.v[rows, :] + self.v[cols, :])
vnew = np.append(self.v, vnew, axis=0)
# make adj symmetric again
adjtriu = adjtriu + adjtriu.T
# create 4 new triangles for each old one
e1 = np.asarray(adjtriu[self.t[:, 0], self.t[:, 1]].flat)
e2 = np.asarray(adjtriu[self.t[:, 1], self.t[:, 2]].flat)
e3 = np.asarray(adjtriu[self.t[:, 2], self.t[:, 0]].flat)
t1 = np.column_stack((self.t[:, 0], e1, e3))
t2 = np.column_stack((self.t[:, 1], e2, e1))
            t3 = np.column_stack((self.t[:, 2], e3, e2))
            # the center triangle is formed by the three edge midpoints
            t4 = np.column_stack((e1, e2, e3))
            tnew = np.vstack((t1, t2, t3, t4))
            # set new vertices and trias and re-init adjacency matrices
            self.__init__(vnew, tnew)
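
# Minimal usage sketch (not part of the original module; the tetrahedron below is an
# assumed example). It builds a closed, oriented TriaMesh and queries a few of the
# quantities defined above.
if __name__ == "__main__":
    tet_v = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    # counter-clockwise faces (seen from outside), so the mesh is oriented
    tet_t = [[0, 2, 1], [0, 1, 3], [1, 2, 3], [0, 3, 2]]
    tet = TriaMesh(tet_v, tet_t)
    print("closed:", tet.is_closed())    # True, no boundary edges
    print("euler:", tet.euler())         # 4 - 6 + 4 = 2 (sphere topology)
    print("area:", tet.area())           # ~2.366
    print("volume:", tet.volume())       # 1/6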
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 11:52:51 2019
@author: sdenaro
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
#import scipy.stats as st
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
df_temp.columns=['Time','SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
his_temp_matrix = df_temp.values
###############################
# Synthetic HDD CDD calculation
# Simulation data
#sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
sim_temperature=df_temp
sim_temperature=sim_temperature.drop(['Time'], axis=1)
sim_temperature=sim_temperature.values
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
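# Note (added sketch, not in the original script): the nested loops above can be
# reproduced with vectorized numpy, e.g.
#   HDD_vec = np.maximum(0.0, 65.0 - his_temp_matrix[:, 1:1 + num_cities].astype(float))
#   n_years = len(HDD_vec) // 365
#   annual_HDD_vec = HDD_vec[:n_years * 365].reshape(n_years, 365, num_cities).sum(axis=1)
# which gives the same annual totals as annual_HDD.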
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheetname='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_sums = np.reshape(np.sum(BPA,axis= 0).values,(1,num_BPA))
CA_sums = np.reshape(np.sum(CA,axis=0).values,(1,num_CA))
WB_sums = np.reshape(np.sum(WB,axis=0).values,(1,num_Will))
HO_sums = np.reshape(np.sum(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = np.column_stack((BPA_sums,CA_sums,WB_sums,HO_sums))
if y_index < 1:
hist_totals = joined
else:
hist_totals = np.vstack((hist_totals,joined))
BPA_headers = np.reshape(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = np.reshape(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = np.reshape(list(Willamette_streamflow.loc[:,'Albany':]),(1,num_Will))
HO_headers = np.reshape(['Hoover'],(1,1))
headers = np.column_stack((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
added_value=abs(np.min((df_hist_totals)))+5
log_hist_total=np.log(df_hist_totals+abs(added_value))
#########################################
# annual flow regression - predicts annual flows at each site as a function
# of total annual HDD and CDD across every weather station
#train on historical data
M = np.column_stack((annual_CDD,annual_HDD))
#streamflow gages
H = list(headers[0])
# number of weather stations
z = np.shape(M)
num_w_fields = z[1]
# iterate through sites
count = 0
rsquared = []
DE=[]
for h in H:
N=added_value[h]
# form linear regression model
S = log_hist_total.loc[:,h]
name='reg' + h
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name].fit(M,S)
score=locals()[name].score(M,S)
print(name,score)
predicted = []
# predicted values
for i in range(0,len(M)):
m = M[i,:]
x = np.reshape(m,(1,num_w_fields))
p = locals()[name].predict(x)
predicted = np.append(predicted,p)
DE.append(predicted)
residuals = predicted -S
if count < 1:
E = residuals
else:
E = np.column_stack((E,residuals))
count = count + 1
# Now iterate through sites and use sythetic HDD, CDD data to simulated new
# annual streamflow values
count = 0
X_CDD = annual_CDD_sim
X_HDD = annual_HDD_sim
M = np.column_stack((X_CDD,X_HDD))
# for each site
for h in H:
    N = added_value[h]
    # simulate using the synthetic CDD, HDD data loaded above
    predicted = []
    # predicted values
    for i in range(0, len(M)):
        m = M[i, :]
        x = np.reshape(m, (1, num_w_fields))
        x = np.nan_to_num(x)
        p = regressions[h].predict(x)
        predicted = np.append(predicted, p)
    # back-transform from log space and remove the positivity offset
    predicted = np.exp(predicted) - N
    if count < 1:
        P = predicted
    else:
        P = np.column_stack((P, predicted))
    count = count + 1
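# Editor's illustrative sketch of an assumed follow-on step (not from the
# original file): label the simulated annual totals with the gage names and
# write them out. The output path is a placeholder.
df_sim_totals = pd.DataFrame(P, columns=headers[0, :])
df_sim_totals.to_csv('Synthetic_streamflows/synthetic_annual_totals.csv', index=False)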
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import copy
import numpy as np
import theano.tensor as tt
from scipy.linalg import cholesky
from scipy.special import logsumexp
from scipy.stats import multivariate_normal, median_abs_deviation
from scipy.optimize import minimize, approx_fprime
from theano import function as theano_function
import arviz as az
import jax
import jax.numpy as jnp
from jax.experimental import optimizers as jax_optimizers
import time
import pymc3 as pm
import pymc3.nfmc.posdef as posdef
from pymc3.tuning.scaling import find_hessian
from pymc3.tuning.starting import find_MAP
from pymc3.backends.ndarray import NDArray, point_list_to_multitrace
from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point, modelcontext, set_data
from pymc3.distributions.distribution import draw_values, to_tuple
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
hessian,
)
from pymc3.util import (
check_start_vals,
get_default_varnames,
get_var_name,
update_start_vals,
)
from pymc3.vartypes import discrete_types, typefilter
# SINF code for fitting the normalizing flow.
from pymc3.sinf.GIS import GIS
import torch
# This is a global variable used to store the optimization steps.
# Presumably there's a nicer way to do this.
param_store = []
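# Editor's illustrative sketch of the "nicer way" alluded to above (not used by
# the code below): a small recorder object avoids module-level shared state and
# can be passed to scipy.optimize.minimize through its `callback` argument.
class _OptimStepRecorder:
    """Collects the parameter vectors visited during a single optimization run."""

    def __init__(self):
        self.steps = []

    def __call__(self, x):
        self.steps.append(np.copy(x))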
class NFMC:
"""Sequential type normalizing flow based sampling/global approx."""
def __init__(
self,
draws=500,
init_draws=500,
resampling_draws=500,
init_ess=100,
sample_mode='reinit',
cull_lowp_tol=0.05,
model=None,
init_method='prior',
init_samples=None,
start=None,
init_EL2O='adam',
use_hess_EL2O=False,
mean_field_EL2O=False,
absEL2O=1e-10,
fracEL2O=1e-2,
EL2O_draws=100,
maxiter_EL2O=500,
EL2O_optim_method='L-BFGS-B',
scipy_map_method='L-BFGS-B',
adam_lr=1e-3,
adam_b1=0.9,
adam_b2=0.999,
adam_eps=1.0e-8,
adam_steps=1000,
simulator=None,
model_data=None,
sim_data_cov=None,
sim_size=None,
sim_params=None,
sim_start=None,
sim_optim_method='lbfgs',
sim_tol=0.01,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
init_local=True,
nf_local_iter=0,
max_line_search=100,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
final_iteration=None,
alpha=(0,0),
final_alpha=(0.75,0.75),
optim_iter=1000,
ftol=2.220446049250313e-9,
gtol=1.0e-5,
k_trunc=0.25,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor_min=0.5,
bw_factor_max=2.5,
bw_factor_num=11,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
redraw=True,
):
self.draws = draws
self.init_draws = init_draws
self.resampling_draws = resampling_draws
self.init_ess = init_ess
self.sample_mode = sample_mode
self.cull_lowp_tol = cull_lowp_tol
self.model = model
# Init method params.
self.init_method = init_method
self.init_samples = init_samples
self.start = start
self.init_EL2O = init_EL2O
self.mean_field_EL2O = mean_field_EL2O
self.use_hess_EL2O = use_hess_EL2O
self.absEL2O = absEL2O
self.fracEL2O = fracEL2O
self.EL2O_draws = EL2O_draws
self.maxiter_EL2O = maxiter_EL2O
self.EL2O_optim_method = EL2O_optim_method
self.scipy_map_method = scipy_map_method
self.adam_lr = adam_lr
self.adam_b1 = adam_b1
self.adam_b2 = adam_b2
self.adam_eps = adam_eps
self.adam_steps = adam_steps
self.simulator = simulator
self.model_data = model_data
self.sim_data_cov = sim_data_cov
self.sim_size = sim_size
self.sim_params = sim_params
self.sim_start = sim_start
self.sim_optim_method = sim_optim_method
self.sim_tol = sim_tol
# Local exploration params.
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.init_local = init_local
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.random_seed = random_seed
self.chain = chain
        # Seed numpy and torch unless the default of -1 (no fixed seed) is used.
        if self.random_seed != -1:
            np.random.seed(self.random_seed)
            torch.manual_seed(self.random_seed)
# Separating out so I can keep track. These are SINF params.
assert 0.0 <= frac_validate <= 1.0
self.frac_validate = frac_validate
self.iteration = iteration
self.final_iteration = final_iteration
self.alpha = alpha
self.final_alpha = final_alpha
self.optim_iter = optim_iter
self.ftol = ftol
self.gtol = gtol
self.k_trunc = k_trunc
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
        self.bw_factors = np.logspace(bw_factor_min, bw_factor_max, bw_factor_num)
# -*- coding: utf-8 -*-
"""
Script to test the repair strategy.
"""
__version__ = '1.0'
__author__ = '<NAME>'
import numpy as np
import pandas as pd
import time
import sys
sys.path.append(r'C:\RELAY')
from src.constraints import Constraints
from src.materials import Material
from src.parameters import Parameters
from src.ABD import A_from_lampam, B_from_lampam, D_from_lampam, filter_ABD
from src.excel import autofit_column_widths
from src.excel import delete_file
from src.save_set_up import save_constraints_LAYLA
from src.save_set_up import save_materials
from src.repair_diso_contig import repair_diso_contig
from src.repair_flexural import repair_flexural
from src.lampam_functions import calc_lampam
from src.repair_10_bal import repair_10_bal
from src.repair_10_bal import calc_mini_10
from src.repair_10_bal import is_equal
from src.repair_membrane import repair_membrane
from src.repair_membrane_1_no_ipo import calc_lampamA_ply_queue
from src.pretty_print import print_lampam, print_ss
#==============================================================================
# Input file
#==============================================================================
guidelines = 'none'
n_plies = 150
# fibre-angle set used to build the input/output file names; the last
# assignment below is the one that takes effect
fibre_angles = 'trad'
fibre_angles = '3060'
fibre_angles = '15'
file_to_open = '/RELAY/pop/'\
+ fibre_angles + '-' + guidelines + '-' + str(n_plies) + 'plies.xlsx'
result_filename = 'repair-' + fibre_angles + '-' + guidelines \
+ '-' + str(n_plies) + 'plies.xlsx'
delete_file(result_filename)
#==============================================================================
# Material properties
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Materials', index_col=0, header=None)
data = data.transpose()
E11 = data.loc[1, 'E11']
E22 = data.loc[1, 'E22']
nu12 = data.loc[1, 'nu12']
G12 = data.loc[1, 'G12']
ply_t = data.loc[1, 'ply thickness']
mat = Material(E11=E11, E22=E22, G12=G12, nu12=nu12, ply_t=ply_t)
#print(data)
#==============================================================================
# Design & manufacturing constraints
#==============================================================================
data = pd.read_excel(
file_to_open, sheet_name='Constraints', index_col=0, header=None)
data = data.transpose()
#print(data)
sym = data.loc[1, 'symmetry']
bal = True
ipo = True
oopo = data.loc[1, 'out-of-plane orthotropy']
dam_tol = data.loc[1, 'damage tolerance']
rule_10_percent = True
percent_0 = 10
percent_90 = 10
percent_45 = 0
percent_135 = 0
percent_45_135 = 10
diso = True
delta_angle = 45
contig = True
n_contig = 4
set_of_angles = np.array(data.loc[1, 'fibre orientations'].split()).astype(int)
constraints = Constraints(
sym=sym,
bal=bal,
ipo=ipo,
oopo=oopo,
dam_tol=dam_tol,
rule_10_percent=rule_10_percent,
percent_0=percent_0,
percent_45=percent_45,
percent_90=percent_90,
percent_135=percent_135,
percent_45_135=percent_45_135,
diso=diso,
contig=contig,
n_contig=n_contig,
delta_angle=delta_angle,
set_of_angles=set_of_angles)
#==============================================================================
# Parameters
#==============================================================================
# lamination parameter weightings during membrane property refinement
in_plane_coeffs = np.array([1, 1, 0, 0])
# percentage of laminate thickness for plies that can be modified during
# the refinement of membrane properties
p_A = 80
# number of plies in the last permutation during repair for disorientation
# and/or contiguity
n_D1 = 6
# number of ply shifts tested at each step of the re-designing process during
# refinement of flexural properties
n_D2 = 10
# number of times the algorithms 1 and 2 are repeated during the flexural
# property refinement
n_D3 = 2
# lamination parameter weightings during flexural property refinement
out_of_plane_coeffs = np.array([1, 1, 1, 0])
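# The weightings above enter the repair objectives as weighted squared
# lamination-parameter errors, e.g. (as computed further down in this script)
#     f_A = sum(in_plane_coeffs * ((lampamA - lampam_target[0:4]) ** 2))
# and analogously for the flexural terms with out_of_plane_coeffs.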
table_param = pd.DataFrame()
table_param.loc[0, 'in_plane_coeffs'] \
= ' '.join(np.array(in_plane_coeffs, dtype=str))
table_param.loc[0, 'out_of_plane_coeffs'] \
= ' '.join(np.array(out_of_plane_coeffs, dtype=str))
table_param.loc[0, 'p_A'] = p_A
table_param.loc[0, 'n_D1'] = n_D1
table_param.loc[0, 'n_D2'] = n_D2
table_param.loc[0, 'n_D3'] = n_D3
table_param = table_param.transpose()
parameters = Parameters(
constraints=constraints,
p_A=p_A,
n_D1=n_D1,
n_D2=n_D2,
n_D3=n_D3,
repair_membrane_switch=True,
repair_flexural_switch=True)
#==============================================================================
# Tests
#==============================================================================
table_10_bal = pd.DataFrame()
table_membrane = pd.DataFrame()
table_diso_contig = pd.DataFrame()
table_flexural = pd.DataFrame()
data = pd.read_excel(file_to_open, sheet_name='stacks', index_col=0)
#print(data)
t_cummul_10_bal = 0
t_cummul_membrane = 0
t_cummul_diso_contig = 0
t_cummul_flexural = 0
table_10_bal.loc[0, 'average time repair-10-bal (s)'] = 0
table_membrane.loc[0, 'average time repair-membrane (s)'] = 0
table_diso_contig.loc[0, 'average time repair diso-contig (s)'] = 0
table_flexural.loc[0, 'average time repair flexural (s)'] = 0
table_diso_contig.loc[0, 'success rate inward repair diso contig'] = 0
table_diso_contig.loc[0, 'success rate overall repair diso contig'] = 0
n_success_inward_repair_diso_contig = 0
n_success_outward_repair_diso_contig = 0
for ind in range(0, 50):
print('ind', ind)
#==========================================================================
# Read inputs
#==========================================================================
n_plies = data.loc[ind, 'ply_count']
    lampam_ini = np.empty((12,), float)
    for i in range(12):
        lampam_ini[i] = data.loc[ind, 'lampam[' + str(i + 1) + ']']
lampam_target = lampam_ini
ss_ini = np.array(data.loc[ind, 'ss'].split()).astype(int)
# print('ss_ini')
# print_ss(ss_ini, 200)
A11_ini = data.loc[ind, 'A11']
A22_ini = data.loc[ind, 'A22']
A12_ini = data.loc[ind, 'A12']
A66_ini = data.loc[ind, 'A66']
A16_ini = data.loc[ind, 'A16']
A26_ini = data.loc[ind, 'A26']
D11_ini = data.loc[ind, 'D11']
D22_ini = data.loc[ind, 'D22']
D12_ini = data.loc[ind, 'D12']
D66_ini = data.loc[ind, 'D66']
D16_ini = data.loc[ind, 'D16']
D26_ini = data.loc[ind, 'D26']
#==========================================================================
# Repair for balance and 10% rule
#==========================================================================
t = time.time()
mini_10 = calc_mini_10(constraints, ss_ini.size)
ss, ply_queue = repair_10_bal(ss_ini, mini_10, constraints)
# print('ss after repair balance/10')
# print_ss(ss)
# print(ply_queue)
elapsed_10_bal = time.time() - t
t_cummul_10_bal += elapsed_10_bal
lampamA = calc_lampamA_ply_queue(ss, n_plies, ply_queue, constraints)
table_10_bal.loc[ind, 'ply_count'] = n_plies
table_10_bal.loc[ind, 'time (s)'] = elapsed_10_bal
table_10_bal.loc[ind, 'no change in ss'] = is_equal(
ss, ply_queue, ss_ini, constraints.sym)
table_10_bal.loc[ind, 'f_A ini'] = sum(
in_plane_coeffs * ((lampam_ini[0:4] - lampam_target[0:4]) ** 2))
table_10_bal.loc[ind, 'f_A solution'] = sum(
in_plane_coeffs * ((lampamA - lampam_target[0:4]) ** 2))
table_10_bal.loc[ind, 'diff lampam 1'] = abs(lampam_ini[0]-lampamA[0])
table_10_bal.loc[ind, 'diff lampam 2'] = abs(lampam_ini[1]-lampamA[1])
table_10_bal.loc[ind, 'diff lampam 3'] = abs(lampam_ini[2]-lampamA[2])
table_10_bal.loc[ind, 'diff lampam 4'] = abs(lampam_ini[3]-lampamA[3])
table_10_bal.loc[ind, 'lampam[1]'] = lampamA[0]
table_10_bal.loc[ind, 'lampam[2]'] = lampamA[1]
table_10_bal.loc[ind, 'lampam[3]'] = lampamA[2]
table_10_bal.loc[ind, 'lampam[4]'] = lampamA[3]
table_10_bal.loc[ind, 'lampam_ini[1]'] = lampam_ini[0]
table_10_bal.loc[ind, 'lampam_ini[2]'] = lampam_ini[1]
table_10_bal.loc[ind, 'lampam_ini[3]'] = lampam_ini[2]
table_10_bal.loc[ind, 'lampam_ini[4]'] = lampam_ini[3]
ss_flatten = ' '.join(np.array(ss, dtype=str))
table_10_bal.loc[ind, 'ss'] = ss_flatten
ply_queue_flatten = ' '.join(np.array(ply_queue, dtype=str))
table_10_bal.loc[ind, 'ply_queue'] = ply_queue_flatten
ss_ini_flatten = ' '.join(np.array(ss_ini, dtype=str))
table_10_bal.loc[ind, 'ss_ini'] = ss_ini_flatten
A = A_from_lampam(lampamA, mat)
filter_ABD(A=A)
    A11 = A[0, 0]
    A22 = A[1, 1]
    A12 = A[0, 1]
    A66 = A[2, 2]
    A16 = A[0, 2]
    A26 = A[1, 2]
    # percentage change of each in-plane stiffness term, guarding against a
    # zero initial value
    for term, new_val, ini_val in (('A11', A11, A11_ini), ('A22', A22, A22_ini),
                                   ('A12', A12, A12_ini), ('A66', A66, A66_ini),
                                   ('A16', A16, A16_ini), ('A26', A26, A26_ini)):
        if ini_val:
            table_10_bal.loc[ind, 'diff ' + term + ' percentage'] \
                = 100 * abs((new_val - ini_val) / ini_val)
        else:
            table_10_bal.loc[ind, 'diff ' + term + ' percentage'] = 0
#==========================================================================
# Refinement for membrane properties
#==========================================================================
t = time.time()
ss_list, ply_queue_list, lampamA2_list = repair_membrane(
ss=ss,
ply_queue=ply_queue,
mini_10=mini_10,
in_plane_coeffs=in_plane_coeffs,
parameters=parameters,
constraints=constraints,
lampam_target=lampam_target)
ss2 = ss_list[0]
ply_queue2 = ply_queue_list[0]
lampamA2 = lampamA2_list[0]
# print('ss after repair membrane')
# print_ss(ss2)
# print(ply_queue2)
elapsed_membrane = time.time() - t
t_cummul_membrane += elapsed_membrane
lampamA2_check = calc_lampamA_ply_queue(
ss2, n_plies, ply_queue2, constraints)
if not (abs(lampamA2_check - lampamA2) < 1e-10).all():
raise Exception('This should not happen')
table_membrane.loc[ind, 'ply_count'] = n_plies
table_membrane.loc[ind, 'time (s)'] = elapsed_membrane
table_membrane.loc[ind, 'no change in ss'] \
= (abs(lampamA - lampamA2) < 1e-10).all()
f_A_ini = sum(in_plane_coeffs * ((lampamA - lampam_target[0:4]) ** 2))
table_membrane.loc[ind, 'f_A ini'] = f_A_ini
f_A_sol = sum(in_plane_coeffs * ((lampamA2 - lampam_target[0:4]) ** 2))
table_membrane.loc[ind, 'f_A solution'] = f_A_sol
table_membrane.loc[ind, 'diff lampam 1 solution'] = abs(
lampamA2[0]-lampam_target[0])
table_membrane.loc[ind, 'diff lampam 2 solution'] = abs(
lampamA2[1]-lampam_target[1])
table_membrane.loc[ind, 'diff lampam 3 solution'] = abs(
lampamA2[2]-lampam_target[2])
table_membrane.loc[ind, 'diff lampam 4 solution'] = abs(
lampamA2[3]-lampam_target[3])
table_membrane.loc[ind, 'diff lampam 1 before'] = abs(
lampam_target[0]-lampamA[0])
table_membrane.loc[ind, 'diff lampam 2 before'] = abs(
lampam_target[1]-lampamA[1])
table_membrane.loc[ind, 'diff lampam 3 before'] = abs(
lampam_target[2]-lampamA[2])
table_membrane.loc[ind, 'diff lampam 4 before'] = abs(
lampam_target[3]-lampamA[3])
table_membrane.loc[ind, 'lampam[1]'] = lampamA2[0]
table_membrane.loc[ind, 'lampam[2]'] = lampamA2[1]
table_membrane.loc[ind, 'lampam[3]'] = lampamA2[2]
table_membrane.loc[ind, 'lampam[4]'] = lampamA2[3]
table_membrane.loc[ind, 'lampam_target[1]'] = lampam_target[0]
table_membrane.loc[ind, 'lampam_target[2]'] = lampam_target[1]
table_membrane.loc[ind, 'lampam_target[3]'] = lampam_target[2]
table_membrane.loc[ind, 'lampam_target[4]'] = lampam_target[3]
table_membrane.loc[ind, 'lampam_before[1]'] = lampamA[0]
table_membrane.loc[ind, 'lampam_before[2]'] = lampamA[1]
table_membrane.loc[ind, 'lampam_before[3]'] = lampamA[2]
table_membrane.loc[ind, 'lampam_before[4]'] = lampamA[3]
ss_flatten = ' '.join(np.array(ss2, dtype=str))
table_membrane.loc[ind, 'ss'] = ss_flatten
ply_queue_flatten = ' '.join(np.array(ply_queue2, dtype=str))
table_membrane.loc[ind, 'ply_queue'] = ply_queue_flatten
    ss_flatten = ' '.join(np.array(ss, dtype=str))