import warnings
import numpy as np
import pandas as pd
import xarray
import scipy.stats as st
import numba
try:
import pymc3 as pm
except ImportError:
pass
import arviz as az
import arviz.plots.plot_utils
import scipy.ndimage
import skimage
import matplotlib._contour
from matplotlib.pyplot import get_cmap as mpl_get_cmap
import bokeh.application
import bokeh.application.handlers
import bokeh.layouts
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import bokeh.transform
import colorcet
try:
import datashader as ds
import datashader.bokeh_ext
except ImportError as e:
warnings.warn(
f"""DataShader import failed with error "{e}".
Features requiring DataShader will not work and you will get exceptions."""
)
from . import utils
from . import image
from . import az_utils
try:
from . import stan
except:
warnings.warn(
"Could not import `stan` submodule. Perhaps pystan or cmdstanpy is not properly installed."
)
def plot_with_error_bars(
centers, confs, names, marker_kwargs={}, line_kwargs={}, **kwargs
):
"""Make a horizontal plot of centers/conf ints with error bars.
Parameters
----------
centers : array_like, shape (n,)
Array of center points for error bar plot.
confs : array_like, shape (n, 2)
Array of low and high values of confidence intervals
names : list of strings
Names of the variables for the plot. These give the y-ticks.
marker_kwargs : dict, default {}
Kwargs to be passed to p.circle() for plotting centers.
line_kwargs : dict, default {}
        Kwargs passed to p.line() to plot the confidence interval.
kwargs : dict
        Any additional kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Plot of error bars.
"""
n = len(names)
if len(centers) != n:
raise ValueError("len(centers) ≠ len(names)")
if confs.shape != (n, 2):
raise ValueError("Shape of `confs` must be (len(names), 2).")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 50 * n
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
line_width = kwargs.pop("line_width", 2)
p = bokeh.plotting.figure(y_range=names[::-1], **kwargs)
p.circle(x=centers, y=names, **marker_kwargs)
for conf, name in zip(confs, names):
        p.line(x=conf, y=[name, name], line_width=line_width, **line_kwargs)
return p
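# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how plot_with_error_bars() might be called. The numbers and the
# helper name `_example_plot_with_error_bars` are invented for illustration.
def _example_plot_with_error_bars():
    centers = np.array([1.2, 3.4, 2.2])
    confs = np.array([[0.9, 1.5], [3.0, 3.9], [1.8, 2.6]])
    names = ["alpha", "beta", "gamma"]
    return plot_with_error_bars(centers, confs, names)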
def fill_between(
x1=None,
y1=None,
x2=None,
y2=None,
show_line=True,
patch_kwargs={},
line_kwargs={},
p=None,
**kwargs,
):
"""
Create a filled region between two curves.
Parameters
----------
x1 : array_like
Array of x-values for first curve
y1 : array_like
Array of y-values for first curve
x2 : array_like
Array of x-values for second curve
y2 : array_like
Array of y-values for second curve
show_line : bool, default True
If True, show the lines on the edges of the fill.
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure() in
creating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with fill-between.
"""
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 350
if p is None:
p = bokeh.plotting.figure(**kwargs)
line_width = patch_kwargs.pop("line_width", 0)
line_alpha = patch_kwargs.pop("line_alpha", 0)
p.patch(
x=np.concatenate((x1, x2[::-1])),
y=np.concatenate((y1, y2[::-1])),
line_width=line_width,
line_alpha=line_alpha,
**patch_kwargs,
)
if show_line:
line_width = line_kwargs.pop("line_width", 2)
p.line(x1, y1, line_width=line_width, **line_kwargs)
p.line(x2, y2, line_width=line_width, **line_kwargs)
return p
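# --- Illustrative usage sketch (added; not part of the original module) ---
# Shades the band between sin(x) - 0.2 and sin(x) + 0.2 with fill_between().
# All values below are invented for demonstration.
def _example_fill_between():
    x = np.linspace(0.0, 2.0 * np.pi, 200)
    return fill_between(
        x1=x,
        y1=np.sin(x) - 0.2,
        x2=x,
        y2=np.sin(x) + 0.2,
        patch_kwargs=dict(fill_alpha=0.3),
    )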
def qqplot(
data,
gen_fun,
n_samples=1000,
args=(),
patch_kwargs={},
line_kwargs={},
diag_kwargs={},
p=None,
**kwargs,
):
    """
    Generate a Q-Q plot comparing measured data to samples drawn from
    a generative distribution parametrized by the MLE.
Parameters
----------
data : array_like, shape (N,)
Array of data to be used in making Q-Q plot.
gen_fun : function
Function to randomly draw a new data set out of the model
distribution parametrized by the MLE. Must have call
signature `gen_fun(*args, size)`. `size` is the number of
samples to draw.
n_samples : int, default 1000
Number of samples to draw using gen_fun().
args : tuple, default ()
Arguments to be passed to gen_fun().
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
diag_kwargs : dict
Any kwargs to be passed into p.line() in generating diagonal
reference line of Q-Q plot.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
    kwargs
        All other kwargs are passed to bokeh.plotting.figure() in
        creating the figure.

    Returns
    -------
    output : bokeh.plotting.Figure instance
        Plot populated with Q-Q plot.
    """
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 350
x = np.sort(data)
theor_x = np.array([np.sort(gen_fun(*args, len(x))) for _ in range(n_samples)])
# Upper and lower bounds
low_theor, up_theor = np.percentile(theor_x, (2.5, 97.5), axis=0)
if p is None:
p = bokeh.plotting.figure(**kwargs)
if "fill_alpha" not in patch_kwargs:
patch_kwargs["fill_alpha"] = 0.5
p = fill_between(
x,
up_theor,
x,
low_theor,
patch_kwargs=patch_kwargs,
line_kwargs=line_kwargs,
show_line=True,
p=p,
)
# Plot 45 degree line
color = diag_kwargs.pop("color", "black")
alpha = diag_kwargs.pop("alpha", 0.5)
line_width = diag_kwargs.pop("line_width", 4)
p.line([0, x.max()], [0, x.max()], line_width=line_width, color=color, alpha=alpha)
return p
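# --- Illustrative usage sketch (added; not part of the original module) ---
# A Q-Q plot for fake exponentially distributed data. `gen_fun` draws new
# samples from an Exponential distribution parametrized by the (made-up)
# MLE of the scale parameter; everything here is for demonstration only.
def _example_qqplot():
    rg = np.random.default_rng(3252)
    data = rg.exponential(scale=2.0, size=150)
    beta_mle = data.mean()

    def gen_fun(beta, size):
        return rg.exponential(scale=beta, size=size)

    return qqplot(data, gen_fun, args=(beta_mle,))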
def ecdf(
data=None,
p=None,
x_axis_label=None,
y_axis_label="ECDF",
title=None,
plot_height=300,
plot_width=450,
staircase=False,
complementary=False,
x_axis_type="linear",
y_axis_type="linear",
**kwargs,
):
"""
Create a plot of an ECDF.
Parameters
----------
data : array_like
        One-dimensional array of data. NaNs are ignored.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
x_axis_label : str, default None
Label for the x-axis. Ignored if `p` is not None.
y_axis_label : str, default 'ECDF' or 'ECCDF'
Label for the y-axis. Ignored if `p` is not None.
title : str, default None
Title of the plot. Ignored if `p` is not None.
plot_height : int, default 300
Height of plot, in pixels. Ignored if `p` is not None.
plot_width : int, default 450
Width of plot, in pixels. Ignored if `p` is not None.
staircase : bool, default False
        If True, plot the ECDF as a staircase line. If False, plot the
        ECDF as dots.
complementary : bool, default False
If True, plot the empirical complementary cumulative
        distribution function.
x_axis_type : str, default 'linear'
Either 'linear' or 'log'.
y_axis_type : str, default 'linear'
Either 'linear' or 'log'.
kwargs
Any kwargs to be passed to either p.circle or p.line, for
`staircase` being False or True, respectively.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with ECDF.
"""
# Check data to make sure legit
data = utils._convert_data(data)
# Data points on ECDF
x, y = _ecdf_vals(data, staircase, complementary)
# Instantiate Bokeh plot if not already passed in
if p is None:
        if complementary and y_axis_label == "ECDF":
            y_axis_label = "ECCDF"
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_axis_type=x_axis_type,
y_axis_type=y_axis_type,
title=title,
)
if staircase:
# Line of steps
p.line(x, y, **kwargs)
# Rays for ends
if complementary:
p.ray(x[0], 1, None, np.pi, **kwargs)
p.ray(x[-1], 0, None, 0, **kwargs)
else:
p.ray(x[0], 0, None, np.pi, **kwargs)
p.ray(x[-1], 1, None, 0, **kwargs)
else:
p.circle(x, y, **kwargs)
return p
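# --- Illustrative usage sketch (added; not part of the original module) ---
# ECDF of fake Normally distributed data, plotted as dots and then overlaid
# as a staircase on the same figure.
def _example_ecdf():
    rg = np.random.default_rng(3252)
    data = rg.normal(loc=1.0, scale=0.3, size=100)
    p = ecdf(data, x_axis_label="x")
    return ecdf(data, p=p, staircase=True, line_color="gray")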
def histogram(
data=None,
bins=10,
p=None,
density=False,
kind="step",
line_kwargs={},
patch_kwargs={},
**kwargs,
):
"""
Make a plot of a histogram of a data set.
Parameters
----------
data : array_like
1D array of data to make a histogram out of
    bins : int, array_like, or one of 'exact' or 'integer', default 10
        Setting for the `bins` kwarg to be passed to `np.histogram()`.
        If `'exact'`, then each unique value in the data gets its own
        bin. If `'integer'`, then integer data is assumed and each
        integer gets its own bin.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
density : bool, default False
        If True, normalize the histogram. Otherwise, base the histogram
on counts.
kind : str, default 'step'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
line_kwargs : dict
Any kwargs to be passed to p.line() in making the line of the
histogram.
patch_kwargs : dict
Any kwargs to be passed to p.patch() in making the fill of the
histogram.
kwargs : dict
All other kwargs are passed to bokeh.plotting.figure()
Returns
-------
output : Bokeh figure
Figure populated with histogram.
"""
if data is None:
raise RuntimeError("Input `data` must be specified.")
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)
if bins == "exact":
a = np.unique(data)
if len(a) == 1:
bins = np.array([a[0] - 0.5, a[0] + 0.5])
else:
bins = np.concatenate(
(
(a[0] - (a[1] - a[0]) / 2,),
(a[1:] + a[:-1]) / 2,
(a[-1] + (a[-1] - a[-2]) / 2,),
)
)
elif bins == "integer":
if np.any(data != np.round(data)):
raise RuntimeError("'integer' bins chosen, but data are not integer.")
bins = np.arange(data.min() - 1, data.max() + 1) + 0.5
# Compute histogram
f, e = np.histogram(data, bins=bins, density=density)
e0 = np.empty(2 * len(e))
f0 = np.empty(2 * len(e))
e0[::2] = e
e0[1::2] = e
f0[0] = 0
f0[-1] = 0
f0[1:-1:2] = f
f0[2:-1:2] = f
if kind == "step":
p.line(e0, f0, **line_kwargs)
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)
return p
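# --- Illustrative usage sketch (added; not part of the original module) ---
# A density-normalized step histogram of fake Gamma-distributed data.
def _example_histogram():
    rg = np.random.default_rng(3252)
    data = rg.gamma(shape=2.0, scale=1.5, size=500)
    return histogram(data, bins=30, density=True, x_axis_label="x")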
def predictive_ecdf(
samples,
data=None,
diff=False,
percentiles=[80, 60, 40, 20],
color="blue",
data_color="orange",
data_staircase=True,
data_size=2,
x=None,
discrete=False,
p=None,
**kwargs,
):
"""Plot a predictive ECDF from samples.
Parameters
----------
    samples : Numpy array, shape (n_samples, n), or xarray DataArray
        A Numpy array or DataArray containing predictive samples.
data : Numpy array, shape (n,) or xarray DataArray
If not None, ECDF of measured data is overlaid with predictive
ECDF.
    diff : bool, default False
If True, the ECDFs minus median of the predictive ECDF are
plotted.
percentiles : list, default [80, 60, 40, 20]
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme of shading of
percentiles.
data_color : str, default 'orange'
String representing the color of the data to be plotted over the
confidence interval envelopes.
data_staircase : bool, default True
If True, plot the ECDF of the data as a staircase.
Otherwise plot it as dots.
data_size : int, default 2
        Size of marker (if `data_staircase` is False) or thickness of line
(if `data_staircase` is True) of plot of data.
x : Numpy array, default None
Points at which to evaluate the ECDF. If None, points are
automatically generated based on the data range.
discrete : bool, default False
If True, the samples take on discrete values.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Figure populated with glyphs describing range of values for the
ECDF of the samples. The shading goes according to percentiles
of samples of the ECDF, with the median ECDF plotted as line in
the middle.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("Samples can only be Numpy arrays and xarrays.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
        raise RuntimeError(
            "Only allowed colors are 'green', 'blue', 'red', 'gray', "
            "'purple', 'orange', and 'betancourt'."
        )
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
data_range = samples.max() - samples.min()
if discrete and x is None:
x = np.arange(samples.min(), samples.max() + 1)
elif x is None:
x = np.linspace(
samples.min() - 0.05 * data_range, samples.max() + 0.05 * data_range, 400
)
ecdfs = np.array([_ecdf_arbitrary_points(sample, x) for sample in samples])
df_ecdf = pd.DataFrame()
for ptile in ptiles:
df_ecdf[str(ptile)] = np.percentile(
ecdfs, ptile, axis=0, interpolation="higher"
)
df_ecdf["x"] = x
if data is not None and diff:
ecdfs = np.array(
[_ecdf_arbitrary_points(sample, np.sort(data)) for sample in samples]
)
ecdf_data_median = np.percentile(ecdfs, 50, axis=0, interpolation="higher")
if diff:
for ptile in filter(lambda item: item != "50", ptiles_str):
df_ecdf[ptile] -= df_ecdf["50"]
df_ecdf["50"] = 0.0
if p is None:
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "ECDF difference" if diff else "ECDF")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if discrete:
x, y1 = cdf_to_staircase(df_ecdf["x"].values, df_ecdf[ptile].values)
_, y2 = cdf_to_staircase(
df_ecdf["x"].values, df_ecdf[ptiles_str[-i - 1]].values
)
else:
x = df_ecdf["x"]
y1 = df_ecdf[ptile]
y2 = df_ecdf[ptiles_str[-i - 1]]
fill_between(
x,
y1,
x,
y2,
p=p,
show_line=False,
patch_kwargs=dict(color=colors[color][i]),
)
# The median as a solid line
if discrete:
x, y = cdf_to_staircase(df_ecdf["x"], df_ecdf["50"])
else:
x, y = df_ecdf["x"], df_ecdf["50"]
p.line(x, y, line_width=2, color=colors[color][-1])
# Overlay data set
if data is not None:
x_data, y_data = _ecdf_vals(data, staircase=False)
if diff:
# subtracting off median wrecks y-coords for duplicated x-values...
y_data -= ecdf_data_median
# ...so take only unique values,...
unique_x = np.unique(x_data)
# ...find the (correct) max y-value for each...
unique_inds = np.searchsorted(x_data, unique_x, side="right") - 1
# ...and use only that going forward
y_data = y_data[unique_inds]
x_data = unique_x
if data_staircase:
x_data, y_data = cdf_to_staircase(x_data, y_data)
p.line(x_data, y_data, color=data_color, line_width=data_size)
else:
p.circle(x_data, y_data, color=data_color, size=data_size)
return p
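# --- Illustrative usage sketch (added; not part of the original module) ---
# predictive_ecdf() applied to fake "posterior predictive" draws. In real
# use, `samples` would come from a fitted model; here they are simply
# Normal random numbers so the example is self-contained.
def _example_predictive_ecdf():
    rg = np.random.default_rng(3252)
    samples = rg.normal(loc=0.0, scale=1.0, size=(200, 50))
    data = rg.normal(loc=0.1, scale=1.1, size=50)
    return predictive_ecdf(samples, data=data, x_axis_label="x")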
def predictive_regression(
samples,
samples_x,
data=None,
diff=False,
percentiles=[80, 60, 40, 20],
color="blue",
data_kwargs={},
p=None,
**kwargs,
):
"""Plot a predictive regression plot from samples.
Parameters
----------
samples : Numpy array, shape (n_samples, n_x) or xarray DataArray
Numpy array containing predictive samples of y-values.
    samples_x : Numpy array, shape (n_x,)
        x-values corresponding to the columns of `samples`.
data : Numpy array, shape (n, 2) or xarray DataArray
If not None, the measured data. The first column is the x-data,
and the second the y-data. These are plotted as points over the
predictive plot.
    diff : bool, default False
If True, the predictive y-values minus the median of the
predictive y-values are plotted.
percentiles : list, default [80, 60, 40, 20]
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme of shading of
percentiles.
data_kwargs : dict
Any kwargs to be passed to p.circle() when plotting the data
points.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
        Figure populated with glyphs describing the range of values of
the samples. The shading goes according to percentiles of
samples, with the median plotted as line in the middle.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("Samples can only be Numpy arrays and xarrays.")
if type(samples_x) != np.ndarray:
if type(samples_x) == xarray.core.dataarray.DataArray:
samples_x = samples_x.squeeze().values
else:
raise RuntimeError("`samples_x` can only be Numpy array or xarray.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
        raise RuntimeError(
            "Only allowed colors are 'green', 'blue', 'red', 'gray', "
            "'purple', 'orange', and 'betancourt'."
        )
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
if samples.shape[1] != len(samples_x):
        raise ValueError(
            "`samples_x` must have the same number of entries as "
            "`samples` has columns."
        )
# It's useful to have data as a data frame
if data is not None:
if type(data) == tuple and len(data) == 2 and len(data[0]) == len(data[1]):
data = np.vstack(data).transpose()
df_data = pd.DataFrame(data=data, columns=["__data_x", "__data_y"])
df_data = df_data.sort_values(by="__data_x")
# Make sure all entries in x-data in samples_x
if diff:
if len(samples_x) != len(df_data) or not np.allclose(
np.sort(samples_x), df_data["__data_x"].values
):
raise ValueError(
"If `diff=True`, then samples_x must match the x-values of `data`."
)
df_pred = pd.DataFrame(
data=np.percentile(samples, ptiles, axis=0).transpose(),
columns=[str(ptile) for ptile in ptiles],
)
df_pred["__x"] = samples_x
df_pred = df_pred.sort_values(by="__x")
if p is None:
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "y difference" if diff else "y")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if diff:
y1 = df_pred[ptile] - df_pred["50"]
y2 = df_pred[ptiles_str[-i - 1]] - df_pred["50"]
else:
y1 = df_pred[ptile]
y2 = df_pred[ptiles_str[-i - 1]]
fill_between(
x1=df_pred["__x"],
x2=df_pred["__x"],
y1=y1,
y2=y2,
p=p,
show_line=False,
patch_kwargs=dict(fill_color=colors[color][i]),
)
# The median as a solid line
if diff:
p.line(
df_pred["__x"],
np.zeros_like(samples_x),
line_width=2,
color=colors[color][-1],
)
else:
p.line(df_pred["__x"], df_pred["50"], line_width=2, color=colors[color][-1])
# Overlay data set
if data is not None:
data_color = data_kwargs.pop("color", "orange")
data_alpha = data_kwargs.pop("alpha", 1.0)
data_size = data_kwargs.pop("size", 2)
if diff:
p.circle(
df_data["__data_x"],
df_data["__data_y"] - df_pred["50"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
else:
p.circle(
df_data["__data_x"],
df_data["__data_y"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
return p
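# --- Illustrative usage sketch (added; not part of the original module) ---
# predictive_regression() applied to fake predictive draws of a noisy line.
# In real use, `samples` would be posterior predictive draws of y-values.
def _example_predictive_regression():
    rg = np.random.default_rng(3252)
    samples_x = np.linspace(0.0, 10.0, 50)
    samples = 2.0 * samples_x + rg.normal(scale=1.0, size=(300, 50))
    y_obs = 2.0 * samples_x + rg.normal(scale=1.0, size=50)
    data = np.column_stack((samples_x, y_obs))
    return predictive_regression(samples, samples_x, data=data)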
def sbc_rank_ecdf(
sbc_output=None,
parameters=None,
diff=True,
ptile=99.0,
bootstrap_envelope=False,
n_bs_reps=None,
show_envelope=True,
show_envelope_line=True,
color_by_warning_code=False,
staircase=False,
p=None,
marker_kwargs={},
envelope_patch_kwargs={},
envelope_line_kwargs={},
palette=None,
show_legend=True,
**kwargs,
):
"""Make a rank ECDF plot from simulation-based calibration.
Parameters
----------
sbc_output : DataFrame
Output of bebi103.stan.sbc() containing results from an SBC
calculation.
parameters : list, default None
List of parameters to include in the SBC rank ECDF plot. If
None, use all parameters.
diff : bool, default True
If True, plot the ECDF minus the ECDF of a Uniform distribution.
Otherwise, plot the ECDF of the rank statistic from SBC.
ptile : float, default 99
        Which percentile to use as the envelope in the plot.
bootstrap_envelope : bool, default False
If True, use bootstrapping on the appropriate Uniform
distribution to compute the envelope. Otherwise, use the
Gaussian approximation for the envelope.
    n_bs_reps : int, default None
Number of bootstrap replicates to use when computing the
envelope. If None, n_bs_reps is determined from the formula
int(max(n, max(L+1, 100/(100-ptile))) * 100), where n is the
number of simulations used in the SBC calculation.
show_envelope : bool, default True
If True, display the envelope encompassing the ptile percent
confidence interval for the SBC ECDF.
show_envelope_line : bool, default True
If True, and `show_envelope` is also True, plot a line around
the envelope.
color_by_warning_code : bool, default False
If True, color glyphs by diagnostics warning code instead of
        coloring the glyphs by parameter.
staircase : bool, default False
If True, plot the ECDF as a staircase. Otherwise, plot with
dots.
p : bokeh.plotting.Figure instance, default None
Plot to which to add the SBC rank ECDF plot. If None, create a
new figure.
marker_kwargs : dict, default {}
Dictionary of kwargs to pass to `p.circle()` or `p.line()` when
plotting the SBC ECDF.
envelope_patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill of
the envelope.
envelope_line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill of the envelope.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
show_legend : bool, default True
If True, show legend.
kwargs : dict
Any kwargs passed to `bokeh.plotting.figure()` when creating the
plot.
Returns
-------
output : bokeh.plotting.Figure instance
A plot containing the SBC plot.
Notes
-----
.. You can see example SBC ECDF plots in Fig. 14 b and c in this
paper: https://arxiv.org/abs/1804.06788
"""
if sbc_output is None:
raise RuntimeError("Argument `sbc_output` must be specified.")
# Defaults
if palette is None:
palette = colorcet.b_glasbey_category10
    elif type(palette) not in [list, tuple]:
palette = [palette]
if "x_axis_label" not in kwargs:
kwargs["x_axis_label"] = "rank statistic"
if "y_axis_label" not in kwargs:
kwargs["y_axis_label"] = "ECDF difference" if diff else "ECDF"
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
toolbar_location = kwargs.pop("toolbar_location", "above")
if "fill_color" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_color"] = "gray"
if "fill_alpha" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_alpha"] = 0.5
if "line_color" not in envelope_line_kwargs:
envelope_line_kwargs["line_color"] = "gray"
    if "color" in marker_kwargs and color_by_warning_code:
raise RuntimeError(
"Cannot specify marker color when `color_by_warning_code` is True."
)
if staircase and color_by_warning_code:
raise RuntimeError("Cannot color by warning code for staircase ECDFs.")
if parameters is None:
parameters = list(sbc_output["parameter"].unique())
elif type(parameters) not in [list, tuple]:
parameters = [parameters]
L = sbc_output["L"].iloc[0]
df = sbc_output.loc[
sbc_output["parameter"].isin(parameters),
["parameter", "rank_statistic", "warning_code"],
]
n = (df["parameter"] == df["parameter"].unique()[0]).sum()
if show_envelope:
x, y_low, y_high = _sbc_rank_envelope(
L,
n,
ptile=ptile,
diff=diff,
bootstrap=bootstrap_envelope,
n_bs_reps=n_bs_reps,
)
p = fill_between(
x1=x,
x2=x,
y1=y_high,
y2=y_low,
patch_kwargs=envelope_patch_kwargs,
line_kwargs=envelope_line_kwargs,
show_line=show_envelope_line,
p=p,
**kwargs,
)
else:
p = bokeh.plotting.figure(**kwargs)
if staircase:
dfs = []
for param in parameters:
if diff:
x_data, y_data = _ecdf_diff(
df.loc[df["parameter"] == param, "rank_statistic"],
L,
staircase=True,
)
else:
x_data, y_data = _ecdf_vals(
df.loc[df["parameter"] == param, "rank_statistic"], staircase=True
)
dfs.append(
pd.DataFrame(
data=dict(rank_statistic=x_data, __ECDF=y_data, parameter=param)
)
)
df = pd.concat(dfs, ignore_index=True)
else:
df["__ECDF"] = df.groupby("parameter")["rank_statistic"].transform(_ecdf_y)
df["warning_code"] = df["warning_code"].astype(str)
if diff:
df["__ECDF"] -= (df["rank_statistic"] + 1) / L
if staircase:
color = marker_kwargs.pop("color", palette)
if type(color) == str:
color = [color] * len(parameters)
elif "color" not in marker_kwargs:
color = palette
else:
color = [marker_kwargs.pop("color")] * len(parameters)
if color_by_warning_code:
if len(color) < len(df["warning_code"].unique()):
raise RuntimeError(
"Not enough colors in palette to cover all warning codes."
)
elif len(color) < len(parameters):
raise RuntimeError("Not enough colors in palette to cover all parameters.")
if staircase:
plot_cmd = p.line
else:
plot_cmd = p.circle
if color_by_warning_code:
for i, (warning_code, g) in enumerate(df.groupby("warning_code")):
if show_legend:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
legend_label=warning_code,
**marker_kwargs,
)
else:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
else:
for i, (param, g) in enumerate(df.groupby("parameter")):
if show_legend:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
legend_label=param,
**marker_kwargs,
)
else:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
if show_legend:
p.legend.click_policy = "hide"
p.legend.location = "bottom_right"
return p
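# --- Illustrative usage sketch (added; not part of the original module) ---
# sbc_rank_ecdf() expects the output of bebi103.stan.sbc(). To keep this
# sketch self-contained, a fake SBC output data frame with the expected
# columns (parameter, rank_statistic, warning_code, L) is built by hand;
# its contents are invented and carry no statistical meaning.
def _example_sbc_rank_ecdf():
    rg = np.random.default_rng(3252)
    n_sim, L = 400, 100
    sbc_output = pd.DataFrame(
        {
            "parameter": ["theta"] * n_sim,
            "rank_statistic": rg.integers(0, L + 1, size=n_sim),
            "warning_code": np.zeros(n_sim, dtype=int),
            "L": L,
        }
    )
    return sbc_rank_ecdf(sbc_output)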
def parcoord_plot(
samples=None,
pars=None,
transformation=None,
color_by_chain=False,
palette=None,
line_kwargs={},
divergence_kwargs={},
xtick_label_orientation="horizontal",
**kwargs,
):
"""
Make a parallel coordinate plot of MCMC samples. The x-axis is the
parameter name and the y-axis is the value of the parameter,
    possibly transformed so that the scales of all parameters are similar.
Parameters
----------
samples : ArviZ InferenceData instance or xarray Dataset instance
Result of MCMC sampling.
pars : list of strings
List of variables to include in the plot.
transformation : function, str, or dict, default None
        A transformation to apply to each set of samples. The function
        must take a single array as input and return an array of the
        same size. If None, no transformation is done. If a dictionary,
each key is the variable name and the corresponding value is a
function for the transformation of that variable. Alternatively,
if `transformation` is `'minmax'`, the data are scaled to range
        from zero to one, or if `transformation` is `'rank'`, the rank
        of each data point is used.
color_by_chain : bool, default False
If True, color the lines by chain.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
line_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of non-divergent samples.
divergence_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of divergent samples.
xtick_label_orientation : str or float, default 'horizontal'
Orientation of x tick labels. In some plots, horizontally
labeled ticks will have label clashes, and this can fix that.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()` when
instantiating the figure.
Returns
-------
output : Bokeh plot
Parallel coordinates plot.
"""
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
line_width = line_kwargs.pop("line_width", 0.5)
alpha = line_kwargs.pop("alpha", 0.02)
line_join = line_kwargs.pop("line_join", "bevel")
if "color" in line_kwargs and color_by_chain:
raise RuntimeError(
"Cannot specify line color and also color by chain. If coloring by chain, use `palette` kwarg to specify color scheme."
)
color = line_kwargs.pop("color", "black")
divergence_line_join = divergence_kwargs.pop("line_join", "bevel")
divergence_line_width = divergence_kwargs.pop("line_width", 1)
divergence_color = divergence_kwargs.pop("color", "orange")
divergence_alpha = divergence_kwargs.pop("alpha", 1)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 175
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 600
toolbar_location = kwargs.pop("toolbar_location", "above")
if "x_range" in kwargs:
raise RuntimeError("Cannot specify x_range; this is inferred.")
if not color_by_chain:
palette = [color] * len(palette)
if type(samples) != az.data.inference_data.InferenceData:
raise RuntimeError("Input must be an ArviZ InferenceData instance.")
if not hasattr(samples, "posterior"):
raise RuntimeError("Input samples do not have 'posterior' group.")
if not (
hasattr(samples, "sample_stats") and hasattr(samples.sample_stats, "diverging")
):
warnings.warn("No divergence information available.")
pars, df = _sample_pars_to_df(samples, pars)
if transformation == "minmax":
transformation = {
par: lambda x: (x - x.min()) / (x.max() - x.min())
if x.min() < x.max()
else 0.0
for par in pars
}
elif transformation == "rank":
transformation = {par: lambda x: st.rankdata(x) for par in pars}
if transformation is None:
transformation = {par: lambda x: x for par in pars}
    if callable(transformation):
        transformation = {par: transformation for par in pars}
for col, trans in transformation.items():
df[col] = trans(df[col])
df = df.melt(id_vars=["divergent__", "chain__", "draw__"])
p = bokeh.plotting.figure(
x_range=bokeh.models.FactorRange(*pars),
toolbar_location=toolbar_location,
**kwargs,
)
# Plots for samples that were not divergent
ys = np.array(
[
group["value"].values
for _, group in df.loc[~df["divergent__"]].groupby(["chain__", "draw__"])
]
)
if len(ys) > 0:
ys = [y for y in ys]
xs = [list(df["variable"].unique())] * len(ys)
p.multi_line(
xs,
ys,
line_width=line_width,
alpha=alpha,
line_join=line_join,
color=[palette[i % len(palette)] for i in range(len(ys))],
**line_kwargs,
)
# Plots for samples that were divergent
ys = np.array(
[
group["value"].values
for _, group in df.loc[df["divergent__"]].groupby(["chain__", "draw__"])
]
)
if len(ys) > 0:
ys = [y for y in ys]
xs = [list(df["variable"].unique())] * len(ys)
p.multi_line(
xs,
ys,
alpha=divergence_alpha,
line_join=line_join,
color=divergence_color,
line_width=divergence_line_width,
**divergence_kwargs,
)
p.xaxis.major_label_orientation = xtick_label_orientation
return p
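# --- Illustrative usage sketch (added; not part of the original module) ---
# parcoord_plot() with ArviZ's bundled "centered_eight" example posterior;
# any InferenceData object with a posterior group would work similarly.
def _example_parcoord_plot():
    samples = az.load_arviz_data("centered_eight")
    return parcoord_plot(samples, pars=["mu", "tau"], transformation="minmax")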
def trace_plot(samples=None, pars=None, palette=None, line_kwargs={}, **kwargs):
"""
Make a trace plot of MCMC samples.
Parameters
----------
samples : ArviZ InferenceData instance or xarray Dataset instance
Result of MCMC sampling.
pars : list of strings
List of variables to include in the plot.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
line_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of non-divergent samples.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()`.
Returns
-------
output : Bokeh gridplot
Set of chain traces as a Bokeh gridplot.
"""
if type(samples) != az.data.inference_data.InferenceData:
raise RuntimeError("Input must be an ArviZ InferenceData instance.")
if not hasattr(samples, "posterior"):
raise RuntimeError("Input samples do not have 'posterior' group.")
pars, df = _sample_pars_to_df(samples, pars)
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
line_width = line_kwargs.pop("line_width", 0.5)
alpha = line_kwargs.pop("alpha", 0.5)
line_join = line_kwargs.pop("line_join", "bevel")
if "color" in line_kwargs:
raise RuntimeError(
"Cannot specify line color. Specify color scheme with `palette` kwarg."
)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 150
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 600
x_axis_label = kwargs.pop("x_axis_label", "step")
if "y_axis_label" in kwargs:
raise RuntimeError(
"`y_axis_label` cannot be specified; it is inferred from samples."
)
if "x_range" not in kwargs:
kwargs["x_range"] = [df["draw__"].min(), df["draw__"].max()]
plots = []
grouped = df.groupby("chain__")
for i, par in enumerate(pars):
p = bokeh.plotting.figure(x_axis_label=x_axis_label, y_axis_label=par, **kwargs)
        for j, (chain, group) in enumerate(grouped):
            p.line(
                group["draw__"],
                group[par],
                line_width=line_width,
                line_join=line_join,
                color=palette[j],
                **line_kwargs,
            )
plots.append(p)
if len(plots) == 1:
return plots[0]
# Link ranges
for i, p in enumerate(plots[:-1]):
plots[i].x_range = plots[-1].x_range
return bokeh.layouts.gridplot(plots, ncols=1)
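# --- Illustrative usage sketch (added; not part of the original module) ---
# trace_plot() with ArviZ's bundled "centered_eight" example posterior.
def _example_trace_plot():
    samples = az.load_arviz_data("centered_eight")
    return trace_plot(samples, pars=["mu", "tau"])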
def corner(
samples=None,
pars=None,
labels=None,
datashade=False,
plot_width=150,
plot_ecdf=False,
cmap="black",
color_by_chain=False,
palette=None,
divergence_color="orange",
alpha=0.02,
single_param_color="black",
bins=20,
show_contours=False,
contour_color="black",
bins_2d=50,
levels=None,
weights=None,
smooth=0.02,
extend_contour_domain=False,
plot_width_correction=50,
plot_height_correction=40,
xtick_label_orientation="horizontal",
):
"""
Make a corner plot of MCMC results. Heavily influenced by the corner
package by <NAME>.
Parameters
----------
samples : Pandas DataFrame or ArviZ InferenceData instance
Results of sampling.
pars : list
List of variables as strings included in `samples` to construct
corner plot.
labels : list, default None
List of labels for the respective variables given in `pars`. If
None, the variable names from `pars` are used.
datashade : bool, default False
Whether or not to convert sampled points to a raster image using
Datashader.
plot_width : int, default 150
Width of each plot in the corner plot in pixels. The height is
computed from the width to make the plots roughly square.
plot_ecdf : bool, default False
If True, plot ECDFs of samples on the diagonal of the corner
plot. If False, histograms are plotted.
cmap : str, default 'black'
Valid colormap string for DataShader or for coloring Bokeh
glyphs.
color_by_chain : bool, default False
If True, color the glyphs by chain index.
    palette : list of strings of hex colors, or single hex string
        If a list, color palette to use. If a single string representing
        a hex color, all glyphs are colored with that color. Default is
        colorcet.b_glasbey_category10 from the colorcet package. Ignored
        if `color_by_chain` is False.
divergence_color : str, default 'orange'
Color to use for showing points where the sampler experienced a
divergence.
    alpha : float, default 0.02
        Opacity of glyphs. Ignored if `datashade` is True.
single_param_color : str, default 'black'
Color of histogram or ECDF lines.
bins : int, default 20
Number of bins to use in constructing histograms. Ignored if
`plot_ecdf` is True.
show_contours : bool, default False
If True, show contour plot on top of samples.
contour_color : str, default 'black'
Color of contour lines
bins_2d : int, default 50
Number of bins in each direction for binning 2D histograms when
computing contours.
levels : list of floats, default None
Levels to use when constructing contours. By default, these are
chosen according to this principle from <NAME>:
http://corner.readthedocs.io/en/latest/pages/sigmas.html
weights : default None
Value to pass as `weights` kwarg to np.histogram2d(), used in
constructing contours.
    smooth : float or None, default 0.02
        Width of smoothing kernel for making contours.
extend_contour_domain : bool, default False
If True, extend the domain of the contours a little bit beyond
        the extent of the samples. This is done in the corner package,
but I prefer not to do it.
plot_width_correction : int, default 50
Correction for width of plot taking into account tick and axis
labels.
plot_height_correction : int, default 40
Correction for height of plot taking into account tick and axis
labels.
xtick_label_orientation : str or float, default 'horizontal'
Orientation of x tick labels. In some plots, horizontally
labeled ticks will have label clashes, and this can fix that.
Returns
-------
output : Bokeh gridplot
Corner plot as a Bokeh gridplot.
"""
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
if color_by_chain:
if datashade:
raise NotImplementedError(
"Can only color by chain if `datashade` is False."
)
if cmap not in ["black", None]:
warnings.warn("Ignoring cmap values to color by chain.")
if divergence_color is None:
divergence_color = cmap
if type(samples) == pd.core.frame.DataFrame:
df = samples
if pars is None:
pars = [col for col in df.columns if len(col) < 2 or col[-2:] != "__"]
else:
pars, df = _sample_pars_to_df(samples, pars)
if color_by_chain:
# Have to convert datatype to string to play nice with Bokeh
df["chain__"] = df["chain__"].astype(str)
factors = tuple(df["chain__"].unique())
cmap = bokeh.transform.factor_cmap("chain__", palette=palette, factors=factors)
# Add dummy divergent column if no divergence information is given
if "divergent__" not in df.columns:
df = df.copy()
df["divergent__"] = 0
    # Add dummy chain column if no chain information is given
if "chain__" not in df.columns:
df = df.copy()
df["chain__"] = 0
if len(pars) > 6:
raise RuntimeError("For space purposes, can show only six variables.")
for col in pars:
if col not in df.columns:
raise RuntimeError("Column " + col + " not in the columns of DataFrame.")
if labels is None:
labels = pars
elif len(labels) != len(pars):
raise RuntimeError("len(pars) must equal len(labels)")
if len(pars) == 1:
x = pars[0]
if plot_ecdf:
if datashade:
if plot_width == 150:
plot_height = 200
plot_width = 300
else:
plot_width = 200
plot_height = 200
x_range, _ = _data_range(df, pars[0], pars[0])
p = bokeh.plotting.figure(
x_range=x_range,
y_range=[-0.02, 1.02],
plot_width=plot_width,
plot_height=plot_height,
)
x_ecdf, y_ecdf = _ecdf_vals(df[pars[0]], staircase=True)
df_ecdf = pd.DataFrame(data={pars[0]: x_ecdf, "ECDF": y_ecdf})
_ = datashader.bokeh_ext.InteractiveImage(
p,
_create_line_image,
df=df_ecdf,
x=x,
y="ECDF",
cmap=single_param_color,
)
else:
p = ecdf(
df[pars[0]],
staircase=True,
line_width=2,
line_color=single_param_color,
)
else:
p = histogram(
df[pars[0]],
bins=bins,
density=True,
line_kwargs=dict(line_width=2, line_color=single_param_color),
x_axis_label=pars[0],
)
p.xaxis.major_label_orientation = xtick_label_orientation
return p
if not datashade:
if len(df) > 10000:
raise RuntimeError(
"Cannot render more than 10,000 samples without DataShader."
)
elif len(df) > 5000:
warnings.warn("Rendering so many points without DataShader is ill-advised.")
plots = [[None for _ in range(len(pars))] for _ in range(len(pars))]
for i, j in zip(*np.tril_indices(len(pars))):
pw = plot_width
ph = plot_width
if j == 0:
pw += plot_width_correction
if i == len(pars) - 1:
ph += plot_height_correction
x = pars[j]
if i != j:
y = pars[i]
x_range, y_range = _data_range(df, x, y)
plots[i][j] = bokeh.plotting.figure(
x_range=x_range, y_range=y_range, plot_width=pw, plot_height=ph
)
if datashade:
_ = datashader.bokeh_ext.InteractiveImage(
plots[i][j], _create_points_image, df=df, x=x, y=y, cmap=cmap
)
plots[i][j].circle(
df.loc[df["divergent__"] == 1, x],
df.loc[df["divergent__"] == 1, y],
size=2,
color=divergence_color,
)
else:
if divergence_color is None:
plots[i][j].circle(df[x], df[y], size=2, alpha=alpha, color=cmap)
else:
plots[i][j].circle(
source=df.loc[df["divergent__"] == 0, [x, y, "chain__"]],
x=x,
y=y,
size=2,
alpha=alpha,
color=cmap,
)
plots[i][j].circle(
df.loc[df["divergent__"] == 1, x],
df.loc[df["divergent__"] == 1, y],
size=2,
color=divergence_color,
)
if show_contours:
xs, ys = contour_lines_from_samples(
df[x].values,
df[y].values,
bins=bins_2d,
smooth=smooth,
levels=levels,
weights=weights,
extend_domain=extend_contour_domain,
)
plots[i][j].multi_line(xs, ys, line_color=contour_color, line_width=2)
else:
if plot_ecdf:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range,
y_range=[-0.02, 1.02],
plot_width=pw,
plot_height=ph,
)
if datashade:
x_ecdf, y_ecdf = _ecdf_vals(df[x], staircase=True)
df_ecdf = pd.DataFrame(data={x: x_ecdf, "ECDF": y_ecdf})
_ = datashader.bokeh_ext.InteractiveImage(
plots[i][i],
_create_line_image,
df=df_ecdf,
x=x,
y="ECDF",
cmap=single_param_color,
)
else:
plots[i][i] = ecdf(
df[x],
p=plots[i][i],
staircase=True,
line_width=2,
line_color=single_param_color,
)
else:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range,
y_range=bokeh.models.DataRange1d(start=0.0),
plot_width=pw,
plot_height=ph,
)
f, e = np.histogram(df[x], bins=bins, density=True)
e0 = np.empty(2 * len(e))
f0 = np.empty(2 * len(e))
e0[::2] = e
e0[1::2] = e
f0[0] = 0
f0[-1] = 0
f0[1:-1:2] = f
f0[2:-1:2] = f
plots[i][i].line(e0, f0, line_width=2, color=single_param_color)
plots[i][j].xaxis.major_label_orientation = xtick_label_orientation
# Link axis ranges
for i in range(1, len(pars)):
for j in range(i):
plots[i][j].x_range = plots[j][j].x_range
plots[i][j].y_range = plots[i][i].x_range
# Label axes
for i, label in enumerate(labels):
plots[-1][i].xaxis.axis_label = label
for i, label in enumerate(labels[1:]):
plots[i + 1][0].yaxis.axis_label = label
if plot_ecdf:
plots[0][0].yaxis.axis_label = "ECDF"
# Take off tick labels
for i in range(len(pars) - 1):
for j in range(i + 1):
plots[i][j].xaxis.major_label_text_font_size = "0pt"
if not plot_ecdf:
plots[0][0].yaxis.major_label_text_font_size = "0pt"
for i in range(1, len(pars)):
for j in range(1, i + 1):
plots[i][j].yaxis.major_label_text_font_size = "0pt"
grid = bokeh.layouts.gridplot(plots, toolbar_location="left")
return grid
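# --- Illustrative usage sketch (added; not part of the original module) ---
# corner() accepts a tidy DataFrame of samples, so a small frame of fake
# draws keeps this sketch self-contained; an ArviZ InferenceData instance
# would also work.
def _example_corner():
    rg = np.random.default_rng(3252)
    df = pd.DataFrame({"a": rg.normal(size=1000), "b": rg.normal(size=1000)})
    return corner(df, pars=["a", "b"])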
def contour(
X,
Y,
Z,
levels=None,
p=None,
overlaid=False,
cmap=None,
overlay_grid=False,
fill=False,
fill_palette=None,
fill_alpha=0.75,
line_kwargs={},
**kwargs,
):
"""
Make a contour plot, possibly overlaid on an image.
Parameters
----------
X : 2D Numpy array
Array of x-values, as would be produced using np.meshgrid()
Y : 2D Numpy array
Array of y-values, as would be produced using np.meshgrid()
Z : 2D Numpy array
Array of z-values.
levels : array_like
Levels to plot, ranging from 0 to 1. The contour around a given
level contains that fraction of the total probability if the
contour plot is for a 2D probability density function. By
default, the levels are given by the one, two, three, and four
sigma levels corresponding to a marginalized distribution from
a 2D Gaussian distribution.
p : bokeh plotting object, default None
        If not None, the contours are added to `p`. This option is not
allowed if `overlaid` is True.
overlaid : bool, default False
If True, `Z` is displayed as an image and the contours are
overlaid.
    cmap : str or list of hex colors, default None
        Mapping of the intensity values of `Z` to color when `Z` is
        displayed as an image (i.e., when `overlaid` is True). If None,
        default is 256-level Viridis.
overlay_grid : bool, default False
If True, faintly overlay the grid on top of image. Ignored if
overlaid is False.
line_kwargs : dict, default {}
        Keyword arguments passed to `p.multi_line()` for rendering the
contour.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()`.
Returns
-------
output : Bokeh plotting object
        Plot populated with contours, possibly with an image.
"""
if len(X.shape) != 2 or Y.shape != X.shape or Z.shape != X.shape:
raise RuntimeError("All arrays must be 2D and of same shape.")
if overlaid and p is not None:
raise RuntimeError("Cannot specify `p` if showing image.")
# Set defaults
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "y")
if "line_color" not in line_kwargs:
if overlaid:
line_kwargs["line_color"] = "white"
else:
line_kwargs["line_color"] = "black"
line_width = line_kwargs.pop("line_width", 2)
if p is None:
if overlaid:
frame_height = kwargs.pop("frame_height", 300)
frame_width = kwargs.pop("frame_width", 300)
title = kwargs.pop("title", None)
p = image.imshow(
Z,
cmap=cmap,
frame_height=frame_height,
frame_width=frame_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_range=[X.min(), X.max()],
y_range=[Y.min(), Y.max()],
no_ticks=False,
flip=False,
return_im=False,
)
else:
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 300
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 300
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
# Set default levels
if levels is None:
levels = 1.0 - np.exp(-np.arange(0.5, 2.1, 0.5) ** 2 / 2)
# Compute contour lines
if fill or line_width:
xs, ys = _contour_lines(X, Y, Z, levels)
# Make fills. This is currently not supported
if fill:
raise NotImplementedError("Filled contours are not yet implemented.")
if fill_palette is None:
if len(levels) <= 6:
fill_palette = bokeh.palettes.Greys[len(levels) + 3][1:-1]
elif len(levels) <= 10:
fill_palette = bokeh.palettes.Viridis[len(levels) + 1]
else:
raise RuntimeError(
"Can only have maximally 10 levels with filled contours"
+ " unless user specifies `fill_palette`."
)
elif len(fill_palette) != len(levels) + 1:
raise RuntimeError(
"`fill_palette` must have 1 more entry" + " than `levels`"
)
p.patch(
xs[-1], ys[-1], color=fill_palette[0], alpha=fill_alpha, line_color=None
)
for i in range(1, len(levels)):
x_p = np.concatenate((xs[-1 - i], xs[-i][::-1]))
y_p = np.concatenate((ys[-1 - i], ys[-i][::-1]))
p.patch(x_p, y_p, color=fill_palette[i], alpha=fill_alpha, line_color=None)
p.background_fill_color = fill_palette[-1]
# Populate the plot with contour lines
p.multi_line(xs, ys, line_width=line_width, **line_kwargs)
if overlay_grid and overlaid:
p.grid.level = "overlay"
p.grid.grid_line_alpha = 0.2
return p
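# --- Illustrative usage sketch (added; not part of the original module) ---
# Contours of a standard bivariate Gaussian density evaluated on a grid,
# using the module's default probability-mass levels.
def _example_contour():
    x = np.linspace(-3.0, 3.0, 200)
    y = np.linspace(-3.0, 3.0, 200)
    X, Y = np.meshgrid(x, y)
    Z = np.exp(-(X ** 2 + Y ** 2) / 2.0) / (2.0 * np.pi)
    return contour(X, Y, Z)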
def ds_line_plot(
df,
x,
y,
cmap="#1f77b4",
plot_height=300,
plot_width=500,
x_axis_label=None,
y_axis_label=None,
title=None,
margin=0.02,
):
"""
Make a datashaded line plot.
Parameters
----------
df : pandas DataFrame
DataFrame containing the data
x : Valid column name of Pandas DataFrame
Column containing the x-data.
y : Valid column name of Pandas DataFrame
Column containing the y-data.
cmap : str, default '#1f77b4'
Valid colormap string for DataShader and for coloring Bokeh
glyphs.
plot_height : int, default 300
Height of plot, in pixels.
plot_width : int, default 500
Width of plot, in pixels.
x_axis_label : str, default None
Label for the x-axis.
y_axis_label : str, default None
Label for the y-axis.
title : str, default None
Title of the plot. Ignored if `p` is not None.
margin : float, default 0.02
Margin, in units of `plot_width` or `plot_height`, to leave
around the plotted line.
Returns
-------
output : datashader.bokeh_ext.InteractiveImage
Interactive image of plot. Note that you should *not* use
bokeh.io.show() to view the image. For most use cases, you
should just call this function without variable assignment.
"""
if x_axis_label is None:
if type(x) == str:
x_axis_label = x
else:
x_axis_label = "x"
if y_axis_label is None:
if type(y) == str:
y_axis_label = y
else:
y_axis_label = "y"
x_range, y_range = _data_range(df, x, y, margin=margin)
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_range=x_range,
y_range=y_range,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
title=title,
)
return datashader.bokeh_ext.InteractiveImage(
p, _create_line_image, df=df, x=x, y=y, cmap=cmap
)
def ds_point_plot(
df,
x,
y,
cmap="#1f77b4",
plot_height=300,
plot_width=500,
x_axis_label=None,
y_axis_label=None,
title=None,
margin=0.02,
):
"""
Make a datashaded point plot.
Parameters
----------
df : pandas DataFrame
DataFrame containing the data
x : Valid column name of Pandas DataFrame
Column containing the x-data.
y : Valid column name of Pandas DataFrame
Column containing the y-data.
cmap : str, default '#1f77b4'
Valid colormap string for DataShader and for coloring Bokeh
glyphs.
plot_height : int, default 300
Height of plot, in pixels.
plot_width : int, default 500
Width of plot, in pixels.
x_axis_label : str, default None
Label for the x-axis.
y_axis_label : str, default None
Label for the y-axis.
title : str, default None
Title of the plot. Ignored if `p` is not None.
margin : float, default 0.02
Margin, in units of `plot_width` or `plot_height`, to leave
around the plotted line.
Returns
-------
output : datashader.bokeh_ext.InteractiveImage
Interactive image of plot. Note that you should *not* use
bokeh.io.show() to view the image. For most use cases, you
should just call this function without variable assignment.
"""
if x_axis_label is None:
if type(x) == str:
x_axis_label = x
else:
x_axis_label = "x"
if y_axis_label is None:
if type(y) == str:
y_axis_label = y
else:
y_axis_label = "y"
x_range, y_range = _data_range(df, x, y, margin=margin)
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_range=x_range,
y_range=y_range,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
title=title,
)
return datashader.bokeh_ext.InteractiveImage(
p, _create_points_image, df=df, x=x, y=y, cmap=cmap
)
def mpl_cmap_to_color_mapper(cmap):
"""
Convert a Matplotlib colormap to a bokeh.models.LinearColorMapper
instance.
Parameters
----------
cmap : str
A string giving the name of the color map.
Returns
-------
output : bokeh.models.LinearColorMapper instance
        A linear color mapper with 256 gradations.
Notes
-----
.. See https://matplotlib.org/examples/color/colormaps_reference.html
for available Matplotlib colormaps.
"""
cm = mpl_get_cmap(cmap)
palette = [rgb_frac_to_hex(cm(i)[:3]) for i in range(256)]
return bokeh.models.LinearColorMapper(palette=palette)
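# --- Illustrative usage sketch (added; not part of the original module) ---
# Attach a color bar built from a Matplotlib colormap to a Bokeh figure.
def _example_mpl_cmap_to_color_mapper():
    color_mapper = mpl_cmap_to_color_mapper("viridis")
    p = bokeh.plotting.figure(frame_height=60, frame_width=300)
    p.add_layout(bokeh.models.ColorBar(color_mapper=color_mapper), "below")
    return p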
def _ecdf_vals(data, staircase=False, complementary=False):
    """Get x and y values of an ECDF for plotting.
Parameters
----------
data : ndarray
One dimensional Numpy array with data.
staircase : bool, default False
        If True, generate x and y values for a staircase ECDF. If
        False, generate x and y values for the ECDF as dots.
complementary : bool
If True, return values for ECCDF.
Returns
-------
x : ndarray
x-values for plot
y : ndarray
y-values for plot
"""
x = np.sort(data)
y = np.arange(1, len(data) + 1) / len(data)
if staircase:
x, y = cdf_to_staircase(x, y)
if complementary:
y = 1 - y
elif complementary:
y = 1 - y + 1 / len(y)
return x, y
@numba.jit(nopython=True)
def _ecdf_arbitrary_points(data, x):
"""Give the value of an ECDF at arbitrary points x."""
y = np.arange(len(data) + 1) / len(data)
return y[np.searchsorted(np.sort(data), x, side="right")]
def _ecdf_from_samples(df, name, ptiles, x):
"""Compute ECDFs and percentiles from samples."""
df_ecdf = pd.DataFrame()
df_ecdf_vals = pd.DataFrame()
grouped = df.groupby(["chain", "chain_idx"])
for i, g in grouped:
df_ecdf_vals[i] = _ecdf_arbitrary_points(g[name].values, x)
for ptile in ptiles:
df_ecdf[str(ptile)] = df_ecdf_vals.quantile(
ptile / 100, axis=1, interpolation="higher"
)
df_ecdf["x"] = x
return df_ecdf
def cdf_to_staircase(x, y):
"""Convert discrete values of CDF to staircase for plotting.
Parameters
----------
x : array_like, shape (n,)
x-values for concave corners of CDF
y : array_like, shape (n,)
        y-values of the concave corners of the CDF
Returns
-------
x_staircase : array_like, shape (2*n, )
x-values for staircase CDF.
y_staircase : array_like, shape (2*n, )
y-values for staircase CDF.
"""
# Set up output arrays
x_staircase = np.empty(2 * len(x))
y_staircase = np.empty(2 * len(x))
# y-values for steps
y_staircase[0] = 0
y_staircase[1::2] = y
y_staircase[2::2] = y[:-1]
# x- values for steps
x_staircase[::2] = x
x_staircase[1::2] = x
return x_staircase, y_staircase
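# --- Illustrative check (added; not part of the original module) ---
# Three concave corners of a CDF become a six-point staircase:
# x: [1, 1, 2, 2, 4, 4], y: [0, 1/3, 1/3, 2/3, 2/3, 1].
def _example_cdf_to_staircase():
    x = np.array([1.0, 2.0, 4.0])
    y = np.array([1 / 3, 2 / 3, 1.0])
    return cdf_to_staircase(x, y)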
@numba.jit(nopython=True)
def _y_ecdf(data, x):
y = np.arange(len(data) + 1) / len(data)
return y[np.searchsorted(np.sort(data), x, side="right")]
@numba.jit(nopython=True)
def _draw_ecdf_bootstrap(L, n, n_bs_reps=100000):
x = np.arange(L + 1)
ys = np.empty((n_bs_reps, len(x)))
for i in range(n_bs_reps):
draws = np.random.randint(0, L + 1, size=n)
ys[i, :] = _y_ecdf(draws, x)
return ys
def _sbc_rank_envelope(L, n, ptile=95, diff=True, bootstrap=False, n_bs_reps=None):
x = np.arange(L + 1)
y = st.randint.cdf(x, 0, L + 1)
std = np.sqrt(y * (1 - y) / n)
if bootstrap:
if n_bs_reps is None:
n_bs_reps = int(max(n, max(L + 1, 100 / (100 - ptile))) * 100)
ys = _draw_ecdf_bootstrap(L, n, n_bs_reps=n_bs_reps)
y_low, y_high = np.percentile(ys, [50 - ptile / 2, 50 + ptile / 2], axis=0)
else:
y_low = np.concatenate(
(st.norm.ppf((50 - ptile / 2) / 100, y[:-1], std[:-1]), (1.0,))
)
y_high = np.concatenate(
(st.norm.ppf((50 + ptile / 2) / 100, y[:-1], std[:-1]), (1.0,))
)
# Ensure that ends are appropriate
y_low = np.maximum(0, y_low)
y_high = np.minimum(1, y_high)
# Make "staircase" stepped ECDFs
_, y_low = cdf_to_staircase(x, y_low)
x_staircase, y_high = cdf_to_staircase(x, y_high)
if diff:
_, y = cdf_to_staircase(x, y)
y_low -= y
y_high -= y
return x_staircase, y_low, y_high
def _ecdf_diff(data, L, staircase=False):
x, y = _ecdf_vals(data)
y_uniform = (x + 1) / L
if staircase:
x, y = cdf_to_staircase(x, y)
_, y_uniform = cdf_to_staircase(np.arange(len(data)), y_uniform)
y -= y_uniform
return x, y
def _get_cat_range(df, grouped, order, color_column, horizontal):
if order is None:
if isinstance(list(grouped.groups.keys())[0], tuple):
factors = tuple(
[tuple([str(k) for k in key]) for key in grouped.groups.keys()]
)
else:
factors = tuple([str(key) for key in grouped.groups.keys()])
else:
if type(order[0]) in [list, tuple]:
factors = tuple([tuple([str(k) for k in key]) for key in order])
else:
factors = tuple([str(entry) for entry in order])
if horizontal:
cat_range = bokeh.models.FactorRange(*(factors[::-1]))
else:
cat_range = bokeh.models.FactorRange(*factors)
if color_column is None:
color_factors = factors
else:
color_factors = tuple(sorted(list(df[color_column].unique().astype(str))))
return cat_range, factors, color_factors
def _cat_figure(
df,
grouped,
plot_height,
plot_width,
x_axis_label,
y_axis_label,
title,
order,
color_column,
tooltips,
horizontal,
val_axis_type,
):
fig_kwargs = dict(
plot_height=plot_height,
plot_width=plot_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
title=title,
tooltips=tooltips,
)
cat_range, factors, color_factors = _get_cat_range(
df, grouped, order, color_column, horizontal
)
if horizontal:
fig_kwargs["y_range"] = cat_range
fig_kwargs["x_axis_type"] = val_axis_type
else:
fig_kwargs["x_range"] = cat_range
fig_kwargs["y_axis_type"] = val_axis_type
return bokeh.plotting.figure(**fig_kwargs), factors, color_factors
def _cat_source(df, cats, cols, color_column):
if type(cats) in [list, tuple]:
cat_source = list(zip(*tuple([df[cat].astype(str) for cat in cats])))
labels = [", ".join(cat) for cat in cat_source]
else:
cat_source = list(df[cats].astype(str).values)
labels = cat_source
if type(cols) in [list, tuple, pd.core.indexes.base.Index]:
source_dict = {col: list(df[col].values) for col in cols}
else:
source_dict = {cols: list(df[cols].values)}
source_dict["cat"] = cat_source
if color_column in [None, "cat"]:
source_dict["__label"] = labels
else:
source_dict["__label"] = list(df[color_column].astype(str).values)
source_dict[color_column] = list(df[color_column].astype(str).values)
return bokeh.models.ColumnDataSource(source_dict)
def _tooltip_cols(tooltips):
if tooltips is None:
return []
if type(tooltips) not in [list, tuple]:
raise RuntimeError("`tooltips` must be a list or tuple of two-tuples.")
cols = []
for tip in tooltips:
if type(tip) not in [list, tuple] or len(tip) != 2:
raise RuntimeError("Invalid tooltip.")
if tip[1][0] == "@":
if tip[1][1] == "{":
cols.append(tip[1][2 : tip[1].find("}")])
elif "{" in tip[1]:
cols.append(tip[1][1 : tip[1].find("{")])
else:
cols.append(tip[1][1:])
return cols
def _cols_to_keep(cats, val, color_column, tooltips):
cols = _tooltip_cols(tooltips)
cols += [val]
if type(cats) in [list, tuple]:
cols += list(cats)
else:
cols += [cats]
if color_column is not None:
cols += [color_column]
return list(set(cols))
def _check_cat_input(df, cats, val, color_column, tooltips, palette, kwargs):
if df is None:
raise RuntimeError("`df` argument must be provided.")
if cats is None:
raise RuntimeError("`cats` argument must be provided.")
if val is None:
raise RuntimeError("`val` argument must be provided.")
if type(palette) not in [list, tuple]:
raise RuntimeError("`palette` must be a list or tuple.")
if val not in df.columns:
raise RuntimeError(f"{val} is not a column in the inputted data frame")
cats_array = type(cats) in [list, tuple]
if cats_array:
for cat in cats:
if cat not in df.columns:
raise RuntimeError(f"{cat} is not a column in the inputted data frame")
else:
if cats not in df.columns:
raise RuntimeError(f"{cats} is not a column in the inputted data frame")
if color_column is not None and color_column not in df.columns:
raise RuntimeError(f"{color_column} is not a column in the inputted data frame")
cols = _cols_to_keep(cats, val, color_column, tooltips)
for col in cols:
if col not in df.columns:
raise RuntimeError(f"{col} is not a column in the inputted data frame")
bad_kwargs = ["x", "y", "source", "cat", "legend"]
if kwargs is not None and any([key in kwargs for key in bad_kwargs]):
raise RuntimeError(", ".join(bad_kwargs) + " are not allowed kwargs.")
if val == "cat":
raise RuntimeError("`'cat'` cannot be used as `val`.")
if val == "__label" or (cats == "__label" or (cats_array and "__label" in cats)):
raise RuntimeError("'__label' cannot be used for `val` or `cats`.")
return cols
def _outliers(data):
bottom, middle, top = np.percentile(data, [25, 50, 75])
iqr = top - bottom
outliers = data[(data > top + 1.5 * iqr) | (data < bottom - 1.5 * iqr)]
return outliers
def _box_and_whisker(data):
middle = data.median()
bottom = data.quantile(0.25)
top = data.quantile(0.75)
iqr = top - bottom
top_whisker = data[data <= top + 1.5 * iqr].max()
bottom_whisker = data[data >= bottom - 1.5 * iqr].min()
return pd.Series(
{
"middle": middle,
"bottom": bottom,
"top": top,
"top_whisker": top_whisker,
"bottom_whisker": bottom_whisker,
}
)
def _box_source(df, cats, val, cols):
"""Construct a data frame for making box plot."""
# Need to reset index for use in slicing outliers
df_source = df.reset_index(drop=True)
if type(cats) in [list, tuple]:
level = list(range(len(cats)))
else:
level = 0
if cats is None:
grouped = df_source
else:
grouped = df_source.groupby(cats)
# Data frame for boxes and whiskers
df_box = grouped[val].apply(_box_and_whisker).unstack().reset_index()
source_box = _cat_source(
df_box, cats, ["middle", "bottom", "top", "top_whisker", "bottom_whisker"], None
)
# Data frame for outliers
df_outliers = grouped[val].apply(_outliers).reset_index(level=level)
df_outliers[cols] = df_source.loc[df_outliers.index, cols]
source_outliers = _cat_source(df_outliers, cats, cols, None)
return source_box, source_outliers
def _ecdf_y(data, complementary=False):
"""Give y-values of an ECDF for an unsorted column in a data frame.
Parameters
----------
data : Pandas Series
Series (or column of a DataFrame) from which to generate ECDF
values
complementary : bool, default False
If True, give the ECCDF values.
Returns
-------
output : Pandas Series
Corresponding y-values for an ECDF when plotted with dots.
Notes
-----
.. This only works for plotting an ECDF with points, not for staircase
ECDFs
"""
if complementary:
return 1 - data.rank(method="first") / len(data) + 1 / len(data)
else:
return data.rank(method="first") / len(data)
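# Illustrative sketch (not part of the original module): ECDF y-values for a
# small unsorted Series, showing that each entry gets rank/n in place.
def _demo_ecdf_y():
    s = pd.Series([3.1, 1.2, 2.4, 0.7])
    # ranks are [4, 2, 3, 1], so the result is [1.0, 0.5, 0.75, 0.25]
    return _ecdf_y(s)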
def _point_ecdf_source(data, val, cats, cols, complementary, colored):
"""DataFrame for making point-wise ECDF."""
df = data.copy()
if complementary:
col = "__ECCDF"
else:
col = "__ECDF"
if cats is None or colored:
df[col] = _ecdf_y(df[val], complementary)
else:
df[col] = df.groupby(cats)[val].transform(_ecdf_y, complementary)
cols += [col]
return _cat_source(df, cats, cols, None)
def _ecdf_collection_dots(
df, val, cats, cols, complementary, order, palette, show_legend, y, p, **kwargs
):
_, _, color_factors = _get_cat_range(df, df.groupby(cats), order, None, False)
source = _point_ecdf_source(df, val, cats, cols, complementary, False)
if "color" not in kwargs:
kwargs["color"] = bokeh.transform.factor_cmap(
"cat", palette=palette, factors=color_factors
)
if show_legend:
kwargs["legend"] = "__label"
p.circle(source=source, x=val, y=y, **kwargs)
return p
def _ecdf_collection_staircase(
df, val, cats, complementary, order, palette, show_legend, p, **kwargs
):
grouped = df.groupby(cats)
color_not_in_kwargs = "color" not in kwargs
if order is None:
order = list(grouped.groups.keys())
grouped_iterator = [
(order_val, grouped.get_group(order_val)) for order_val in order
]
for i, g in enumerate(grouped_iterator):
if show_legend:
if type(g[0]) == tuple:
legend = ", ".join([str(c) for c in g[0]])
else:
legend = str(g[0])
else:
legend = None
if color_not_in_kwargs:
kwargs["color"] = palette[i % len(palette)]
ecdf(
g[1][val],
staircase=True,
p=p,
legend=legend,
complementary=complementary,
**kwargs,
)
return p
def _display_clicks(div, attributes=[], style="float:left;clear:left;font_size=0.5pt"):
"""Build a suitable CustomJS to display the current event
in the div model."""
return bokeh.models.CustomJS(
args=dict(div=div),
code="""
var attrs = %s; var args = [];
for (var i=0; i<attrs.length; i++ ) {
args.push(Number(cb_obj[attrs[i]]).toFixed(4));
}
var line = "<span style=%r>[" + args.join(", ") + "], </span>\\n";
var text = div.text.concat(line);
var lines = text.split("\\n")
if ( lines.length > 35 ) { lines.shift(); }
div.text = lines.join("\\n");
"""
% (attributes, style),
)
def _data_range(df, x, y, margin=0.02):
x_range = df[x].max() - df[x].min()
y_range = df[y].max() - df[y].min()
return (
[df[x].min() - x_range * margin, df[x].max() + x_range * margin],
[df[y].min() - y_range * margin, df[y].max() + y_range * margin],
)
def _create_points_image(x_range, y_range, w, h, df, x, y, cmap):
cvs = ds.Canvas(
x_range=x_range, y_range=y_range, plot_height=int(h), plot_width=int(w)
)
agg = cvs.points(df, x, y, agg=ds.reductions.count())
return ds.transfer_functions.dynspread(
ds.transfer_functions.shade(agg, cmap=cmap, how="linear")
)
def _create_line_image(x_range, y_range, w, h, df, x, y, cmap=None):
cvs = ds.Canvas(
x_range=x_range, y_range=y_range, plot_height=int(h), plot_width=int(w)
)
agg = cvs.line(df, x, y)
return ds.transfer_functions.dynspread(ds.transfer_functions.shade(agg, cmap=cmap))
def _contour_lines(X, Y, Z, levels):
"""
Generate lines for contour plot.
"""
# Compute the density levels.
Zflat = Z.flatten()
inds = np.argsort(Zflat)[::-1]
Zflat = Zflat[inds]
sm = np.cumsum(Zflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Zflat[sm <= v0][-1]
except:
V[i] = Zflat[0]
V.sort()
m = np.diff(V) == 0
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Make contours
c = matplotlib._contour.QuadContourGenerator(X, Y, Z, None, True, 0)
xs = []
ys = []
for level in V:
paths = c.create_contour(level)
for line in paths:
xs.append(line[:, 0])
ys.append(line[:, 1])
return xs, ys
def contour_lines_from_samples(
x, y, smooth=0.02, levels=None, bins=50, weights=None, extend_domain=False
):
"""
Get lines for a contour plot from (x, y) samples.
Parameters
----------
x : array_like, shape (n,)
x-values of samples.
y : array_like, shape (n,)
y-values of samples.
smooth : float, default 0.02
Smoothing parameter for Gaussian smoothing of contour. A
Gaussian filter is applied with standard deviation given by
`smooth * bins`. If None, no smoothing is done.
levels : float, list of floats, or None
The levels of the contours. To enclose 95% of the samples, use
`levels=0.95`. If provided as a list, multiple levels are used.
        If None, `levels` defaults to approximately [0.12, 0.39, 0.68, 0.86],
        the probability mass enclosed within 0.5σ, 1σ, 1.5σ, and 2σ for a
        2D Gaussian.
bins : int, default 50
Binning of samples into square bins is necessary to construct
the contours. `bins` gives the number of bins in each direction.
weights : array_like, shape (n,), default None
Weights to apply to each sample in constructing the histogram.
Default is `None`, such that all samples are equally weighted.
extend_domain : bool, default False
If True, extend the domain of the contours beyond the domain
of the min and max of the samples. This can be useful if the
contours might clash with the edges of a plot.
Returns
-------
xs : list of arrays
Each array is the x-values for a plotted contour
ys : list of arrays
Each array is the y-values for a plotted contour
Notes
-----
.. The method proceeds as follows: the samples are binned. The
counts of samples landing in bins are thought of as values of a
function f(xb, yb), where (xb, yb) denotes the center of the
respective bins. This function is then optionally smoothed using
a Gaussian blur, and then the result is used to construct a
contour plot.
.. Based heavily on code from the corner package by <NAME>.
"""
# The code in this function is based on the corner package by <NAME>.
    # Following is the copyright notice from that package.
#
# Copyright (c) 2013-2016 <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
if type(bins) != int or bins <= 0:
raise ValueError("`bins` must be a positive integer.")
data_range = [[x.min(), x.max()], [y.min(), y.max()]]
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
elif type(levels) not in [list, tuple, np.ndarray]:
levels = [levels]
for level in levels:
if level <= 0 or level > 1:
raise ValueError("All level values must be between zero and one.")
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(
x.flatten(),
y.flatten(),
bins=bins,
range=list(map(np.sort, data_range)),
weights=weights,
)
except ValueError:
raise ValueError(
"2D histogram generation failed. It could be that one of your sampling ranges has no dynamic range."
)
if smooth is not None:
H = scipy.ndimage.gaussian_filter(H, smooth * bins)
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
if extend_domain:
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate(
[
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
]
)
Y2 = np.concatenate(
[
                Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
                Y1,
                Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
            ]
        )
    else:
        X2, Y2, H2 = X1, Y1, H
    return _contour_lines(X2, Y2, H2.transpose(), levels)
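# Illustrative sketch (not part of the original module): contour lines that
# enclose roughly 68% and 95% of correlated bivariate normal samples.
def _demo_contour_lines_from_samples():
    rng = np.random.default_rng(3252)
    x = rng.normal(size=10000)
    y = 0.5 * x + rng.normal(size=10000)
    xs, ys = contour_lines_from_samples(x, y, levels=[0.68, 0.95], bins=50)
    # Each (xs[i], ys[i]) pair is one contour path, e.g. for p.multi_line().
    return xs, ys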
'''
Greedy Randomised Adaptive Search Procedure
classes and functions.
'''
import numpy as np
import time
class FixedRCLSizer:
'''
Fixed sized RCL list.
When r = 1 then greedy
When r = len(tour) then random
'''
def __init__(self, r):
self.r = r
def get_size(self):
'''
Returns an int representing the size of the required RCL
'''
return self.r
class RandomRCLSizer:
'''
    Probabilistic selection of the RCL size
Uniform probability.
'''
def __init__(self, r_list, random_seed=None):
self.r_list = r_list
self.rng = np.random.default_rng(random_seed)
def get_size(self, size=None):
'''
Returns a randomly selected RCL size
'''
return self.rng.choice(self.r_list, size=size)
class SemiGreedyConstructor:
'''
Semi-greedy construction of a tour.
    For a city i, it creates a restricted candidate list of size r,
    i.e. the r shortest distances from city i.
    The next city is chosen from the RCL with equal probability.
    This repeats until the tour is constructed.
'''
def __init__(self, rcl_sizer, tour, matrix,
random_seed=None):
'''
Constructor
Params:
------
rcl_sizer: object
sizes the restricted candidate list
tour: np.ndarray
vector of city indexes included in problem
matrix: np.ndarray
matrix of travel costs
random_seed: int
used to control sampling and provides a
reproducible result.
'''
# size of rcl
self.rcl_sizer = rcl_sizer
# cities in a tour
self.tour = tour
# travel cost matrix
self.matrix = matrix
# create random number generator
self.rng = np.random.default_rng(random_seed)
def build(self):
'''
        Semi-greedy construction of tour
Returns:
--------
np.array
'''
# first city in tour
solution = np.array([self.tour[0]])
# it is an iterative (construction) procedure
for i in range(len(self.tour)-1):
# get the RCL size
r = self.rcl_sizer.get_size()
# get the RCL
rcl = self.get_rcl(r, solution, solution[-1])
# select the next city
next_city = self.random_from_rcl(rcl)
# update the solution
solution = np.append(solution, np.array([next_city]))
return solution
def get_rcl(self, r, solution, from_city):
'''
Restricted candidate list for final city in current solution
Params:
-------
solution: np.ndarray
vector of current partially constructed solution
from_city: int
index of city used to construct rcl.
Returns:
-------
np.array
'''
# get indexes of cities not in solution
mask = self.tour[~np.in1d(self.tour, solution)]
        # get indexes of the r smallest travel costs
if mask.shape[0] > r:
# partition the vector for remaining cities - faster than sorting
idx = np.argpartition(self.matrix[from_city][mask],
len(mask) - r)[-r:]
rcl = mask[idx]
else:
            # handle the case when r or fewer cities remain
rcl = mask
return rcl
def random_from_rcl(self, rcl):
'''
Select a city at random from rcl.
Return city index in self.matrix
Params:
-------
rcl: np.ndarray
restricted candidate list
vector of candidate city indexes.
'''
return self.rng.choice(rcl)
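# Illustrative wiring sketch (not part of the original module). The 4-city
# cost matrix below is hypothetical; in practice `matrix` comes from the TSP
# instance being solved.
def _demo_semi_greedy_constructor():
    matrix = np.array([[0.0, 2.0, 9.0, 10.0],
                       [2.0, 0.0, 6.0, 4.0],
                       [9.0, 6.0, 0.0, 8.0],
                       [10.0, 4.0, 8.0, 0.0]])
    tour = np.arange(4)
    sizer = FixedRCLSizer(r=2)
    constructor = SemiGreedyConstructor(sizer, tour, matrix, random_seed=42)
    # returns a permutation of the city indexes starting from city 0
    return constructor.build()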
class GRASP:
'''
Greedy Randomised Adaptive Search Procedure algorithm
for the Travelling Salesman Problem.
The class has the following properties
.best: float
the best cost
.best_solution: np.ndarray
the best tour found
'''
def __init__(self, constructor, local_search, max_iter=1000,
time_limit=np.inf):
'''
Constructor
Parameters:
---------
constructor: object
semi-greedy construction heuristic
local_search: object
local search heuristic e.g. `HillClimber`
max_iter: int, optional (default=1000)
The maximum number of iterations (restarts) of GRASP
time_limit: float64, optional (default=np.inf)
            The maximum allowable run time.
'''
# semi greedy tour construction method
self.constructor = constructor
# local search procedure
self.local_search = local_search
# max runtime budget for GRASP
self.max_iter = max_iter
self.time_limit = time_limit
# init solution
self.best_solution = None
self.best = None
def solve(self):
'''
Run GRASP
Returns:
-------
None
'''
self.best_solution = None
self.best = -np.inf
i = 0
start = time.time()
while i < self.max_iter and ((time.time() - start) < self.time_limit):
i += 1
# construction phase
solution = self.constructor.build()
# Improve solution via local search
self.local_search.set_init_solution(solution)
self.local_search.solve()
current_solution = self.local_search.best_solutions[0]
current = self.local_search.best_cost
# check if better than current solution
if current > self.best:
self.best = current
self.best_solution = current_solution
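# Illustrative wiring sketch (not part of the original module). The stand-in
# local search below only scores a tour (as negative tour length, since GRASP
# maximises) and leaves it unchanged; a real run would plug in a hill-climber
# implementing the same interface.
class _NullLocalSearch:
    def __init__(self, matrix):
        self.matrix = matrix
        self.best_solutions = []
        self.best_cost = -np.inf
    def set_init_solution(self, solution):
        # score = negative total tour length (closing the loop back to the start)
        cost = -self.matrix[solution[-1], solution[0]]
        for i in range(len(solution) - 1):
            cost -= self.matrix[solution[i], solution[i + 1]]
        self.best_solutions = [solution]
        self.best_cost = cost
    def solve(self):
        # a real local search (e.g. 2-opt) would improve the tour here
        pass
def _demo_grasp(matrix):
    tour = np.arange(len(matrix))
    sizer = FixedRCLSizer(r=2)
    constructor = SemiGreedyConstructor(sizer, tour, matrix, random_seed=42)
    solver = GRASP(constructor, _NullLocalSearch(matrix), max_iter=20)
    solver.solve()
    return solver.best, solver.best_solution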
class MonitoredLocalSearch:
'''
Extends a local search class and provides the observer pattern.
An external object can observe the local search object and catch the
termination event (end of local search). The observer is notified and
passed the results of the local search.
Use cases:
----------
In GRASP this is useful for an algorithm sizing the RCL and learning
on average how different sizes of RCL perform.
'''
def __init__(self, local_search):
'''
Constructor:
Params:
------
local_search: Object
Must implement .solve(), best_cost, best_solution
'''
self.local_search = local_search
self.observers = []
def register_observer(self, observer):
'''
register an object to observe the local search
The observer should implement
local_search_terminated(*args, **kwargs)
'''
self.observers.append(observer)
def set_init_solution(self, solution):
'''
Set the initial solution
Params:
--------
solution: np.ndarray
vector representing the initial solution
'''
self.local_search.set_init_solution(solution)
def solve(self):
'''
Run the local search.
At the end of the run all observers are notified.
'''
# run local search
self.local_search.solve()
# notify observers after search terminates.
best = self.local_search.best_cost
solution = self.local_search.best_solutions[0]
self.notify_observers(best, solution)
def notify_observers(self, *args, **kwargs):
'''
Observers must implement `local_search_terminated()`
method.
Params:
------
*args: list
variable number of arguments
**kwargs: dict
key word arguments
'''
for o in self.observers:
o.local_search_terminated(*args, **kwargs)
def _get_best_cost(self):
'''
best cost from internal local_search object
'''
return self.local_search.best_cost
def _get_best_solutions(self):
'''
get best solutions from local_search object
'''
return self.local_search.best_solutions
best_cost = property(_get_best_cost, doc='best cost')
best_solutions = property(_get_best_solutions, doc='best solution')
class ReactiveRCLSizer:
'''
Dynamically update the probability of selecting a
value of r for the size of the RCL.
Implements Reactive GRASP.
'''
def __init__(self, r_list, local_search, freq=None, random_seed=None):
'''
Constructor
Params:
-------
r_list: list
vector of sizes for RCL e.g. [1, 2, 3, 4, 5]
local_search: MonitoredLocalSearch
local_search to monitor
freq: int, optional (default=None)
Frequency in iterations at which the probabilities are updated.
            When set to None it defaults to the length of r_list
random_seed: int, optional (default=None)
Control random sampling for reproducible result
'''
# list of r sizes
self.r_list = r_list
# set of indexes to work with probabilities
self.elements = np.arange(len(r_list))
# probability of choosing r (initially uniform)
self.probs = np.full(len(r_list), 1/len(r_list))
# mean performance of size r
self.means = np.full(len(r_list), 1.0)
# runs of size r
self.allocations = np.full(len(r_list), 0)
# local search to monitor
self.local_search = local_search
# frequency of updating probs
if freq is None:
self.freq = len(self.r_list)
else:
self.freq = freq
# number of iterations within frequency
self.iter = 0
# current r index
self.index = -1
# to init run one of each r value
self.init = True
        # incumbent solution cost
self.best_cost = -np.inf
# register sizer as observer of the local search
local_search.register_observer(self)
# random no. gen
self.rng = np.random.default_rng(random_seed)
def local_search_terminated(self, *args, **kwargs):
'''
Termination of the local search
'''
# iteration complete
self.iter += 1
# get the best cost found in the iteration
iter_cost = args[0]
        # record that an iteration took place with index i
self.allocations[self.index] += 1
# update running mean
mean_x = self.means[self.index]
n = self.allocations[self.index]
self.means[self.index] += (iter_cost - mean_x) / n
self.update_r()
# update incumbent cost if required
if iter_cost > self.best_cost:
self.best_cost = iter_cost
# update probs if freq met.
if self.iter >= self.freq and not self.init:
self.iter = 0
self.update_probability()
def update_probability(self):
'''
Let $q_i = f^* / A_i$
        and $p_i = \dfrac{q_i}{\sum_{j=1}^{m} q_j}$
where
$f^*$ is the incumbent (cost)
$A_i$ is the mean cost found with r_i
larger q_i indicates more suitable values of r_i
'''
q = self.best_cost / self.means
self.probs = q / q.sum()
def update_r(self):
'''
update the size of r
Note that the implementation ensures that all r values are run
for at least one iteration of the algorithm.
'''
# initial bit of logic makes sure there is at least one run of all probabilities
if self.init:
self.index += 1
if self.index >= len(self.r_list):
self.init = False
self.index = self.rng.choice(self.elements, p=self.probs)
else:
self.index = self.rng.choice(self.elements, p=self.probs)
def get_size(self):
'''
Return the selected size of the RCL
The selection is done using a discrete distribution
self.r_probs.
'''
return self.r_list[self.index]
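# Illustrative wiring sketch (not part of the original module): Reactive GRASP
# requires the sizer to observe a MonitoredLocalSearch wrapper, and the same
# wrapper is then passed to GRASP. `local_search`, `tour` and `matrix` are
# assumed to come from the problem instance.
def _demo_reactive_grasp(local_search, tour, matrix):
    monitored = MonitoredLocalSearch(local_search)
    sizer = ReactiveRCLSizer([1, 2, 3, 4, 5], monitored, random_seed=42)
    constructor = SemiGreedyConstructor(sizer, tour, matrix, random_seed=42)
    solver = GRASP(constructor, monitored, max_iter=100)
    solver.solve()
    return solver.best, solver.best_solution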
class RandomPlusGreedyConstructor(SemiGreedyConstructor):
'''
Random + semi-greedy construction of a tour.
The first n cities of a tour are randomly constructed.
    The remaining cities are selected using the standard semi-greedy approach.
For a city i creates a restricted candidate list of size r
i.e the r shortest distances from city i. Next city is chosen
with equal probability.
Repeats until tour is constructed.
'''
def __init__(self, rcl_sizer, tour, matrix, p_rand=0.2,
random_seed=None):
'''
RandomPlusGreedy Constructor method
Params:
------
rcl_sizer: object
sizes the restricted candidate list
tour: np.ndarray
vector of city indexes included in problem
matrix: np.ndarray
matrix of travel costs
p_rand: float, optional (default=0.2)
Proportion of tour that is randomly constructed
random_seed: int
used to control sampling provides a
reproducible result.
'''
# super class init
super().__init__(rcl_sizer, tour, matrix,
random_seed)
# proportion of tour that is randomly constructed
self.p_rand = p_rand
self.n_rand = int(p_rand * len(tour))
self.n_greedy = len(tour) - self.n_rand - 1
def build(self):
'''
        Random followed by semi-greedy construction of tour
Returns:
--------
np.array
'''
# first city in tour
solution = np.array([self.tour[0]])
# next n_rand cities are random
rand = self.rng.choice(self.tour[1:], size=self.n_rand, replace=False)
solution = np.append(solution, rand)
# remaining cities are semi-greedy
for i in range(self.n_greedy):
r = self.rcl_sizer.get_size()
rcl = self.get_rcl(r, solution, solution[-1])
next_city = self.random_from_rcl(rcl)
solution = np.append(solution, np.array([next_city]))
return solution
class ConstructorWithMemory:
'''
Provides a construction heuristic with a short term memory
'''
def __init__(self, constructor, memory_size=100):
'''Constructor method
Params:
-------
constructor: Object
Implements build() and returns a solution
memory_size, int, optional (default=100)
size of tabu list
'''
self.constructor = constructor
self.memory_size = memory_size
# memory implemented as list
self.history = []
def build(self):
'''
Run the stochastic construction heuristic
        Re-runs the heuristic if the result is within memory
Returns:
--------
np.ndarray
'''
solution = self.constructor.build()
while str(solution) in self.history:
solution = self.constructor.build()
# if at capacity remove oldest solution
if len(self.history) >= self.memory_size:
self.history.pop(0)
self.history.append(str(solution))
return solution
class EliteSet:
'''
Tracks and updates an elite set of solutions produced by a local search.
'''
def __init__(self, local_search=None, max_size=10, min_delta=0):
'''
Constructor
Params:
-------
local_search: MonitoredLocalSearch
The local search that produces candidates for the elite
set.
max_size: int, optional (default=10)
maximum entries in the elite set
min_delta: int, optional (Default=0)
The min cardinality difference between tours to allow entry
E.g. a = [1, 2, 3, 4, 5]; b = [1, 3, 4, 2, 5]. delta = 3.
Vary delta > 0 to increase diversity (but may limit entry)
'''
if local_search is not None:
self.local_search = local_search
local_search.register_observer(self)
self.min_delta = min_delta
self.max_size = max_size
# data structures for elite solutions
self.solutions = None
self.costs = None
self.n_updates = 0
@property
def is_empty(self):
return self.solutions is None
def is_elite(self, solution):
'''
Is the solution a member of the elite set
Params:
------
solution: np.ndarray
            TSP solution
Returns:
--------
bool
'''
if self.solutions is None:
return False
else:
result = np.where((self.solutions==solution).all(axis=1))[0]
return len(result) > 0
def local_search_terminated(self, *args, **kwargs):
        '''
Termination of the local search
'''
s = args[1]
s_cost = args[0]
self.update(s, s_cost)
def init_elite_set(self, s, s_cost):
'''
        Initialise the elite set
'''
self.solutions = np.array([s])
self.costs = np.array([s_cost])
def update(self, s, s_cost):
'''
Update the elite set to maximise performance and diversity
Params:
-------
s: np.ndarray
TSP tour
s_cost: float
TSP tour cost
Returns:
-------
Tuple: np.ndarray, np.ndarray
elite_set, elite_costs
'''
if self.solutions is None:
self.init_elite_set(s, s_cost)
elif len(self.solutions) < self.max_size:
delta = (s != self.solutions).sum(axis=1).min()
if delta > self.min_delta:
self.solutions = np.append(self.solutions, [s], axis=0)
                self.costs = np.append(self.costs, [s_cost], axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>,Morgane
Functions in this Python file are related to plotting different stages of the calcium imaging analysis pipeline.
Most of them save the result in the corresponding folder of the particular step.
"""
# %% Importation
import pylab as pl
import caiman as cm
import matplotlib.pyplot as plt
import math
import numpy as np
from caiman.motion_correction import high_pass_filter_space
from caiman.source_extraction.cnmf.cnmf import load_CNMF
import Analysis_tools.metrics as metrics
import logging
import os
import datetime
import Analysis_tools.analysis_files_manipulation as fm
from caiman.source_extraction.cnmf.initialization import downscale
from Database.database_connection import database
mycursor = database.cursor()
def plot_movie_frame(decoded_file):
"""
This function creates an image for visual inspection of cropping points.
"""
m = cm.load(decoded_file)
pl.imshow(m[0, :, :], cmap='gray')
return
def plot_movie_frame_cropped(cropped_file):
"""
    This function creates an image for visual inspection of the cropped frame
"""
m = cm.load(cropped_file)
pl.imshow(m[0, :, :], cmap='gray')
return
def get_fig_gSig_filt_vals(cropped_file, gSig_filt_vals):
"""
Plot original cropped frame and several versions of spatial filtering for comparison
    :param cropped_file: path to the cropped movie file
:param gSig_filt_vals: array containing size of spatial filters that will be applied
:return: figure
"""
m = cm.load(cropped_file)
temp = cm.motion_correction.bin_median(m)
N = len(gSig_filt_vals)
fig, axes = plt.subplots(int(math.ceil((N + 1) / 2)), 2)
axes[0, 0].imshow(temp, cmap='gray')
axes[0, 0].set_title('unfiltered')
axes[0, 0].axis('off')
for i in range(0, N):
gSig_filt = gSig_filt_vals[i]
m_filt = [high_pass_filter_space(m_, (gSig_filt, gSig_filt)) for m_ in m]
temp_filt = cm.motion_correction.bin_median(m_filt)
axes.flatten()[i + 1].imshow(temp_filt, cmap='gray')
axes.flatten()[i + 1].set_title(f'gSig_filt = {gSig_filt}')
axes.flatten()[i + 1].axis('off')
if N + 1 != axes.size:
for i in range(N + 1, axes.size):
axes.flatten()[i].axis('off')
# Get output file paths
sql = "SELECT mouse,session,trial,is_rest,cropping_v,decoding_v,motion_correction_v FROM Analysis WHERE cropping_main=%s "
val = [cropped_file, ]
mycursor.execute(sql, val)
myresult = mycursor.fetchall()
data = []
for x in myresult:
data += x
file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}.{data[6]}"
data_dir = 'data/interim/motion_correction/'
output_meta_gSig_filt = data_dir + f'meta/figures/frame_gSig_filt/{file_name}.png'
fig.savefig(output_meta_gSig_filt)
return fig
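# Illustrative call sketch (hypothetical path; not part of the original
# pipeline). The cropped file is assumed to be registered in the Analysis
# table, which the function queries to build its output figure path.
def _demo_gSig_filt_inspection(cropped_file='data/interim/cropping/main/example_cropped.tif'):
    gSig_filt_vals = [5, 7, 9, 11]  # candidate spatial filter sizes to compare
    return get_fig_gSig_filt_vals(cropped_file, gSig_filt_vals)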
def plot_crispness_for_parameters(selected_rows=None):
"""
    This function plots crispness for the motion correction states of all selected rows, so that crispness results can be compared across parameter settings.
:param selected_rows: analysis states for which crispness is required to be plotted
:return: figure that is also saved
"""
crispness_mean_original, crispness_corr_original, crispness_mean, crispness_corr = metrics.compare_crispness(
selected_rows)
total_states_number = len(selected_rows)
fig, axes = plt.subplots(1, 2)
axes[0].set_title('Summary image = Mean')
axes[0].plot(np.arange(1, total_states_number, 1), crispness_mean_original)
axes[0].plot(np.arange(1, total_states_number, 1), crispness_mean)
axes[0].legend(('Original', 'Motion_corrected'))
axes[0].set_ylabel('Crispness')
axes[1].set_title('Summary image = Corr')
axes[1].plot(np.arange(1, total_states_number, 1), crispness_corr_original)
axes[1].plot(np.arange(1, total_states_number, 1), crispness_corr)
axes[1].legend(('Original', 'Motion_corrected'))
axes[1].set_ylabel('Crispness')
# Get output file paths
data_dir = 'data/interim/motion_correction/'
sql = "SELECT mouse,session,trial,is_rest,cropping_v,decoding_v,motion_correction_v FROM Analysis WHERE motion_correction_main=%s "
val = [selected_rows, ]
mycursor.execute(sql, val)
myresult = mycursor.fetchall()
data = []
for x in myresult:
data += x
file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}.{data[6]}"
output_meta_crispness = data_dir + f'meta/figures/crispness/{file_name}.png'
fig.savefig(output_meta_crispness)
return fig
def plot_corr_pnr(mouse_row, parameters_source_extraction):
"""
Plots the summary images correlation and pnr. Also the pointwise product between them (used in Caiman paper Zhou
et al 2018)
:param mouse_row:
    :param parameters_source_extraction: parameters that will be used for source
    extraction. The relevant parameters here are min_corr and min_pnr because the source extraction algorithm is
    initialized (initial cell templates) at all pixels whose values surpass those thresholds
:return: figure
"""
input_mmap_file_path = eval(mouse_row.loc['motion_correction_output'])['main']
# Load memory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
        logging.warning(f'{mouse_row.name} .mmap file does not exist. Cancelling')
        return None
# Determine output paths
step_index = db.get_step_index('motion_correction')
data_dir = 'data/interim/source_extraction/trial_wise/'
# Check if the summary images are already there
gSig = parameters_source_extraction['gSig'][0]
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(mouse_row.name, gSig_abs=(gSig, gSig))
if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{mouse_row.name} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{mouse_row.name} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=parameters_source_extraction['gSig'][0],
swap_dim=False)
# Saving summary images as npy files
corr_npy_file_path = data_dir + f'meta/corr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'meta/pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
fig = plt.figure(figsize=(15, 15))
min_corr = round(parameters_source_extraction['min_corr'], 2)
min_pnr = round(parameters_source_extraction['min_pnr'], 1)
max_corr = round(cn_filter.max(), 2)
max_pnr = 20
# continuous
cmap = 'viridis'
fig, axes = plt.subplots(1, 3, sharex=True)
corr_fig = axes[0].imshow(np.clip(cn_filter, min_corr, max_corr), cmap=cmap)
axes[0].set_title('Correlation')
fig.colorbar(corr_fig, ax=axes[0])
    pnr_fig = axes[1].imshow(np.clip(pnr, min_pnr, max_pnr), cmap=cmap)
# Here's an attempt to recode the perl script that threads the QTL finding wrapper into python.
# Instead of having a wrapper to call python scripts, we'll use a single script to launch everything. This avoids having to reparse the data (even though it is fast).
# Ok, so now we're going to try a heuristic to accelerate the QTL addition step.
# The heuristic will be to scan every X QTLs instead of every single one. Once we find a good one, we only scan the x*2 positions around the top hit. I am hoping that this will give at least 2 times faster searches.
import string
import numpy as np
from scipy import linalg
import sys
import csv
import itertools
import time
import random
import argparse
import os
cwd = os.getcwd()
import psutil
process = psutil.Process(os.getpid())
import multiprocessing as mp
from multiprocessing import Pool
#sys.path.append('/n/desai_lab/users/klawrence/BBQ/alldata')
#sys.path.append('/n/home00/nnguyenba/scripts/BBQ/alldata')
try:
sys.path.append('/n/home00/nnguyenba/scripts/BBQ/alldata')
except:
sys.path.append('/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data')
pass
from spore_defs import *
# Read SNP map
#SNP_reader = csv.reader(open('/n/desai_lab/users/klawrence/BBQ/alldata/BYxRM_nanopore_SNPs.txt','r'),delimiter='\t')
#SNP_reader = csv.reader(open('/n/home00/nnguyenba/scripts/BBQ/alldata/BYxRM_nanopore_SNPs.txt','r'),delimiter='\t')
SNP_reader = csv.reader(open('/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data/BYxRM_nanopore_SNPs.txt','r'),delimiter='\t')
genome_str = genome_str_to_int(next(SNP_reader))
SNP_list = genome_to_chroms(genome_str)
num_chroms = len(SNP_list)
num_SNPs = [len(x) for x in SNP_list]
num_SNPs_total = sum(num_SNPs)
#print(num_SNPs,file=sys.stdout,flush=True)
#print(num_SNPs_total,file=sys.stdout,flush=True)
chrom_startpoints = get_chrom_startpoints(genome_str)
chrom_endpoints = get_chrom_endpoints(genome_str)
# print(chrom_startpoints) [0, 996, 4732, 5291, 9327, 11187, 12476, 16408, 18047, 20126, 23101, 26341, 30652, 33598, 35398, 39688]
# print(chrom_endpoints) [994, 4730, 5289, 9325, 11185, 12474, 16406, 18045, 20124, 23099, 26339, 30650, 33596, 35396, 39686, 41608]
# print(num_SNPs) [995, 3735, 558, 4035, 1859, 1288, 3931, 1638, 2078, 2974, 3239, 4310, 2945, 1799, 4289, 1921]
#exit()
# Systematically check every positions
from argparse import ArgumentParser, SUPPRESS
# Disable default help
parser = ArgumentParser(add_help=False)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
# Add back help
optional.add_argument(
'-h',
'--help',
action='help',
default=SUPPRESS,
help='show this help message and exit'
)
required.add_argument('--fit', help='Plain text two-column file containing the fitnesses and the standard errors.')
optional.add_argument('--log', help='Plain text file logging the progress of the QTL search.', default="output.txt")
optional.add_argument('--oCV', help='Outside cross-validation value (k = 0-9)', type=int, default=0)
optional.add_argument('--iCV', help='Inside cross-validation value (l = 0-8)', type=int, default=0)
optional.add_argument('--model', help='Whether to fit on the training set (m = 0), on the train+test set (m = 1) or on the complete data (m = 2)', type=int, default=0)
optional.add_argument('--dir', help='Directory where intermediate files are found.', default=cwd)
optional.add_argument('--scratch', help='Local scratch directory', default='/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data/genomes/')
optional.add_argument('--refine', help='Refine every X QTLs, default is 5. 0 means never refine.', default=5, type=int)
optional.add_argument('--unweighted', help='Only run the forward search on unweighted data.', default=0, type=int)
optional.add_argument('--cpu', help='Number of threads to run on.', default=16, type=int)
optional.add_argument('--nosave', help='Set to 1 to avoid saving the npy progress files.', default=0, type=int)
optional.add_argument('--maxqtl', help='Number of QTLs to find.', default=300, type=int)
optional.add_argument('--downsample', help='Number of segregants to downsample.', default=0, type=int)
optional.add_argument('--sporelist', help='Restrict searches to a list of spores.')
args = parser.parse_args()
print(args, file=sys.stderr)
outside_CV = args.oCV # Goes from 0 to 9 # k = 10
inside_CV = args.iCV # Goes from 0 to 8 # l = 9
if(outside_CV > 9 or outside_CV < 0):
print("--oCV must be [0,9]")
exit()
if(inside_CV > 8 or inside_CV < 0):
print("--iCV must be [0,8]")
exit()
if(~np.isin(args.model , range(3))):
print("--model must be [0,2]")
exit()
if(args.refine == 0):
args.refine = np.Infinity
# Read in the fitness data
fitnesses_data = np.loadtxt(args.fit)
# Parse and see if it has standard errors
if(len(fitnesses_data.shape) != 2 or args.unweighted == 1):
# No errors found, assume all errors the same.
if(len(fitnesses_data.shape) == 1):
fitnesses_data = np.reshape(fitnesses_data,(-1,1))
fitnesses = fitnesses_data[:,0]
errors = np.ones(len(fitnesses_data))
else:
fitnesses = fitnesses_data[:,0]
errors = fitnesses_data[:,1]
errors = np.square(errors)
errors = np.reciprocal(errors)
seed = 100000
np.random.seed(seed) # This allows us to keep the same cross validation sets.
# If we are restricting search to a list of spores, then need to parse the list of spores.
sporelist = np.array(range(len(fitnesses)))
if(args.sporelist):
sporelist = np.loadtxt(args.sporelist, dtype=int)
# First let's take care of the outside CV
if(args.downsample > 0 and args.downsample < len(sporelist)):
#fitnesses = fitnesses[0:args.downsample]
#errors = errors[0:args.downsample]
sporelist = sporelist[0:args.downsample]
perm = np.random.permutation(sporelist)
train_perm = perm.copy()
if(args.model != 2):
train_perm = np.delete(train_perm, np.r_[outside_CV/10 * len(sporelist):(outside_CV + 1)/10 * len(sporelist)].astype(int),axis=0)
validation_perm = np.take(perm, np.r_[outside_CV/10 * len(sporelist):(outside_CV + 1)/10 * len(sporelist)].astype(int))
if(args.model != 1):
# Ok now let's take care of the inside CV
# To do this, we split the train_perm into a train/test permutation
test_perm = np.take(train_perm, np.r_[inside_CV/9 * len(train_perm):(inside_CV + 1)/9 * len(train_perm)].astype(int))
train_perm = np.delete(train_perm, np.r_[inside_CV/9 * len(train_perm):(inside_CV + 1)/9 * len(train_perm)].astype(int))
# We're doing a k*l fold validation procedure, where l = k-1.
# This allows us to only create 10 test sets, and only 10 validation sets, so the cross validation loops do not explode.
# For example, let the 80 - 10 - 10 (train - test - validation) split
# We can use the same validation for the following split: 10 - 80 -10 (test - train - validation)
# Now looking at that split, we can use the same test to do the following: 10 - 10 - 80 (test - validation - train)
# We will only 'train' on a subset of the data
train_set = np.take(fitnesses,train_perm) # This is 80% of the fitness data
errors = np.take(errors,train_perm)
phenotypes = train_set[~np.isnan(train_set)] # Is a numpy.ndarray
mean_phenotypes = np.mean(phenotypes)
TSS = np.sum((phenotypes-mean_phenotypes)**2)
errors = errors[~np.isnan(train_set)]
num_usable_spores = len(phenotypes)
# Open all the genotype files
genotypes_file = []
num_lines_genotypes = []
chr_to_scan = []
start = time.perf_counter()
for i in range(16):
#genotypes_file.append(np.load(str(args.scratch) + "/chr"+str(i+1)+"_pos_major.npy", mmap_mode="r")) # Uses 30 gb. Need to load once to cache into memory. Then subsequent searches are near instant.
genotypes_file.append(np.load(str(args.scratch) + "/chr"+str(i+1)+"_pos_major.npy"))
num_lines_genotypes.append(genotypes_file[i].shape[0])
chr_to_scan.append(i)
print(str(i) + " " + str(time.perf_counter() - start) + " " + str(process.memory_info().rss/1024/1024),file=sys.stderr)
# Here we will handle whether the script has been restart or whether we are starting from scratch.
# Open the log file.
current_likelihood = np.Infinity
current_pos_line = ""
current_beta_line = ""
current_progress_line = ""
flag_refined_pos = 0
geno_file = ""
Q_file = ""
R_file = ""
num_QTLs = 0
if(os.path.isfile(args.dir + "/" + args.log)):
with open(args.dir + "/" + args.log,'r') as readfile:
linecount = 0
for line in readfile:
line = line.rstrip()
if(linecount % 4 == 0):
current_likelihood = line
elif(linecount % 4 == 1):
current_pos_line = line
elif(linecount % 4 == 2):
current_beta_line = line
elif(linecount % 4 == 3):
current_progress_line = line
linecount = linecount + 1
# split the progress_line into the relevant flags
if(linecount > 0):
arr = current_progress_line.split("\t")
geno_file = arr[0]
Q_file = arr[1]
R_file = arr[2]
if(arr[3] == "find_new"):
flag_refined_pos = 1 # Need to refine
num_QTLs = int(arr[4])
# Read in the file of previous computations if we have found QTLs before. Otherwise, generate them.
prev_pos = []
prev_genotypes = []
prev_pos = np.array(prev_pos, dtype=np.int32)
prev_genotypes = np.array(prev_genotypes)
q = []
r = []
if(num_QTLs != 0):
# This is restarting.
prev_pos = np.fromstring(current_pos_line, dtype=int, sep=" ")
flag_load_prev = 0
try:
prev_genotypes = np.load(args.dir + "/" + geno_file)
except:
flag_load_prev = 1
pass
size_of_prev_genome = (prev_pos.size)
# Consistent prev_pos and prev_genotypes?
if(flag_load_prev == 1 or prev_genotypes.shape[1] != size_of_prev_genome):
# We have to remake it from the prev_pos line.
prev_genotypes = np.ones((num_usable_spores,size_of_prev_genome))
for pos_index in range(len(prev_pos)):
pos = prev_pos[pos_index]
chr_qtl = np.searchsorted(np.array(chrom_startpoints), pos+0.5)
start_of_chr = chrom_startpoints[chr_qtl-1]
pos_in_chr = pos - start_of_chr
pos_line = genotypes_file[chr_qtl-1][pos_in_chr]
pos_line = np.take(pos_line, train_perm)
pos_line = pos_line[~np.isnan(train_set)]
prev_genotypes[:,pos_index] = pos_line.copy()
base_genotypes = np.ones((num_usable_spores,1+size_of_prev_genome))
base_genotypes[:,1:] = prev_genotypes # First index is the intercept.
q,r = np.linalg.qr(base_genotypes * np.sqrt(np.reshape(errors,(num_usable_spores,1))))
else:
# Do we have q,r?
flag_remake = 0
if(os.path.isfile(args.dir + "/" + Q_file) and os.path.isfile(args.dir + "/" + R_file)):
#q = np.load(args.dir + "/" + Q_file)
#r = np.load(args.dir + "/" + R_file)
try:
q = np.load(args.dir + "/" + Q_file)
except:
flag_remake = 1
pass
try:
r = np.load(args.dir + "/" + R_file)
except:
flag_remake = 1
pass
else:
flag_remake = 1
if(flag_remake == 1):
# Remake
base_genotypes = np.ones((num_usable_spores,1+size_of_prev_genome))
base_genotypes[:,1:] = prev_genotypes # First index is the intercept.
q,r = np.linalg.qr(base_genotypes * np.sqrt(np.reshape(errors,(num_usable_spores,1))))
else:
size_of_prev_genome = 0
# Ok, we've now reloaded all the previous computations.
# Set up computation settings
poolcount = args.cpu*2
num_chrom_to_scan = len(genotypes_file)
def find_QTL(num):
lowest_RSS = np.Infinity
genome_at_lowest_RSS = []
pos_index_at_lowest_RSS = 0
last_q = []
#start = time.clock()
for chr in range(num_chrom_to_scan):
loc = chrom_startpoints[chr_to_scan[chr]]
for i in range(0 + num, num_lines_genotypes[chr_to_scan[chr]], poolcount):
if(np.isin(loc+i, prev_pos)):
continue
genome_line = genotypes_file[chr_to_scan[chr]][i]
# Remove genomes that have no phenotypes
# We need to remove genomes that have no phenotypes and genomes that aren't in the train set
genomes = np.take(genome_line,train_perm)
genomes = genomes[~np.isnan(train_set)]
genomes = np.reshape(genomes,(num_usable_spores,1)) # A N row by 1 column matrix
WX = genomes * np.sqrt(np.reshape(errors,(num_usable_spores,1))) # X = X * sqrt(W) -> N by 1
QtX = np.dot(np.transpose(q),WX) # Gets the scale for each vectors in Q. # Q^t * X -> k by 1
QtX_Q = np.einsum('ij,j->i',q,np.ravel(QtX)) # Dot product of Q and Q^t * X, but shaped as a single vector. This is the sum of all the projections of the new genotype on Q
orthogonalized = WX-np.reshape(QtX_Q,(num_usable_spores,1)) # Orthogonalize: Remove the projections from the real vector.
new_q = orthogonalized/np.linalg.norm(orthogonalized) # Orthonormalize: Now do final conversion.
# This gets the last column of Q.
# We only need the last column of Q to get the new residuals. We'll assemble the full Q or the full R if we need it (i.e. to obtain betas).
q_upTy = np.einsum('i,i', np.ravel(new_q), phenotypes * np.sqrt(errors))
q_upq_upTy = np.ravel(new_q) * q_upTy
predicted_fitnesses = initial_predicted_fitnesses + q_upq_upTy/np.sqrt(errors)
# Scale the intercept term
            mean_predicted_fitnesses = np.mean(predicted_fitnesses)
"""
registerSegments.py
---------------
Main Function for registering (aligning) colored point clouds with ICP/feature
matching as well as pose graph optimizating
"""
# import png
from PIL import Image
import csv
import open3d as o3d
import pymeshlab
import numpy as np
import cv2
import os
import glob
from utils.ply import Ply
from utils.camera import *
from registration import icp, feature_registration, match_ransac, rigid_transform_3D
from tqdm import trange
from pykdtree.kdtree import KDTree
import time
import sys
from config.registrationParameters import *
from config.segmentationParameters import SEG_INTERVAL, STARTFRAME, SEG_METHOD,ANNOTATION_INTERVAL
from config.DataAcquisitionParameters import SERIAL,camera_intrinsics
import pandas as pd
# Set up parameters for registration
# voxel sizes use to down sample raw pointcloud for fast ICP
voxel_size = VOXEL_SIZE
max_correspondence_distance_coarse = voxel_size * 15
max_correspondence_distance_fine = voxel_size * 1.5
# Set up parameters for post-processing
# Voxel size for the complete mesh
voxel_Radius = VOXEL_R
# Point considered an outlier if more than inlier_Radius away from other points
inlier_Radius = voxel_Radius * 2.5
# search for up to N frames for registration, odometry only N=1, all frames N = np.inf
N_Neighbours = K_NEIGHBORS
def post_process(originals, voxel_Radius, inlier_Radius):
"""
    Merge segments so that new points will not be added to the merged
    model if they are within voxel_Radius of the existing points, and keep a vote
    for whether the point is isolated beyond the radius of inlier_Radius at
    the time of the merge
Parameters
----------
originals : List of open3d.Pointcloud classe
6D pontcloud of the segments transformed into the world frame
voxel_Radius : float
Reject duplicate point if the new point lies within the voxel radius
of the existing point
inlier_Radius : float
Point considered an outlier if more than inlier_Radius away from any
other points
Returns
----------
points : (n,3) float
The (x,y,z) of the processed and filtered pointcloud
colors : (n,3) float
The (r,g,b) color information corresponding to the points
vote : (n, ) int
        The number of votes (duplicate points seen within voxel_Radius) each
        processed point has received
"""
for point_id in trange(len(originals)):
if point_id == 0:
vote = np.zeros(len(originals[point_id].points))
points = np.array(originals[point_id].points,dtype = np.float64)
colors = np.array(originals[point_id].colors,dtype = np.float64)
else:
points_temp = np.array(originals[point_id].points,dtype = np.float64)
colors_temp = np.array(originals[point_id].colors,dtype = np.float64)
dist , index = nearest_neighbour(points_temp, points)
new_points = np.where(dist > voxel_Radius)
points_temp = points_temp[new_points]
colors_temp = colors_temp[new_points]
inliers = np.where(dist < inlier_Radius)
vote[(index[inliers],)] += 1
vote = np.concatenate([vote, np.zeros(len(points_temp))])
points = np.concatenate([points, points_temp])
colors = np.concatenate([colors, colors_temp])
return (points,colors,vote)
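# Illustrative sketch (synthetic points, not part of the original pipeline):
# merge two tiny clouds with the module-level voxel_Radius/inlier_Radius
# settings and return the merged points, colors and duplicate-vote counts.
def _demo_post_process():
    pcd_a = o3d.geometry.PointCloud()
    pcd_a.points = o3d.utility.Vector3dVector(np.array([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]]))
    pcd_a.colors = o3d.utility.Vector3dVector(np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]))
    pcd_b = o3d.geometry.PointCloud()
    pcd_b.points = o3d.utility.Vector3dVector(np.array([[0.0, 0.0, 0.0002], [0.5, 0.0, 0.0]]))
    pcd_b.colors = o3d.utility.Vector3dVector(np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]))
    return post_process([pcd_a, pcd_b], voxel_Radius, inlier_Radius)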
def surface_reconstruction_screened_poisson(path):
ms = pymeshlab.MeshSet()
ms.load_new_mesh(path)
ms.compute_normals_for_point_sets()
ms.surface_reconstruction_screened_poisson()
ms.save_current_mesh(path)
filtered_mesh = o3d.io.read_point_cloud(path)
return filtered_mesh
def full_registration(pcds_down,cads,depths, max_correspondence_distance_coarse,max_correspondence_distance_fine):
"""
perform pairwise registration and build pose graph for up to N_Neighbours
Parameters
----------
pcds_down : List of open3d.Pointcloud instances
        Downsampled 6D pointcloud of the unaligned segments
max_correspondence_distance_coarse : float
The max correspondence distance used for the course ICP during the process
of coarse to fine registration
max_correspondence_distance_fine : float
The max correspondence distance used for the fine ICP during the process
of coarse to fine registration
Returns
----------
pose_graph: an open3d.PoseGraph instance
        Stores the pose of each segment in the nodes and the pairwise registrations in the edges
"""
global N_Neighbours
pose_graph = o3d.pipelines.registration.PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
n_pcds = len(pcds_down)
for source_id in trange(n_pcds):
for target_id in range(source_id + 1, min(source_id + N_Neighbours,n_pcds)):
# derive pairwise registration through feature matching
color_src = cads[source_id]
depth_src = depths[source_id]
color_dst = cads[target_id]
depth_dst = depths[target_id]
res = feature_registration((color_src, depth_src),
(color_dst, depth_dst))
if res is None:
# if feature matching fails, perform pointcloud matching
transformation_icp, information_icp = icp(
pcds_down[source_id], pcds_down[target_id],max_correspondence_distance_coarse,
max_correspondence_distance_fine, method = RECON_METHOD)
else:
transformation_icp = res
information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
pcds_down[source_id], pcds_down[target_id], max_correspondence_distance_fine,
transformation_icp)
information_icp *= 1.2 ** (target_id - source_id - 1)
if target_id == source_id + 1:
# odometry
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(np.linalg.inv(odometry)))
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=False))
else:
# loop closure
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=True))
return pose_graph
def joints_full_registration(pcds_down, LinkOrientations, LinkPositions):
"""
perform pairwise registration using robot end-effector poses and build pose graph
Parameters
----------
pcds_down : List of open3d.Pointcloud instances
Downampled 6D pontcloud of the unalligned segments
LinkOrientations : List of end-effector Orientations
LinkPositions : List of end-effector positions
Returns
----------
pose_graph: an open3d.PoseGraph instance
        Stores the pose of each segment in the nodes and the pairwise registrations in the edges
"""
global N_Neighbours
pose_graph = o3d.pipelines.registration.PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
n_pcds = len(pcds_down)
for source_id in trange(n_pcds):
for target_id in range(source_id + 1, min(source_id + N_Neighbours, n_pcds)):
R1 , R2 = LinkOrientations[source_id] , LinkOrientations[target_id]
T1 , T2 = LinkPositions[source_id] , LinkPositions[target_id]
transformation_icp = calculate_transformation(R1 , R2, T1, T2)
information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
pcds_down[source_id], pcds_down[target_id], max_correspondence_distance_fine,
transformation_icp)
if target_id == source_id + 1:
# odometry
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(np.linalg.inv(odometry)))
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=False))
else:
# loop closure
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=True))
return pose_graph
def calculate_transformation(R1, R2, T1, T2):
R = np.dot(R2, np.linalg.inv(R1))
T = T2 - np.dot(T1, np.dot(np.linalg.inv(R1).T, R2.T))
transformation_icp = [[R[0][0],R[0][1],R[0][2],T[0]],
[R[1][0],R[1][1],R[1][2],T[1]],
[R[2][0],R[2][1],R[2][2],T[2]],
[0,0,0,1]]
return transformation_icp
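# Illustrative sketch (synthetic poses, not from the pipeline): the relative
# transformation between two end-effector poses that differ only by a 0.1 m
# translation along x is a pure translation.
def _demo_calculate_transformation():
    R1 = np.eye(3)
    R2 = np.eye(3)
    T1 = np.array([0.0, 0.0, 0.0])
    T2 = np.array([0.1, 0.0, 0.0])
    return calculate_transformation(R1, R2, T1, T2)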
def load_robot_joints(path, keyframe_ids):
robot_joints = pd.read_csv(path+"/robot_joints.csv", index_col='filenames')
LinkR = robot_joints['LinkRotationMatrices']
LinkPositions = robot_joints['LinkPositions']
Rs=[]
Ts=[]
for filename in keyframe_ids:
R = list(map(float, LinkR[filename][1:len(LinkR[filename]) - 1].split(',')))
R = np.reshape(np.array(R), [3, 3])
T = np.array(list(map(float, LinkPositions[filename][1:len(LinkPositions[filename]) - 1].split(','))))
Rs.append(R)
Ts.append(T)
return Rs, Ts
def load_object_states(path, keyframe_ids):
robot_joints = pd.read_csv(path+"/robot_joints.csv", index_col='filenames')
ObjectR = robot_joints['ObjectRotationMatrices']
ObjectPositions = robot_joints['ObjectPositions']
Rs=[]
Ts=[]
for filename in keyframe_ids:
R = list(map(float, ObjectR[filename][1:len(ObjectR[filename]) - 1].split(',')))
        R = np.reshape(np.array(R), [3, 3])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" clusting_test.py
Description:
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, <NAME>"
__credits__ = ["<NAME>"]
__license__ = ""
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Prototype"
# Standard Libraries #
import datetime
# Third-Party Packages #
import numpy as np
from sklearn.cluster import MeanShift
# Local Packages #
from src.hdf5xltek import *
# Definitions #
# Functions #
def find_spikes(x, c, top=5000, bottom=-5000, b=None):
high = np.asarray(x[:, c] > top).nonzero()
low = np.asarray(x[:, c] < bottom).nonzero()
bounds = np.append(high[0], low[0], 0)
bounds = np.sort(bounds)
X = bounds.reshape(-1, 1)
ms = MeanShift(bandwidth=b)
ms.fit(X)
labels = ms.labels_
c_centers = ms.cluster_centers_
sep = separate_clusters(X, labels)
return labels, c_centers, sep
def separate_clusters(x, labels):
n_clusters = len(np.unique(labels))
sep = [[] for i in range(n_clusters)]
for v, c in zip(x, labels):
sep[int(c)].append(int(v))
return sep
# Main #
if __name__ == "__main__":
v_c = list(range(72, 80))
# v_c = [30, 59, 51, 43, 35]
first = datetime.datetime(2018, 10, 17, 9, 34, 00)
second = datetime.datetime(2018, 10, 17, 12, 5, 30)
study = HDF5XLTEKstudy('EC188')
d, f, g = study.data_range_time(first, second, frame=True)
print(study.find_time(datetime.datetime(2019, 1, 24, 9, 40, 4)))
fs = d[0].frames[0].sample_rate
task = d[0][5576000:7700000, :]
all_viewer = EEGScanner(d[0][8934000:11750000], v_c, ylim=2000, show=True)
#all_viewer = eegscanner.eegscanner(task, v_c, show=True)
task2 = [d[0][8934000:9269000], d[0][9450000:9686000], d[0][9750000:10060000], d[0][10060000:10380000],
d[0][10380000:11000000], d[0][11000000:11500000], d[0][11500000:11750000]]
tests = [task[:420000, :], task[420000:715000, :], task[715000:1020000, :],
task[1020000:1300000, :], task[1300000:1560000, :], task[1560000:1830000, :], task[1830000:, :]]
all_t = tests+task2
#all_t = task2
pre = 128
length = 128
total = length+pre
meme = []
s_chans = [79, 79, 79, 79, 79, 79, 72, 72, 72, 72, 72, 72, 72, 72]
# s_chans = [51, 51, 3, 3, 79, 51, 51]
# valid = ((0, -3), (4, -1), (0, -1), (0, -1), (0, -1), (0, -1), (0, -1), (0, -1))
inx = np.transpose(np.arange(32 - 1, 0 - 1, -1).reshape(4, 8)).flatten()
inx2 = np.transpose(np.arange(64 - 1, 32 - 1, -1).reshape(4, 8)).flatten()
cn1 = [['32: parstriangularis', '24: Parsopercularis', '16: Parsopercularis', '8: Precentral'],
['31: parstriangularis', '23: Parsopercularis', '15: Precentral', '7: Precentral'],
['30: parstriangularis', '22: Parsopercularis', '14: Precentral', '6: Precentral'],
['29: parstriangularis', '21: Precentral', '13: Precentral', '5: Postcentral'],
['28: Superiortemporal', '20: Superiortemporal', '12: Superiortemporal', '4: Superiortemporal'],
['27: Middletemporal', '19: Middletemporal', '11: Middletemporal', '3:Superiortemporal'],
['26: Middletemporal', '18: Middletemporal', '10: Middletemporal', '2: Middletemporal'],
['25: Middletemporal', '17: Middletemporal', '9: Middletemporal', '1: Middletemporal']]
cn1 = np.array(cn1, str)
#cn2 = np.transpose(np.arange(64 - 1, 32 - 1, -1).reshape(4, 8)) + 1
cn2 = [['64: Postcentral', '56: Supramarginal', '48: Postcentral', '40: Supramarginal'],
['63: Postcentral', '55: Supramarginal', '47: Supramarginal', '39: Supramarginal'],
['62: Postcentral', '54: Supramarginal', '46: Supramarginal', '38: Supramarginal'],
['61: Superiortemporal', '53: Supramarginal', '45: Supramarginal', '37: Superiortemporal'],
['60: Superiortemporal', '52: Superiortemporal', '44: Superiortemporal', '36: Superiortemporal'],
['59: Middletemporal', '51: Middletemporal', '43: Middletemporal', '35: Middletemporal'],
['58: Middletemporal', '50: Middletemporal', '42: Middletemporal', '34: Middletemporal'],
['57: Middletemporal', '49: Inferiortemporal', '41: Middletemporal', '33: Middletemporal']]
cn2 = np.array(cn2, str)
t_name = ['PMGC1 and PMGC2', 'PMGC2 and PMGC3', 'PMGC3 and PMGC4', 'PMGC4 and PMGC5', 'PMGC5 and PMGC6', 'PMGC6 and PMGC7',
'PMGC7 and PMGC8', 'TGA4 and TGA12', 'TGB28 and TGA4', 'TGB20 and TGB28', 'TGB12 and TGB20', 'TGA4 and TGA12',
'TGB4 and TGB12', 'TGB28 and TGB29']
# t_name = ['TGA4 and TGA12', 'TGB28 and TGA4', 'TGB20 and TGB28', 'TGB12 and TGB20', 'TGA4 and TGA12',
# 'TGB4 and TGB12', 'TGB28 and TGB29']
for t, c, n in zip(all_t[8:], s_chans[8:], t_name[8:]):
me = np.ndarray((total, task.shape[1], 0))
one = np.ndarray((total, 0))
bins = []
lab, cent, sep = find_spikes(t, c, top=4000, bottom=-4000, b=1000)
indices = [int(x) for x in cent]
indices.sort()
mx = [x-50+np.argmax(t[x-50:x+50,c],0) for x in indices]
# ind = indices[v[0]:v[1]]
for i in mx:
start = i-pre
finish = i+length
bins.append(t[start:finish, :])
            me = np.append(me, np.expand_dims(bins[-1], 2), 2)
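            # Hypothetical continuation (the original loop body is truncated
            # here): also keep the trigger channel alone, then average the
            # collected epochs for this task.
            one = np.append(one, np.expand_dims(bins[-1][:, c], 1), 1)
        meme.append(me.mean(2))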
import json
from .helper import get_labels, replace_values
import cv2
from pathlib import Path
import numpy as np
import pdb
import os
def check_labels(params):
json_path=params["json_path"]
image_ext=params["image_ext"]
file_name_function = params["file_name_function"]
features = params["features"]
root_path = params["root_path"]
img_path = params["img_path"]
paint_color = params["paint_color"]
replacements = params["replacements"]
declined_file_name = params["declined_file_name"]
if os.path.isfile(declined_file_name):
        declined_labels = list(np.load(declined_file_name))
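    else:
        # Assumed default when no declined-labels file exists yet.
        declined_labels = []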
# -*- coding: utf-8 -*-
"""
Created on 16/05/16
@author: <NAME>
Program to calculate lick indices
"""
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import astropy.units as u
from astropy import constants
__all__ = ["Lick"]
class Lick():
""" Class to measure Lick indices.
Computation of the Lick indices in a given spectrum. Position of the
passbands are determined by redshifting the position of the bands
to the systemic velocity of the galaxy spectrum.
=================
Input parameters:
=================
wave (array):
Wavelength of the spectrum given.
galaxy (array):
Galaxy spectrum in arbitrary units.
bands0 (array) :
Definition of passbands for Lick indices at rest
wavelengths. Units should be consistent with wavelength array.
vel (float, optional):
Systemic velocity of the spectrum in km/s. Defaults to zero.
    dw (float, optional):
Extra wavelength to be considered besides
bands for interpolation. Defaults to 2 wavelength units.
===========
Attributes:
===========
bands (array):
Wavelengths of the bands after shifting to the
systemic velocity of the galaxy.
"""
def __init__(self, wave, galaxy, bands0, vel=None, dw=None, units=None):
self.galaxy = galaxy
self.wave = wave.to(u.AA).value
self.vel = vel
self.bands0 = bands0.to(u.AA).value
        self.dw = 2 if dw is None else dw
if vel is None:
self.vel = 0 * u.km / u.s
self.units = units if units is not None else \
np.ones(len(self.bands0)) * u.AA
ckms = constants.c.to("km/s")
# c = 299792.458 # Speed of light in km/s
self.bands = self.bands0 * np.sqrt((1 + self.vel.to("km/s")/ckms)
/(1 - self.vel.to("km/s")/ckms))
def classic_integration(self):
""" Calculation of Lick indices using spline integration.
===========
Attributes:
===========
R (array):
Raw integration values for the Lick indices.
Ia (array):
Indices measured in equivalent widths.
Im (array):
Indices measured in magnitudes.
classic (array):
            Indices measured according to the conventional
            units, mixing equivalent widths and magnitudes.
"""
        self.R = np.zeros(self.bands.shape[0])
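        # --- Reconstruction sketch (the original file is truncated here). ---
        # Assumes the usual Lick band layout per row of `bands`:
        # columns 0-1 blue pseudo-continuum, 2-3 central bandpass,
        # 4-5 red pseudo-continuum.
        bands = np.atleast_2d(np.asarray(self.bands, dtype=float))
        self.Ia = np.zeros_like(self.R)
        self.Im = np.zeros_like(self.R)
        for i, w in enumerate(bands):
            if (w[0] - self.dw < self.wave[0]) or \
               (w[-1] + self.dw > self.wave[-1]):
                self.R[i] = np.nan
                self.Ia[i] = np.nan
                self.Im[i] = np.nan
                continue
            # Spline representation of the spectrum around this index
            idx = np.where((self.wave > w[0] - self.dw) &
                           (self.wave < w[-1] + self.dw))
            spline = InterpolatedUnivariateSpline(self.wave[idx],
                                                  self.galaxy[idx])
            xb = np.linspace(w[0], w[1], 100)
            xr = np.linspace(w[4], w[5], 100)
            xc = np.linspace(w[2], w[3], 100)
            # Mean fluxes in the blue and red pseudo-continuum bands
            fb = np.trapz(spline(xb), xb) / (w[1] - w[0])
            fr = np.trapz(spline(xr), xr) / (w[5] - w[4])
            # Linear pseudo-continuum evaluated over the central band
            lb, lr = 0.5 * (w[0] + w[1]), 0.5 * (w[4] + w[5])
            cont = fb + (fr - fb) * (xc - lb) / (lr - lb)
            # Raw integral, equivalent width (Ia) and magnitude (Im)
            self.R[i] = np.trapz(spline(xc) / cont, xc)
            self.Ia[i] = (w[3] - w[2]) - self.R[i]
            self.Im[i] = -2.5 * np.log10(self.R[i] / (w[3] - w[2]))
        # Conventional mixture: magnitudes where the unit is u.mag, else EW
        self.classic = np.where([unit is u.mag for unit in self.units],
                                self.Im, self.Ia)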
import warnings
import sys
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib as mpl
import matplotlib.colors as mplcolors
import numpy as np
import matplotlib.ticker as mtik
import types
try:
import scipy.ndimage
from scipy.stats import norm
haveScipy = True
except ImportError:
haveScipy = False
PYVER = sys.version_info[0]
MPLVER = int(mpl.__version__.split('.')[0])
__all__ = ['plotGTC']
#################### Create a full GTC
def plotGTC(chains, **kwargs):
r"""Make a great looking Giant Triangle Confusogram (GTC) with one line of
code! A GTC is a lot like a triangle (or corner) plot, but you get to put as
many sets of data, and overlay as many truths as you like. That's what can
make it so *confusing*!
Parameters
----------
chains : array-like[nSamples,nDims] or a
list[[nSamples1,nDims], [nSamples2,nDims], ...]
All chains (where a chain is [nSamples,nDims]) in the list must have
the same number of dimensions. Note: If you are using ``emcee``
(http://dan.iel.fm/emcee/current/) - and you should! - each element
of chains is an ``EnsembleSampler.flatchain`` object.
Keyword Arguments
-----------------
weights : array-like[nSamples] or a list[[nSamples1], ...]
Weights for the sample points. The number of 1d arrays passed must
correspond to the number of `chains`, and each `weights` array must have
the same length nSamples as its corresponding chain.
chainLabels : array-like[nChains]
A list of text labels describing each chain passed to chains.
len(chainLabels) must equal len(chains). chainLabels supports LaTex
commands enclosed in $..$. Additionally, you can pass None as a label.
Default is ``None``.
paramNames : list-like[nDims]
A list of text labels describing each dimension of chains.
len(paramNames) must equal nDims=chains[0].shape[1]. paramNames supports
LaTex commands enclosed in $..$. Additionally, you can pass None as a
label. Default is None, however if you pass a ``pandas.DataFrame``
object, `paramNames` defaults to the ``DataFrame`` column names.
truths : list-like[nDims] or [[nDims], ...]
A list of parameter values, one for each parameter in `chains` to
highlight in the GTC parameter space, or a list of lists of values to
highlight in the parameter space. For each set of truths passed to
`truths`, there must be a value corresponding to every dimension in
`chains`, although any value may be ``None``. Default is ``None``.
truthLabels : list-like[nTruths]
A list of labels, one for each list passed to truths. truthLabels
supports LaTex commands enclosed in $..$. Additionally, you can pass
``None`` as a label. Default is ``None``.
truthColors : list-like[nTruths]
User-defined colors for the truth lines, must be one per set of truths
passed to `truths`. Default color is gray ``#4d4d4d`` for up to three
lines.
truthLineStyles : list-like[nTruths]
User-defined line styles for the truth lines, must be one per set of
truths passed to `truths`. Default line styles are
``['--',':','dashdot']``.
priors : list of tuples [(mu1, sigma1), ...]
Each tuple describes a Gaussian to be plotted over that parameter's
histogram. The number of priors must equal the number of dimensions in
`chains`. Default is ``None``.
plotName : string
A path to save the GTC to in pdf form. Default is ``None``.
nContourLevels : int
The number of contour levels to plot in the 2d histograms. May be 1, 2,
or 3. Default is 2.
sigmaContourLevels : bool
Whether you want 2d "sigma" contour levels (39%, 86%, 99%) instead of
the standard contour levels (68%, 95%, 99%). Default is ``False``.
nBins : int
An integer describing the number of bins used to compute the histograms.
Default is 30.
smoothingKernel : float
Size of the Gaussian smoothing kernel in bins. Default is 1. Set to 0
for no smoothing.
filledPlots2d : bool
Whether you want the 2d contours to be filled
Default is ``True``.
filledPlots1d : bool
Whether you want the 1d histograms to be filled
Default is ``True``.
plotDensity : bool
Whether you want to see the 2d density of points. Default is ``False``.
figureSize : float or string
A number in inches describing the length = width of the GTC, or a string
indicating a predefined journal setting and whether the figure will span
one column or the full page width. Default is 70/dpi where ``dpi =
plt.rcParams['figure.dpi']``. Options to choose from are
``'APJ_column'``, ``'APJ_page'``, ``'MNRAS_column'``, ``'MNRAS_page'``,
``'AandA_column'``, ``'AandA_page'``.
panelSpacing : string
Options are ``'loose'`` or ``'tight'``. Determines whether there is some
space between the subplots of the GTC or not. Default is ``'tight'``.
legendMarker : string
Options are ``'All'``, ``'None'``, ``'Auto'``. ``'All'`` and ``'None'``
force-show or force-hide all label markers. ``'Auto'`` shows label
markers if two or more truths are plotted.
paramRanges : list of tuples [nDim]
Set the boundaries of each parameter range. Must provide a tuple for
each dimension of `chains`. If ``None`` is provided for a parameter, the
range defaults to the width of the histogram.
labelRotation : tuple [2]
Rotate the tick labels by 45 degrees for less overlap. Sets the x- and
y-axis separately. Options are ``(True,True)``, ``(True,False)``,
``(False,True)``, ``(False,False)``, ``None``. Using ``None`` sets to
default ``(True,True)``.
tickShifts : tuple [2]
Shift the x/y tick labels horizontally/vertically by a fraction of the
tick spacing. Example tickShifts = (0.1, 0.05) shifts the x-tick labels
right by ten percent of the tick spacing and shifts the y-tick labels up
by five percent of the tick spacing. Default is (0.1, 0.1). If tick
rotation is turned off for either axis, then the corresponding shift is
set to zero.
colorsOrder : list-like[nDims]
The color order for chains passed to `chains`. Default is ``['blues',
'oranges','greens', 'reds', 'purples', 'browns', 'pinks', 'grays',
'yellows', 'cyans']``. Currently, ``pygtc`` is limited to these color
values, so you can reorder them, but can't yet define your own colors.
If you really love the old colors, you can get at them by calling:
``['blues_old', 'greens_old', ...]``.
do1dPlots : bool
Whether or not 1d histrograms are plotted on the diagonal. Default is
``True``.
doOnly1dPlot : bool
Plot only ONE 1d histogram. If this is True, then chains must have shape
``(samples,1)``. Default is ``False``.
mathTextFontSet : string
Set font family for rendering LaTex. Default is ``'stixsans'``. Set to
``None`` to use the default setting in your matplotlib rc. See Notes for
known issues regarding this keyword.
customLabelFont : ``matplotlib.fontdict``
Full customization of label fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':9}``.
customLegendFont : ``matplotlib.fontdict``
Full customization of legend fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':9}``.
customTickFont : ``matplotlib.fontdict``
Full customization of tick label fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':6}``. Attempting
to set the color will result in an error.
holdRC : bool
Whether or not to reset rcParams back to default. You may wish to set
this to ``True`` if you are working in interactive mode (ie with IPython
or in a JuPyter notebook) and you want the plots that display to be
identical to the plots that save in the pdf. See Notes below for more
information. Default is ``False``.
Returns
-------
fig : ``matplotlib.figure`` object
You can do all sorts of fun things with this in terms of customization
after it gets returned. If you are using a ``JuPyter`` notebook with
inline plotting enabled, you should assign a variable to catch the
return or else the figure will plot twice.
Note
----
If you are calling ``plotGTC`` from within an interactive python session (ie
via IPython or in a JuPyter notebook), the label font in the saved pdf may
differ from the plot that appears when calling ``matplotlib.pyplot.show()``.
This will happen if the mathTextFontSet keyword sets a value that is
different than the one stored in ``rcParams['mathtext.fontset']`` and you
are using equations in your labels by enclosing them in $..$. The output pdf
will display correctly, but the interactive plot will use whatever is stored
in the rcParams default to render the text that is inside the $..$.
Unfortunately, this is an oversight in matplotlib's design, which only
allows one global location for specifying this setting. As a workaround, you
can set ``holdRC = True`` when calling ``plotGTC`` and it will *not* reset
your rcParams back to their default state. Thus, when the figure renders in
interactive mode, it will match the saved pdf. If you wish to reset your
rcParams back to default at any point, you can call
``matplotlib.rcdefaults()``. However, if you are in a jupyter notebook and
have set ``%matplotlib inline``, then calling ``matplotlib.rcdefaults()``
may not set things back the way they were, but rerunning the line magic
will.
This is all due to a bug in matplotlib that is slated to be fixed in the
upcoming 2.0 release."""
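    # Illustrative call (hypothetical data; argument names follow the
    # docstring above):
    #   samples = np.random.multivariate_normal(np.zeros(2), np.eye(2), 10000)
    #   fig = plotGTC(chains=[samples], paramNames=['$x$', '$y$'],
    #                 truths=[0., 0.], figureSize='MNRAS_column')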
##### Figure setting
#Set up some colors
truthsDefaultColors = ['#4d4d4d', '#4d4d4d', '#4d4d4d']
truthsDefaultLS = ['--',':','dashdot']
colorsDict = {
# Match pygtc up to v0.2.4
'blues_old' : ('#4c72b0','#7fa5e3','#b2d8ff'),
'greens_old' : ('#55a868','#88db9b','#bbffce'),
'yellows_old' : ('#f5964f','#ffc982','#fffcb5'),
'reds_old' : ('#c44e52','#f78185','#ffb4b8'),
'purples_old' : ('#8172b2','#b4a5e5','#37d8ff'),
# New color scheme, dark colors match matplotlib v2
'blues' : ('#1f77b4','#52aae7','#85ddff'),
'oranges' : ('#ff7f0e','#ffb241','#ffe574'),
'greens' : ('#2ca02c','#5fd35f','#92ff92'),
'reds' : ('#d62728','#ff5a5b','#ff8d8e'),
'purples' : ('#9467bd','#c79af0','#facdff'),
'browns' : ('#8c564b','#bf897e','#f2bcb1'),
'pinks' : ('#e377c2','#ffaaf5','#ffddff'),
'grays' : ('#7f7f7f','#b2b2b2','#e5e5e5'),
'yellows' : ('#bcbd22','#eff055','#ffff88'),
'cyans' : ('#17becf','#4af1ff','#7dffff'),
}
defaultColorsOrder = ['blues', 'oranges','greens', 'reds', 'purples',
'browns', 'pinks', 'grays', 'yellows', 'cyans']
priorColor = '#333333'
#Angle of tick labels
tickAngle = 45
#Dictionary of size types or whatever:
mplPPI = plt.rcParams['figure.dpi'] #Matplotlib dots per inch
figSizeDict = { 'APJ_column' : 245.26653 / mplPPI,
'APJ_page' : 513.11743 / mplPPI,
'MNRAS_column' : 240. / mplPPI,
'MNRAS_page' : 504. / mplPPI,
'AandA_column' : 256.0748 / mplPPI,
'AandA_page' : 523.5307 / mplPPI}
##### Check the validity of the chains argument:
# Numpy really doesn't like lists of Pandas DataFrame objects
# So if it gets one, extract array vals and throw away the rest
dfColNames = None
try: # Not a list of DFs, but might be a single DF
try:
# Check if single numpy 2d chain
if chains.ndim == 2:
chains = [chains]
except:
pass
# Read in column names from Pandas DataFrame if exists
# Also convert DataFrame to simple numpy array to avoid later conflicts
if hasattr(chains[0], 'columns'):
# Set param names from DataFrame column names, can be overridden later
dfColNames = list(chains[0].columns.values)
chains = [df.values for df in chains]
except ValueError: # Probably a list of pandas DFs
if hasattr(chains[0], 'columns') and hasattr(chains[0], 'values'):
dfColNames = list(chains[0].columns.values)
chains = [df.values for df in chains]
# Get number of chains
nChains = len(chains)
assert nChains<=len(defaultColorsOrder), \
"currently only supports up to "+str(len(defaultColorsOrder))+" chains"
# Check that each chain looks reasonable (2d shape)
for i in range(nChains):
        assert len(chains[i].shape)==2, "unexpected shape of chain %d"%(i)
# Number of dimensions (parameters), check all chains have same nDim
nDim = len(chains[0][0,:])
for i in range(nChains):
nDimi = len(chains[i][0,:])
assert nDimi==nDim, "chain %d has unexpected number of dimensions %d"%(i,nDimi)
# Labels for multiple chains, goes in plot legend
chainLabels = kwargs.pop('chainLabels', None)
if chainLabels is not None:
# Convert to list if only one label
if __isstr(chainLabels):
chainLabels = [chainLabels]
# Check that number of labels equals number of chains
assert len(chainLabels) == nChains, "chainLabels mismatch with number of chains"
# Check that it's a list of strings
assert all(__isstr(s) for s in chainLabels), "chainLabels must be list of strings"
# Label the x and y axes, supports latex
paramNames = kwargs.pop('paramNames', None)
if paramNames is not None:
# Convert to list if only one name
if __isstr(paramNames):
paramNames = [paramNames]
# Check that number of paramNames equals nDim
assert len(paramNames) == nDim, "paramNames mismatch with number of dimensions"
# Check that it's a list of strings
assert all(__isstr(s) for s in paramNames), "paramNames must be list of strings"
elif dfColNames is not None:
paramNames = dfColNames
# Custom parameter range
paramRanges = kwargs.pop('paramRanges', None)
if paramRanges is not None:
assert len(paramRanges)==nDim, "paramRanges must match number of parameters"
# Rotated tick labels
labelRotation = kwargs.pop('labelRotation', (True,True))
# Shifted tick labels, Default is nudge by 0.1 * tick spacing
shiftX, shiftY = kwargs.pop('tickShifts', (0.1, 0.1))
#If the rotation is turned off, then don't shift the labels
if not labelRotation[0]:
shiftX = 0
if not labelRotation[1]:
shiftY = 0
# User-defined color ordering
colorsOrder = kwargs.pop('colorsOrder', defaultColorsOrder)
# Convert to list if only one entry
if __isstr(colorsOrder):
colorsOrder = [colorsOrder]
if not all(color in colorsDict.keys() for color in colorsOrder):
raise ValueError("Bad color name in colorsOrder=%s, pick from %s"%(colorsOrder,colorsDict.keys()))
colors = [colorsDict[cs] for cs in colorsOrder]
# Highlight a point (or several) in parameter space by lines
truthColors = kwargs.pop('truthColors', truthsDefaultColors) #Default supports up to three truths
truthLineStyles = kwargs.pop('truthLineStyles', truthsDefaultLS)
truths = kwargs.pop('truths', None)
if truths is not None:
# Convert to list if needed
if len(np.shape(truths))==1:
truths = [truths]
truths = np.array(truths)
assert np.shape(truths)[0]<=len(truthColors), \
"More truths than available colors. Set colors with truthColors = [colors...]"
assert np.shape(truths)[0]<=len(truthLineStyles), \
"More truths than available line styles. Set line styles with truthLineStyles = [ls...]"
assert np.shape(truths)[1]==nDim, \
"Each list of truths must match number of parameters"
# Labels for the different truth lines
truthLabels = kwargs.pop('truthLabels', None) #Labels for multiple truths, goes in plot legend
if truthLabels is not None:
# Convert to list if only one label
if __isstr(truthLabels):
truthLabels = [truthLabels]
# Check that it's a list of strings
assert all(__isstr(s) for s in truthLabels), "truthLabels must be list of strings"
assert len(truthLabels) == len(truths), "truthLabels mismatch with number of truths"
# Show Gaussian PDF on 1d plots (to show Gaussian priors)
priors = kwargs.pop('priors', None)
if priors is not None:
if haveScipy:
assert len(priors)==nDim, "List of priors must match number of parameters"
for i in range(nDim):
if priors[i]:
assert priors[i][1]>0, "Prior width must be positive"
else:
warnings.warn("You need to have scipy installed to display Gaussian priors, ignoring priors keyword.", UserWarning)
priors = None
# Manage the sample point weights
weights = kwargs.pop('weights', None)
if weights is None:
# Set unit weights if no weights are provided
weights = [np.ones(len(chains[i])) for i in range(nChains)]
else:
if len(weights)==len(chains[0]):
weights = [weights]
for i in range(nChains):
            assert len(weights[i])==len(chains[i]), \
                "mismatch in chain/weights #%d: len(chain) %d, len(weights) %d"%(i,len(chains[i]),len(weights[i]))
# Set plotName to save the plot to plotName
plotName = kwargs.pop('plotName', None) #Um... the name of the plot?!
if plotName is not None:
assert __isstr(plotName), "plotName must be a string type"
# Which contour levels to show
nContourLevels = kwargs.pop('nContourLevels', 2)
assert nContourLevels in [1,2,3], "nContourLevels must be 1, 2, or 3"
# Maintain support for older naming convention. TODO: Remove in next major version
deprecated_nContourLevels = kwargs.pop('nConfidenceLevels', False)
if deprecated_nContourLevels:
warnings.warn("nConfidenceLevels has been replaced by nContourLevels", DeprecationWarning)
nContourLevels = deprecated_nContourLevels
assert nContourLevels in [1,2,3], "nContourLevels must be 1, 2, or 3"
# 2d contour levels: (68%, 95%, 99%) or sigma (39%, 86%, 99%)
confLevels = (.3173, .0455, .0027)
sigmaContourLevels = kwargs.pop('sigmaContourLevels', False)
if sigmaContourLevels:
confLevels = (.6065, .1353, .0111)
# Maintain support for older naming convention. TODO: Remove in next major version
deprecated_ConfLevels = kwargs.pop('gaussianConfLevels', False)
if deprecated_ConfLevels:
warnings.warn("gaussianConfLevels has been replaced by sigmaContourLevels", DeprecationWarning)
confLevels = (.6065, .1353, .0111)
deprecated_ConfLevels = kwargs.pop('GaussianConfLevels', False)
if deprecated_ConfLevels:
warnings.warn("GaussianConfLevels has been replaced by sigmaContourLevels", DeprecationWarning)
confLevels = (.6065, .1353, .0111)
# Data binning and smoothing
nBins = kwargs.pop('nBins', 30) # Number of bins for 1d and 2d histograms. 30 works...
smoothingKernel = kwargs.pop('smoothingKernel', 1) #Don't you like smooth data?
if (smoothingKernel != 0) and (not haveScipy):
warnings.warn("Warning: You don't have Scipy installed. Your curves will not be smoothed.", UserWarning)
smoothingKernel = 0
if smoothingKernel>=nBins/10:
        warnings.warn("Wow, that's a huge smoothing kernel! You sure you want "
                      "its scale to be %.1f percent of the plot?!"
                      %(100.*float(smoothingKernel)/float(nBins)), UserWarning)
# Filled contours and histograms
filledPlots2d = kwargs.pop('filledPlots2d', True)
filledPlots1d = kwargs.pop('filledPlots1d', True)
# Filled contours and histograms
plotDensity = kwargs.pop('plotDensity', False)
# Figure size: choose size to fit journal, use reasonable default, or provide your own
figureSize = kwargs.pop('figureSize', None) #Figure size descriptor or figure width=height in inches
if figureSize is None:
# If no figure size is given, use resolution of 70 ppp (pixel per panel)
figureWidth = nDim*70. / mplPPI
else:
# User-defined width=height in inches
if not __isstr(figureSize):
figureWidth = figureSize
else:
# Choose from a couple of presets to fit your publication
if figureSize in figSizeDict.keys():
figureWidth = figSizeDict[figureSize]
else:
raise ValueError("figureSize %s unknown"%figureSize)
# Space between panels
panelSpacing = kwargs.pop('panelSpacing', 'tight')
# Marker lines in legend
showLegendMarker = False
legendMarker = kwargs.pop('legendMarker', 'Auto')
assert legendMarker in ('All','None','Auto'), \
"legendMarker must be one of 'All', 'None', 'Auto'"
if legendMarker=='Auto':
if truthLabels is not None:
if len(truthLabels)>1: showLegendMarker = True
elif legendMarker=='All': showLegendMarker = True
# Plot 1d histograms
do1dPlots = kwargs.pop('do1dPlots', True)
# Plot ONLY 1d histograms
doOnly1dPlot = kwargs.pop('doOnly1dPlot', False)
if doOnly1dPlot:
for i in range(nChains):
assert chains[i].shape[1]==1, \
"Provide chains of shape(Npoints,1) if you only want the 1d histogram"
do1dPlots = True
# Set font in rcParams (Not in the default file, but just in the running kernel)
mathtextTypes = ['cm', 'stix', 'custom', 'stixsans', None]
mathTextFontSet = kwargs.pop('mathTextFontSet', 'stixsans')
assert mathTextFontSet in mathtextTypes, \
"mathTextFont set must be one of 'cm', 'stix', 'custom', 'stixsans', None."
oldMathTextFontSet = plt.rcParams['mathtext.fontset']
if mathTextFontSet is not None:
plt.rcParams['mathtext.fontset'] = mathTextFontSet
holdRC = kwargs.pop('holdRC', False)
assert holdRC in [True, False], "holdRC must be True or False."
#Grab the custom fontdicts
#Default size is 9 for all labels.
defaultFontFamily = 'Arial'
defaultLabelFontSize = 9
defaultTickFontSize = 6
customLabelFont = kwargs.pop('customLabelFont', {})
if 'size' not in customLabelFont.keys():
customLabelFont['size'] = defaultLabelFontSize
if 'family' not in customLabelFont.keys():
customLabelFont['family'] = defaultFontFamily
customLegendFont = kwargs.pop('customLegendFont', {})
if 'size' not in customLegendFont.keys():
customLegendFont['size'] = defaultLabelFontSize
if 'family' not in customLegendFont.keys():
customLegendFont['family'] = defaultFontFamily
customTickFont = kwargs.pop('customTickFont', {})
if 'size' not in customTickFont.keys():
customTickFont['size'] = defaultTickFontSize
if 'family' not in customTickFont.keys():
customTickFont['family'] = defaultFontFamily
#Ticks require a FontProperties instead of a font dict
tickFontProps = mpl.font_manager.FontProperties(**customTickFont)
    # Check to see if there are any remaining keyword arguments
    keys = ''
    for key in iter(kwargs.keys()):
        keys = keys + key + ' '
    if keys:
        raise NameError("illegal keyword arguments: " + keys)
##### Define colormap
myColorMap = setCustomColorMaps(colors)
##### Matplotlib and figure settings
axisColor = '#333333'
# Create the figure, and empty list for first column / last row
fig = plt.figure(figsize=(figureWidth,figureWidth))
axV, axH = [], []
# Minimum and maximum sample for each dimension
samplesMin = np.nanmin(np.array([np.nanmin(chains[k], axis=0)
for k in range(nChains)]), axis=0)
samplesMax = np.nanmax(np.array([np.nanmax(chains[k], axis=0)
for k in range(nChains)]), axis=0)
# Left and right panel boundaries
# Use data limits and override if user-defined
panelAxRange = np.vstack((samplesMin, samplesMax)).T
for i in range(nDim):
if paramRanges is not None:
if paramRanges[i]:
panelAxRange[i] = paramRanges[i]
xTicks, yTicks = nDim*[None], nDim*[None]
########## 2D contour plots
if not doOnly1dPlot:
for i in range(nDim): # row
for j in range(nDim): # column
if j<i:
##### Create subplot
if do1dPlots:
ax = fig.add_subplot(nDim,nDim,(i*nDim)+j+1)
else:
ax = fig.add_subplot(nDim-1,nDim-1,((i-1)*(nDim-1))+j+1)
##### Draw contours and truths
# Extract 2d chains
chainsForPlot2D = [[chains[k][:,j], chains[k][:,i]] for k in range(nChains)]
# Extract 2d truths
truthsForPlot2D = None
if truths is not None:
truthsForPlot2D = [[truths[k,i], truths[k,j]] for k in range(len(truths))]
# Plot!
ax = __plot2d(ax, nChains, chainsForPlot2D, weights, nBins,
smoothingKernel, filledPlots2d, colors, nContourLevels,
confLevels, truthsForPlot2D, truthColors, truthLineStyles,
plotDensity, myColorMap)
##### Range
ax.set_xlim(panelAxRange[j][0],panelAxRange[j][1])
ax.set_ylim(panelAxRange[i][0],panelAxRange[i][1])
##### Tick labels without offset and scientific notation
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_scientific(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_scientific(False)
##### x-labels at bottom of plot only
if i==nDim-1:
if paramNames is not None:
ax.set_xlabel(paramNames[j], fontdict=customLabelFont)
else:
ax.get_xaxis().set_ticklabels([])
##### y-labels for left-most panels only
if j==0:
if paramNames is not None:
ax.set_ylabel(paramNames[i], fontdict=customLabelFont)
else:
ax.get_yaxis().set_ticklabels([])
##### Panel layout
ax.grid(False)
try:
#This is the matplotlib 2.0 way of doing things
ax.set_facecolor('w')
except AttributeError:
#Fallback to matplotlib 1.5
ax.set_axis_bgcolor('w')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_color(axisColor)
ax.spines[axis].set_linewidth(1)
##### Global tick properties
ax.tick_params(direction='in', top=True, right=True, pad=4,
colors=axisColor, size=4, width=.5, labelsize=6)
##### get x limits
deltaX = panelAxRange[j,1]-panelAxRange[j,0]
##### Ticks x axis
if xTicks[j] is None:
# 5 ticks max
ax.xaxis.set_major_locator(mtik.MaxNLocator(5))
# Remove xticks that are too close (5% of panel size) to panel edge
LoHi = (panelAxRange[j,0]+.05*deltaX, panelAxRange[j,1]-.05*deltaX)
tickLocs = ax.xaxis.get_ticklocs()
idx = np.where((tickLocs>LoHi[0])&(tickLocs<LoHi[1]))[0]
xTicks[j] = tickLocs[idx]
ax.xaxis.set_ticks(xTicks[j])
##### get y limits
deltaY = panelAxRange[i,1]-panelAxRange[i,0]
##### Ticks y axis
if yTicks[i] is None:
# 5 ticks max
ax.yaxis.set_major_locator(mtik.MaxNLocator(5))
# Remove xticks that are too close (5% of panel size) to panel edge
LoHi = (panelAxRange[i,0]+.05*deltaY, panelAxRange[i,1]-.05*deltaY)
tickLocs = ax.yaxis.get_ticklocs()
idx = np.where((tickLocs>LoHi[0])&(tickLocs<LoHi[1]))[0]
yTicks[i] = tickLocs[idx]
ax.yaxis.set_ticks(yTicks[i])
##### Calculate the position for shifting the x-axis tick labels
#Bump all the labels over just a tiny bit so
#it looks good! Default is 0.1 * tick spacing
#Get the number of ticks to convert
#to coordinates of fraction of tick separation
numTicksX = len(xTicks[j])-1
#Transform the shift to data coords
shiftXdata = 1.0*shiftX*deltaX/numTicksX
##### Rotate tick labels
for xLabel in ax.get_xticklabels():
if labelRotation[0]:
xLabel.set_rotation(tickAngle)
xLabel.set_horizontalalignment('right')
#Add a custom attribute to the tick label object
xLabel.custom_shift = shiftXdata
#Now monkey patch the label's set_x method to force it to
#shift the x labels when it gets called during render
#Python 3 changes how this gets called
if PYVER >= 3:
xLabel.set_x = types.MethodType(lambda self,
x: mpl.text.Text.set_x(self, x+self.custom_shift),
xLabel)
else:
xLabel.set_x = types.MethodType(lambda self,
x: mpl.text.Text.set_x(self, x+self.custom_shift),
xLabel, mpl.text.Text)
#Update the font if needed
xLabel.set_fontproperties(tickFontProps)
##### Calculate the position for shifting the y-axis tick labels
#Bump all the labels over just a tiny bit so
#it looks good! Default is 0.1 * tick spacing
#Get the number of ticks to convert
#to coordinates of fraction of tick separation
numTicksY = len(yTicks[i])-1
shiftYdata = 1.0*shiftY*deltaY/numTicksY
for yLabel in ax.get_yticklabels():
if labelRotation[1]:
yLabel.set_rotation(tickAngle)
yLabel.set_verticalalignment('top')
#Add a custom attribute to the tick label object
yLabel.custom_shift = shiftYdata
#Now monkey patch the label's set_x method to force it to
#shift the x labels when it gets called during render
if PYVER >= 3:
yLabel.set_y = types.MethodType(lambda self,
y: mpl.text.Text.set_y(self, y+self.custom_shift),
yLabel)
else:
yLabel.set_y = types.MethodType(lambda self,
y: mpl.text.Text.set_y(self, y+self.custom_shift),
yLabel, mpl.text.Text)
#Update the font if needed
yLabel.set_fontproperties(tickFontProps)
##### First column and last row are needed to align labels
if j==0:
axV.append(ax)
if i==nDim-1:
axH.append(ax)
if do1dPlots:
########## 1D histograms
for i in range(nDim):
##### Create subplot
ax = fig.add_subplot(nDim,nDim,(i*nDim)+i+1)
##### Plot histograms, truths, Gaussians
# Extract 1d chains
chainsForPlot1D = [chains[k][:,i] for k in range(nChains)]
# Extract 1d truths
truthsForPlot1D = None
if truths is not None:
truthsForPlot1D = [truths[k,i] for k in range(len(truths))]
# Extract 1d prior
prior1d = None
if priors is not None:
if priors[i] and priors[i][1]>0:
prior1d = priors[i]
# Plot!
ax = __plot1d(ax, nChains, chainsForPlot1D, weights, nBins,
smoothingKernel, filledPlots1d, colors, truthsForPlot1D,
truthColors, truthLineStyles, prior1d, priorColor)
##### Panel layout
ax.grid(False)
try:
#This is the matplotlib 2.0 way of doing things
ax.set_facecolor('w')
except AttributeError:
#Fallback to matplotlib 1.5
ax.set_axis_bgcolor('w')
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_color(axisColor)
ax.spines[axis].set_linewidth(1)
##### Global tick properties
ax.tick_params(direction='in', top=True, right=True, pad=4,
colors=axisColor, size=4, width=.5, labelsize=6)
##### Tick labels without offset and scientific notation
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_scientific(False)
##### No ticks or labels on y-axes, lower limit 0
ax.yaxis.set_ticks([])
ax.set_ylim(bottom=0)
ax.xaxis.set_ticks_position('bottom')
##### x-label for bottom-right panel only and a scaling hack
if i==nDim-1:
if paramNames is not None:
ax.set_xlabel(paramNames[i], fontdict=customLabelFont)
#Hack to get scaling to work for final 1D plot under MPL < 2.0
if (MPLVER < 2) and (smoothingKernel == 0):
max_y = 0
#Loop through the children, find the polygons
#and extract the maximum y-value
for child in ax.get_children():
if type(child) == plt.Polygon:
child_max_y = child.get_xy()[:,1].max()
if child_max_y > max_y:
max_y = child_max_y
#Set upper limit to be 5% above maximum y-value
ax.set_ylim(0, max_y*1.05)
else:
ax.get_xaxis().set_ticklabels([])
#### Set x range
ax.set_xlim(panelAxRange[i])
#### Calculate limits and tick spacing
deltaX = panelAxRange[i,1]-panelAxRange[i,0]
##### Ticks x axis
if i==nDim-1:
# 5 ticks max
ax.xaxis.set_major_locator(mtik.MaxNLocator(5))
# Remove xticks that are too close (5% of panel size) to panel edge
LoHi = (panelAxRange[i,0]+.05*deltaX, panelAxRange[i,1]-.05*deltaX)
tickLocs = ax.xaxis.get_ticklocs()
                idx = np.where((tickLocs>LoHi[0])&(tickLocs<LoHi[1]))[0]
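                # Reconstructed by analogy with the 2d panels above
                # (assumption): keep only the surviving tick locations.
                xTicks[i] = tickLocs[idx]
                ax.xaxis.set_ticks(xTicks[i])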
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 4 21:44:21 2017
@author: wangronin
"""
import pdb
import warnings
import numpy as np
from numpy import sqrt, exp, pi
from scipy.stats import norm
from abc import ABCMeta, abstractmethod
# warnings.filterwarnings("error")
# TODO: perphas also enable acquisition function engineering here?
# meaning the combination of the acquisition functions
class InfillCriteria:
__metaclass__ = ABCMeta
def __init__(self, model, plugin=None, minimize=True):
assert hasattr(model, 'predict')
self.model = model
self.minimize = minimize
# change maximization problem to minimization
        self.plugin = plugin if (plugin is None or self.minimize) else -plugin
        if self.plugin is None:
            self.plugin = np.min(self.model.y) if minimize else -np.max(self.model.y)
@abstractmethod
def __call__(self, X):
raise NotImplementedError
def _predict(self, X):
y_hat, sd2 = self.model.predict(X, eval_MSE=True)
        sd = sqrt(sd2)
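        # Assumed remainder of this helper: hand back both the prediction and
        # its standard deviation.
        return y_hat, sd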
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 11:38:35 2019
@author: <NAME>
"""
#TODO: add error handling for reading of files
#TODO: warning for not finding any features
import argparse
import cluster_function_prediction_tools as tools
import os, sys
from Bio import SeqIO
import SSN_tools
import readFeatureFiles
import numpy as np
import readInputFiles
SSN_pfam_names = ["Thiolase, N-terminal domain","ABC transporter","Acyl transferase domain","AAA domain",
"ABC-2 family transporter protein","Acyl-CoA dehydrogenase, C-terminal domain","Acyl-CoA dehydrogenase, N-terminal domain",
"Alcohol dehydrogenase GroES-like domain","Alpha/beta hydrolase family","Aminotransferase class I and II",
"Beta-ketoacyl synthase, C-terminal domain","Beta-ketoacyl synthase, N-terminal domain","Cytochrome P450","DegT/DnrJ/EryC1/StrS aminotransferase family",
"Enoyl-(Acyl carrier protein) reductase","Erythronolide synthase docking","FAD binding domain","Glycosyl transferase family 2",
"Glycosyltransferase family 28 N-terminal domain","Glycosyl transferases group 1","Glycosyltransferase like family 2","Glyoxalase/Bleomycin resistance protein/Dioxygenase superfamily",
"KR domain","Lanthionine synthetase C-like protein",
"Major Facilitator Superfamily","Methyltransferase small domain","Methyltransferase domain",
"NAD dependent epimerase/dehydratase family","NDP-hexose 2,3-dehydratase",
"O-methyltransferase","Oxidoreductase family, C-terminal alpha/beta domain","Oxidoreductase family, NAD-binding Rossmann fold",
"Phosphopantetheine attachment site","Polyketide cyclase / dehydrase and lipid transport","Polyketide synthase dehydratase",
"Protein of unknown function (DUF1205)",
"short chain dehydrogenase","SnoaL-like domain","SpaB C-terminal domain",
"Sugar (and other) transporter","transcriptional_regulatory_protein,_c_terminal_domains","Thioesterase superfamily","ubiE/COQ5 methyltransferase family","UDP-glucoronosyl and UDP-glucosyl transferase","YcaO-like family",
"Zinc-binding dehydrogenase","pyridine_nucleotide-disulphide_oxidoreductase"]
#read arguments given by user
parser = argparse.ArgumentParser()
parser.add_argument('antismash_results',help='file containing the antismash results for the cluster in a genbank file')
parser.add_argument('rgi_results',help='file containing the rgi results for the cluster')
parser.add_argument('--output', help='set directory to write predictions to, default write to current directory')
parser.add_argument('--seed', help='random seed to use for training classifiers',type=int)
parser.add_argument('--no_SSN', help="don't use pfam subfamilies in classification, program will run faster with only small impact on accuracy (default: use sub-PFAMs)", nargs='?', default=False, const=True)
parser.add_argument('--blastp_path', help="path to blastp executable, only neeeded if using SSN, default is blastp")
parser.add_argument('--write_features', help='set directory to write features to, default do not write features')
parser.add_argument('--antismash_version', help='version of antismash used to generate antismash input file, supported versions are 4 and 5, defualt 5')
parser.add_argument('--rgi_version', help='version of rgi used to generate antismash input file, supported versions are 3 and 5, default 5')
args = parser.parse_args()
data_path = os.path.dirname(sys.argv[0]) + "/"
if args.write_features == None:
write_features = False
feature_dir = ""
else:
write_features = True
feature_dir = args.write_features
if args.seed == None:
seed = 0
else:
seed = args.seed
if args.blastp_path == None:
blastp_path = "blastp"
else:
blastp_path = args.blastp_path
antismash_infilename = args.antismash_results
rgi_infilename = args.rgi_results
no_SSN = args.no_SSN
if args.output == None:
out_directory = "./"
else:
out_directory = args.output
if args.rgi_version == "5":
rgi_version = 5
elif args.rgi_version == "3":
rgi_version = 3
elif args.rgi_version == None:
rgi_version = 5
else:
print("please enter a valid rgi version, program currently accepts output from versions 3 and 5")
exit()
antismash_version = 5
if args.antismash_version == "5":
antismash_version = 5
elif args.antismash_version == "4":
antismash_version = 4
elif args.antismash_version == None:
antismash_version = 5
else:
print("please enter a valid antismash version, program currently accepts output from versions 4 and 5")
exit()
#check validity of files and directories given by user
if not tools.checkIfFileExists(antismash_infilename, "antismash") or not tools.checkIfFileExists(rgi_infilename, "rgi"):
exit()
if not os.path.isdir(out_directory):
print("The given out directory does not exist, please enter a valid directory")
exit()
if not os.access(out_directory, os.W_OK):
print("You do not have permission to write to the given output directory, please use a different directory")
exit()
#read the list of features
try:
training_SSN_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/SSN.csv")
if antismash_version == 4:
training_pfam_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/PFAM.csv")
training_smCOG_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/SMCOG.csv")
#SSN_calc_features = readFeatureFiles.readFeatureMatrixFloat("gene_feature_matrices/test_compounds_SSN.csv")
training_CDS_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/CDS_motifs.csv")
training_pks_nrps_type_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/pks_nrps_type.csv")
training_pk_signature_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/pk_signature.csv")
training_pk_minowa_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/pk_minowa.csv")
training_pk_consensus_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/pk_consensus.csv")
training_nrp_stachelhaus_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/nrp_stachelhaus.csv")
training_nrp_nrpspredictor_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/nrp_nrpspredictor.csv")
training_nrp_pHMM_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/nrp_pHMM.csv")
training_nrp_predicat_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/nrp_predicat.csv")
training_nrp_sandpuma_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/nrp_sandpuma.csv")
elif antismash_version == 5:
training_pfam_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/PFAM5.csv")
training_smCOG_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/SMCOG5.csv")
training_CDS_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/CDS_motifs5.csv")
training_pk_consensus_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/pk_nrp_consensus5.csv")
if rgi_version == 3:
training_card_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/CARD_gene.csv")
used_resistance_genes_list = readFeatureFiles.readFeatureList(data_path+"feature_matrices/CARD_gene_list.txt")
elif rgi_version == 5:
training_card_features = readFeatureFiles.readFeatureMatrix(data_path+"feature_matrices/CARD5_genes.csv")
used_resistance_genes_list = readFeatureFiles.readFeatureList(data_path+"feature_matrices/CARD5_gene_list.txt")
is_antibacterial = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/is_antibacterial.csv")
is_antifungal = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/is_antifungal.csv")
is_cytotoxic = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/is_cytotoxic.csv")
is_unknown = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/is_unknown.csv")
targets_gram_pos = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/targets_gram_pos.csv")
targets_gram_neg = readFeatureFiles.readClassesMatrix(data_path+"feature_matrices/targets_gram_neg.csv")
full_cluster_list = readFeatureFiles.readClusterList(data_path+"feature_matrices/cluster_list_CARD.txt")
except:
print("did not find file containing training data, please keep script located in directory downloaded from github")
exit()
#read the antismash input file
try:
record = SeqIO.read(open(antismash_infilename, 'rU'),"genbank")
except:
print("error reading antismash output file")
exit()
as_features = record.features
try:
rgi_infile = open(rgi_infilename, 'r')
except:
print("error reading rgi output file")
exit()
#make the feature matrices for the cluster
training_features = np.concatenate((training_pfam_features, training_card_features), axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
from bcdi.experiment.detector import create_detector
from bcdi.experiment.setup import Setup
import bcdi.preprocessing.bcdi_utils as bu
import bcdi.graph.graph_utils as gu
import bcdi.utils.utilities as util
import bcdi.utils.validation as valid
helptext = """
Open a series of rocking curve data and track the position of the Bragg peak over the
series. Supported beamlines: ESRF ID01, PETRAIII P10, SOLEIL SIXS, SOLEIL CRISTAL,
MAX IV NANOMAX.
"""
scans = np.arange(1460, 1475 + 1, step=3) # list or array of scan numbers
scans = np.concatenate((scans, np.arange(1484, 1586 + 1, 3)))
scans = np.concatenate((scans, np.arange(1591, 1633 + 1, 3)))
scans = np.concatenate((scans, np.arange(1638, 1680 + 1, 3)))
root_folder = "D:/data/P10_OER/data/"
sample_name = "dewet2_2" # list of sample names. If only one name is indicated,
# it will be repeated to match the number of scans
save_dir = "D:/data/P10_OER/analysis/candidate_12/"
# images will be saved here, leave it to None otherwise (default to root_folder)
x_axis = [0.740 for _ in range(16)]
for _ in range(10):
x_axis.append(0.80)
for _ in range(15):
x_axis.append(-0.05)
for _ in range(15):
x_axis.append(0.3)
for _ in range(15):
x_axis.append(0.8)
# values against which the Bragg peak center of mass evolution will be plotted,
# leave [] otherwise
x_label = "voltage (V)" # label for the X axis in plots, leave '' otherwise
comment = "_BCDI_RC" # comment for the saving filename, should start with _
strain_range = 0.00005 # range for the plot of the q value
peak_method = (
"max_com" # Bragg peak determination: 'max', 'com', 'max_com' (max then com)
)
debug = False # set to True to see more plots
###############################
# beamline related parameters #
###############################
beamline = (
"P10" # name of the beamline, used for data loading and normalization by monitor
)
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10'
custom_scan = False # True for a stack of images acquired without scan,
# e.g. with ct in a macro (no info in spec file)
custom_images = np.arange(11353, 11453, 1) # list of image numbers for the custom_scan
custom_monitor = np.ones(
len(custom_images)
) # monitor values for normalization for the custom_scan
custom_motors = {
"eta": np.linspace(16.989, 18.989, num=100, endpoint=False),
"phi": 0,
"nu": -0.75,
"delta": 36.65,
}
# ID01: eta, phi, nu, delta
# CRISTAL: mgomega, gamma, delta
# P10: om, phi, chi, mu, gamma, delta
# SIXS: beta, mu, gamma, delta
rocking_angle = "outofplane" # "outofplane" or "inplane"
is_series = False # specific to series measurement at P10
specfile_name = ""
# template for ID01: name of the spec file without '.spec'
# template for SIXS_2018: full path of the alias dictionnary,
# typically root_folder + 'alias_dict_2019.txt'
# template for all other beamlines: ''
###############################
# detector related parameters #
###############################
detector = "Eiger4M" # "Eiger2M" or "Maxipix" or "Eiger4M"
x_bragg = 1387 # horizontal pixel number of the Bragg peak,
# can be used for the definition of the ROI
y_bragg = 809 # vertical pixel number of the Bragg peak,
# can be used for the definition of the ROI
roi_detector = [
y_bragg - 200,
y_bragg + 200,
x_bragg - 400,
x_bragg + 400,
] # [Vstart, Vstop, Hstart, Hstop]
# leave it as None to use the full detector.
# Use with center_fft='skip' if you want this exact size.
debug_pix = 40 # half-width in pixels of the ROI centered on the Bragg peak
hotpixels_file = None # root_folder + 'hotpixels.npz' # non empty file path or None
flatfield_file = (
None # root_folder + "flatfield_8.5kev.npz" # non empty file path or None
)
template_imagefile = "_master.h5"
# template for ID01: 'data_mpx4_%05d.edf.gz' or 'align_eiger2M_%05d.edf.gz'
# template for SIXS_2018: 'align.spec_ascan_mu_%05d.nxs'
# template for SIXS_2019: 'spare_ascan_mu_%05d.nxs'
# template for Cristal: 'S%d.nxs'
# template for P10: '_master.h5'
# template for NANOMAX: '%06d.h5'
# template for 34ID: 'Sample%dC_ES_data_51_256_256.npz'
####################################
# q calculation related parameters #
####################################
convert_to_q = True # True to convert from pixels to q values using parameters below
beam_direction = (1, 0, 0) # beam along z
directbeam_x = 476 # x horizontal, cch2 in xrayutilities
directbeam_y = 1374 # y vertical, cch1 in xrayutilities
direct_inplane = -2.0 # outer angle in xrayutilities
direct_outofplane = 0.8
sdd = 1.83 # sample to detector distance in m
energy = 10300 # in eV, offset of 6eV at ID01
##################################
# end of user-defined parameters #
##################################
###################
# define colormap #
###################
bad_color = "1.0" # white
bckg_color = "0.7" # grey
colormap = gu.Colormap(bad_color=bad_color)
my_cmap = colormap.cmap
########################################
# check and initialize some parameters #
########################################
print(f"\n{len(scans)} scans: {scans}")
print(f"\n {len(x_axis)} x_axis values provided:")
if len(x_axis) == 0:
x_axis = np.arange(len(scans))
if len(x_axis) != len(scans):
raise ValueError("the length of x_axis should be equal to the number of scans")
if isinstance(sample_name, str):
sample_name = [sample_name for idx in range(len(scans))]
valid.valid_container(
sample_name,
container_types=(tuple, list),
length=len(scans),
item_types=str,
name="preprocess_bcdi",
)
if peak_method not in [
"max",
"com",
"max_com",
]:
raise ValueError('invalid value for "peak_method" parameter')
int_sum = [] # integrated intensity in the detector ROI
int_max = [] # maximum intensity in the detector ROI
zcom = [] # center of mass for the first data axis
ycom = [] # center of mass for the second data axis
xcom = [] # center of mass for the third data axis
tilt_com = [] # center of mass for the incident rocking angle
q_com = [] # q value of the center of mass
check_roi = [] # a small ROI around the Bragg peak will be stored for each scan,
# to see if the peak is indeed
# captured by the rocking curve
#######################
# Initialize detector #
#######################
detector = create_detector(
name=detector,
template_imagefile=template_imagefile,
roi=roi_detector,
)
####################
# Initialize setup #
####################
setup = Setup(
beamline=beamline,
detector=detector,
energy=energy,
rocking_angle=rocking_angle,
distance=sdd,
beam_direction=beam_direction,
custom_scan=custom_scan,
custom_images=custom_images,
custom_monitor=custom_monitor,
custom_motors=custom_motors,
is_series=is_series,
)
########################################
# print the current setup and detector #
########################################
print("\n##############\nSetup instance\n##############")
print(setup)
print("\n#################\nDetector instance\n#################")
print(detector)
###############################################
# load recursively the scans and update lists #
###############################################
flatfield = util.load_flatfield(flatfield_file)
hotpix_array = util.load_hotpixels(hotpixels_file)
for scan_idx, scan_nb in enumerate(scans, start=1):
tmp_str = f"Scan {scan_idx}/{len(scans)}: S{scan_nb}"
print(f'\n{"#" * len(tmp_str)}\n' + tmp_str + "\n" + f'{"#" * len(tmp_str)}')
# initialize the paths
setup.init_paths(
sample_name=sample_name[scan_idx - 1],
scan_number=scan_nb,
root_folder=root_folder,
save_dir=save_dir,
verbose=True,
specfile_name=specfile_name,
template_imagefile=template_imagefile,
)
# override the saving directory, we want to save results at the same place
detector.savedir = save_dir
logfile = setup.create_logfile(
scan_number=scan_nb, root_folder=root_folder, filename=detector.specfile
)
data, mask, frames_logical, monitor = bu.load_bcdi_data(
logfile=logfile,
scan_number=scan_nb,
detector=detector,
setup=setup,
flatfield=flatfield,
hotpixels=hotpix_array,
normalize=True,
debugging=debug,
)
tilt, grazing, inplane, outofplane = setup.diffractometer.goniometer_values(
frames_logical=frames_logical, logfile=logfile, scan_number=scan_nb, setup=setup
)
nbz, nby, nbx = data.shape
if peak_method == "max":
piz, piy, pix = np.unravel_index(data.argmax(), shape=(nbz, nby, nbx))
elif peak_method == "com":
piz, piy, pix = center_of_mass(data)
else: # 'max_com'
max_z, max_y, max_x = np.unravel_index(data.argmax(), shape=data.shape)
com_z, com_y, com_x = center_of_mass(
data[
:,
int(max_y) - debug_pix : int(max_y) + debug_pix,
int(max_x) - debug_pix : int(max_x) + debug_pix,
]
)
# correct the pixel offset due to the ROI defined by debug_pix around the max
piz = com_z # the data was not cropped along the first axis
piy = com_y + max_y - debug_pix
pix = com_x + max_x - debug_pix
if debug:
fig, _, _ = gu.multislices_plot(
data,
sum_frames=True,
plot_colorbar=True,
cmap=my_cmap,
title="scan" + str(scan_nb),
scale="log",
is_orthogonal=False,
reciprocal_space=True,
)
fig.text(
0.60, 0.30, f"(piz, piy, pix) = ({piz:.1f}, {piy:.1f}, {pix:.1f})", size=12
)
plt.draw()
if peak_method == "max_com":
fig, _, _ = gu.multislices_plot(
data[
:,
int(max_y) - debug_pix : int(max_y) + debug_pix,
int(max_x) - debug_pix : int(max_x) + debug_pix,
],
sum_frames=True,
plot_colorbar=True,
cmap=my_cmap,
title="scan" + str(scan_nb),
scale="log",
is_orthogonal=False,
reciprocal_space=True,
)
fig.text(
0.60,
0.30,
f"(com_z, com_y, com_x) = ({com_z:.1f}, {com_y:.1f}, {com_x:.1f})",
size=12,
)
plt.draw()
print("")
zcom.append(piz)
ycom.append(piy)
xcom.append(pix)
int_sum.append(data.sum())
int_max.append(data.max())
check_roi.append(
data[:, :, int(pix) - debug_pix : int(pix) + debug_pix].sum(axis=1)
)
interp_tilt = interp1d(np.arange(data.shape[0]), tilt, kind="linear")
tilt_com.append(interp_tilt(piz))
##############################
# convert pixels to q values #
##############################
if convert_to_q:
(
setup.outofplane_angle,
setup.inplane_angle,
setup.tilt_angle,
setup.grazing_angle,
) = (outofplane, inplane, tilt, grazing)
# calculate the position of the Bragg peak in full detector pixels
bragg_x = detector.roi[2] + pix
bragg_y = detector.roi[0] + piy
# calculate the position of the direct beam at 0 detector angles
x_direct_0 = directbeam_x + setup.inplane_coeff * (
direct_inplane * np.pi / 180 * sdd / detector.pixelsize_x
) # inplane_coeff is +1 or -1
y_direct_0 = (
directbeam_y
- setup.outofplane_coeff
* direct_outofplane
* np.pi
/ 180
* sdd
/ detector.pixelsize_y
) # outofplane_coeff is +1 or -1
# calculate corrected detector angles for the Bragg peak
bragg_inplane = setup.inplane_angle + setup.inplane_coeff * (
detector.pixelsize_x * (bragg_x - x_direct_0) / sdd * 180 / np.pi
) # inplane_coeff is +1 or -1
bragg_outofplane = (
setup.outofplane_angle
- setup.outofplane_coeff
* detector.pixelsize_y
* (bragg_y - y_direct_0)
/ sdd
* 180
/ np.pi
) # outofplane_coeff is +1 or -1
print(
f"\nBragg angles before correction (gam, del): ({setup.inplane_angle:.4f}, "
f"{setup.outofplane_angle:.4f})"
)
print(
f"Bragg angles after correction (gam, del): ({bragg_inplane:.4f}, "
f"{bragg_outofplane:.4f})"
)
# update setup with the corrected detector angles
setup.inplane_angle = bragg_inplane
setup.outofplane_angle = bragg_outofplane
##############################################################
# wavevector transfer calculations (in the laboratory frame) #
##############################################################
kin = 2 * np.pi / setup.wavelength * np.asarray(beam_direction)
# in lab frame z downstream, y vertical, x outboard
        kout = setup.exit_wavevector  # in lab frame z downstream, y vertical, x outboard
q = (kout - kin) / 1e10 # convert from 1/m to 1/angstrom
q_com.append(np.linalg.norm(q))
print(f"Wavevector transfer of Bragg peak: {q}, Qnorm={np.linalg.norm(q):.4f}")
##########################################################
# plot the ROI centered on the Bragg peak for each scan #
##########################################################
plt.ion()
# plot maximum 7x7 ROIs per figure
nb_fig = 1 + len(scans) // 49
if nb_fig == 1:
    nb_rows = int(np.floor(np.sqrt(len(scans))))
    nb_columns = int(np.ceil(len(scans) / nb_rows))
else:
nb_rows = 7
nb_columns = 7
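# Example of the layout logic above (illustrative): 12 scans -> nb_fig = 1,
# nb_rows = floor(sqrt(12)) = 3, nb_columns = ceil(12 / 3) = 4, so all 12 ROIs
# fit on one 3 x 4 grid; beyond 49 scans the grid is fixed to 7 x 7 per figure.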
scan_counter = 0
for fig_idx in range(nb_fig):
fig = plt.figure(figsize=(12, 9))
for idx in range(min(49, len(scans) - scan_counter)):
axis = plt.subplot(nb_rows, nb_columns, idx + 1)
axis.imshow(np.log10(check_roi[scan_counter]))
axis.set_title("S{:d}".format(scans[scan_counter]))
scan_counter = scan_counter + 1
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + f"check-roi{fig_idx+1}" + comment + ".png")
##########################################################
# plot the evolution of the center of mass and intensity #
##########################################################
fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(
nrows=2, ncols=3, figsize=(12, 9)
)
ax0.plot(scans, x_axis, "-o")
ax0.set_xlabel("Scan number")
ax0.set_ylabel(x_label)
ax1.scatter(x_axis, int_sum, s=24, c=scans, cmap=my_cmap)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Integrated intensity")
ax1.set_facecolor(bckg_color)
ax2.scatter(x_axis, int_max, s=24, c=scans, cmap=my_cmap)
ax2.set_xlabel(x_label)
ax2.set_ylabel("Maximum intensity")
ax2.set_facecolor(bckg_color)
ax3.scatter(x_axis, xcom, s=24, c=scans, cmap=my_cmap)
ax3.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax3.set_ylabel("xcom (pixels)")
else: # 'max'
ax3.set_ylabel("xmax (pixels)")
ax3.set_facecolor(bckg_color)
ax4.scatter(x_axis, ycom, s=24, c=scans, cmap=my_cmap)
ax4.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax4.set_ylabel("ycom (pixels)")
else: # 'max'
ax4.set_ylabel("ymax (pixels)")
ax4.set_facecolor(bckg_color)
plt5 = ax5.scatter(x_axis, zcom, s=24, c=scans, cmap=my_cmap)
gu.colorbar(plt5, scale="linear", numticks=min(len(scans), 20), label="scan #")
ax5.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
ax5.set_ylabel("zcom (pixels)")
else: # 'max'
ax5.set_ylabel("zmax (pixels)")
ax5.set_facecolor(bckg_color)
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + "summary" + comment + ".png")
############################################
# plot the evolution of the incident angle #
############################################
tilt_com = np.asarray(tilt_com)
x_axis = np.asarray(x_axis)
uniq_xaxis = np.unique(x_axis)
import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer,EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import SMMInPlacePathSampler, InPlacePathSampler,SeedInPlacePathSampler, ExpInPlacePathSampler,ExpInPlacePathSamplerSimple
from rlkit.torch import pytorch_util as ptu
from rlkit.smm.smm_policy import hard_smm_point
from rlkit.smm.smm_sampler import SMMSampler
from rlkit.policies.base import ExplorationPolicy
import pickle
import torch
class MetaRLAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
train_tasks,
eval_tasks,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=False,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
use_SMM=False,
load_SMM =False,
use_history=False,
SMM_path=None,
num_skills = 1,
seed_sample=False,
attention=False,
snail=False,
sample_interval=5
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.use_SMM = use_SMM
self.load_SMM = load_SMM
        self.use_history = use_history
self.SMM_path = SMM_path
self.num_skills = num_skills
self.seed_sample = seed_sample
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
if self.seed_sample:
self.seedsampler = SeedInPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
sample_interval=sample_interval
)
if self.use_SMM:
self.smm_sampler = SMMSampler(
env=env,
max_path_length=max_path_length,
agent = agent,
load_SMM=self.load_SMM,
use_history=self.use_history,
SMM_path=self.SMM_path,
num_skills = self.num_skills
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
def train(self):
'''
meta-training loop
'''
self.pretrain()
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
gt.reset()
gt.set_def_unique(False)
self._current_path_builder = PathBuilder()
# at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
for it_ in gt.timed_for(
range(self.num_iterations),
save_itrs=True,
):
self._start_epoch(it_)
self.training_mode(True)
if it_ == 0:
print('collecting initial pool of data for train and eval')
# temp for evaluating
for idx in self.train_tasks:
self.task_idx = idx
self.env.reset_task(idx)
if not self.use_SMM:
if not self.seed_sample:
self.collect_data(self.num_initial_steps, 1, np.inf)
else:
self.collect_data(self.num_initial_steps, 1, np.inf)
self.collect_data_seed(self.num_initial_steps, 1, np.inf,accumulate_context=False)
else:
self.collect_data_smm(self.num_initial_steps)
self.collect_data_policy(self.num_initial_steps, 1, np.inf)
# Sample data from train tasks.
for i in range(self.num_tasks_sample):
idx = np.random.randint(len(self.train_tasks))
self.task_idx = idx
self.env.reset_task(idx)
self.enc_replay_buffer.task_buffers[idx].clear()
if not self.use_SMM:
if not self.seed_sample:
# collect some trajectories with z ~ prior
if self.num_steps_prior > 0:
self.collect_data(self.num_steps_prior, 1, np.inf)
# collect some trajectories with z ~ posterior
if self.num_steps_posterior > 0:
self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
if self.num_extra_rl_steps_posterior > 0:
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
add_to_enc_buffer=False)
else:
if self.num_steps_prior > 0:
self.collect_data(self.num_steps_prior, 1, np.inf)
self.collect_data_seed(self.num_steps_prior, 1, np.inf,accumulate_context=False)
if self.num_steps_posterior > 0:
self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train)
if self.num_extra_rl_steps_posterior > 0:
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
add_to_enc_buffer=False)
self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
add_to_enc_buffer=False)
else:
if self.num_steps_prior > 0:
self.collect_data_smm(self.num_steps_prior)
self.collect_data_policy(self.num_steps_prior, 1, np.inf)
# collect some trajectories with z ~ posterior
if self.num_steps_posterior > 0:
self.collect_data_smm(self.num_steps_posterior)
self.collect_data_policy(self.num_steps_posterior, 1, self.update_post_train)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
if self.num_extra_rl_steps_posterior > 0:
self.collect_data_policy(self.num_extra_rl_steps_posterior, 1, self.update_post_train)
# Sample train tasks and compute gradient updates on parameters.
for train_step in range(self.num_train_steps_per_itr):
indices = np.random.choice(self.train_tasks, self.meta_batch)
self._do_training(indices)
self._n_train_steps_total += 1
gt.stamp('train')
self.training_mode(False)
# eval
self._try_to_eval(it_)
gt.stamp('eval')
self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
def collect_data_smm(self,num_samples):
'''
Notice that SMM data should only be available for the encoder
:param num_samples: number of transitions to sample
:return:
'''
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.smm_sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=np.inf)
num_transitions += n_samples
self.enc_replay_buffer.add_paths(self.task_idx, paths)
self._n_env_steps_total += num_transitions
gt.stamp('smm sample')
def collect_data_policy(self, num_samples, resample_z_rate, update_posterior_rate):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
self.replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context = self.sample_context(self.task_idx)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('policy sample')
def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
#for p in paths:
# print(p['actions'],p['rewards'])
if add_to_policy_buffer:
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context = self.sample_context(self.task_idx)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
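    # Illustration of the rate arguments above (comment only, mirroring the
    # calls made in train()): collect_data(n, 1, np.inf) resamples z from the
    # prior every trajectory and never fits the posterior, while
    # collect_data(n, 1, self.update_post_train) refits q(z|c) from the encoder
    # buffer every `update_post_train` trajectories and then samples z from it.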
def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=1,
accum_context=accumulate_context
)
num_transitions += n_samples
if add_to_policy_buffer:
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
#if update_posterior_rate != np.inf:
# context = self.prepare_context(self.task_idx)
# self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def _try_to_eval(self, epoch):
logger.save_extra_data(self.get_extra_data_to_save(epoch))
if self._can_evaluate():
self.evaluate(epoch)
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
table_keys = logger.get_table_key_set()
if self._old_table_keys is not None:
assert table_keys == self._old_table_keys, (
"Table keys cannot change from iteration to iteration."
)
self._old_table_keys = table_keys
logger.record_tabular(
"Number of train steps total",
self._n_train_steps_total,
)
logger.record_tabular(
"Number of env steps total",
self._n_env_steps_total,
)
logger.record_tabular(
"Number of rollouts total",
self._n_rollouts_total,
)
times_itrs = gt.get_times().stamps.itrs
train_time = times_itrs['train'][-1]
if not self.use_SMM:
sample_time = times_itrs['sample'][-1]
else:
sample_time = times_itrs['policy sample'][-1]
eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
epoch_time = train_time + sample_time + eval_time
total_time = gt.get_times().total
logger.record_tabular('Train Time (s)', train_time)
logger.record_tabular('(Previous) Eval Time (s)', eval_time)
logger.record_tabular('Sample Time (s)', sample_time)
logger.record_tabular('Epoch Time (s)', epoch_time)
logger.record_tabular('Total Train Time (s)', total_time)
logger.record_tabular("Epoch", epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
else:
logger.log("Skipping eval for now.")
def _can_evaluate(self):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
agent.set_num_steps_total(self._n_env_steps_total)
return agent.get_action(observation,)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
##### Snapshotting utils #####
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
def get_extra_data_to_save(self, epoch):
"""
Save things that shouldn't be saved every snapshot but rather
overwritten every time.
:param epoch:
:return:
"""
if self.render:
self.training_env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
data_to_save['env'] = self.training_env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
if self.use_SMM:
if not self.load_SMM:
path, num = self.smm_sampler.obtain_samples(max_samples=self.max_path_length, max_trajs=1,
accum_context=True)
num_transitions += num
self.agent.infer_posterior(self.agent.context)
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=False)
paths += path
num_transitions += num
num_trajs += 1
if num_trajs >= self.num_exp_traj_eval:
self.agent.infer_posterior(self.agent.context)
else:
while num_transitions < self.num_steps_per_eval:
path, num = self.smm_sampler.obtain_samples(max_samples=self.max_path_length, max_trajs=1,
accum_context=True)
num_transitions += num
#paths+=path
num_trajs += 1
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=False)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
else:
while num_transitions < self.num_steps_per_eval:
if self.seed_sample:
path, num = self.seedsampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=True)
else:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
if num_trajs >= self.num_exp_traj_eval:
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
                sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
if hasattr(self.env,"_pitfall"):
pitfall = self.env._pitfall
for path in paths:
path['pitfall'] = pitfall
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
def _do_eval(self, indices, epoch):
final_returns = []
online_returns = []
for idx in indices:
all_rets = []
for r in range(self.num_evals):
paths = self.collect_paths(idx, epoch, r)
all_rets.append([eval_util.get_average_returns([p]) for p in paths])
final_returns.append(np.mean([np.mean(a) for a in all_rets]))
# record online returns for the first n trajectories
n = min([len(a) for a in all_rets])
all_rets = [a[:n] for a in all_rets]
all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
online_returns.append(all_rets)
n = min([len(t) for t in online_returns])
online_returns = [t[:n] for t in online_returns]
return final_returns, online_returns
def evaluate(self, epoch):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
else:
prior_paths, _ = self.smm_sampler.obtain_samples(
max_samples=self.max_path_length * 20,
)
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
self.env.reset_task(idx)
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
context = self.sample_context(idx)
self.agent.infer_posterior(context)
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
resample=np.inf)
paths += p
#for p in paths:
# print(p['actions'],p['rewards'])
if self.sparse_rewards:
for p in paths:
                    sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
#if hasattr(self.env, "log_diagnostics"):
# self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
@abc.abstractmethod
def training_mode(self, mode):
"""
Set training mode to `mode`.
:param mode: If True, training will happen (e.g. set the dropout
probabilities to not all ones).
"""
pass
@abc.abstractmethod
def _do_training(self):
"""
Perform some update, e.g. perform one gradient step.
:return:
"""
pass
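# Minimal sketch added for illustration (not part of the original rlkit code):
# _do_eval above averages per-rollout returns across `num_evals` runs after
# trimming every run to the length of the shortest one. The toy numbers below
# are arbitrary and only demonstrate the shape handling.
def _example_average_online_returns():
    all_rets = [[1.0, 2.0, 3.0], [2.0, 4.0]]   # per-rollout returns from two eval runs
    n = min(len(a) for a in all_rets)          # shortest run -> n = 2
    trimmed = [a[:n] for a in all_rets]        # drop the extra rollout of run 1
    return np.mean(np.stack(trimmed), axis=0)  # average return per nth rollout -> [1.5, 3.0]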
class ExpAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
agent_exp,
train_tasks,
eval_tasks,
encoder,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=True,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
use_SMM=False,
load_SMM =False,
use_history=False,
SMM_path=None,
num_skills = 1,
seed_sample=False,
snail=False,
meta_episode_len=10,
num_trajs = 2,
num_trajs_eval=1
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent_exp # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.context_encoder = encoder
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.use_SMM = use_SMM
self.load_SMM = load_SMM
        self.use_history = use_history
self.SMM_path = SMM_path
self.num_skills = num_skills
self.seed_sample = seed_sample
self.meta_episode_len = meta_episode_len
self.num_trajs = num_trajs
self.num_trajs_eval = num_trajs_eval
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
self.expsampler = ExpInPlacePathSampler(
env=env,
policy=self.exploration_agent,
encoder=self.context_encoder,
max_path_length=self.max_path_length,
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
def train(self):
'''
meta-training loop
'''
self.pretrain()
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
gt.reset()
gt.set_def_unique(False)
self._current_path_builder = PathBuilder()
# at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
for it_ in gt.timed_for(
range(self.num_iterations),
save_itrs=True,
):
self._start_epoch(it_)
self.training_mode(True)
if it_ == 0:
print('collecting initial pool of data for train and eval')
# temp for evaluating
for idx in self.train_tasks:
self.task_idx = idx
self.env.reset_task(idx)
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
self.collect_data(self.num_initial_steps, 1, np.inf,add_to_enc_buffer=True)
# Sample data from train tasks.
for i in range(self.num_tasks_sample):
idx = np.random.randint(len(self.train_tasks))
self.task_idx = idx
self.env.reset_task(idx)
if (it_+1)%5==0:
self.enc_replay_buffer.task_buffers[idx].clear()
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
if self.num_steps_prior > 0:
self.collect_data(self.num_steps_prior, 1, np.inf,add_to_enc_buffer=True)
# collect some trajectories with z ~ posterior
if self.num_steps_posterior > 0:
self.collect_data(self.num_steps_posterior, 1, self.update_post_train,add_to_enc_buffer=True)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
if self.num_extra_rl_steps_posterior > 0:
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
print('collect over')
# Sample train tasks and compute gradient updates on parameters.
for train_step in range(self.num_train_steps_per_itr):
indices = np.random.choice(self.train_tasks, self.meta_batch)
self._do_training(indices)
self._n_train_steps_total += 1
gt.stamp('train')
self.training_mode(False)
# eval
self._try_to_eval(it_)
gt.stamp('eval')
self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
def sample_eval(self,indices, context):
reward = torch.zeros(context.shape[0],1,1).cuda()
rem = 0
for indice in indices:
self.env.reset_task(indice)
context_i = context[rem,...]
context_i = torch.unsqueeze(context_i,0)
self.agent.clear_z()
self.agent.infer_posterior(context_i)
path,_ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5,resample=1)
reward[rem] = eval_util.get_average_returns(path)
rem = rem + 1
return reward
def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context, context_unbatched = self.sample_context(self.task_idx)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def collect_data_exp(self, num_episodes):
'''
        collect complete trajectories from the current env with the exploration policy
        and add them to the encoder replay buffer
        :param num_episodes: number of complete episodes (trajectories) to collect
'''
# start from the prior
paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes)
self.enc_replay_buffer.add_paths(self.task_idx, paths)
self._n_env_steps_total += n_samples
gt.stamp('sample')
def _try_to_eval(self, epoch):
logger.save_extra_data(self.get_extra_data_to_save(epoch))
if self._can_evaluate(epoch):
self.evaluate(epoch,self.num_trajs)
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
table_keys = logger.get_table_key_set()
if self._old_table_keys is not None:
assert table_keys == self._old_table_keys, (
"Table keys cannot change from iteration to iteration."
)
self._old_table_keys = table_keys
logger.record_tabular(
"Number of train steps total",
self._n_train_steps_total,
)
logger.record_tabular(
"Number of env steps total",
self._n_env_steps_total,
)
logger.record_tabular(
"Number of rollouts total",
self._n_rollouts_total,
)
times_itrs = gt.get_times().stamps.itrs
train_time = times_itrs['train'][-1]
if not self.use_SMM:
sample_time = times_itrs['sample'][-1]
else:
sample_time = times_itrs['policy sample'][-1]
eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
epoch_time = train_time + sample_time + eval_time
total_time = gt.get_times().total
logger.record_tabular('Train Time (s)', train_time)
logger.record_tabular('(Previous) Eval Time (s)', eval_time)
logger.record_tabular('Sample Time (s)', sample_time)
logger.record_tabular('Epoch Time (s)', epoch_time)
logger.record_tabular('Total Train Time (s)', total_time)
logger.record_tabular("Epoch", epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
else:
logger.log("Skipping eval for now.")
def _can_evaluate(self,epoch):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True #if (epoch+1)%5==0 else False
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
agent.set_num_steps_total(self._n_env_steps_total)
return agent.get_action(observation,)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
##### Snapshotting utils #####
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
def get_extra_data_to_save(self, epoch):
"""
Save things that shouldn't be saved every snapshot but rather
overwritten every time.
:param epoch:
:return:
"""
if self.render:
self.training_env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
data_to_save['env'] = self.training_env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent = self.agent,split=True)
num_transitions += num
num_trajs +=self.num_exp_traj_eval
paths+=path
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
                sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
def _do_eval(self, indices, epoch):
final_returns = []
online_returns = []
for idx in indices:
all_rets = []
for r in range(self.num_evals):
paths = self.collect_paths(idx, epoch, r)
all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 00:10:46 2018
@author: nantanick
"""
from Shanten import Shanten
from mahjong.tile import TilesConverter
import random
import copy
import numpy as np
# complete hand
# hand = TilesConverter.string_to_34_array(pin='112233999', honors='11177')
#unaccounted_tiles = np.array([4]*34)-hand
#tenpai
#hand = TilesConverter.string_to_34_array(man='284', pin='24667',sou='1136', honors='77')
#unaccounted_tiles = np.array([4]*34)-hand
#terrible
#hand = TilesConverter.string_to_34_array(pin='112233',man='257',sou='345', honors='16')
#hand = np.array(TilesConverter.string_to_34_array(man='1894', pin='2378',sou='2345', honors='15'))
def simulate_naive(hand, hand_open, unaccounted_tiles):
"""
#naive simulation
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten = Shanten()
hand = list(hand)
unaccounted = list(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
unaccounted_nonzero = np.nonzero(unaccounted)
#14 in dead wall 13*3= 39 in other hand -> total 53
for i in range(tiles_left - 53):
if shanten.calculate_shanten(hand, hand_open) <= 0:#if tenpai
return True
hand_nonzero = np.nonzero(hand)[0] #discard something random
discard = random.choice(hand_nonzero)
hand[discard] -= 1
unaccounted_nonzero = np.nonzero(unaccounted)[0] #get a random card
draw_tile = random.choice(unaccounted_nonzero)
unaccounted[draw_tile] -= 1
hand[draw_tile] +=1
return False
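# Hedged usage sketch (added for illustration, not in the original script): the
# hand below is the "tenpai" example from the commented block at the top of this
# file; repeating the naive rollout gives a rough estimate of how often it
# reaches tenpai before the wall runs out. Passing hand_open=None follows the
# usage in simulate_weighted2 below.
def _example_tenpai_rate(n_runs=100):
    hand = TilesConverter.string_to_34_array(man='284', pin='24667', sou='1136', honors='77')
    unaccounted = np.array([4] * 34) - hand
    hits = sum(simulate_naive(hand, None, unaccounted) for _ in range(n_runs))
    return hits / n_runs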
def simulate_naive2(hand, hand_open, unaccounted_tiles):
"""
#naive simulation
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten_calc = Shanten()
hand = list(hand)
unaccounted = list(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
unaccounted_nonzero = np.nonzero(unaccounted)
#14 in dead wall 13*3= 39 in other hand -> total 53
shanten_sum = 0
for i in range(tiles_left - 119):
shanten = shanten_calc.calculate_shanten(hand, hand_open)
if shanten <= 0:#if tenpai
break
shanten_sum += shanten
hand_nonzero = np.nonzero(hand)[0] #discard something random
discard = random.choice(hand_nonzero)
hand[discard] -= 1
unaccounted_nonzero = np.nonzero(unaccounted)[0] #get a random card
draw_tile = random.choice(unaccounted_nonzero)
unaccounted[draw_tile] -= 1
hand[draw_tile] +=1
return shanten_sum
def simulate_naive3(hand, hand_open, unaccounted_tiles):
"""
#naive simulation
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten_calc = Shanten()
hand = list(hand)
unaccounted = list(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
unaccounted_nonzero = np.nonzero(unaccounted)
#14 in dead wall 13*3= 39 in other hand -> total 53
shanten_sum = 0.
for i in range(tiles_left - 119):
shanten = shanten_calc.calculate_shanten(hand, hand_open)
if shanten <= 0:#if tenpai
break
shanten_sum += shanten**2
hand_nonzero = np.nonzero(hand)[0] #discard something random
discard = random.choice(hand_nonzero)
hand[discard] -= 1
unaccounted_nonzero = np.nonzero(unaccounted)[0] #get a random card
draw_tile = random.choice(unaccounted_nonzero)
unaccounted[draw_tile] -= 1
hand[draw_tile] +=1
return shanten_sum
def simulate_weighted(hand, hand_open, unaccounted_tiles):
"""
Does a weighted simulation
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten = Shanten()
hand = list(hand)
unaccounted = copy.deepcopy(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
#14 in dead wall 13*3= 39 in other hand -> total 53
for i in range(tiles_left):
if shanten.calculate_shanten(hand, hand_open) <= 0: #if tenpai
return True
hand_nonzero = np.nonzero(hand)[0]
nonzero_inverted = [4-hand[i] for i in hand_nonzero]
weight = np.array(nonzero_inverted)/sum(nonzero_inverted)
discard = np.random.choice(hand_nonzero, 1, p = weight, replace=False)[0]
hand[discard] -= 1
#print(weight, nonzero_inverted)
unaccounted_nonzero = np.nonzero(unaccounted)[0] #get a random card
draw_tile = random.choice(unaccounted_nonzero)
unaccounted[draw_tile] -= 1
hand[draw_tile] +=1
return False
def simulate_weighted2(hand, hand_open, unaccounted_tiles):
"""
Does a weighted simulation (Incomplete)
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten = Shanten()
hand = copy.deepcopy(hand)
unaccounted = copy.deepcopy(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
#14 in dead wall 13*3= 39 in other hand -> total 53
for i in range(tiles_left - 53):
if shanten.calculate_shanten(hand, None) <= 0:
return True
weight = [0]*34
for i, count in enumerate(hand):
if count >=3:
weight[i] = 0
else:
weight[i] = 4 - count
weight = np.array(weight)/sum(weight)
discard = np.random.choice(range(34), 1, p = weight, replace=False)[0]
hand[discard] -= 1
#print(weight, nonzero_inverted)
unaccounted_nonzero = np.nonzero(unaccounted)[0] #get a random card
draw_tile = random.choice(unaccounted_nonzero)
unaccounted[draw_tile] -= 1
hand[draw_tile] +=1
return False
def simulate_weighted3(hand, hand_open, unaccounted_tiles):
"""
Does a weighted simulation (Incomplete)
hand, hand_open -- hand in 34 format
unaccounted_tiles -- all the unused tiles in 34 format
turn -- a number from 0-3 (0 is the player)
"""
shanten_calc = Shanten()
hand = list(hand)
unaccounted = list(unaccounted_tiles)
tiles_left = sum(unaccounted_tiles)
unaccounted_nonzero = np.nonzero(unaccounted)
#14 in dead wall 13*3= 39 in other hand -> total 53
shanten_sum = 0
for i in range(tiles_left-119):
shanten = shanten_calc.calculate_shanten(hand, hand_open)
if shanten <= 0:#if tenpai
break
shanten_sum += shanten
hand_nonzero = np.nonzero(hand)[0]
nonzero_inverted = [4-hand[i] for i in hand_nonzero]
weight = np.array(nonzero_inverted)/sum(nonzero_inverted)
discard = np.random.choice(hand_nonzero, 1, p = weight, replace=False)[0]
hand[discard] -= 1
#print(weight, nonzero_inverted)
        unaccounted_nonzero = np.nonzero(unaccounted)[0]
import random
from copy import deepcopy
import mazebase
# These weird import statements are taken from https://github.com/facebook/MazeBase/blob/23454fe092ecf35a8aab4da4972f231c6458209b/py/example.py#L12
import mazebase.games as mazebase_games
import numpy as np
from mazebase.games import curriculum
from mazebase.games import featurizers
from environment.env import Environment
from environment.observation import Observation
from utils.constant import *
class MazebaseWrapper(Environment):
"""
Wrapper class over maze base environment
"""
def __init__(self):
super(MazebaseWrapper, self).__init__()
self.name = MAZEBASE
try:
# Reference: https://github.com/facebook/MazeBase/blob/3e505455cae6e4ec442541363ef701f084aa1a3b/py/mazebase/games/mazegame.py#L454
small_size = (10, 10, 10, 10)
lk = curriculum.CurriculumWrappedGame(
mazebase_games.LightKey,
curriculums={
'map_size': mazebase_games.curriculum.MapSizeCurriculum(
small_size,
small_size,
(10, 10, 10, 10)
)
}
)
game = mazebase_games.MazeGame(
games=[lk],
featurizer=mazebase_games.featurizers.GridFeaturizer()
)
except mazebase.utils.mazeutils.MazeException as e:
print(e)
self.game = game
self.actions = self.game.all_possible_actions()
def observe(self):
game_observation = self.game.observe()
# Logic borrowed from:
# https://github.com/facebook/MazeBase/blob/23454fe092ecf35a8aab4da4972f231c6458209b/py/example.py#L192
obs, info = game_observation[OBSERVATION]
featurizers.grid_one_hot(self.game, obs)
        obs = np.array(obs)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.optim.lr_scheduler import MultiStepLR
import shutil
import random
from numpy import linalg as LA
from scipy.stats import mode
import collections
import tqdm
import os
from architecture import *
def batch_shape(model_name, x):
"""
Return a batch x with a relevant shape for a given model. For example,
# MLP
x = torch.ones(5, 360) # batch, features
# Conv1d
x = torch.ones(5, 1, 360) # batch, channel, features
# GNN
x = torch.ones(5, 360) # batch, features
graph = torch.ones(360, 360)
# Resnet3d
x = torch.ones(5, 1, 100, 100, 100) # batch, channel, dim1, dim2, dim3
"""
if model_name == 'MLP' or model_name == 'GNN' or model_name == 'LR':
return torch.squeeze(x, dim=1)
elif model_name == 'Resnet3d':
return torch.unsqueeze(x, dim=1)
else:
return x
def save_checkpoint(state, is_best, folder, filename='checkpoint.pth.tar'):
if not os.path.exists(folder):
os.makedirs(folder)
torch.save(state, folder + filename)
if is_best:
shutil.copyfile(folder + filename, folder + '/model_best.pth.tar')
def load_checkpoint(model, save_path, type='best'):
if type == 'best':
checkpoint = torch.load('{}/model_best.pth.tar'.format(save_path))
elif type == 'last':
checkpoint = torch.load('{}/checkpoint.pth.tar'.format(save_path))
else:
assert False, 'type should be in [best, or last], but got {}'.format(type)
model.load_state_dict(checkpoint['state_dict'])
def compute_accuracy(outputs, y):
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == y).sum().item()
return correct
# Train a model for 1 epoch and return its loss.
def train(model, criterion, optimizer, data_loader, use_cuda):
""" Train a neural network for one epoch.
"""
model.train()
epoch_loss = 0.
epoch_acc = 0.
epoch_total = 0.
for i, (x, y) in enumerate(data_loader):
if use_cuda:
x = x.cuda()
y = y.cuda()
# Adapt the batch shape to the model.
x = batch_shape(model.name, x)
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward + backward + optimize.
outputs = model(x)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
acc = compute_accuracy(outputs.clone().detach(), y)
# Statistics.
epoch_loss += loss.item()
epoch_acc += acc
epoch_total += y.size(0)
return epoch_loss / (i+1), epoch_acc / epoch_total
# Evaluate a model on the validation / test set.
def episodic_evaluation(model, data_loader, sampler_infos, use_cuda):
"""
Return the average accuracy on few-shot tasks (called episodes).
A task contains training samples with known labels and query
samples. The accuracy is the number of times we correctly
predict the labels of the query samples.
To attribute a label to a new sample, we consider the outputs of the
penultimate layer of model. A label is represented by the average
outputs of its training samples. A new sample is labeled
in function of the closest label-representative.
"""
model.eval()
n_way = sampler_infos[1]
n_shot = sampler_infos[2]
epoch_acc = 0.
total = 0.
with torch.no_grad():
# Iterate over several episodes.
for i, (x, y) in enumerate(tqdm.tqdm(data_loader)):
# print(i, end='\r')
if use_cuda:
x = x.cuda()
y = y.cuda()
# Adapt the batch shape to the model.
x = batch_shape(model.name, x)
# Retrieve the outputs of the penultimate layer of model of all
# samples.
outputs = model(x, remove_last_layer=True)
# print('outputs shape', outputs.shape)
training = outputs[:n_way*n_shot]
query = outputs[n_way*n_shot:]
train_labels = y[:n_way*n_shot]
query_labels = y[n_way*n_shot:]
# Compute the vector representative of each class.
training = training.reshape(n_way, n_shot, -1).mean(1)
train_labels = train_labels[::n_shot]
# Find the labels of the query samples.
scores = cosine_score(training, query)
pred_labels = torch.argmin(scores, dim=1)
pred_labels = torch.take(train_labels, pred_labels)
# Compute the accuracy.
acc = (query_labels == pred_labels).float().sum()
epoch_acc += acc
total += query_labels.size(0)
del training, query
return epoch_acc / total
# Compute similarities between two sets of vectors.
def cosine_score(X, Y):
"""
Return a score between 0 and 1 (0 for very similar, 1 for not similar at all)
between all vectors in X and all vectors in Y. As the score is based on the
cosine similarity, all vectors are expected to have positive values only.
Parameters:
X -- set of vectors (number of vectors, vector size).
Y -- set of vectors (number of vectors, vector size).
"""
scores = 1. - F.cosine_similarity(Y[:, None, :], X[None, :, :], dim=2)
return scores
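# Hedged usage sketch (added for illustration; the vectors are made up): this is
# the nearest-prototype classification used in episodic_evaluation above, where
# each class is represented by the mean embedding of its support samples and a
# query is assigned to the prototype with the smallest cosine score.
def _example_nearest_prototype():
    prototypes = torch.tensor([[1., 0.], [0., 1.], [1., 1.]])  # (n_way, feature_dim)
    queries = torch.tensor([[0.9, 0.1], [0.4, 0.5]])           # (n_query, feature_dim)
    scores = cosine_score(prototypes, queries)                 # shape (n_query, n_way)
    return torch.argmin(scores, dim=1)                         # index of the closest prototype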
def sample_case(data, n_shot, n_way, n_query):
"""
Return the training and test data of a few-shot task.
Parameters:
data -- dict whose keys are labels and values are the samples associated with.
n_way -- int, number of classes
n_shot -- int, number of training examples per class.
n_query -- int, number of test examples per class.
"""
# Randomly sample n_way classes.
classes = random.sample(list(data.keys()), n_way)
train_data = []
test_data = []
test_labels = []
train_labels = []
# For each class, randomly select training and test examples.
for label in classes:
samples = random.sample(data[label], n_shot + n_query)
train_labels += [label] * n_shot
test_labels += [label] * n_query
train_data += samples[:n_shot]
test_data += samples[n_shot:]
train_data = np.array(train_data).astype(np.float32)
    test_data = np.array(test_data).astype(np.float32)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 2020
Class to read and manipulate CryoSat-2 waveform data
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
http://www.numpy.org
http://www.scipy.org/NumPy_for_Matlab_Users
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 08/2020: flake8 compatible binary regular expression strings
Forked 02/2020 from read_cryosat_L1b.py
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import pointCollection as pc
import netCDF4
import re
import os
class data(pc.data):
np.seterr(invalid='ignore')
def __default_field_dict__(self):
"""
Define the default fields that get read from the CryoSat-2 file
"""
field_dict = {}
field_dict['Location'] = ['days_J2k','Day','Second','Micsec','USO_Corr',
'Mode_ID','SSC','Inst_config','Rec_Count','Lat','Lon','Alt','Alt_rate',
'Sat_velocity','Real_beam','Baseline','ST_ID','Roll','Pitch','Yaw','MCD']
field_dict['Data'] = ['TD', 'H_0','COR2','LAI','FAI','AGC_CH1','AGC_CH2',
'TR_gain_CH1','TR_gain_CH2','TX_Power','Doppler_range','TR_inst_range',
'R_inst_range','TR_inst_gain','R_inst_gain','Internal_phase',
'External_phase','Noise_power','Phase_slope']
field_dict['Geometry'] = ['dryTrop','wetTrop','InvBar','DAC','Iono_GIM',
'Iono_model','ocTideElv','lpeTideElv','olTideElv','seTideElv','gpTideElv',
'Surf_type','Corr_status','Corr_error']
field_dict['Waveform_20Hz'] = ['Waveform','Linear_Wfm_Multiplier',
'Power2_Wfm_Multiplier','N_avg_echoes']
field_dict['METADATA'] = ['MPH','SPH']
return field_dict
def from_dbl(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from binary formats
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
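        # Hypothetical example (for illustration only, not a real product file):
        #   CS_OFFL_SIR_SAR_1B_20140101T000000_20140101T005959_C001.DBL
        #   -> CLASS='OFFL', PRODUCT='SIR_SAR_1B', START='20140101T000000',
        #      STOP='20140101T005959', BASELINE='C', VERSION='001'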
# CryoSat-2 Mode record sizes
i_size_timestamp = 12
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 125
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# check baseline from file to set i_record_size and allocation function
if (BASELINE == 'C'):
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 3*2 + 4*4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_BC_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_BC_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_BC_RW*2 + \
n_SARIN_BC_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baseline C
read_cryosat_variables = self.cryosat_baseline_C
else:
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_RW*2 + \
n_SARIN_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baselines A and B
read_cryosat_variables = self.cryosat_baseline_AB
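# note: the per-mode totals above are the size in bytes of a single
# data set record (DSR); they are used below to infer the number of
# records present from the size of the input file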
# get dataset MODE from PRODUCT portion of file name
# set record sizes and DS_TYPE for read_DSD function
self.MODE = re.findall('(LRM|SAR|SIN)', PRODUCT).pop()
if (self.MODE == 'LRM'):
i_record_size = i_record_size_LRM_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SAR'):
i_record_size = i_record_size_SAR_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SIN'):
i_record_size = i_record_size_SARIN_L1b
DS_TYPE = 'CS_L1B'
# read the input file to get file information
fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fid)
os.close(fid)
# estimate the number of DSRs from the file size and record size
j_num_DSR = np.int32(file_info.st_size//i_record_size)
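# illustrative check: a headerless granule is an exact multiple of the
# record size (e.g. 1000 records -> st_size == 1000*i_record_size);
# any remainder indicates that ASCII MPH/SPH/DSD headers precede the
# binary data set records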
# print file information
if verbose:
print(full_filename)
print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size == file_info.st_size):
print('No Header on file')
print('The number of DSRs is: {0:d}'.format(j_num_DSR))
else:
print('Header on file')
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size != file_info.st_size):
# If there are MPH/SPH/DSD headers
s_MPH_fields = self.read_MPH(full_filename)
j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
s_SPH_fields = self.read_SPH(full_filename, j_sph_size)
# extract information from DSD fields
s_DSD_fields = self.read_DSD(full_filename, DS_TYPE=DS_TYPE)
# extract DS_OFFSET
j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
# extract number of DSR in the file
j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
# check the record size
j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
# minimum file size is the data set offset plus the total size of the records
j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
if verbose:
print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
print('The number of DSRs is {0:d}'.format(j_num_DSR))
print('The size of the DSR is {0:d}'.format(j_DSR_size))
# check if invalid file size
if (j_check_size > file_info.st_size):
raise IOError('File size error')
# extract binary data from input CryoSat data file (skip headers)
fid = open(os.path.expanduser(full_filename), 'rb')
cryosat_header = fid.read(j_DS_start)
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# add headers to output dictionary as METADATA
CS_L1b_mds['METADATA'] = {}
CS_L1b_mds['METADATA']['MPH'] = s_MPH_fields
CS_L1b_mds['METADATA']['SPH'] = s_SPH_fields
CS_L1b_mds['METADATA']['DSD'] = s_DSD_fields
# close the input CryoSat binary file
fid.close()
else:
# If there are not MPH/SPH/DSD headers
# extract binary data from input CryoSat data file
fid = open(os.path.expanduser(full_filename), 'rb')
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# close the input CryoSat binary file
fid.close()
# if unpacking the units
if unpack:
CS_l1b_scale = self.cryosat_scaling_factors()
# for each dictionary key
for group in CS_l1b_scale.keys():
# for each variable
for key,val in CS_L1b_mds[group].items():
# check if val is the 20Hz waveform beam variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
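# note: (GPS_Time - leap_seconds) gives UTC seconds since the GPS epoch
# (1980-01-06); dividing by 86400 and removing the 7300-day offset
# between 1980-01-06 and 2000-01-01 gives days relative to J2000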
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the object with the extracted data and header metadata
return self
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from netCDF4 format data
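
Illustrative usage (instance and file names are placeholders; assumes
a Baseline-D netCDF4 granule is available locally):
    CS.from_nc('CS_OFFL_SIR_SAR_1B_20190101T000000_20190101T002000_D001.nc',
        unpack=True, verbose=True)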
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx2 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 2 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
if verbose: print(full_filename)
# get dataset MODE from PRODUCT portion of file name
self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
# read Level-1b CryoSat-2 data from the netCDF4 file
CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the object with the extracted data
return self
def calc_GPS_time(self, day, second, micsec):
"""
Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
"""
# TAI time is ahead of GPS by 19 seconds
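# (the GPS epoch 1980-01-06 precedes the 2000-01-01 reference epoch of
# the input day count by exactly 7300 days, hence the offset below)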
return (day + 7300.0)*86400.0 + second.astype('f') + micsec/1e6 - 19
def count_leap_seconds(self, GPS_Time):
"""
Count number of leap seconds that have passed for given GPS times
"""
# GPS times for leap seconds
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
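# (covers the leap seconds announced from 1981-07-01 through 2017-01-01;
# this list would need extending if further leap seconds are introduced)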
# number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
i_records,i_blocks = np.nonzero(GPS_Time >= leap)
n_leaps[i_records,i_blocks] += 1.0
return n_leaps
def read_MPH(self, full_filename):
"""
Read ASCII Main Product Header (MPH) block from an ESA PDS file
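
For example, an MPH line of the (illustrative) form PRODUCT="CS_OFFL_..."
is returned as s_MPH_fields['PRODUCT'] with the surrounding quotes and
trailing whitespace removed.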
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# check that first line of header matches PRODUCT
if not bool(re.match(br'PRODUCT\=\"(.*)(?=\")',file_contents[0])):
raise IOError('File does not start with a valid PDS MPH')
# read MPH header text
s_MPH_fields = {}
for i in range(n_MPH_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_MPH_fields
def read_SPH(self, full_filename, j_sph_size):
"""
Read ASCII Specific Product Header (SPH) block from a PDS file
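
Header fields are returned as a dictionary; the descriptors introduced
by DS_NAME lines are returned as nested dictionaries keyed by the data
set name.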
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# compile regular expression operator for reading headers
rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
# check first line of header matches SPH_DESCRIPTOR
if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
raise IOError('File does not have a valid PDS SPH')
# read SPH header text (no binary control characters)
s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
and not re.search(br'[^\x20-\x7e]+',li)]
# extract SPH header text
s_SPH_fields = {}
c = 0
while (c < len(s_SPH_lines)):
# check if line is within DS_NAME portion of SPH header
if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
# add dictionary for DS_NAME
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
key = value.decode('utf-8').rstrip()
s_SPH_fields[key] = {}
for line in s_SPH_lines[c+1:c+7]:
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
# data fields within quotes
dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',line)):
# data fields without quotes
dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
# add 6 to counter to go to next entry
c += 6
# use regular expression operators to read headers
elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# add 1 to counter to go to next line
c += 1
# Return block name array to calling function
return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
"""
Read ASCII Data Set Descriptors (DSD) block from a PDS file
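
Returns the DSD fields for the matching data set, including DS_OFFSET,
NUM_DSR and DSR_SIZE, which are used to locate and size the binary
data set records.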
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# number of text lines in a DSD header
n_DSD_lines = 8
# Level-1b CryoSat DS_NAMES within files
regex_patterns = []
if (DS_TYPE == 'CS_L1B'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
elif (DS_TYPE == 'SIR_L1B_FDM'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
# find the DSD starting line within the SPH header
c = 0
Flag = False
while ((Flag is False) and (c < len(regex_patterns))):
# find the index of the matching DS_NAME pattern within the header lines
indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if
re.search(regex_patterns[c],line)]
if indice:
Flag = True
else:
c+=1
# check that a valid index was found within the header
if not indice:
raise IOError('Cannot find correct DSD field')
# extract s_DSD_fields info
DSD_START = n_MPH_lines + indice[0] + 1
s_DSD_fields = {}
for i in range(DSD_START,DSD_START+n_DSD_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_DSD_fields
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
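
Returns a dictionary with the Location, Data, Geometry, Waveform_1Hz
and Waveform_20Hz groups of the Measurement Data Set, with the 20 Hz
variables dimensioned (n_records, 20 blocks).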
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100): converted from telemetry units to be
# the noise floor of FBR measurement echoes.
# Set to -9999.99 when the telemetry contains zero.
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
# CryoSat-2 mode specific waveforms
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [512]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [512]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
# Phase Difference [512]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry']['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
CS_l1b_mds['Geometry']['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 Average Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SIN'):
# SARIN Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
# CryoSat-2 Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (self.MODE == 'SIN'):
# SARIN Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
CS_l1b_mds['Waveform_20Hz']['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
# set the mask from day variables
mask_20Hz = CS_l1b_mds['Location']['Day'].data == CS_l1b_mds['Location']['Day'].fill_value
Location_keys = [key for key in CS_l1b_mds['Location'].keys() if not re.search(r'Spare',key)]
Data_keys = [key for key in CS_l1b_mds['Data'].keys() if not re.search(r'Spare',key)]
Geometry_keys = [key for key in CS_l1b_mds['Geometry'].keys() if not re.search(r'Spare',key)]
Wfm_1Hz_keys = [key for key in CS_l1b_mds['Waveform_1Hz'].keys() if not re.search(r'Spare',key)]
Wfm_20Hz_keys = [key for key in CS_l1b_mds['Waveform_20Hz'].keys() if not re.search(r'Spare',key)]
for key in Location_keys:
CS_l1b_mds['Location'][key].mask = mask_20Hz.copy()
for key in Data_keys:
CS_l1b_mds['Data'][key].mask = mask_20Hz.copy()
# return the output dictionary
return CS_l1b_mds
def cryosat_baseline_C(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baseline C
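
Returns the same group structure as the Baseline A/B reader, with the
Baseline-C additions: star tracker ID and antenna bench roll, pitch and
yaw in the Location group, and 20 Hz waveforms of 256 bins (SAR) or
1024 bins (SARIN) with additional beam behaviour parameters.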
"""
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Star Tracker ID
CS_l1b_mds['Location']['ST_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.int16)
# Antenna Bench Roll Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Roll'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Antenna Bench Pitch Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Pitch'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Antenna Bench Yaw Angle (Derived from star trackers)
# packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Yaw'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
CS_l1b_mds['Location']['Spares'] = np.ma.zeros((n_records,n_blocks,2),dtype=np.int16)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100)
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
# CryoSat-2 mode specific waveform variables
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [256]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [1024]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [1024]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
# Phase Difference [1024]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Location']['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry']['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
CS_l1b_mds['Geometry']['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 Average Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SIN'):
# SARIN Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
# CryoSat-2 Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
elif (self.MODE == 'SIN'):
# SARIN Mode
for b in range(n_blocks):
CS_l1b_mds['Waveform_20Hz']['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_BC_RW)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
CS_l1b_mds['Waveform_20Hz']['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
CS_l1b_mds['Waveform_20Hz']['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_BC_RW)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_BC_RW)
# set the mask from day variables
mask_20Hz = CS_l1b_mds['Location']['Day'].data == CS_l1b_mds['Location']['Day'].fill_value
Location_keys = [key for key in CS_l1b_mds['Location'].keys() if not re.search(r'Spare',key)]
Data_keys = [key for key in CS_l1b_mds['Data'].keys() if not re.search(r'Spare',key)]
Geometry_keys = [key for key in CS_l1b_mds['Geometry'].keys() if not re.search(r'Spare',key)]
Wfm_1Hz_keys = [key for key in CS_l1b_mds['Waveform_1Hz'].keys() if not re.search(r'Spare',key)]
Wfm_20Hz_keys = [key for key in CS_l1b_mds['Waveform_20Hz'].keys() if not re.search(r'Spare',key)]
for key in Location_keys:
CS_l1b_mds['Location'][key].mask = mask_20Hz.copy()
for key in Data_keys:
CS_l1b_mds['Data'][key].mask = mask_20Hz.copy()
# return the output dictionary
return CS_l1b_mds
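# The fields above are stored in packed units; a minimal conversion sketch
# (variable names are illustrative only):
#   lat_deg = CS_l1b_mds['Location']['Lat'] * 1e-7   # 0.1 micro-degree -> degrees
#   lon_deg = CS_l1b_mds['Location']['Lon'] * 1e-7
#   alt_m   = CS_l1b_mds['Location']['Alt'] * 1e-3   # mm -> metres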
def cryosat_baseline_D(self, full_filename, unpack=False):
"""
Read L1b MDS variables for CryoSat Baseline D (netCDF4)
"""
# open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
# use original unscaled units unless unpack=True
fid.set_auto_scale(unpack)
# get dimensions
ind_first_meas_20hz_01 = fid.variables['ind_first_meas_20hz_01'][:].copy()
ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
n_records = len(ind_first_meas_20hz_01)
n_SARIN_D_RW = 1024
n_SARIN_RW = 512
n_SAR_D_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# MDS Time
CS_l1b_mds['Location']['Time'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Time'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
time_20_ku = fid.variables['time_20_ku'][:].copy()
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Day'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Second'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Micsec'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['USO_Corr'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
uso_cor_20_ku = fid.variables['uso_cor_20_ku'][:].copy()
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Mode_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_op_20_ku =fid.variables['flag_instr_mode_op_20_ku'][:].copy()
# Mode Flags
CS_l1b_mds['Location']['Mode_flags'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Mode_flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_flags_20_ku =fid.variables['flag_instr_mode_flags_20_ku'][:].copy()
# Platform attitude control mode
CS_l1b_mds['Location']['Att_control'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Att_control'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_att_ctrl_20_ku =fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Inst_config'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_flags_20_ku = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()
# acquisition band
CS_l1b_mds['Location']['Inst_band'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Inst_band'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_bwdt_20_ku = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()
# instrument channel
CS_l1b_mds['Location']['Inst_channel'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Inst_channel'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_in_use_20_ku = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()
# tracking mode
CS_l1b_mds['Location']['Tracking_mode'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Tracking_mode'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_trk_mode_20_ku = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['SSC'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
seq_count_20_ku = fid.variables['seq_count_20_ku'][:].copy()
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Rec_Count'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
rec_count_20_ku = fid.variables['rec_count_20_ku'][:].copy()
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Lat'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lat_20_ku = fid.variables['lat_20_ku'][:].copy()
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Lon'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lon_20_ku = fid.variables['lon_20_ku'][:].copy()
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Alt'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
alt_20_ku = fid.variables['alt_20_ku'][:].copy()
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks))
CS_l1b_mds['Location']['Alt_rate'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
orb_alt_rate_20_ku = fid.variables['orb_alt_rate_20_ku'][:].copy()
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3))
CS_l1b_mds['Location']['Sat_velocity'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
sat_vel_vec_20_ku = fid.variables['sat_vel_vec_20_ku'][:].copy()
# Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3))
CS_l1b_mds['Location']['Real_beam'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
'''
Test the helper functions
Author: <NAME> - <EMAIL>
2019
'''
import pytest
from numpy.random import randint, rand
import numpy as np
import scipy.io as sio
from helpers import *
@pytest.fixture(scope="module")
def X_lighthouse():
'''Return the lighthouse image X'''
return sio.loadmat('test_mat/lighthouse.mat')['X'].astype(float)
@pytest.fixture(scope="module")
def h_simple():
'''Return the simple 3-tap filter in Handout Section 6.1'''
return np.array([1, 2, 1]) / 4
@pytest.fixture(scope="module")
def matlab_output():
'''Return the expected outputs from MATLAB'''
return sio.loadmat('test_mat/matlabout.mat')
@pytest.fixture(scope="module")
def pot_ii_dat():
"""Return the expected outputs from MATLAB"""
return sio.loadmat('test_mat/pot_ii.mat')
@pytest.fixture(scope="module")
def dwt_idwt_dat():
"""Return the expected outputs from MATLAB"""
return sio.loadmat('test_mat/dwt_idwt_dat.mat')
def X_odd():
'''Return a random 3 x 3 matrix'''
return randint(0, 256, (3, 3))
def X_even():
'''Return a random 4 x 4 matrix'''
return randint(0, 256, (4, 4))
def h_odd():
'''Return a random filter of length 3'''
h = rand(3) - 0.5
return h / sum(h)
def h_even():
'''Return a random filter of length 4'''
h = rand(4) - 0.5
return h / sum(h)
@pytest.mark.parametrize("X, h, align", [
(X, h, align) for X in (X_odd(), X_even()) for h in (h_odd(), h_even()) for align in (True, False)
])
def test_rowdec_random(X, h, align):
'''Test if rowdec handles odd and even dimensions correctly and triggers no index out of range errors'''
rowdec(X, h, align_with_first=align)
@pytest.mark.parametrize("X, h, align", [
(X, h, align) for X in (X_odd(), X_even()) for h in (h_odd(), h_even()) for align in (True, False)
])
def test_rowint_random(X, h, align):
'''Test if rowint handles odd and even dimensions correctly and triggers no index out of range errors'''
rowint(X, h, align_with_first=align)
@pytest.mark.parametrize("X, h, align, expected", [
(np.array([[1, 2, 3, 4]]), np.array([1, 2, 1]) / 4,
True, np.array([[1.5, 3]])),
(np.array([[1, 2, 3, 4]]), np.array([1, 2, 1]) / 4,
False, np.array([[2., 3.5]])),
(np.array([[1, 2, 3, 4, 5, 6]]), np.array([2, 3]) / 5,
True, np.array([[1.6, 3.6, 5.6]])),
(np.array([[1, 2, 3, 4, 5, 6]]), np.array([2, 3]) / 5,
False, np.array([[2.6, 4.6]])),
])
def test_rowdec_small(X, h, align, expected):
'''Test for accurate answer for small test cases'''
assert np.allclose(rowdec(X, h, align_with_first=align), expected)
@pytest.mark.parametrize("X, h, align, expected", [
(np.array([[1, 2, 3]]), np.array([1, 2, 1]) / 4,
True, np.array([[0.5, 0.75, 1., 1.25, 1.5, 1.5]])),
(np.array([[1, 2, 3]]), np.array([1, 2, 1]) / 4,
False, np.array([[0.5, 0.5, 0.75, 1., 1.25, 1.5]])),
(np.array([[1, 2, 3]]), np.array([2, 3, 2, 3]) / 10,
True, np.array([[0.4, 0.9, 0.6, 1.5, 1., 1.8]])),
(np.array([[1, 2, 3]]), np.array([2, 3, 2, 3]) / 10,
False, np.array([[0.4, 0.9, 0.6, 1.5, 1., 1.8]])),
])
def test_rowint_small(X, h, align, expected):
'''Test for accurate answer for small test cases'''
assert np.allclose(rowint(X, h, align_with_first=align), expected)
def test_rowdec(X_lighthouse, h_simple, matlab_output):
'''Compare the output with Matlab using maximum absolute difference'''
assert np.max(abs(
rowdec(X_lighthouse, h_simple) - matlab_output['rowdecXh'])) == 0
def test_rowint(X_lighthouse, h_simple, matlab_output):
'''Compare the output with Matlab using maximum absolute difference'''
assert np.max(abs(
rowint(X_lighthouse, 2 * h_simple) - matlab_output['rowintX2h'])) == 0
@pytest.mark.parametrize("X, entropy", [
(np.array([[1, -2], [3, -4]]), 2), # log2(4)
(np.array([[-0.3, 1.51], [2.3, 0.49]]), 1), # [0, 2, 2, 0] -> log2(2)
(np.array([-128, -127.49, 127, 126.49]), 2) # log2(4)
])
def test_bpp(X, entropy):
'''Simple tests for bits per pixel'''
assert(bpp(X) == entropy)
@pytest.mark.parametrize("X, step, Xq", [
(np.array([[1.49, 1.51], [1.51, 1.49]]), 1, np.array([[1, 2], [2, 1]])),
(np.array([[1.49, 1.51], [1.51, 1.49]]), 2, np.array([[2, 2], [2, 2]]))
])
def test_quantise(X, step, Xq):
'''Simple quantise tests'''
assert np.array_equal(quantise(X, step), Xq)
@pytest.mark.parametrize("N, C", [
(1, np.array([[1]])),
(2, np.array([[1/(2 ** 0.5), 1/(2 ** 0.5)],
[np.cos(np.pi/4), np.cos(3 * np.pi/4)]]))
])
def test_dct_ii(N, C):
assert np.allclose(dct_ii(N), C)
def test_dct_ii_matlabout(matlab_output):
assert np.allclose(dct_ii(8), matlab_output['C8'])
@pytest.mark.parametrize("N, C", [
(1, np.array([[1.0]])),
(2, np.array([[np.cos(np.pi/8), np.cos(3 * np.pi/8)],
[np.cos(3 * np.pi/8), np.cos(9 * np.pi/8)]]))
])
def test_dct_iv(N, C):
assert np.allclose(dct_iv(N), C)
@pytest.mark.parametrize("X, C, Y", [
(np.ones((4, 4)), np.ones((2, 2)), np.array(
[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])),
(np.arange(16).reshape((4, 4)), np.eye(2)[::-1], # [[0, 1], [1, 0]] swap every two rows
np.array([[4, 5, 6, 7], [0, 1, 2, 3], [12, 13, 14, 15], [8, 9, 10, 11]])),
# This should be the test for extend_X_colxfm
# (np.ones((3, 3)), np.ones((2, 2)), np.array(
# [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]))
])
def test_colxfm(X, C, Y):
assert np.array_equal(Y, colxfm(X, C))
def test_colxfm_matlabout(matlab_output):
X, Y, Z, C8 = (matlab_output[key] for key in ('X', 'Y', 'Z', 'C8'))
assert np.allclose(Y, colxfm(colxfm(X, C8).T, C8).T)
assert np.allclose(Z, colxfm(colxfm(Y.T, C8.T).T, C8.T))
assert np.allclose(X, Z)
@pytest.mark.parametrize("Y_regrouped, Y, N", [
(np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]), np.array(
[[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]), 2),
(np.array([[1, 1, 2, 2], [3, 3, 4, 4], [1, 1, 2, 2], [3, 3, 4, 4]]), np.array(
[[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]), [1, 2]),
(np.array([[1, 2, 1, 2], [1, 2, 1, 2], [3, 4, 3, 4], [3, 4, 3, 4]]), np.array(
[[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]), [2, 1]),
(np.array([
[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11],
[24, 27, 30, 33, 25, 28, 31, 34, 26, 29, 32, 35],
[48, 51, 54, 57, 49, 52, 55, 58, 50, 53, 56, 59],
[72, 75, 78, 81, 73, 76, 79, 82, 74, 77, 80, 83],
[96, 99, 102, 105, 97, 100, 103, 106, 98, 101, 104, 107],
[120, 123, 126, 129, 121, 124, 127, 130, 122, 125, 128, 131],
[12, 15, 18, 21, 13, 16, 19, 22, 14, 17, 20, 23],
[36, 39, 42, 45, 37, 40, 43, 46, 38, 41, 44, 47],
[60, 63, 66, 69, 61, 64, 67, 70, 62, 65, 68, 71],
[84, 87, 90, 93, 85, 88, 91, 94, 86, 89, 92, 95],
[108, 111, 114, 117, 109, 112, 115, 118, 110, 113, 116, 119],
[132, 135, 138, 141, 133, 136, 139, 142, 134, 137, 140, 143]]),
np.arange(144).reshape((12, 12)), [2, 3]),
import pandas as pd
import os
import numpy as np
from tqdm import tqdm
import torch
import argparse
from rdkit import Chem
from bms.utils import get_file_path
from bms.dataset import BMSSumbissionDataset
from bms.transforms import get_val_transforms
from bms.model import EncoderCNN, DecoderWithAttention
from bms.model_config import model_config
from bms.utils import load_pretrain_model
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
tqdm.pandas()
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def make_inchi_from_smile(smile):
inchi = 'InChI=1S/'
try:
inchi = Chem.MolToInchi(Chem.MolFromSmiles(smile))
except:
pass
return inchi
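# Example (sketch): a valid SMILES is converted via RDKit; anything that fails
# to parse falls back to the bare 'InChI=1S/' prefix.
#   make_inchi_from_smile('CCO')          # -> 'InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3' (ethanol)
#   make_inchi_from_smile('not-a-smiles') # -> 'InChI=1S/'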
def test_loop(data_loader, encoder, decoder, tokenizer, max_seq_length):
if decoder.training:
decoder.eval()
if encoder.training:
encoder.eval()
text_preds = []
tq = tqdm(data_loader, total=len(data_loader))
for images in tq:
images = images.to(DEVICE)
with torch.cuda.amp.autocast():
with torch.no_grad():
features = encoder(images)
predictions = decoder.predict(
features, max_seq_length, tokenizer.token2idx["<sos>"])
predicted_sequence = torch.argmax(predictions.detach().cpu(), -1).numpy()
text_preds.append(
tokenizer.predict_captions(predicted_sequence))
return np.concatenate(text_preds)
# This file contains an attempt at putting the network trained in EncDec.py into practice
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model, load_model
import pandas as pd
import pandas_ml as pdml
from matplotlib.widgets import Slider
def decode(onehot):
return np.argmax(onehot)
SNR = 6
M = 64
C = 1
L = 5
graph = False
confusion = False
graph_pretty = True
# Generate random signal of length 32 with 64 possible values
siglength = 100000
sig = np.random.randint(0, M, siglength)
data = np.array(sig)
data = keras.utils.to_categorical(data, num_classes=M)
data = data.astype(int)
data = np.reshape(data, (data.shape[0], 1, 1, data.shape[1]))
# Load model and compile encoder and decoder portions
model = load_model('Trained/inputs_'+str(M)+'_L_'+str(L)+'_snr_20.h5')
x = keras.layers.Input(shape=(1,1,M))
encoder = Model(x, model.layers[2](model.layers[1](x)))
decoder = model.layers[3]
encoder.save_weights('encoder_weights.h5')
decoder.save_weights('decoder_weights.h5')
encoder.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
decoder.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
encoder.load_weights('encoder_weights.h5')
decoder.load_weights('decoder_weights.h5')
# Pass input through network and decode output (implement a LUT)
predicted = encoder.predict(data)
noise = np.random.normal(0, np.sqrt(C/(10**(SNR/10))), predicted.size)
noisysig = np.reshape(noise, predicted.shape)+predicted
encoded = decoder.predict(noisysig)
sig_hat = np.zeros(siglength)
for i in range(encoded.shape[0]):
sig_hat[i] = decode(encoded[i])
# Check what kind of plot we want
if graph == True:
if graph_pretty == False:
SIG = sig
SIG_HAT = sig_hat
numpoints = siglength
else:
SIG = np.zeros(siglength*10-9)
SIG[:] = np.nan
for n in np.arange(0, siglength*10-9, 10):
SIG[n] = (sig[int(n/10)]-M/2) * 5 / (M/2)
SIG_HAT = np.zeros(siglength*10-9)
SIG_HAT[:] = np.nan
for n in np.arange(0, siglength*10-9, 10):
SIG_HAT[n] = (sig_hat[int(n/10)]-M/2) * 5 / (M/2)
numpoints = siglength*10-9
# Plot both signals
sigtoplot = pd.Series(SIG)
sigtoplot.set_axis(np.linspace(0.0, 9.9, num=numpoints, endpoint=True), inplace=True)
sigtoplot = sigtoplot.interpolate(method='cubic')
sigtoplot.plot(linewidth=3, color='red')
sigtoplot = pd.Series(SIG_HAT)
sigtoplot.set_axis(np.linspace(0.0, 9.9, num=numpoints, endpoint=True), inplace=True)
sigtoplot = sigtoplot.interpolate(method='cubic')
sigtoplot.plot(linestyle='--', color='black')
plt.title('Signal Comparison')
plt.ylabel('Signal Voltage')
plt.xlabel('Time (s)')
plt.legend(['Input', 'Output'], loc='upper left')
plt.show()
symbol_diff = 0
for n in np.arange(sig_hat.size):
from math import sqrt
import networkx as nx
from sklearn.linear_model import TheilSenRegressor, LinearRegression, HuberRegressor
from copy import deepcopy
from collections import Counter
import numpy as np
def class_disbalance(cluster):
signal = []
for node in cluster:
signal.append(node['signal'])
return list(zip(*np.unique(signal, return_counts=True)))
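# Example: for nodes [{'signal': 1}, {'signal': 1}, {'signal': 0}] this returns
# [(0, 1), (1, 2)], i.e. (class label, count) pairs sorted by label.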
def class_disbalance__(cluster):
signal = []
for node in cluster:
signal.append(node['signal'])
return np.unique(signal, return_counts=True)
def estimate_start_xyz(cluster, k=3, shift_x=0., shift_y=0., shift_z=-2000.):
xs = []
ys = []
zs = []
for i in range(len(cluster)):
xs.append(cluster[i]['features']['SX'])
ys.append(cluster[i]['features']['SY'])
zs.append(cluster[i]['features']['SZ'])
xs = np.array(xs)
ys = np.array(ys)
zs = np.array(zs)
argosorted_z = np.argsort(zs)
x = np.median(np.median(xs[argosorted_z][:k])) + shift_x
y = np.median(np.median(ys[argosorted_z][:k])) + shift_y
z = np.median(np.median(zs[argosorted_z][:k])) + shift_z
return x, y, z
# create maps
from sqlays import export_sql, import_sql
from iscays import isc_xlsx
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import maskoceans
# brew install geos
# pip3 install https://github.com/matplotlib/basemap/archive/master.zip
# for DIVA tools
# https://github.com/gher-ulg/DivaPythonTools
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import numpy as np
import sys, os, glob, re
from scipy.interpolate import griddata
import scipy.ndimage
import matplotlib.tri as tri
import math
from timeinfo import day_night
from datetime import datetime
def station_map (dict_cruise_pos, topo_ary, lat_min, lat_max, lon_min, lon_max, label_color):
'''
create maps showing the location of stations
the dictionary should have the form shown below:
dict = {'cruise1': ((lat),(lon)), 'cruise2': ((lat),(lon))}
'''
#################################################################################
# 1. create map
fig, ax = plt.subplots(figsize=(10,10))
m = Basemap(projection='merc', lat_0 = (lat_min+lat_max)/2, lon_0 = (lon_min+lon_max)/2, resolution = 'h',
llcrnrlon = lon_min, llcrnrlat = lat_min, urcrnrlon = lon_max, urcrnrlat = lat_max, ax=ax)
m.drawcoastlines()
m.drawcountries()
m.etopo()
m.shadedrelief()
m.drawmapboundary()
m.fillcontinents(color='grey')
#################################################################################
# 2. draw lat/lon grid lines every 5 degrees. labels = [left, right, top, bottom]
m.drawmeridians(np.arange(lon_min, lon_max, math.ceil(abs((lon_max-lon_min)/3))), labels=[0,1,0,1], fontsize=10) # line for longitude
m.drawparallels(np.arange(lat_min, lat_max, math.ceil(abs((lat_max-lat_min)/3))), labels=[1,0,1,0], fontsize=10) # line for latitude
#################################################################################
# 3. draw the contour of bathymetry
x = topo_ary[:,0] # lat
y = topo_ary[:,1] # lon
z = topo_ary[:,2] # topo
lon, lat = np.meshgrid(np.linspace(np.min(y), np.max(y), 100), np.linspace(np.min(x), np.max(x),100))
topo = griddata((y, x), z, (lon, lat), method='cubic')
lon_m, lat_m = m(lon, lat)
mask_ocean = topo >= 0 # mask inland
topo_ocean = np.ma.masked_array(topo, mask=mask_ocean)
#topo_ocean = maskoceans(lon_m, lat_m, topo, inlands=False, grid=10)
m.contourf(lon_m, lat_m, topo_ocean, cmap = 'Blues_r')
m.contour(lon_m, lat_m, topo_ocean, colors = 'black', linewidths = 0.3)
#################################################################################
# 4. locate the station on the map
# get the data frame from SQL server and drop the duplication filtered by station name
color_list = label_color; c = 0
for cruise, pos in dict_cruise_pos.items():
lat_list = pos[0]
lon_list = pos[1]
lons_m, lats_m = m(lon_list,lat_list)
m.scatter(lons_m,lats_m, marker='o', s=15, label=cruise, color=color_list[c], edgecolors='black')
c += 1
ax.legend(loc='upper right')
################################################################################
return ax, m
def bathy_data (minlat, maxlat, minlon, maxlon):
'''
return an array : [[lat, lon, topo], [lat, lon, topo], ...]
data from : https://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
'''
import io, csv, json
import urllib.request as urllib2
url = 'https://coastwatch.pfeg.noaa.gov/erddap/griddap/srtm30plus_LonPM180.json?z[(%s):100:(%s)][(%s):100:(%s)]'%(minlat, maxlat, minlon, maxlon)
response = urllib2.urlopen(url)
data = response.read()
data_dic = json.loads(data.decode('utf-8'))
topo = np.asarray(data_dic['table']['rows'])
return topo
import numpy as np
import matplotlib.pyplot as plt
import os
import pydicom as pyd
from glob import glob
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
import bisect
import random
import math
from Augmentor import *
def import_dicom_data(path):
data_path = path + '/images/'
annot_path = path + '/labels/'
# sort so that images and labels pair up by filename
data_list = sorted(glob(data_path + '*.dcm'))
annot_list = sorted(glob(annot_path + '*.dcm'))
N = len(data_list)
data = []
annot = []
annot_frames = np.zeros((N))
print('Data Image Resolutions')
for i in range(N):
x = pyd.read_file(data_list[i]).pixel_array
x = x[:len(x) // 2]
y = pyd.read_file(annot_list[i]).pixel_array
y = y[:len(y) // 2]
n_frame = 0
for j in range(y.shape[0]):
if np.where(y[j] == 1)[0].size > 0:
n_frame += 1
annot_frames[i] = n_frame
print(x.shape, n_frame)
data.append(x)
annot.append(y)
return data, annot
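# Example usage (sketch; the directory name is hypothetical). `path` is expected
# to contain 'images/' and 'labels/' sub-folders of DICOM files:
#   data, annot = import_dicom_data('/data/thyroid_us')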
def zeropad(data, annot, h_max, w_max):
# If the data is a list of images of different resolutions
# useful in testing
if isinstance(data, list):
n = len(data)
data_pad = np.zeros((n, h_max, w_max))
annot_pad = np.zeros((n, h_max, w_max))
for i in range(n):
pad_l1 = (h_max - data[i].shape[0]) // 2
pad_l2 = (h_max - data[i].shape[0]) - (h_max - data[i].shape[0]) // 2
pad_h1 = (w_max - data[i].shape[1]) // 2
pad_h2 = (w_max - data[i].shape[1]) - (w_max - data[i].shape[1]) // 2
data_pad[i] = np.pad(data[i], ((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0)))
annot_pad[i] = np.pad(annot[i], ((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0)))
# If data is a numpy array with images of same resolution
else:
pad_l1 = (h_max - data.shape[1]) // 2
pad_l2 = (h_max - data.shape[1]) - (h_max - data.shape[1]) // 2
pad_h1 = (w_max - data.shape[2]) // 2
pad_h2 = (w_max - data.shape[2]) - (w_max - data.shape[2]) // 2
data_pad = np.pad(data, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0), (0, 0)))
annot_pad = np.pad(annot, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0), (0, 0)))
return data_pad, annot_pad
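# Example (sketch): pad a stack of 3x3 frames to 5x5 with the originals centred.
#   data_pad, annot_pad = zeropad(np.ones((2, 3, 3)), np.zeros((2, 3, 3)), 5, 5)
#   data_pad.shape   # (2, 5, 5)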
def data_augment(imgs, lb):
p = Pipeline()
p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
imgs_temp, lb_temp = np.zeros(imgs.shape), np.zeros(imgs.shape)
for i in range(imgs.shape[0]):
pil_images = p.sample_with_array(imgs[i], ground_truth=lb[i], mode='L')
imgs_temp[i], lb_temp[i] = np.asarray(pil_images[0]), np.asarray(pil_images[1])
return imgs_temp, lb_temp
def get_weighted_batch(imgs, labels, batch_size, data_aug, high_skew=False):
while 1:
thy_re = [np.count_nonzero(labels[i] == 1) * 1.0 / np.prod(labels[i].shape) for i in range(imgs.shape[0])]
if high_skew==True:
thy_re = [el**2 for el in thy_re]
cumul = [thy_re[0]]
for item in thy_re[1:]: cumul.append(cumul[-1] + item)
total_prob = sum(thy_re)
ar_inds = [bisect.bisect_right(cumul, random.uniform(0, total_prob)) for i in range(batch_size)]
lb, batch_imgs = labels[ar_inds], imgs[ar_inds]
l, r, t, b = 0, batch_imgs.shape[1], 0, batch_imgs.shape[2]
for i in range(batch_imgs.shape[1]):
if np.all(batch_imgs[:, i, :] == 0):
l = i + 1
else:
break
for i in range(batch_imgs.shape[1] - 1, -1, -1):
if np.all(batch_imgs[:, i, :] == 0):
r = i
else:
break
for i in range(batch_imgs.shape[2]):
if np.all(batch_imgs[:, :, i] == 0):
t = i + 1
else:
break
for i in range(batch_imgs.shape[2] - 1, -1, -1):
if np.all(batch_imgs[:, :, i] == 0):
b = i
else:
break
l, r, t, b = (l // 16) * 16, math.ceil(r * 1.0 / 16) * 16, (t // 16) * 16, math.ceil(b * 1.0 / 16) * 16
l, r, t, b = int(l), int(r), int(t), int(b)
batch_imgs, lb = batch_imgs[:, l:r, t:b], lb[:, l:r, t:b]
if (data_aug):
batch_imgs, lb = data_augment(batch_imgs, lb)
yield np.expand_dims(batch_imgs, axis=3),np.expand_dims(lb, axis=3)
def get_weighted_batch_window_2d(imgs, labels, batch_size, data_aug, n_window=0, high_skew=False):
# a=0
# if a==0:
# print('datagen')
while 1:
thy_re = [np.count_nonzero(labels[i] == 1) * 1.0 / np.prod(labels[i].shape) for i in range(imgs.shape[0])]
if high_skew==True:
thy_re = [el**2 for el in thy_re]
cumul = [thy_re[0]]
for item in thy_re[1:]: cumul.append(cumul[-1] + item)
total_prob = sum(thy_re)
ar_inds = [bisect.bisect_right(cumul, random.uniform(0, total_prob)) for i in range(batch_size)]
if n_window==0:
batch_imgs = imgs[ar_inds]
# Get n_window frames per index.
else:
batch_imgs = np.zeros((batch_size*n_window,imgs.shape[1],imgs.shape[2]))
import pandas as pd
import numpy as np
import scipy as sp
import scipy.fftpack
import matplotlib.pyplot as plt
from scipy import signal as spsig
from scipy import ndimage
from tqdm import tqdm
import math
def conv_filter(signal, window_size, filter='gaussian', std=None, num_filtering=1):
"""
Args:
filter : 'gaussian', 'average'
"""
if filter == 'gaussian':
std = std if std is not None else (window_size - 1) / 4
w = spsig.gaussian(window_size, std)
w = w / np.sum(w)
elif filter == 'average':
w = np.ones(window_size) / window_size
filtered_sig = signal.copy()
for i in range(num_filtering):
filtered_sig = np.pad(filtered_sig, (window_size//2, window_size//2), 'reflect')
filtered_sig = np.convolve(filtered_sig, w, 'valid')
#print('size signal / filtered signal : {0} / {1}'.format(len(signal), len(filtered_sig)))
return filtered_sig
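# Example (sketch, for a 1-D array `sig`): the reflection padding plus the
# unit-sum window keeps the output the same length as the input for odd window sizes.
#   smoothed = conv_filter(sig, window_size=11, filter='average')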
def gaussian_filter(signal, std, num_filtering=1):
filtered_sig = signal.copy()
for i in range(num_filtering):
filtered_sig = ndimage.gaussian_filter(filtered_sig, std, mode='reflect')
return filtered_sig
def to_const_filter(signal, filter='median'):
if filter == 'median':
const = np.median(signal)
elif filter == 'average':
const = np.average(signal)
filtered_sig = np.ones_like(signal) * const
return filtered_sig
def open_channel_filter(signal, open_channels, oc_to_use=None):
if oc_to_use is None:
uni_oc, count = np.unique(open_channels, return_counts=True)
oc_to_use = uni_oc[np.argmax(count)]
filtered_sig = signal.copy()
filtered_sig[open_channels != oc_to_use] = np.nan
filtered_sig = pd.Series(filtered_sig)
filtered_sig = filtered_sig.interpolate(method='linear', limit_direction='both')
filtered_sig = filtered_sig.interpolate(method='linear', limit_direction='forward')
filtered_sig = filtered_sig.interpolate(method='linear', limit_direction='backward')
filtered_sig = filtered_sig.values
return filtered_sig
def shift(signal, n):
fill_val = signal[0] if n > 0 else signal[-1]
shifted_sig = np.ones_like(signal) * fill_val
if n > 0:
shifted_sig[n:] = signal[:-n]
else:
shifted_sig[:n] = signal[-n:]
return shifted_sig
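# Example: the vacated samples are filled with the edge value.
#   shift(np.array([1, 2, 3, 4]), 1)    # -> array([1, 1, 2, 3])
#   shift(np.array([1, 2, 3, 4]), -1)   # -> array([2, 3, 4, 4])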
def max_log_likelihood(init_value, signal, serch_range, n_div, trunc_range):
"""
https://www.kaggle.com/statsu/average-signal
calculate maximum log likelihood near init_value.
"""
xgrid = np.linspace(init_value-serch_range, init_value+serch_range, n_div)
logll_max = None
x_max = None
for x in xgrid:
tg_sig = signal[np.abs(signal - x) < trunc_range]
logll = - np.average((tg_sig - x)**2) / 2
if logll_max is None:
logll_max = logll
x_max = x
elif logll_max < logll:
logll_max = logll
x_max = x
return x_max
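# Example (sketch, `sig` is a 1-D signal array): refine a rough level estimate
# with the same settings PreProcess_v5_0_0 uses below.
#   centre = max_log_likelihood(np.median(sig), sig, serch_range=0.8, n_div=500, trunc_range=0.3)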
def distance_from_ave_wo_label(signal, serch_range, n_div, trunc_range, max_channel, sig_dist, dist_coef):
init_value = np.median(signal)
base_ave_sig = max_log_likelihood(init_value, signal, serch_range, n_div, trunc_range)
print('base_ave_sig ', base_ave_sig)
# average signals of each open channels
ave_sigs = base_ave_sig + np.arange(-max_channel, max_channel + 1) * sig_dist
# signal : (time,)
# ave_sigs : (max_channel*2-1,)
# distance of average signals of each open channels
dists = np.exp(- (signal[:,None] - ave_sigs[None,:])**2 / sig_dist**2 * dist_coef) # (time, max_channel*2-1)
return dists
def distance_from_ave_with_label(signal, open_channels, max_channel, sig_dist, dist_coef, use_ave=True, use_middle=False):
uni_oc, count = np.unique(open_channels, return_counts=True)
# calc base channel and average signal
base_oc = uni_oc[np.argmax(count)]
if use_ave:
base_ave_sig = np.average(signal[open_channels==base_oc])
else:
base_ave_sig = np.median(signal[open_channels==base_oc])
# calc distance of average signals of each open channels
if sig_dist is None:
second_oc = uni_oc[np.argsort(count)[-2]]
if use_ave:
second_ave_sig = np.average(signal[open_channels==second_oc])
else:
second_ave_sig = np.median(signal[open_channels==second_oc])
sig_dist = np.abs(base_ave_sig - second_ave_sig) / np.abs(base_oc - second_oc)
ave_sigs = np.arange(0, max_channel+1) * sig_dist - base_oc * sig_dist + base_ave_sig
# middle
if use_middle:
asigs = []
for i in range(len(ave_sigs)):
asigs.append(ave_sigs[i])
if i < len(ave_sigs) - 1:
asigs.append((ave_sigs[i] + ave_sigs[i+1])*0.5)
ave_sigs = np.array(asigs)
# calc dist_coef
if dist_coef is None:
tg_sig = signal[open_channels==base_oc]
if use_ave:
s = np.std(tg_sig)
else:
# normalized interquartile range
s = (np.percentile(tg_sig, 75) - np.percentile(tg_sig, 25)) * 0.5 * 1.3490
dist_coef = 1.0 / (2.0 * s ** 2) * sig_dist**2
# signal : (time,)
# ave_sigs : (max_channel*2-1,)
# distance of average signals of each open channels
dists = np.exp(- (signal[:,None] - ave_sigs[None,:])**2 / sig_dist**2 * dist_coef) # (time, max_channel*2-1)
return dists
def apply_each_group(signal, group, func, args, open_channels=None):
num_groups = len(np.unique(group))
sigs = []
start_idx = 0
for gr in tqdm(range(num_groups)):
num_element = np.sum(group == gr)
if open_channels is None:
sig = signal[start_idx : start_idx+num_element]
sig = func(sig, *args)
else:
sig = signal[start_idx : start_idx+num_element]
oc = open_channels[start_idx : start_idx+num_element]
sig = func(sig, oc, *args)
sigs.append(sig)
start_idx += num_element
sigs = np.concatenate(sigs)
return sigs
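# Example (sketch, `sig` and `group` are aligned 1-D arrays): filter each
# contiguous group id independently and concatenate the results, mirroring the
# settings used by the PreProcess classes below.
#   filtered = apply_each_group(sig, group, conv_filter, [10001, 'gaussian', 2500.0, 1])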
def plot_signal(signal):
res = 1
plt.figure(figsize=(20,5))
plt.plot(range(0,len(signal), res), signal[0::res])
plt.xlabel('Row',size=16); plt.ylabel('Signal',size=16)
plt.show()
return
class PreProcess_v1:
def __init__(self):
self.signal_average = np.array([1.386246,]).astype('float32')
self.signal_std = np.array([3.336219,]).astype('float32')
self.input_channels = 1
def preprocessing(self, data_df):
# signal
sig = data_df.signal.values.astype('float32')
sig = sig[:, None] # (time, channel)
# group
group = data_df.group.values.astype('int64')
# open_channels
if 'open_channels' in data_df.columns:
open_channels = data_df.open_channels.values.astype('int64')
else:
open_channels = None
# check
self.check_value(sig, group, open_channels)
return sig, group, open_channels
def check_value(self, sig, group, open_channels):
print('ave : data {0} / constant {1}'.format(np.average(sig, axis=0), self.signal_average))
print('std : data {0} / constant {1}'.format(np.std(sig, axis=0), self.signal_std))
class PreProcess_v2:
"""
no implementation
"""
def __init__(self):
return
class PreProcess_v3_0_1:
def __init__(self):
self.signal_average = np.array([1.3673096e-06,]).astype('float32')
self.signal_std = np.array([1.139225,]).astype('float32')
self.input_channels = 1
# filter
self.window_size = 10001
self.filter='gaussian'
self.std = (self.window_size - 1) / 4
self.num_filtering = 1
return
def preprocessing(self, data_df):
# signal
sig = data_df.signal.values
sig = sig - apply_each_group(sig, data_df.group.values, conv_filter,
[self.window_size, self.filter, self.std, self.num_filtering])
sig = sig[:, None].astype('float32') # (time, channel)
# group
group = data_df.group.values.astype('int64')
# open_channels
if 'open_channels' in data_df.columns:
open_channels = data_df.open_channels.values.astype('int64')
else:
open_channels = None
# check
self.check_value(sig, group, open_channels)
#plot_signal(sig)
return sig, group, open_channels
def check_value(self, sig, group, open_channels):
print('ave : data {0} / constant {1}'.format(np.average(sig, axis=0), self.signal_average))
print('std : data {0} / constant {1}'.format(np.std(sig, axis=0), self.signal_std))
class PreProcess_v3_1_5:
def __init__(self):
self.signal_average = np.array([1.3700463e-06, 1.3901746e+00]).astype('float32')
self.signal_std = np.array([1.1374537, 3.1242452]).astype('float32')
self.input_channels = 2
# filter
self.window_size = 10001
self.filter='gaussian'
self.std = (self.window_size - 1) / 4
self.num_filtering = 1
return
def preprocessing(self, data_df):
# signal
sig = data_df.signal.values
sig2 = apply_each_group(sig, data_df.group.values, conv_filter,
[self.window_size, self.filter, self.std, self.num_filtering])
sig = np.concatenate([(sig-sig2)[:, None], sig2[:, None]], axis=1).astype('float32') # (time, channel)
# group
group = data_df.group.values.astype('int64')
# open_channels
if 'open_channels' in data_df.columns:
open_channels = data_df.open_channels.values.astype('int64')
else:
open_channels = None
# check
self.check_value(sig, group, open_channels)
#plot_signal(sig)
return sig, group, open_channels
def check_value(self, sig, group, open_channels):
print('ave : data {0} / constant {1}'.format(np.average(sig, axis=0), self.signal_average))
print('std : data {0} / constant {1}'.format(np.std(sig, axis=0), self.signal_std))
# combine before after
class PreProcess_v4_0_1:
def __init__(self):
self.signal_average = np.array([1.3901746e+00] + [1.3700463e-06]*11).astype('float32')
self.signal_std = np.array([3.1242452] + [1.1374537]*11).astype('float32')
self.input_channels = 12
# filter
self.window_size = 10001
self.filter='gaussian'
self.std = (self.window_size - 1) / 4
self.num_filtering = 1
# shift
self.shift_lens = (-5, -4, -3, -2, -1, 1, 2, 3, 4, 5)
return
def preprocessing(self, data_df):
# signal
sigs = []
sig = data_df.signal.values
sig2 = apply_each_group(sig, data_df.group.values, conv_filter,
[self.window_size, self.filter, self.std, self.num_filtering])
sigs.append(sig2[:,None])
sig = sig - sig2
sigs.append(sig[:,None])
for sh in self.shift_lens:
sigs.append(shift(sig, sh)[:,None])
sigs = np.concatenate(sigs, axis=1).astype('float32') # (time, channel)
# group
group = data_df.group.values.astype('int64')
# open_channels
if 'open_channels' in data_df.columns:
open_channels = data_df.open_channels.values.astype('int64')
else:
open_channels = None
# check
self.check_value(sigs, group, open_channels)
#plot_signal(sig)
return sigs, group, open_channels
def check_value(self, sig, group, open_channels):
print('ave : data {0} / constant {1}'.format(np.average(sig, axis=0), self.signal_average))
print('std : data {0} / constant {1}'.format(np.std(sig, axis=0), self.signal_std))
# use signal center of each channel without label
class PreProcess_v5_0_0:
def __init__(self):
self.signal_average = np.array([0]).astype('float32')
self.signal_std = np.array([1]).astype('float32')
self.input_channels = 21
# filter
self.window_size = 10001
self.filter='gaussian'
self.std = (self.window_size - 1) / 4
self.num_filtering = 1
# ave_signal_base_open_channel
self.serch_range = 0.8
self.n_div = 500
self.trunc_range = 0.3
self.max_channel = 10
self.sig_dist = 1.21
self.dist_coef = 1.0
return
def preprocessing(self, data_df):
# signal
sigs = data_df.signal.values
sigs = sigs - apply_each_group(sigs, data_df.group.values, conv_filter,
[self.window_size, self.filter, self.std, self.num_filtering])
sigs = apply_each_group(sigs, data_df.group.values, distance_from_ave_wo_label,
[self.serch_range, self.n_div, self.trunc_range, self.max_channel, self.sig_dist, self.dist_coef])
sigs = sigs.astype('float32')
# group
group = data_df.group.values.astype('int64')
# open_channels
if 'open_channels' in data_df.columns:
open_channels = data_df.open_channels.values.astype('int64')
else:
open_channels = None
# check
self.check_value(sigs, group, open_channels)
#plot_signal(sig)
return sigs, group, open_channels
def check_value(self, sig, group, open_channels):
print('ave : data {0} / constant {1}'.format(np.average(sig, axis=0), self.signal_average))
print('std : data {0} / constant {1}'.format(np.std(sig, axis=0), self.signal_std))
# use signal center of each channel with label
class PreProcess_v6_0_0:
def __init__(self):
self.signal_average = np.array([0]).astype('float32')
self.signal_std = np.array([1]).astype('float32')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 23:08:32 2021
@author: jimlee
"""
import csv
import numpy as np
import math
# open the csv file
rawData = np.genfromtxt('train.csv', delimiter=',')
data = rawData[1:,3:] # data is ready, but need to be reorganized
# Test NR
print(data[10,0])
''' Here change the NaN term to 0'''
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if(math.isnan(data[i,j])):
data[i,j] = 0
''' Here change the NaN term to 0'''
# Check if NR changed to 0.0
print(data[10,0])
'''
Now we need to reorganize the data into the following form:
Let x be the feature vector (dim = 18x1),
and let x1_1_0 denote the feature vector at 1/1 0:00, and so on:
[ x1_1_0 ... x1_1_23 x1_2_0 ... x1_2_23 ... x12_20_0 ... x12_20_23]
The dimension of the matrix must be 18x5760
'''
reorganizedData = np.zeros((18,5760))
startRowIndex = 0
startColumnIndex = 0
counter = 1
for i in range(data.shape[0]):
if counter % 18 == 0:
reorganizedData[:,startColumnIndex:startColumnIndex + 24] = data[startRowIndex:i + 1, :]
startRowIndex = i + 1
startColumnIndex = startColumnIndex + 24
counter += 1
'''Now We Have The Reorganized Data, We Have To Separate The Train_x, Train_y From It'''
X = np.zeros((5652, 162)) # Train x
y_head = np.zeros((5652,1)) # Train y
for month in range(12):
for hour in range(471):
xi = []
for i in range(hour,hour + 9):
xi = np.append(xi,np.transpose(reorganizedData[:, month * 480 + i]))
y_head[month * 471 + hour, 0] = reorganizedData[9, month * 480 + hour + 9]
X[month * 471 + hour,:] = xi
''' The training data need to be normalized'''
for column in range(X.shape[1]):
X[:,column] = (X[:,column] - X[:,column].mean()) / math.sqrt(X[:,column].var())
''' Now we have successfully sampled 5652 sets of training data. It's time to do the iteration'''
''' Define the training method'''
method = "ADAM_CUBIC_MODEL"
if method == "ADAGRAD":
print("ADAGRAD")
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
lr = 0.01
w = np.zeros((163,1))
prevGrad = np.zeros((163,1))
eipsilon = 1E-8 # this is for numerical stability
for i in range(1, 100000):
y = np.dot(X,w)
grad = 2 * (np.dot(np.transpose(X),y-y_head))
prevGrad += grad**2
#w = w - lr * grad / (np.sqrt(prevGrad / n))
w -= lr * grad / (np.sqrt(prevGrad) + 1E-8) # 1E-8 is for numerical stable
#w -= lr * grad
''' Calculate the error'''
if i % 1000 == 0:
print("Loss:",np.power(np.sum(np.power(y - y_head, 2 ))/ X.shape[0],0.5))
print(np.dot(np.transpose(y-y_head), (y-y_head)))
elif method == "ADAM":
print("ADAM")
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
lr = 0.1
w = np.zeros((163,1))
beta1 = 0.9
beta2 = 0.999
eipsilon = 1E-8 # this is for numerical stability
v = np.zeros([163,1])
s = np.zeros([163,1])
for i in range(1, 100000):
y = np.dot(X,w)
"""
This module contains the definition for the high-level Rigol1000z driver.
"""
import numpy as _np
import tqdm as _tqdm
import pyvisa as _visa
import os
from time import sleep
from Rigol1000z.commands import *
from typing import List
class Rigol1000z(Rigol1000zCommandMenu):
"""
The Rigol DS1000z series oscilloscope driver.
"""
def __init__(self, visa_resource: _visa.Resource):
# Instantiate The scope as a visa command menu
super().__init__(visa_resource)
# Initialize IEEE device identifier command in order to determine the model
brand, model, serial_number, software_version, *add_args = self._idn_cache.split(",")
# Ensure a valid model is being used
assert brand == "RIGOL TECHNOLOGIES"
assert model in {
ScopeModel.DS1104Z_S_Plus, ScopeModel.DS1104Z_Plus, ScopeModel.DS1104Z, # 100MHz models
ScopeModel.DS1074Z_S_Plus, ScopeModel.DS1074Z_Plus, # 70MHz models
ScopeModel.DS1054Z # 50MHz models
}
# Define Channels 1-4
self.channel_list: List[Channel] = [Channel(self.visa_resource, c) for c in range(1, 5)]
"""
A four-item list of commands.Channel objects
"""
# acquire must be able to count enabled channels
self.acquire = Acquire(self.visa_resource, self.channel_list)
"""
Hierarchy commands.Acquire object
"""
self.calibrate = Calibrate(self.visa_resource)
"""
Hierarchy commands.Calibrate object
"""
self.cursor = Cursor(self.visa_resource) # NC
self.decoder = Decoder(self.visa_resource) # NC
self.display = Display(self.visa_resource)
"""
Hierarchy commands.Display object
"""
self.event_tables = [EventTable(self.visa_resource, et + 1) for et in range(2)]
"""
A two-item list of commands.EventTable objects used to detect decode events.
"""
self.function = Function(self.visa_resource) # NC
self.ieee488 = IEEE488(self.visa_resource)
"""
Hierarchy commands.IEEE488 object
"""
if self.has_digital:
self.la = LA(self.visa_resource) # NC
self.lan = LAN(self.visa_resource) # NC
self.math = Math(self.visa_resource) # NC
self.mask = Mask(self.visa_resource) # NC
self.measure = Measure(self.visa_resource)
"""
Hierarchy commands.Measure object
"""
self.reference = Reference(self.visa_resource) # NC
if model in {ScopeModel.DS1104Z_S_Plus, ScopeModel.DS1074Z_S_Plus}: # Only for "S" models
self.source = Source(self.visa_resource) # NC
self.storage = Storage(self.visa_resource) # NC
self.system = System(self.visa_resource) # NC
self.trace = Trace(self.visa_resource) # NC
self.timebase = Timebase(self.visa_resource)
"""
Hierarchy commands.Timebase object
"""
self.trigger = Trigger(self.visa_resource) # NC
self.waveform = Waveform(self.visa_resource)
"""
Hierarchy commands.Waveform object
"""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.visa_resource.close()
return False
def __del__(self):
self.visa_resource.close()
def __getitem__(self, i) -> Channel:
"""
Channels 1 through 4 (or 2 depending on the oscilloscope model) are accessed
using `[channel_number]`. e.g. osc[2] for channel 2. Channel 1 corresponds
to index 1 (not 0).
:param i: Channel number to retrieve
:return:
"""
# assert i in {c.channel for c in self._channels}
assert 1 <= i <= 4, 'Not a valid channel.'
return self.channel_list[i - 1]
def __len__(self):
return len(self.channel_list)
def autoscale(self):
print("Autoscaling can take several seconds to complete")
old_timeout = self.visa_resource.timeout
self.visa_resource.timeout = None
self.visa_write(':aut')
wait_for_resp = self.ieee488.operation_complete # Wait for queued response before moving onto next command
self.visa_resource.timeout = old_timeout
print("Autoscaling complete")
def clear(self):
self.visa_write(':clear')
def run(self):
self.visa_write(':run')
def stop(self):
self.visa_write(':stop')
def set_single_shot(self):
self.visa_write(':sing')
def force(self):
self.visa_write(':tfor')
def get_channels_enabled(self):
return [c.enabled() for c in self.channel_list]
# todo: make this more closely knit with the library
def get_screenshot(self, filename=None):
"""
Downloads a screenshot from the oscilloscope.
Args:
filename (str): The name of the image file. The appropriate
extension should be included (i.e. jpg, png, bmp or tif).
"""
img_format = None
# The image format that should be downloaded.
# Options are 'jpeg', 'png', 'bmp8', 'bmp24' and 'tiff'.
# It appears that 'jpeg' takes <3 sec to download;
# the other formats take <0.5 sec.
# Default is 'png'.
try:
img_format = filename.split(".")[-1].lower()
except (AttributeError, IndexError):  # filename is None or has no extension
img_format = "png"
assert img_format in ('jpeg', 'png', 'bmp8', 'bmp24', 'tiff')
sleep(0.5) # Wait for display to update
# Due to the up to 3s delay, we are setting timeout to None for this operation only
old_timeout = self.visa_resource.timeout
self.visa_resource.timeout = None
# Collect the image data from the scope
raw_img = self.visa_ask_raw(f':disp:data? on,off,{img_format}', 3850780)[11:-4]
self.visa_resource.timeout = old_timeout
if filename:
try:
os.remove(filename)
except OSError:
pass
with open(filename, 'wb') as fs:
fs.write(raw_img)
return raw_img
def get_data(self, mode=EWaveformMode.Normal, filename=None):
"""
Download the captured voltage points from the oscilloscope.
Args:
mode (str): EWaveformMode.Normal if only the points on the screen should be
downloaded, and EWaveformMode.Raw if all the points the ADC has captured
should be downloaded. Default is EWaveformMode.Normal.
filename (None, str): Filename the data should be saved to. Default
is `None`; the data is not saved to a file.
Returns:
2-tuple: A tuple of two lists. The first list is the time values
and the second list is the voltage values.
"""
# Stop scope to capture waveform state
self.stop()
# Set mode
assert mode in {EWaveformMode.Normal, EWaveformMode.Raw}
self.waveform.mode = mode
# Set transmission format
self.waveform.read_format = EWaveformReadFormat.Byte
# Create data structures to populate
time_series = None
all_channel_data = []
# Iterate over possible channels
for c in range(1, 5):
# Capture the waveform if the channel is enabled
if self[c].enabled:
self.waveform.source = self[c].name
# retrieve the data preamble
info: PreambleContext = self.waveform.data_premable
# Generate the time series for the data
time_series = | _np.arange(0, info.points * info.x_increment, info.x_increment) | numpy.arange |
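# Illustrative sketch (not part of the original snippet): the excerpt above is
# truncated while the time axis is being built from the waveform preamble.
# Converting the raw BYTE-format samples that follow into volts normally uses
# the preamble's y-scaling fields; the attribute names used here (y_increment,
# y_origin, y_reference) are assumptions based on the SCPI :WAVeform:PREamble?
# fields and may be spelled differently on PreambleContext.
import numpy as np

def bytes_to_volts(raw_bytes, y_increment, y_origin, y_reference):
    """Standard DS1000Z scaling: volts = (raw - y_origin - y_reference) * y_increment."""
    samples = np.frombuffer(raw_bytes, dtype=np.uint8).astype(float)
    return (samples - y_origin - y_reference) * y_increment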
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__all__ = ["Discontinuity"]
import numpy as np
from ..pipeline import Pipeline
from .prepare import LightCurve
class Discontinuity(Pipeline):
query_parameters = dict(
discont_window=(51, False),
discont_duration=(0.4, False),
discont_min_sig=(75., False),
discont_min_fact=(0.5, False),
discont_min_dt=(1.0, False),
discont_min_size=(20, False),
)
def get_result(self, query, parent_response):
lcs = parent_response.light_curves
# Parameters.
N = query["discont_window"]
duration = query["discont_duration"]
min_dis_sig = query["discont_min_sig"]
min_dis_fact = query["discont_min_fact"]
min_dis_dt = query["discont_min_dt"]
min_dis_size = query["discont_min_size"]
# Pre-allocate shared work arrays.
t0 = N // 2
x = np.arange(N)
A = np.vander(x, 2)
lc_out = []
for k, lc in enumerate(lcs):
# Compute the typical time spacing in the LC.
dt = int(0.5 * duration / np.median(np.diff(lc.time)))
# The step function hypothesis.
model1 = np.ones(N)
model1[t0:] = -1.0
# The transit hypothesis.
model2 = np.zeros(N)
model2[t0-dt:t0+dt] = -1.0
# Initialize the work arrays.
chi2 = np.empty((len(lc.time) - N, 3))
# Loop over each time and compare the hypotheses.
for i in range(len(lc.time) - N):
y = np.array(lc.flux[i:i+N])
ivar = 1. / np.array(lc.ferr[i:i+N]) ** 2
# Loop over the different models, do the fit, and compute the
# chi^2.
for j, model in enumerate((None, model1, model2)):
if model is not None:
A1 = np.hstack((A, np.atleast_2d(model).T))
else:
A1 = np.array(A)
ATA = np.dot(A1.T, A1 * ivar[:, None])
w = np.linalg.solve(ATA, np.dot(A1.T, y * ivar))
pred = np.dot(A1, w)
chi2[i, j] = | np.sum((pred - y) ** 2 * ivar) | numpy.sum |
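# Illustrative sketch (not part of the original snippet): the excerpt above stops
# inside the chi^2 loop, before the three hypotheses (no discontinuity, step,
# transit) are compared. A common way to flag step-like discontinuities is to
# threshold the chi^2 improvements; the rule below is only a plausible reading of
# the query parameters (discont_min_sig, discont_min_fact) defined above, not the
# pipeline's actual decision logic.
import numpy as np

def step_candidates(chi2, min_dis_sig, min_dis_fact):
    """chi2 columns: 0 = plain linear fit, 1 = step hypothesis, 2 = transit hypothesis."""
    gain_step = chi2[:, 0] - chi2[:, 1]       # improvement from adding a step
    gain_transit = chi2[:, 0] - chi2[:, 2]    # improvement from adding a transit
    mask = (gain_step > min_dis_sig) & (gain_step > min_dis_fact * np.abs(gain_transit))
    return np.flatnonzero(mask)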
#!/usr/bin/env python
from __future__ import division, print_function
import rospy
import time
import numpy as np
import cv2
from scipy.ndimage.filters import gaussian_filter
import dougsm_helpers.tf_helpers as tfh
from tf import transformations as tft
from dougsm_helpers.timeit import TimeIt
from ggcnn.ggcnn import predict, process_depth_image
from mvp_grasping.grasp_stats import update_batch, update_histogram_angle
from mvp_grasping.gridworld import GridWorld
from dougsm_helpers.gridshow import gridshow
from mvp_grasping.srv import NextViewpoint, NextViewpointResponse, AddFailurePoint, AddFailurePointResponse
from sensor_msgs.msg import Image, CameraInfo
from std_srvs.srv import Empty as EmptySrv, EmptyResponse as EmptySrvResponse
import cv_bridge
bridge = cv_bridge.CvBridge()
TimeIt.print_output = False
class ViewpointEntropyCalculator:
"""
This class implements the Grid World portion of the Multi-View controller.
"""
def __init__(self):
self.hist_bins_q = rospy.get_param('~histogram/bins/quality')
self.hist_bins_a = rospy.get_param('~histogram/bins/angle')
self.dist_from_best_scale = rospy.get_param('~cost/dist_from_best_scale')
self.dist_from_best_gain = rospy.get_param('~cost/dist_from_best_gain')
self.dist_from_prev_view_scale = rospy.get_param('~cost/dist_from_prev_view_scale')
self.dist_from_prev_view_gain = rospy.get_param('~cost/dist_from_prev_view_gain')
self.height = (rospy.get_param('~height/z1'), rospy.get_param('~height/z2'))
# Create a GridWorld where we will store values.
self.gw_bounds = np.array([
[rospy.get_param('~histogram/bounds/x1'), rospy.get_param('~histogram/bounds/y1')],
[rospy.get_param('~histogram/bounds/x2'), rospy.get_param('~histogram/bounds/y2')]
])
self.gw_res = rospy.get_param('~histogram/resolution')
self.reset_gridworld(EmptySrv())
self.hist_mean = 0
self.fgw = GridWorld(self.gw_bounds, self.gw_res)
self.fgw.add_grid('failures', 0.0)
# Useful meshgrid for distance calculations
xs = np.arange(self.gw.bounds[0, 0], self.gw.bounds[1, 0] - 1e-6, self.gw.res) + self.gw.res / 2
ys = np.arange(self.gw.bounds[0, 1], self.gw.bounds[1, 1] - 1e-6, self.gw.res) + self.gw.res / 2
self._xv, self._yv = np.meshgrid(xs, ys)
# Get the camera parameters
cam_info_topic = rospy.get_param('~camera/info_topic')
camera_info_msg = rospy.wait_for_message(cam_info_topic, CameraInfo)
self.cam_K = np.array(camera_info_msg.K).reshape((3, 3))
self.img_pub = rospy.Publisher('~visualisation', Image, queue_size=1)
rospy.Service('~update_grid', NextViewpoint, self.update_service_handler)
rospy.Service('~reset_grid', EmptySrv, self.reset_gridworld)
rospy.Service('~add_failure_point', AddFailurePoint, self.add_failure_point_callback)
self.base_frame = rospy.get_param('~camera/base_frame')
self.camera_frame = rospy.get_param('~camera/camera_frame')
self.img_crop_size = rospy.get_param('~camera/crop_size')
self.img_crop_y_offset = rospy.get_param('~camera/crop_y_offset')
self.cam_fov = rospy.get_param('~camera/fov')
self.counter = 0
self.curr_depth_img = None
self.curr_img_time = 0
self.last_image_pose = None
rospy.Subscriber(rospy.get_param('~camera/depth_topic'), Image, self._depth_img_callback, queue_size=1)
def _depth_img_callback(self, msg):
"""
Doing a rospy.wait_for_message is super slow, compared to just subscribing and keeping the newest one.
"""
self.curr_img_time = time.time()
self.last_image_pose = tfh.current_robot_pose(self.base_frame, self.camera_frame)
self.curr_depth_img = bridge.imgmsg_to_cv2(msg)
def update_service_handler(self, req):
"""
Update the GridWorld with a new observation, compute the viewpoint entropy and generate a new command.
:param req: Ignored
:return: NextViewpointResponse (success flag, best grasp, velocity command)
"""
# Some initial checks
if self.curr_depth_img is None:
rospy.logerr('No depth image received yet.')
rospy.sleep(0.5)
if time.time() - self.curr_img_time > 0.5:
rospy.logerr('The Realsense node has died')
return NextViewpointResponse()
with TimeIt('Total'):
with TimeIt('Update Histogram'):
# Step 1: Perform a GG-CNN prediction and update the grid world with the observations
self.no_viewpoints += 1
depth = self.curr_depth_img.copy()
camera_pose = self.last_image_pose
cam_p = camera_pose.position
self.position_history.append(np.array([cam_p.x, cam_p.y, cam_p.z, 0]))
# For display purposes.
newpos_pixel = self.gw.pos_to_cell(np.array([[cam_p.x, cam_p.y]]))[0]
self.gw.visited[newpos_pixel[0], newpos_pixel[1]] = self.gw.visited.max() + 1
camera_rot = tft.quaternion_matrix(tfh.quaternion_to_list(camera_pose.orientation))[0:3, 0:3]
# Do grasp prediction
depth_crop, depth_nan_mask = process_depth_image(depth, self.img_crop_size, 300, return_mask=True, crop_y_offset=self.img_crop_y_offset)
points, angle, width_img, _ = predict(depth_crop, process_depth=False, depth_nan_mask=depth_nan_mask)
angle -= np.arcsin(camera_rot[0, 1]) # Correct for the rotation of the camera
angle = (angle + np.pi/2) % np.pi # Wrap [0, pi]
# Convert to 3D positions.
imh, imw = depth.shape
x = ((np.vstack((np.linspace((imw - self.img_crop_size) // 2, (imw - self.img_crop_size) // 2 + self.img_crop_size, depth_crop.shape[1]), )*depth_crop.shape[0]) - self.cam_K[0, 2])/self.cam_K[0, 0] * depth_crop).flatten()
y = ((np.vstack((np.linspace((imh - self.img_crop_size) // 2 - self.img_crop_y_offset, (imh - self.img_crop_size) // 2 + self.img_crop_size - self.img_crop_y_offset, depth_crop.shape[0]), )*depth_crop.shape[1]).T - self.cam_K[1,2])/self.cam_K[1, 1] * depth_crop).flatten()
pos = np.dot(camera_rot, np.stack((x, y, depth_crop.flatten()))).T + np.array([[cam_p.x, cam_p.y, cam_p.z]])
# Clean the data a bit.
pos[depth_nan_mask.flatten() == 1, :] = 0 # Get rid of NaNs
pos[pos[:, 2] > 0.17, :] = 0 # Ignore obvious noise.
pos[pos[:, 2] < 0.0, :] = 0 # Ignore obvious noise.
cell_ids = self.gw.pos_to_cell(pos[:, :2])
width_m = width_img / 300.0 * 2.0 * depth_crop * np.tan(self.cam_fov * self.img_crop_size/depth.shape[0] / 2.0 / 180.0 * np.pi)
update_batch([pos[:, 2], width_m.flatten()], cell_ids, self.gw.count, [self.gw.depth_mean, self.gw.width_mean], [self.gw.depth_var, self.gw.width_var])
update_histogram_angle(points.flatten(), angle.flatten(), cell_ids, self.gw.hist)
with TimeIt('Calculate Best Grasp'):
# Step 2: Compute the position of the best grasp in the GridWorld
# Sum over all angles to get the grasp quality only.
hist_sum_q = np.sum(self.gw.hist, axis=2)
weights = np.arange(0.5/self.hist_bins_q, 1.0, 1/self.hist_bins_q)
hist_mean = np.sum(hist_sum_q * weights.reshape((1, 1, -1)), axis=2)/(np.sum(hist_sum_q, axis=2) + 1e-6)
hist_mean[self.gw.count == 0] = 0 # Ignore areas we haven't seen yet.
hist_mean[0, :] = 0 # Ignore single pixel along each edge.
hist_mean[-1, :] = 0
hist_mean[:, 0] = 0
hist_mean[:, -1] = 0
hist_mean -= self.fgw.failures
hist_mean = np.clip(hist_mean, 0.0, 1.0)
# ArgMax of grasp quality
q_am = np.unravel_index(np.argmax(hist_mean), hist_mean.shape)
# Interpolate position between the neighbours of the best grasp, weighted by quality
q_ama = np.array(q_am)
conn_neighbours = np.array([q_ama]) # Disable rounding
neighbour_weights = hist_mean[conn_neighbours[:, 0], conn_neighbours[:, 1]]
q_am_neigh = self.gw.cell_to_pos(conn_neighbours)
q_am_neigh_avg = np.average(q_am_neigh, weights=neighbour_weights, axis=0)
q_am_pos = (q_am_neigh_avg[0], q_am_neigh_avg[1]) # This is the grasp center
# Perform same weighted averaging of the angles.
best_grasp_hist = self.gw.hist[conn_neighbours[:, 0], conn_neighbours[:, 1], :, :]
angle_weights = np.sum((best_grasp_hist - 1) * weights.reshape((1, 1, -1)), axis=2)
ang_bins = (np.arange(0.5/self.hist_bins_a, 1.0, 1/self.hist_bins_a) * np.pi).reshape(1, -1)
# Compute the weighted vector mean of the sin/cos components of the angle predictions
# Do double angles so that -np.pi/2 == np.pi/2, then unwrap
q_am_ang = np.arctan2(
np.sum(np.sin(ang_bins*2) * angle_weights * neighbour_weights.reshape(-1, 1)),
np.sum(np.cos(ang_bins*2) * angle_weights * neighbour_weights.reshape(-1, 1))
)
if q_am_ang < 0:
q_am_ang += 2*np.pi
q_am_ang = q_am_ang/2.0 - np.pi/2
# Get the depth and width at the grasp center
q_am_dep = self.gw.depth_mean[q_am]
q_am_wid = self.gw.width_mean[q_am]
with TimeIt('Calculate Information Gain'):
# Step 3: Compute the expected information gain from a viewpoint above every cell in the GridWorld
# Compute entropy per cell.
hist_p = hist_sum_q / np.expand_dims(np.sum(hist_sum_q, axis=2) + 1e-6, -1)
hist_ent = -np.sum(hist_p * np.log(hist_p+1e-6), axis=2)
# Treat camera field of view as a Gaussian
# Field of view in number gridworld cells
fov = int(cam_p.z * 2 * np.tan(self.cam_fov*self.img_crop_size/depth.shape[0]/2.0 / 180.0 * np.pi) / self.gw.res)
exp_inf_gain = gaussian_filter(hist_ent, fov/6, truncate=3)
# Track changes by KL Divergence (not used/disabled by default)
kl_divergence = np.sum(hist_p * np.log((hist_p+1e-6)/(self.gw.hist_p_prev+1e-6)), axis=2)
self.gw.hist_p_prev = hist_p
kl_divergence[0, :] = 0
kl_divergence[-1, :] = 0
kl_divergence[:, 0] = 0
kl_divergence[:, -1] = 0
norm_i_gain = 1 - np.exp(-1 * kl_divergence.sum())
self.position_history[-1][-1] = norm_i_gain
with TimeIt('Calculate Travel Cost'):
# Step 4: Compute cost of moving away from the best detected grasp.
# Distance from current robot pos.
d_from_robot = np.sqrt((self._xv - cam_p.x)**2 + (self._yv - cam_p.y)**2)
# Distance from best detected grasp, weighted by the robot's current height (Z axis)
d_from_best_q = np.sqrt((self._xv - q_am_pos[0])**2 + (self._yv - q_am_pos[1])**2) # Cost of moving away from the best grasp.
height_weight = (cam_p.z - self.height[1])/(self.height[0]-self.height[1]) + 1e-2
height_weight = max(min(height_weight, 1.0), 0.0)
best_cost = (d_from_best_q / self.dist_from_best_scale) * (1-height_weight) * self.dist_from_best_gain
# Distance from previous viewpoints (dist_from_prev_view_gain is 0 by default)
d_from_prev_view = np.zeros(self.gw.shape)
for x, y, z, kl in self.position_history:
d_from_prev_view += np.clip(1 - (np.sqrt((self._xv - x)**2 + (self._yv - y)**2 + 0*(cam_p.z - z)**2)/self.dist_from_prev_view_scale), 0, 1) * (1-kl)
prev_view_cost = d_from_prev_view * self.dist_from_prev_view_gain
# Calculate total expected information gain.
exp_inf_gain_before = exp_inf_gain.copy()
exp_inf_gain -= best_cost
exp_inf_gain -= prev_view_cost
# Compute local direction of maximum information gain
exp_inf_gain_mask = exp_inf_gain.copy()
greedy_window = 0.1
exp_inf_gain_mask[d_from_robot > greedy_window] = exp_inf_gain.min()
ig_am = np.unravel_index(np.argmax(exp_inf_gain_mask), exp_inf_gain.shape)
maxpos = self.gw.cell_to_pos([ig_am])[0]
diff = (maxpos - np.array([cam_p.x, cam_p.y]))/greedy_window
# Maximum of 1
if | np.linalg.norm(diff) | numpy.linalg.norm |
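# Illustrative sketch (not part of the original snippet): the angle averaging in
# update_service_handler above relies on the double-angle trick, since grasp
# orientations are only defined modulo pi. The angles are doubled, averaged as
# unit vectors, and halved again; the method above additionally shifts the
# result by -pi/2 into its own convention.
import numpy as np

def circular_mean_mod_pi(angles, weights=None):
    """Weighted mean of angles that are equivalent modulo pi (e.g. grasp axes)."""
    angles = np.asarray(angles, dtype=float)
    if weights is None:
        weights = np.ones_like(angles)
    doubled = 2.0 * angles                                  # pi-periodic -> 2*pi-periodic
    mean2 = np.arctan2(np.sum(weights * np.sin(doubled)),
                       np.sum(weights * np.cos(doubled)))
    return (mean2 % (2.0 * np.pi)) / 2.0                    # back to [0, pi)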
import os
import time
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils import CrossEntropyLoss2d
from models import reinforcement_net, reactive_net
from scipy import ndimage
import matplotlib.pyplot as plt
from constants import color_mean, color_std, depth_mean, depth_std, DEPTH_MIN, is_real
class Trainer(object):
def __init__(self, method, push_rewards, future_reward_discount,
is_testing, load_snapshot, snapshot_file, force_cpu):
self.method = method
# Check if CUDA can be used
if torch.cuda.is_available() and not force_cpu:
print("CUDA detected. Running with GPU acceleration.")
self.use_cuda = True
elif force_cpu:
print("CUDA detected, but overriding with option '--cpu'. Running with only CPU.")
self.use_cuda = False
else:
print("CUDA is *NOT* detected. Running with only CPU.")
self.use_cuda = False
# Fully convolutional classification network for supervised learning
if self.method == 'reactive':
self.model = reactive_net(self.use_cuda)
# self.push_rewards = push_rewards
# self.future_reward_discount = future_reward_discount
# # Initialize Huber loss
self.push_criterion = torch.nn.SmoothL1Loss(reduction='none')
self.grasp_criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
if self.use_cuda:
self.push_criterion = self.push_criterion.cuda()
self.grasp_criterion = self.grasp_criterion.cuda()
# Initialize classification loss
# push_num_classes = 3 # 0 - push, 1 - no change push, 2 - no loss
# push_class_weights = torch.ones(push_num_classes)
# push_class_weights[push_num_classes - 1] = 0
# if self.use_cuda:
# self.push_criterion = CrossEntropyLoss2d(push_class_weights.cuda()).cuda()
# else:
# self.push_criterion = CrossEntropyLoss2d(push_class_weights)
# grasp_num_classes = 3 # 0 - grasp, 1 - failed grasp, 2 - no loss
# grasp_class_weights = torch.ones(grasp_num_classes)
# grasp_class_weights[grasp_num_classes - 1] = 0
# if self.use_cuda:
# self.grasp_criterion = CrossEntropyLoss2d(grasp_class_weights.cuda()).cuda()
# else:
# self.grasp_criterion = CrossEntropyLoss2d(grasp_class_weights)
# Fully convolutional Q network for deep reinforcement learning
elif self.method == 'reinforcement':
self.model = reinforcement_net(self.use_cuda)
self.push_rewards = push_rewards
self.future_reward_discount = future_reward_discount
# Initialize Huber loss
self.push_criterion = torch.nn.SmoothL1Loss(reduction='none') # Huber loss
self.grasp_criterion = torch.nn.SmoothL1Loss(reduction='none') # Huber loss
# self.push_criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
# self.grasp_criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
if self.use_cuda:
self.push_criterion = self.push_criterion.cuda()
self.grasp_criterion = self.grasp_criterion.cuda()
# Load pre-trained model
if load_snapshot:
self.model.load_state_dict(torch.load(snapshot_file))
print('Pre-trained model snapshot loaded from: %s' % (snapshot_file))
# Convert model from CPU to GPU
if self.use_cuda:
self.model = self.model.cuda()
# Set model to training mode
self.model.train()
# Initialize optimizer
self.iteration = 0
if is_testing:
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-5, momentum=0.9, weight_decay=2e-5)
else:
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=5e-5, momentum=0.9, weight_decay=2e-5)
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=500, gamma=0.5)
# Initialize lists to save execution info and RL variables
self.executed_action_log = []
self.label_value_log = []
self.reward_value_log = []
self.predicted_value_log = []
self.use_heuristic_log = []
self.is_exploit_log = []
self.clearance_log = []
self.loss_log = []
if is_testing:
# self.model.eval()
self.batch_size = 2
else:
self.batch_size = 8
self.loss_list = []
# Pre-load execution info and RL variables
def preload(self, transitions_directory):
self.executed_action_log = np.loadtxt(
os.path.join(
transitions_directory,
'executed-action.log.txt'),
delimiter=' ')
self.iteration = self.executed_action_log.shape[0] - 2
self.executed_action_log = self.executed_action_log[0:self.iteration, :]
self.executed_action_log = self.executed_action_log.tolist()
self.label_value_log = np.loadtxt(os.path.join(transitions_directory, 'label-value.log.txt'), delimiter=' ')
self.label_value_log = self.label_value_log[0:self.iteration]
self.label_value_log.shape = (self.iteration, 1)
self.label_value_log = self.label_value_log.tolist()
self.predicted_value_log = np.loadtxt(
os.path.join(
transitions_directory,
'predicted-value.log.txt'),
delimiter=' ')
self.predicted_value_log = self.predicted_value_log[0:self.iteration]
self.predicted_value_log.shape = (self.iteration, 1)
self.predicted_value_log = self.predicted_value_log.tolist()
self.reward_value_log = np.loadtxt(os.path.join(transitions_directory, 'reward-value.log.txt'), delimiter=' ')
self.reward_value_log = self.reward_value_log[0:self.iteration]
self.reward_value_log.shape = (self.iteration, 1)
self.reward_value_log = self.reward_value_log.tolist()
self.use_heuristic_log = np.loadtxt(os.path.join(transitions_directory, 'use-heuristic.log.txt'), delimiter=' ')
self.use_heuristic_log = self.use_heuristic_log[0:self.iteration]
self.use_heuristic_log.shape = (self.iteration, 1)
self.use_heuristic_log = self.use_heuristic_log.tolist()
self.is_exploit_log = np.loadtxt(os.path.join(transitions_directory, 'is-exploit.log.txt'), delimiter=' ')
self.is_exploit_log = self.is_exploit_log[0:self.iteration]
self.is_exploit_log.shape = (self.iteration, 1)
self.is_exploit_log = self.is_exploit_log.tolist()
self.clearance_log = np.loadtxt(os.path.join(transitions_directory, 'clearance.log.txt'), delimiter=' ')
self.clearance_log.shape = (self.clearance_log.shape[0], 1)
self.clearance_log = self.clearance_log.tolist()
# Compute forward pass through model to compute affordances/Q
def forward(self, color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=-1, use_push=True):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
diag_length = float(color_heightmap.shape[0]) * np.sqrt(2)
diag_length = np.ceil(diag_length / 32) * 32
padding_width = int((diag_length - color_heightmap.shape[0]) / 2)
color_heightmap_pad_r = np.pad(color_heightmap_pad[:, :, 0], padding_width, 'constant', constant_values=0)
color_heightmap_pad_r.shape = (color_heightmap_pad_r.shape[0], color_heightmap_pad_r.shape[1], 1)
color_heightmap_pad_g = np.pad(color_heightmap_pad[:, :, 1], padding_width, 'constant', constant_values=0)
color_heightmap_pad_g.shape = (color_heightmap_pad_g.shape[0], color_heightmap_pad_g.shape[1], 1)
color_heightmap_pad_b = np.pad(color_heightmap_pad[:, :, 2], padding_width, 'constant', constant_values=0)
color_heightmap_pad_b.shape = (color_heightmap_pad_b.shape[0], color_heightmap_pad_b.shape[1], 1)
color_heightmap_pad = np.concatenate(
(color_heightmap_pad_r, color_heightmap_pad_g, color_heightmap_pad_b), axis=2)
depth_heightmap_pad = np.pad(depth_heightmap_pad, padding_width, 'constant', constant_values=0)
# Pre-process color image (scale and normalize)
image_mean = color_mean
image_std = color_std
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = depth_mean
image_std = depth_std
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(3, 2, 0, 1)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(3, 2, 0, 1)
# Pass input data through model
output_prob = self.model(input_color_data, input_depth_data, is_volatile, specific_rotation, use_push)
if self.method == 'reactive':
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
if use_push:
push_predictions = output_prob[rotate_idx][0].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
grasp_predictions = output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
else:
push_predictions = 0
grasp_predictions = output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
else:
if use_push:
push_predictions = np.concatenate((push_predictions, output_prob[rotate_idx][0].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width),
int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]), axis=0)
grasp_predictions = np.concatenate((grasp_predictions, output_prob[rotate_idx][1].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(
color_heightmap_pad.shape[1] - padding_width)]), axis=0)
else:
push_predictions = 0
grasp_predictions = np.concatenate((grasp_predictions, output_prob[rotate_idx][1].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(
color_heightmap_pad.shape[1] - padding_width)]), axis=0)
elif self.method == 'reinforcement':
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
if not use_push:
push_predictions = 0
grasp_predictions = output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
else:
push_predictions = output_prob[rotate_idx][0].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
grasp_predictions = output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, int(padding_width):int(
color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]
else:
if not use_push:
push_predictions = 0
grasp_predictions = np.concatenate((grasp_predictions, output_prob[rotate_idx][1].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(
color_heightmap_pad.shape[1] - padding_width)]), axis=0)
else:
push_predictions = np.concatenate((push_predictions, output_prob[rotate_idx][0].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width),
int(padding_width):int(color_heightmap_pad.shape[1] - padding_width)]), axis=0)
grasp_predictions = np.concatenate((grasp_predictions, output_prob[rotate_idx][1].cpu().data.numpy()[
:, 0, int(padding_width):int(color_heightmap_pad.shape[0] - padding_width), int(padding_width):int(
color_heightmap_pad.shape[1] - padding_width)]), axis=0)
return push_predictions, grasp_predictions
def get_label_value(self, primitive_action, push_success, grasp_success, change_detected, prev_push_predictions,
prev_grasp_predictions, next_color_heightmap, next_depth_heightmap, prev_depth_heightmap, use_push=True):
if self.method == 'reactive':
# Compute label value
label_value = 0
current_reward = 0  # initialise so the delta_area check below cannot read an unbound local
if primitive_action == 'push':
if change_detected:
next_push_predictions, next_grasp_predictions = self.forward(
next_color_heightmap, next_depth_heightmap, is_volatile=True)
if np.max(next_grasp_predictions) > np.max(prev_grasp_predictions) * 1.1:
current_reward = (np.max(next_grasp_predictions) + np.max(prev_grasp_predictions)) / 2
print("Prediction:", np.max(prev_grasp_predictions), np.max(next_grasp_predictions))
# current_reward = 1
else:
future_reward = 0
delta_area = self.push_change_area(prev_depth_heightmap, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 0.5:
current_reward = 0.5
elif delta_area < -100:
current_reward = 0
label_value = 1
elif primitive_action == 'grasp':
if grasp_success:
label_value = 1
print('Label value: %d' % (label_value))
return label_value, label_value
elif self.method == 'reinforcement':
# Compute current reward
current_reward = 0
if primitive_action == 'push':
if change_detected:
current_reward = 0.0
elif primitive_action == 'grasp':
if grasp_success:
current_reward = 1.0
# Compute future reward
if not change_detected and not grasp_success:
future_reward = 0
else:
next_push_predictions, next_grasp_predictions = self.forward(
next_color_heightmap, next_depth_heightmap, is_volatile=True, use_push=use_push)
future_reward = 0 # no future reward
if primitive_action == 'push':
if np.max(next_grasp_predictions) > np.max(prev_grasp_predictions) * 1.1:
current_reward = (np.max(next_grasp_predictions) + np.max(prev_grasp_predictions)) / 2
else:
future_reward = 0
print("Prediction:", np.max(prev_grasp_predictions), np.max(next_grasp_predictions))
delta_area = self.push_change_area(prev_depth_heightmap, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 0.8:
current_reward = 0.8
elif delta_area < -100: # -100 can be changed
current_reward = 0
future_reward = 0
print('Current reward: %f' % (current_reward))
print('Future reward: %f' % (future_reward))
if primitive_action == 'push' and not self.push_rewards:
expected_reward = self.future_reward_discount * future_reward
print('Expected reward: %f + %f x %f = %f' %
(0.0, self.future_reward_discount, future_reward, expected_reward))
else:
expected_reward = current_reward + self.future_reward_discount * future_reward
print(
'Expected reward: %f + %f x %f = %f' %
(current_reward,
self.future_reward_discount,
future_reward,
expected_reward))
return expected_reward, current_reward
def get_neg(self, depth_heightmap, label, best_pix_ind):
depth_heightmap_pad = np.copy(depth_heightmap)
diag_length = float(depth_heightmap.shape[0]) * np.sqrt(2)
diag_length = np.ceil(diag_length / 32) * 32
padding_width = int((diag_length - depth_heightmap.shape[0]) / 2)
depth_heightmap_pad = np.pad(depth_heightmap_pad, padding_width, 'constant', constant_values=0)
depth_heightmap_pad = ndimage.rotate(depth_heightmap_pad, best_pix_ind * (360.0 / 16), reshape=False)
label = ndimage.rotate(label, best_pix_ind * (360.0 / 16), axes=(2, 1), reshape=False)
label = np.round(label)
x_y_idx = np.argwhere(label > 0)
for idx in x_y_idx:
_, x, y = tuple(idx)
if is_real:
left_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
max(0, y - 27):max(0, y - 22)] # 2x3 pixels in each side
right_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
min(depth_heightmap_pad.shape[1] - 1, y + 23):min(depth_heightmap_pad.shape[1], y + 28)] # 2x3 pixels in each side
if ((np.sum(left_area > DEPTH_MIN) > 0 and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.05) > 0) or
(np.sum(right_area > DEPTH_MIN) > 0 and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.05) > 0)):
label[0, x, y] = 0
else:
left_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
max(0, y - 28):max(0, y - 18)] # 2x3 pixels in each side
right_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
min(depth_heightmap_pad.shape[1] - 1, y + 19):min(depth_heightmap_pad.shape[1], y + 29)] # 2x3 pixels in each side
if ((np.sum(left_area > DEPTH_MIN) > 0 and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.04) > 0) or
(np.sum(right_area > DEPTH_MIN) > 0 and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.04) > 0)):
label[0, x, y] = 0
label = ndimage.rotate(label, -best_pix_ind * (360.0 / 16), axes=(2, 1), reshape=False)
label = np.round(label)
return label
# Compute labels and backpropagate
def backprop(self, color_heightmap, depth_heightmap, primitive_action, best_pix_ind, label_value, use_push=True):
if self.method == 'reactive':
# Compute labels
label = np.zeros((1, 320, 320))
action_area = np.zeros((224, 224))
action_area[best_pix_ind[1]][best_pix_ind[2]] = 1
tmp_label = np.zeros((224, 224))
tmp_label[action_area > 0] = label_value
label[0, 48:(320 - 48), 48:(320 - 48)] = tmp_label
# Compute label mask
label_weights = np.zeros(label.shape)
tmp_label_weights = np.zeros((224, 224))
tmp_label_weights[action_area > 0] = 1
label_weights[0, 48:(320 - 48), 48:(320 - 48)] = tmp_label_weights
# Compute loss and backward pass
if len(self.loss_list) == 0:
self.optimizer.zero_grad()
loss_value = 0
if primitive_action == 'grasp' and label_value > 0:
neg_loss = []
for i in range(self.model.num_rotations):
if i != best_pix_ind[0]:
neg_label = self.get_neg(depth_heightmap, label.copy(), i)
if neg_label[0, 48:(320 - 48), 48:(320 - 48)][best_pix_ind[1]][best_pix_ind[2]] == 0:
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=i, use_push=use_push)
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 1, 320, 320), Variable(torch.from_numpy(neg_label).view(1, 1, 320, 320).float().cuda())) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda(), requires_grad=False)
loss = loss.sum()
neg_loss.append(loss)
if len(neg_loss) > 0:
self.loss_list.append(sum(neg_loss) / len(neg_loss))
if primitive_action == 'push':
if label_value > 0:
label_weights *= 2  # to compensate for the smaller number of push samples
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=best_pix_ind[0], use_push=use_push)
if self.use_cuda:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, 320, 320), Variable(torch.from_numpy(label).view(
1, 1, 320, 320).float().cuda())) * Variable(torch.from_numpy(label_weights).view(
1, 1, 320, 320).float().cuda(), requires_grad=False)
else:
loss = self.push_criterion(self.model.output_prob[0][0].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print('Batch Loss:', total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
else:
self.loss_list.append(loss)
# loss.backward()
loss_value = loss.cpu().data.numpy()
elif primitive_action == 'grasp':
if label_value > 0:
label_weights *= 4
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=best_pix_ind[0], use_push=use_push)
if self.use_cuda:
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).view(1, 1, 320, 320).float().cuda())) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda(), requires_grad=False)
else:
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
self.loss_list.append(loss)
# loss.backward()
loss_value = loss.cpu().data.numpy()
opposite_rotate_idx = (best_pix_ind[0] + self.model.num_rotations / 2) % self.model.num_rotations
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=opposite_rotate_idx, use_push=use_push)
if self.use_cuda:
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).view(1, 1, 320, 320).float().cuda())) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda(), requires_grad=False)
else:
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print('Batch Loss:', total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
else:
self.loss_list.append(loss)
# loss.backward()
loss_value += loss.cpu().data.numpy()
loss_value = loss_value / 2
print('Training loss: %f' % (loss_value.sum()))
if len(self.loss_list) == 0:
self.optimizer.step()
self.lr_scheduler.step()
elif self.method == 'reinforcement':
# Compute labels
label = np.zeros((1, 320, 320))
action_area = np.zeros((224, 224))
action_area[best_pix_ind[1]][best_pix_ind[2]] = 1
tmp_label = np.zeros((224, 224))
tmp_label[action_area > 0] = label_value
label[0, 48:(320 - 48), 48:(320 - 48)] = tmp_label
# Compute label mask
label_weights = np.zeros(label.shape)
tmp_label_weights = np.zeros((224, 224))
tmp_label_weights[action_area > 0] = 1
label_weights[0, 48:(320 - 48), 48:(320 - 48)] = tmp_label_weights
# Compute loss and backward pass
if len(self.loss_list) == 0:
self.optimizer.zero_grad()
loss_value = 0
if primitive_action == 'grasp' and label_value > 0:
neg_loss = []
for i in range(self.model.num_rotations):
if i != best_pix_ind[0]:
neg_label = self.get_neg(depth_heightmap, label.copy(), i)
if neg_label[0, 48:(320 - 48), 48:(320 - 48)][best_pix_ind[1]][best_pix_ind[2]] == 0:
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=i, use_push=use_push)
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 1, 320, 320), torch.from_numpy(neg_label).view(1, 1, 320, 320).float().cuda()) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda())
loss = loss.sum()
neg_loss.append(loss)
if len(neg_loss) > 0:
self.loss_list.append(sum(neg_loss) / len(neg_loss))
if primitive_action == 'push':
if label_value > 0:
label_weights *= 2  # to compensate for the smaller number of push samples
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=best_pix_ind[0], use_push=use_push)
if self.use_cuda:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, 320, 320), Variable(torch.from_numpy(label).view(
1, 1, 320, 320).float().cuda())) * Variable(torch.from_numpy(label_weights).view(
1, 1, 320, 320).float().cuda(), requires_grad=False)
else:
loss = self.push_criterion(self.model.output_prob[0][0].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print('Batch Loss:', total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
else:
self.loss_list.append(loss)
# loss.backward()
loss_value = loss.cpu().data.numpy()
elif primitive_action == 'grasp':
if label_value > 0:
label_weights *= 2
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=best_pix_ind[0], use_push=use_push)
if self.use_cuda:
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).view(1, 1, 320, 320).float().cuda())) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda())
else:
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float())
loss = loss.sum()
self.loss_list.append(loss)
# loss.backward()
loss_value = loss.cpu().data.numpy()
opposite_rotate_idx = (best_pix_ind[0] + self.model.num_rotations / 2) % self.model.num_rotations
_, _ = self.forward(color_heightmap, depth_heightmap, is_volatile=False,
specific_rotation=opposite_rotate_idx, use_push=use_push)
if self.use_cuda:
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, 320, 320), Variable(
torch.from_numpy(label).view(1, 1, 320, 320).float().cuda())) * Variable(
torch.from_numpy(label_weights).view(1, 1, 320, 320).float().cuda())
else:
loss = self.grasp_criterion(self.model.output_prob[0][1].view(1, 320, 320), Variable(
torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float())
loss = loss.sum()
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print('Batch Loss:', total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
else:
self.loss_list.append(loss)
# loss.backward()
loss_value += loss.cpu().data.numpy()
loss_value = loss_value / 2
print('Training loss: %f' % (loss_value.sum()))
if len(self.loss_list) == 0:
self.optimizer.step()
self.lr_scheduler.step()
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
# prediction_vis[prediction_vis < 0] = 0 # assume probability
# prediction_vis[prediction_vis > 1] = 1 # assume probability
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap((prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis, (int(
best_pix_ind[2]), int(
best_pix_ind[1])), 7, (0, 0, 255), 2)
prediction_vis = ndimage.rotate(prediction_vis, rotate_idx *
(360.0 / num_rotations), reshape=False, order=0)
background_image = ndimage.rotate(color_heightmap, rotate_idx *
(360.0 / num_rotations), reshape=False, order=0)
prediction_vis = (0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
def push_heuristic(self, depth_heightmap):
num_rotations = 16
for rotate_idx in range(num_rotations):
rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx *
(360.0 / num_rotations), reshape=False, order=0)
valid_areas = np.zeros(rotated_heightmap.shape)
valid_areas[ndimage.interpolation.shift(rotated_heightmap, [0, -25],
order=0) - rotated_heightmap > 0.02] = 1
# valid_areas = np.multiply(valid_areas, rotated_heightmap)
blur_kernel = np.ones((25, 25), np.float32) / 9
valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
tmp_push_predictions = ndimage.rotate(
valid_areas, -rotate_idx * (360.0 / num_rotations), reshape=False, order=0)
tmp_push_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])
if rotate_idx == 0:
push_predictions = tmp_push_predictions
else:
push_predictions = np.concatenate((push_predictions, tmp_push_predictions), axis=0)
best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
return best_pix_ind
def grasp_heuristic(self, depth_heightmap):
num_rotations = 16
for rotate_idx in range(num_rotations):
rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx *
(360.0 / num_rotations), reshape=False, order=0)
valid_areas = np.zeros(rotated_heightmap.shape)
valid_areas[np.logical_and(rotated_heightmap -
ndimage.interpolation.shift(rotated_heightmap, [0, -
25], order=0) > 0.02, rotated_heightmap -
ndimage.interpolation.shift(rotated_heightmap, [0, 25], order=0) > 0.02)] = 1
# valid_areas = np.multiply(valid_areas, rotated_heightmap)
blur_kernel = np.ones((25, 25), np.float32) / 9
valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
tmp_grasp_predictions = ndimage.rotate(
valid_areas, -rotate_idx * (360.0 / num_rotations), reshape=False, order=0)
tmp_grasp_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])
if rotate_idx == 0:
grasp_predictions = tmp_grasp_predictions
else:
grasp_predictions = np.concatenate((grasp_predictions, tmp_grasp_predictions), axis=0)
best_pix_ind = np.unravel_index( | np.argmax(grasp_predictions) | numpy.argmax |
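# Illustrative sketch (not part of the original snippet): the padding used
# throughout the trainer above is sized so the heightmap stays inside the canvas
# for every rotated forward pass, rounded up to a multiple of 32 for the
# fully-convolutional network.
import numpy as np

def rotation_safe_padding(side_len, multiple=32):
    """Per-side padding so a side_len x side_len image survives any rotation."""
    diag = np.ceil(side_len * np.sqrt(2) / multiple) * multiple
    return int((diag - side_len) / 2)

# rotation_safe_padding(224) == 48, which matches the 48-pixel offsets and the
# 320 x 320 label canvas used in backprop() above.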
import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import numpy as np
import requests as r
import plotly.graph_objects as go
import astropy.coordinates as coord
from astropy import units as u
import matplotlib.pyplot as plt
from whitenoise import WhiteNoise
def load_lc(tic):
url = "http://tessebs.villanova.edu/static/catalog/lcs_ascii/tic"+str(int(tic)).zfill(10)+".01.norm.lc"
lc = r.get(url)
lc_data = np.fromstring(lc.text, sep=' ')
lc_data = lc_data.reshape(int(len(lc_data)/4), 4)
return pd.DataFrame.from_dict({
'times': lc_data[:,0][::10],
'phases': lc_data[:,1][::10],
'fluxes': lc_data[:,2][::10],
'sigmas': lc_data[:,3][::10]
})
def isolate_params_twog(func, model_params):
params = {'C': ['C'],
'CE': ['C', 'Aell', 'phi0'],
'CG': ['C', 'mu1', 'd1', 'sigma1'],
'CGE': ['C', 'mu1', 'd1', 'sigma1', 'Aell', 'phi0'],
'CG12': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2'],
'CG12E1': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2', 'Aell'],
'CG12E2': ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2', 'Aell']
}
param_vals = np.zeros(len(params[func]))
for i,key in enumerate(params[func]):
param_vals[i] = model_params[key]
return param_vals
# TODO: make ligeor pip installable and a dependency. Add a static file with model properties
# compute 2g and pf model on the fly instead of loading it from file
def load_model(tic, model='2g', bins=100):
df_row = models[models['TIC']==tic]
if model == '2g':
from ligeor.models import TwoGaussianModel
func = df_row['func'].values[0]
twog_func = getattr(TwoGaussianModel, func.lower())
model_params = {}
for key in ['C', 'mu1', 'd1', 'sigma1', 'mu2', 'd2', 'sigma2', 'Aell', 'phi0']:
model_params[key] = df_row[key].values[0]
param_vals = isolate_params_twog(func, model_params)
phases = np.linspace(0,1,bins)
fluxes = twog_func(phases, *param_vals)
return phases, fluxes
elif model == 'pf':
from ligeor.models import Polyfit
phases = np.linspace(0,1,bins)
polyfit = Polyfit(phases=phases,
fluxes=np.ones_like(phases),
sigmas=0.1* | np.ones_like(phases) | numpy.ones_like |
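# Illustrative sketch (not part of the original snippet): the "2g" model
# evaluated above is a two-Gaussian eclipse model, i.e. a constant out-of-eclipse
# level minus one Gaussian dip per eclipse. The stand-alone version below uses
# the same parameter names as the catalog columns (C, mu, d, sigma); ligeor's
# actual implementation (with the ellipsoidal terms Aell, phi0 and phase
# wrapping) is more involved.
import numpy as np

def two_gaussian_cg12(phases, C, mu1, d1, sigma1, mu2, d2, sigma2):
    def dip(mu, d, sig):
        return d * np.exp(-(phases - mu) ** 2 / (2.0 * sig ** 2))
    return C - dip(mu1, d1, sigma1) - dip(mu2, d2, sigma2)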
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import submission as sub
import helper
data = np.load('../data/some_corresp.npz')
noise_data = np.load('../data/some_corresp_noisy.npz')
im1 = plt.imread('../data/im1.png')
im2 = plt.imread('../data/im2.png')
N = data['pts1'].shape[0]
M = 640
pts1, pts2 = noise_data['pts1'], noise_data['pts2']
#bestF, inliers = sub.ransacF(noise_data['pts1'], noise_data['pts2'], M);
#np.save('inliers4.npy', inliers)
#print('Done!!')
inliers = np.load('best_inliers.npy').reshape([-1])
pts1, pts2 = pts1[inliers, :], pts2[inliers, :]
p = pts1.shape[0]
pts1_h = np.hstack((pts1, np.ones((p, 1))))
pts2_h = np.hstack((pts2, np.ones((p, 1))))
bestFs = sub.sevenpoint(pts1, pts2, M)
tol = 0.001
bestF_inlier_count = 0
for F in bestFs:
dst = np.diag(pts2_h @ F @ pts1_h.T)
inliers = | np.abs(dst) | numpy.abs |
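# Illustrative sketch (not part of the original snippet): the excerpt above is
# cut off while scoring the seven-point candidates. Counting inliers from the
# algebraic epipolar residual x2^T F x1, as the truncated line appears to do, can
# be written without forming the full p x p product; the exact bookkeeping in the
# original source is an assumption.
import numpy as np

def epipolar_inliers(F, pts1_h, pts2_h, tol):
    """pts1_h, pts2_h are (p, 3) homogeneous points; returns a boolean inlier mask."""
    residuals = np.abs(np.einsum('ij,jk,ik->i', pts2_h, F, pts1_h))  # x2^T F x1 per point
    return residuals < tol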
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Script that contains 3 functions, to be used when we want to merge thin layers of the firn column.
I suggest we specify in the json input whether merging is true/false, plus the thickness threshold ('merge_min'):
"merging": true,
"merge_min": 5e-3
mergesurf(): for layer[0] and layer[1], to be used in time_evolve() of firn_density_nospin
mergenotsurf(): for layers[2:], to be used in time_evolve() of firn_density_nospin
mergeall(): for all layers, to be used at the end of firn_density_spin
CAUTION:
- not used for all variables (e.g. du_dx)
- nothing is done for gases, nor for isotopes
@author: verjans
'''
import numpy as np
from constants import *
def mergesurf(self,thickmin):
'''
This function is to be called during the time_evolve function of firn_density_nospin.
We merge the surface layer[0] with the layer[1] below as long as layer[1] remains under a certain thickness threshold.
By applying the condition on layer[1] instead of layer[0], we avoid merging all newly accumulated layers in the case where we use an RCM forcing on a short time scale.
The thickness threshold must be specified and must be consistent with the one in mergenotsurf().
'''
if ((self.dz[1] < thickmin) or (self.dz[0] < 1e-4)): #test
self.rho[1] = (self.rho[1]*self.dz[1]+self.rho[0]*self.dz[0]) / (self.dz[1]+self.dz[0])
self.Tz[1] = (self.Tz[1]*self.mass[1]+self.Tz[0]*self.mass[0]) / (self.mass[1]+self.mass[0])
self.r2[1] = (self.r2[1]*self.mass[1]+self.r2[0]*self.mass[0]) / (self.mass[1]+self.mass[0])
self.age[1] = self.age[1] # suggestion of Max 28Jun, important if we use bdot_mean
### Additive variables: take sum ###
self.LWC[1] = (self.LWC[0]+self.LWC[1])
self.PLWC_mem[1] = (self.PLWC_mem[0]+self.PLWC_mem[1])
self.dz[1] += self.dz[0] # add thickness to underlying layer
### Remove the thin surface layer ###
self.rho = np.delete(self.rho,0)
self.Tz = np.delete(self.Tz,0)
self.r2 = np.delete(self.r2,0)
self.age = np.delete(self.age,0)
self.dz = np.delete(self.dz,0)
self.LWC = np.delete(self.LWC,0)
self.PLWC_mem = np.delete(self.PLWC_mem,0)
## For Dcon, here we remove the layer that is merged but maybe we want to remove the layer that receives the merging (and keep most recent dcon)##
self.Dcon = np.delete(self.Dcon,0)
self.rho = np.append(self.rho, self.rho[-1])
self.Tz = np.append(self.Tz, self.Tz[-1])
self.r2 = np.append(self.r2, self.r2[-1])
self.age = np.append(self.age, self.age[-1])
self.dz = np.append(self.dz, self.dz[-1])
self.LWC = np.append(self.LWC, 0.)
self.PLWC_mem = np.append(self.PLWC_mem, 0.)
self.Dcon = np.append(self.Dcon, self.Dcon[-1])
### Adjustment of variables ###
self.z = self.dz.cumsum(axis=0)
self.z = np.delete(np.append(0,self.z),-1)
self.gridLen = | np.size(self.z) | numpy.size |
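# Illustrative sketch (not part of the original snippet): the merge above sums
# extensive quantities (dz, LWC, PLWC_mem) and weights intensive ones -- rho by
# thickness (so column mass is conserved) and Tz, r2 by mass. A minimal
# two-layer version of that bookkeeping, for reference only:
import numpy as np

def merge_top_two(dz, rho, Tz):
    """Merge layers 0 and 1; dz, rho, Tz are 1-D arrays of equal length."""
    mass0, mass1 = dz[0] * rho[0], dz[1] * rho[1]
    dz_m = dz[0] + dz[1]
    rho_m = (rho[0] * dz[0] + rho[1] * dz[1]) / dz_m           # keeps total mass unchanged
    Tz_m = (Tz[0] * mass0 + Tz[1] * mass1) / (mass0 + mass1)   # mass-weighted temperature
    return dz_m, rho_m, Tz_m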
import warnings
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy.metric import Schwarzschild, Kerr, KerrNewman
from einsteinpy.coordinates import CartesianConversion
from einsteinpy.coordinates.utils import four_position, stacked_vec
from einsteinpy.geodesic import Geodesic
from einsteinpy import constant
_c = constant.c.value
_G = constant.G.value
_Cc = constant.coulombs_const.value
def test_str_repr():
"""
Tests, if the ``__str__`` and ``__repr__`` messages match
"""
t = 0.
M = 1e25
x_vec = np.array([306., np.pi / 2, np.pi / 2])
v_vec = np.array([0., 0.01, 10.])
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 1.
step_size = 0.4e-6
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size
)
assert str(geod) == repr(geod)
@pytest.fixture()
def dummy_data():
M = 6e24
t = 0.
x_vec = np.array([130.0, np.pi / 2, -np.pi / 8])
v_vec = np.array([0.0, 0.0, 1900.0])
metric = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
metric_mat = metric.metric_covariant(x_4vec)
init_vec = stacked_vec(metric_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 0.002
step_size = 5e-8
return metric, init_vec, end_lambda, step_size
def test_Geodesics_has_trajectory(dummy_data):
metric, init_vec, end_lambda, step_size = dummy_data
geo = Geodesic(
metric=metric,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size
)
assert isinstance(geo.trajectory, np.ndarray)
@pytest.mark.parametrize(
"x_vec, v_vec, t, M, end_lambda, step_size",
[
(
np.array([306., np.pi / 2, np.pi / 2]),
np.array([0., 0., 951.]),
0.,
4e24,
0.002,
0.5e-6,
),
(
np.array([1e3, 0.15, np.pi / 2]),
np.array([0.1 * _c, 0.5e-5 * _c, 0.5e-4 * _c]),
0.,
5.972e24,
0.0001,
0.5e-6,
),
(
np.array([50e3, np.pi / 2, np.pi / 2]),
np.array([0.1 * _c, 2e-7 * _c, 1e-5]),
0.,
5.972e24,
0.001,
5e-6,
),
],
)
def test_calculate_trajectory_schwarzschild(
x_vec, v_vec, t, M, end_lambda, step_size
):
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size,
return_cartesian=False
)
ans = geod.trajectory
testarray = list()
for i in ans:
x = i[:4]
g = ms_cov.metric_covariant(x)
testarray.append(
g[0][0] * (i[4] ** 2) +
g[1][1] * (i[5] ** 2) +
g[2][2] * (i[6] ** 2) +
g[3][3] * (i[7] ** 2)
)
testarray = np.array(testarray, dtype=float)
assert_allclose(testarray, 1., 1e-4)
def test_calculate_trajectory2_schwarzschild():
# based on the revolution of earth around sun
# data from https://en.wikipedia.org/wiki/Earth%27s_orbit
t = 0.
M = 1.989e30
distance_at_perihelion = 147.10e9
speed_at_perihelion = 30290
angular_vel = (speed_at_perihelion / distance_at_perihelion)
x_vec = np.array([distance_at_perihelion, np.pi / 2, 0])
v_vec = np.array([0.0, 0.0, angular_vel])
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 3.154e7
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=end_lambda / 2e3,
return_cartesian=False
)
ans = geod.trajectory
# velocity should be 29.29 km/s at aphelion (where r is max)
i = np.argmax(ans[:, 1]) # index where radial distance is max
v_aphelion = (((ans[i][1] * ans[i][7]) * (u.m / u.s)).to(u.km / u.s)).value
assert_allclose(v_aphelion, 29.29, rtol=0.01)
def test_calculate_trajectory3_schwarzschild():
# same test as with test_calculate_trajectory2_schwarzschild(),
# but initialized with cartesian coordinates
# and function returning cartesian coordinates
t = 0.
M = 1.989e30
distance_at_perihelion = 147.10e9
speed_at_perihelion = 30290
x_sph = CartesianConversion(
distance_at_perihelion / np.sqrt(2),
distance_at_perihelion / np.sqrt(2),
0.,
-speed_at_perihelion / np.sqrt(2),
speed_at_perihelion / np.sqrt(2),
0.
).convert_spherical()
x_vec = x_sph[:3]
v_vec = x_sph[3:]
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 3.154e7
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=end_lambda / 2e3,
)
ans = geod.trajectory
# velocity should be 29.29 km/s at aphelion (where r is max)
R = np.sqrt(ans[:, 1] ** 2 + ans[:, 2] ** 2 + ans[:, 3] ** 2)
i = np.argmax(R) # index where radial distance is max
v_aphelion = (
(np.sqrt(ans[i, 5] ** 2 + ans[i, 6] ** 2 + ans[i, 7] ** 2) * (u.m / u.s)).to(
u.km / u.s
)
).value
assert_allclose(v_aphelion, 29.29, rtol=0.01)
@pytest.mark.parametrize(
"x_vec, v_vec, t, M, end_lambda, step_size, OdeMethodKwargs, return_cartesian",
[
(
np.array([306., np.pi / 2, np.pi / 2]),
np.array([0., 0.1, 951.]),
0.,
4e24,
0.0002,
0.3e-6,
{"stepsize": 0.3e-6},
True,
),
(
np.array([1e3, 0.15, np.pi / 2]),
np.array([_c, 0.5e-5 * _c, 1e-4 * _c]),
0.,
5.972e24,
0.0002,
0.5e-6,
{"stepsize": 0.5e-6},
False,
),
],
)
def test_calculate_trajectory_iterator_schwarzschild(
x_vec, v_vec, t, M, end_lambda, step_size, OdeMethodKwargs, return_cartesian
):
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=step_size,
return_cartesian=return_cartesian
)
traj = geod.trajectory
traj_iter = geod.calculate_trajectory_iterator(OdeMethodKwargs=OdeMethodKwargs, return_cartesian=return_cartesian)
traj_iter_list = list()
for _, val in zip(range(50), traj_iter):
traj_iter_list.append(val[1])
traj_iter_arr = np.array(traj_iter_list)
assert_allclose(traj[:50, :], traj_iter_arr, rtol=1e-10)
def test_calculate_trajectory_iterator_RuntimeWarning_schwarzschild():
t = 0.
M = 1e25
x_vec = np.array([306., np.pi / 2, np.pi / 2])
v_vec = np.array([0., 0.01, 10.])
ms_cov = Schwarzschild(M=M)
x_4vec = four_position(t, x_vec)
ms_cov_mat = ms_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 1.
stepsize = 0.4e-6
OdeMethodKwargs = {"stepsize": stepsize}
geod = Geodesic(
metric=ms_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=stepsize,
return_cartesian=False
)
with warnings.catch_warnings(record=True) as w:
it = geod.calculate_trajectory_iterator(
OdeMethodKwargs=OdeMethodKwargs,
)
for _, _ in zip(range(1000), it):
pass
assert len(w) >= 1
@pytest.mark.parametrize(
"x_vec, v_vec, t, M, a, end_lambda, step_size",
[
(
np.array([306., np.pi / 2.05, np.pi / 2]),
np.array([0., 0., 951.]),
0.,
4e24,
2e-3,
0.001,
0.5e-6,
),
(
np.array([1e3, 0.15, np.pi / 2]),
np.array([0.1 * _c, 0.5e-5 * _c, 0.5e-4 * _c]),
0.,
5.972e24,
2e-3,
0.0001,
0.5e-6,
),
(
np.array([50e3, np.pi / 2, np.pi / 2]),
            np.array([0.1 * _c, 2e-7 * _c, 1e-5]),
import os
import sys
import json
import typing
import torch
import numpy as np
import albumentations
import nibabel as nib
from skimage import transform
from torch.utils.data import random_split
from torch.utils.data.dataset import Dataset
from warnings import simplefilter
from matplotlib import pyplot as plt
simplefilter(action='ignore', category=FutureWarning)
class GenACDC(Dataset):
def __init__(self,
data_dir: str, # path to ACDC nii data
slice_num: int,
data_mode: str,
resolution: float
) -> None:
super(GenACDC, self).__init__()
self.data_dir = data_dir
self.slice_num = slice_num
self.data_mode = data_mode
self.res = resolution
self.transform = albumentations.Compose([
albumentations.augmentations.Normalize(mean=0.5, std=0.5, max_pixel_value=1.0)]
)
if self.data_mode == 'labeled':
self.data = self._load_labeled_data()
else:
self.data = self._load_unlabeled_data(include_all=True)
def __getitem__(self,
index: int
) -> typing.Tuple[typing.Any, typing.Any]:
img, mask, label = self.data['images'][index], self.data['masks'][index], self.data['labels'][index]
augmented_img = self.transform(image=img.numpy().transpose(1, 2, 0))
img = torch.from_numpy(augmented_img['image'].transpose(2, 0, 1))
return img, mask, label
def __len__(self) -> int:
return len(self.data['images'])
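    # A minimal usage sketch (kept as a comment, not executed); the data_dir path, slice index and
    # resolution value are illustrative assumptions, the keyword names follow __init__ above:
    #
    #     dataset = GenACDC(data_dir="/path/to/ACDC/training", slice_num=0,
    #                       data_mode="labeled", resolution=1.37)
    #     img, mask, label = dataset[0]   # normalized image tensor, stacked binary masks, pathology label
    #     print(len(dataset), img.shape)
    #
    # Note that __getitem__ indexes self.data['masks'], which is only populated by the labeled branch.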
def create_labeled_dataset(self,
path_to_dir: str,
slice_num: int
) -> None:
try:
os.mkdir(path_to_dir)
os.mkdir(path_to_dir + os.sep + 'images')
os.mkdir(path_to_dir + os.sep + 'masks')
os.mkdir(path_to_dir + os.sep + 'labels')
except OSError:
            print("Creation of directories in %s failed" % path_to_dir)
if slice_num == -1:
images = self.data['images']
masks = self.data['masks']
targets = []
for i in range(images.shape[0]):
for slice_num in range(images[i].shape[0]):
self._save_intensity_image(images[i][slice_num].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
self._save_mask(masks[i][slice_num].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
targets.append(self.data['labels'][i].item())
else:
images = self.data['images'][:, slice_num]
masks = self.data['masks'][:, slice_num]
for i in range(images.shape[0]):
self._save_intensity_image(images[i].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
self._save_mask(masks[i].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
targets = self.data['labels'].tolist()
with open(path_to_dir + '/labels/' + 'labels.json', 'w') as outfile:
outfile.write(
'[' +
',\n'.join(json.dumps(i) for i in targets) +
']\n'
)
def create_unlabeled_dataset(self,
path_to_dir: str,
slice_num: int
) -> None:
try:
os.mkdir(path_to_dir)
os.mkdir(path_to_dir + os.sep + 'images')
os.mkdir(path_to_dir + os.sep + 'labels')
except OSError:
            print("Creation of directories in %s failed" % path_to_dir)
if slice_num == -1:
images = self.data['images']
targets = []
for i in range(images.shape[0]):
for slice_num in range(images[i].shape[0]):
self._save_intensity_image(images[i][slice_num].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
targets.append(self.data['labels'][i].item())
else:
images = self.data['images'][:, slice_num]
for i in range(images.shape[0]):
self._save_intensity_image(images[i].squeeze(0).numpy(), path_to_dir, self.data['subject_idx'][i], self.data['frame_idx'][i], slice_num)
targets = self.data['labels'].tolist()
with open(path_to_dir + '/labels/' + 'labels.json', 'w') as outfile:
outfile.write(
'[' +
',\n'.join(json.dumps(i) for i in targets) +
']\n'
)
def _load_labeled_data(self) -> typing.Dict[str, np.array]:
td = {}
images, masks, labels, subject_idx, frame_idx = self._load_raw_labeled_data()
td = {
"images": torch.from_numpy(np.float32(images)),
"masks": torch.from_numpy(np.float32(masks)),
"labels": torch.from_numpy(np.float32(labels)),
"subject_idx": torch.from_numpy(subject_idx),
"frame_idx": torch.from_numpy(frame_idx)
}
return td
def _load_raw_labeled_data(self) -> typing.List[np.array]:
images, masks_lv, masks_rv, masks_myo, labels = [], [], [], [], []
subject_idx, frame_idx = [], []
volumes = list(range(1, 151))
for patient_i in volumes:
patient = 'patient%03d' % patient_i
patient_folder = os.path.join(self.data_dir, patient)
            if not os.path.exists(patient_folder):
continue
# retrieve pathology label from patient's Info.cfg file
cfg = [f for f in os.listdir(patient_folder) if 'cfg' in f and f.startswith('Info')]
label_file = open(os.path.join(patient_folder, cfg[0]), mode = 'r')
lines = label_file.readlines()
label_file.close()
label_char = ''
for line in lines:
line = line.split(' ')
if line[0] == 'Group:':
label_char = line[1]
if label_char == 'NOR\n':
label = 0
elif label_char == 'MINF\n':
label = 1
elif label_char == 'DCM\n':
label = 2
elif label_char == 'HCM\n':
label = 3
else: # RV
label = 4
gt = [f for f in os.listdir(patient_folder) if 'gt' in f and f.startswith(patient + '_frame')]
ims = [f.replace('_gt', '') for f in gt]
for i in range(len(ims)):
subject_idx.append(patient_i)
frame_idx.append(int(ims[i].split('.')[0].split('frame')[-1]))
im = self._process_raw_image(ims[i], patient_folder)
im = np.expand_dims(im, axis=-1)
m = self._resample_raw_image(gt[i], patient_folder, binary=True)
m = np.expand_dims(m, axis=-1)
images.append(im)
# convert 3-dim mask array to 3 binary mask arrays for lv, rv, myo
m_lv = m.copy()
m_lv[m != 3] = 0
m_lv[m == 3] = 1
masks_lv.append(m_lv)
m_rv = m.copy()
m_rv[m != 1] = 0
m_rv[m == 1] = 1
masks_rv.append(m_rv)
m_myo = m.copy()
m_myo[m != 2] = 0
m_myo[m == 2] = 1
masks_myo.append(m_myo)
labels.append(label)
# move slice axis to the first position
images = [np.moveaxis(im, 2, 0) for im in images]
masks_lv = [np.moveaxis(m, 2, 0) for m in masks_lv]
masks_rv = [np.moveaxis(m, 2, 0) for m in masks_rv]
masks_myo = [np.moveaxis(m, 2, 0) for m in masks_myo]
# normalize images
        for i in range(len(images)):
images[i] = (images[i] / 757.4495) * 255.0
# crop images and masks to the same pixel dimensions and concatenate all data
images_cropped, masks_lv_cropped = self._crop_same(images, masks_lv, (224, 224))
_, masks_rv_cropped = self._crop_same(images, masks_rv, (224, 224))
_, masks_myo_cropped = self._crop_same(images, masks_myo, (224, 224))
# images_cropped = np.expand_dims(images_cropped[:], axis=0)
images_cropped = [np.expand_dims(image, axis=0) for image in images_cropped]
images_cropped = np.concatenate(images_cropped, axis=0)
masks_cropped = np.concatenate([masks_myo_cropped, masks_lv_cropped, masks_rv_cropped], axis=-1)
labels = np.array(labels)
subject_idx = np.array(subject_idx)
frame_idx = np.array(frame_idx)
return images_cropped.transpose(0,1,4,2,3), masks_cropped.transpose(0,1,4,2,3), labels, subject_idx, frame_idx
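    # Shape summary for the arrays returned above (inferred from the crops, concatenations and
    # transposes, assuming a consistent slice count across frames): images (n_frames, n_slices, 1,
    # 224, 224); masks (n_frames, n_slices, 3, 224, 224) with channel order (myo, lv, rv);
    # labels, subject_idx and frame_idx each of shape (n_frames,).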
def _load_unlabeled_data(self,
include_all: bool=False
) -> typing.Dict[str, torch.Tensor]:
td = {}
images, labels, subject_idx, frame_idx = self._load_raw_unlabeled_data(include_all)
td = {
"images": torch.from_numpy(np.float32(images)),
"labels": torch.from_numpy(np.float32(labels)),
"subject_idx": torch.from_numpy(subject_idx),
"frame_idx": torch.from_numpy(frame_idx)
}
return td
def _load_raw_unlabeled_data(self,
include_all: bool
) -> np.array:
images, labels = [], []
subject_idx, frame_idx = [], []
volumes = list(range(1, 151))
more_than_10_cnt = 0
for patient_i in volumes:
patient = 'patient%03d' % patient_i
patient_folder = os.path.join(self.data_dir, patient)
            if not os.path.exists(patient_folder):
continue
# retrieve pathology label from patient's Info.cfg file
cfg = [f for f in os.listdir(patient_folder) if 'cfg' in f and f.startswith('Info')]
label_file = open(os.path.join(patient_folder, cfg[0]), mode = 'r')
lines = label_file.readlines()
label_file.close()
label_char = ''
for line in lines:
line = line.split(' ')
if line[0] == 'Group:':
label_char = line[1]
if label_char == 'NOR\n':
label = 0
elif label_char == 'MINF\n':
label = 1
elif label_char == 'DCM\n':
label = 2
elif label_char == 'HCM\n':
label = 3
else: #RV
label = 4
im_name = patient + '_4d.nii.gz'
im = self._process_raw_image(im_name, patient_folder)
frames = range(im.shape[-1])
gt = [f for f in os.listdir(patient_folder) if 'gt' in f and not f.startswith('._')]
if len(gt) > 0:
gt_ims = [f.replace('_gt', '') for f in gt if not f.startswith('._')]
else:
gt_ims = [f for f in os.listdir(patient_folder) if 'frame' in f and not f.startswith('._')]
exclude_frames = [int(gt_im.split('.')[0].split('frame')[1]) for gt_im in gt_ims]
if include_all:
frames = [f for f in range(im.shape[-1]) if (f > exclude_frames[0] and f < exclude_frames[1]) or f == exclude_frames[0] or f == exclude_frames[1]]
else:
frames = [f for f in range(im.shape[-1]) if f not in exclude_frames and f > exclude_frames[0] and f < exclude_frames[1]]
for frame in frames:
subject_idx.append(patient_i)
frame_idx.append(frame)
im_res = im[:, :, :, frame]
if im_res.sum() == 0:
print('Skipping blank images')
continue
im_res = np.expand_dims(im_res, axis=-1)
images.append(im_res)
labels.append(label)
images = [np.moveaxis(im, 2, 0) for im in images]
# normalize images
        for i in range(len(images)):
images[i] = np.round((images[i] / 757.4495) * 255.0)
zeros = [np.zeros(im.shape) for im in images]
images_cropped, _ = self._crop_same(images, zeros, (224, 224))
images_cropped = np.concatenate(np.expand_dims(images_cropped, axis=0), axis=0)#[..., 0]
labels = np.array(labels)
subject_idx = np.array(subject_idx)
frame_idx = np.array(frame_idx)
return images_cropped.transpose(0,1,4,2,3), labels, subject_idx, frame_idx
def _resample_raw_image(self, # Load raw data (image/mask) and resample to fixed resolution.
mask_fname: str, # filename of mask
patient_folder: str, # folder containing patient data
binary: bool=False # boolean to define binary masks or not
)-> np.array:
m_nii_fname = os.path.join(patient_folder, mask_fname)
new_res = (self.res, self.res)
im_nii = nib.load(m_nii_fname)
im_data = im_nii.get_data()
voxel_size = im_nii.header.get_zooms()
sform_matrix = im_nii.header.get_sform()
scale_vector = [voxel_size[i] / new_res[i] for i in range(len(new_res))]
order = 0 if binary else 1
result = []
dims = im_data.shape
if len(dims) < 4:
for i in range(im_data.shape[-1]):
if i > 5:
break
im = im_data[..., i]
rescaled = transform.rescale(im, scale_vector, order=order, preserve_range=True, mode='constant')
rotated = transform.rotate(rescaled, 270.0)
result.append(np.expand_dims(np.flip(rotated, axis=0), axis=-1))
else:
for i in range(im_data.shape[-1]):
inner_im_data = im_data[..., i]
all_slices = []
for j in range(inner_im_data.shape[-1]):
if j > 5:
break
im = inner_im_data[..., j]
rescaled = transform.rescale(im, scale_vector, order=order, preserve_range=True, mode='constant')
rotated = transform.rotate(rescaled, 270.0)
all_slices.append(np.expand_dims(rotated, axis=-1))
result.append(np.expand_dims(np.concatenate(all_slices, axis=-1), axis=-1))
return np.concatenate(result, axis=-1)
def _process_raw_image(self, # Normalise and crop extreme values of an image
im_fname: str, # filename of the image
patient_folder: str, # folder of patient data
value_crop: bool=True # True/False to crop values between 5/95 percentiles
) -> typing.List:
im = self._resample_raw_image(im_fname, patient_folder, binary=False)
# crop to 5-95%
if value_crop:
p5 = np.percentile(im.flatten(), 5)
p95 = np.percentile(im.flatten(), 95)
im = np.clip(im, p5, p95)
return im
def _crop_same(self,
image_list: list, # List of images. Each element should be 4-dimensional, (slice,height,width,channel)
mask_list: list, # List of masks. Each element should be 4-dimensional, (slice,height,width,channel)
size: tuple, # Dimensions to crop the images to.
                   mode: str='equal', # [equal, left, right]. Where to crop pixels from; 'equal' crops evenly from both sides (keeping the middle).
pad_mode: str='edge', # ['edge', 'constant']. 'edge' pads using the values of the edge pixels, 'constant' pads with a constant value
image_only: bool=False
) -> typing.List[np.array]:
min_w = np.min([im.shape[1] for im in image_list]) if size[0] is None else size[0]
min_h = np.min([im.shape[2] for im in image_list]) if size[1] is None else size[1]
if image_only:
img_result = []
for i in range(len(image_list)):
im = image_list[i]
if im.shape[1] > min_w:
im = self._crop(im, 1, min_w, mode)
if im.shape[1] < min_w:
im = self._pad(im, 1, min_w, pad_mode)
if im.shape[2] > min_h:
im = self._crop(im, 2, min_h, mode)
if im.shape[2] < min_h:
im = self._pad(im, 2, min_h, pad_mode)
img_result.append(im)
return img_result
else:
img_result, msk_result = [], []
for i in range(len(mask_list)):
im = image_list[i]
m = mask_list[i]
if m.shape[1] > min_w:
m = self._crop(m, 1, min_w, mode)
if im.shape[1] > min_w:
im = self._crop(im, 1, min_w, mode)
if m.shape[1] < min_w:
m = self._pad(m, 1, min_w, pad_mode)
if im.shape[1] < min_w:
im = self._pad(im, 1, min_w, pad_mode)
if m.shape[2] > min_h:
m = self._crop(m, 2, min_h, mode)
if im.shape[2] > min_h:
im = self._crop(im, 2, min_h, mode)
if m.shape[2] < min_h:
m = self._pad(m, 2, min_h, pad_mode)
if im.shape[2] < min_h:
im = self._pad(im, 2, min_h, pad_mode)
img_result.append(im)
msk_result.append(m)
return img_result, msk_result
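    # A small sketch of the crop/pad behaviour above (hypothetical shapes, shown as a comment only):
    #
    #     imgs = [np.zeros((10, 230, 210, 1))]   # (slice, height, width, channel)
    #     msks = [np.zeros((10, 230, 210, 1))]
    #     out_i, out_m = self._crop_same(imgs, msks, (224, 224))
    #     # height 230 is cropped to 224 and width 210 is padded to 224,
    #     # so out_i[0].shape == (10, 224, 224, 1)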
def _crop(self,
image: list,
dim: int,
nb_pixels: int,
mode: str
) -> typing.Union[None, list]:
diff = image.shape[dim] - nb_pixels
if mode == 'equal':
            l = int(np.ceil(diff / 2))
from collections import defaultdict
from functools import reduce
import numpy as np
import pandas as pd
from nltk import word_tokenize
from fuzzywuzzy import fuzz
import hybrid_search_engine
from hybrid_search_engine.utils import text_processing as processing
from hybrid_search_engine.utils.exceptions import SearchEngineException
class SearchEngine():
def __init__(self, index, documents_df, columns, filtering_columns=[], config=None,
nlp_engine=None, syntax_threshold=0.9, semantic_threshold=0.8):
self.index = index
self.matrix = np.stack(index["token vector"])
self.syntax_threshold = syntax_threshold
self.semantic_threshold = semantic_threshold
self.document_ids = documents_df[documents_df.columns[0]]
self.document_idx_mapping = {id_: i for i, id_ in enumerate(self.document_ids)}
self.documents_norm = documents_df[[f"{c} Norm" for c in columns]]
self.document_tags = documents_df[filtering_columns]
self.default_columns = columns
self.filtering_columns = filtering_columns
self.doc2token_mapping = self.__create_doc2token_mapping()
self.lower = True
self.dynamic_idf_reweighting = False
self.use_TF = True
self.use_IDF = True
self.normalize_query = True
self.syntax_weight = 0.5
self.semantic_weight = 0.5
default_weight = 1 / len(columns)
self.column_weights = {c: default_weight for c in columns}
if config is not None:
self.update_config(config)
if nlp_engine is None:
self.nlp_engine = hybrid_search_engine.nlp_engine
else:
self.nlp_engine = nlp_engine
def __create_doc2token_mapping(self):
doc2token_dictionary_mapping = defaultdict(list)
for column in self.default_columns:
document_ids = self.index[column].values
for i, doc_ids in enumerate(document_ids):
for doc_id in doc_ids:
doc2token_dictionary_mapping[doc_id].append(i)
for k in doc2token_dictionary_mapping.keys():
doc2token_dictionary_mapping[k] = list(sorted(set(doc2token_dictionary_mapping[k])))
doc2token_mapping = pd.DataFrame({
"document_id": [k for k in doc2token_dictionary_mapping.keys()],
"token_ids": [np.array(v) for k, v in doc2token_dictionary_mapping.items()]
})
doc2token_mapping["document_id"] = self.document_ids[doc2token_mapping["document_id"]]
doc2token_mapping.set_index(keys="document_id", inplace=True)
return doc2token_mapping
def __filter_token_by_doc_ids(self, doc_ids):
token_ids = self.doc2token_mapping.loc[doc_ids, "token_ids"].values
token_ids = np.concatenate(token_ids)
token_ids = np.unique(token_ids)
return np.sort(token_ids)
def update_config(self, config):
if "dynamic_idf_reweighting" in config:
self.dynamic_idf_reweighting = config["dynamic_idf_reweighting"]
else:
self.dynamic_idf_reweighting = False
if "use_TF" in config:
self.use_TF = config["use_TF"]
else:
self.use_TF = True
if "use_IDF" in config:
self.use_IDF = config["use_IDF"]
else:
self.use_IDF = True
if "normalize_query" in config:
self.normalize_query = config["normalize_query"]
else:
self.normalize_query = True
if "similarity_weight" in config and config["similarity_weight"] is not None:
for weight in ["syntax_weight", "semantic_weight"]:
if config["similarity_weight"][weight] < 0:
raise SearchEngineException(f"{weight} similarity must be greater than 0")
self.syntax_weight = config["similarity_weight"]["syntax_weight"]
self.semantic_weight = config["similarity_weight"]["semantic_weight"]
if "column_weights" in config and config["column_weights"] is not None:
for c, weight in config["column_weights"].items():
if weight < 0:
raise SearchEngineException(f"{c} weight must be greater than 0")
self.column_weights = config["column_weights"]
if "lower" in config:
self.lower = config["lower"]
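    # A sketch of the config dict accepted by update_config above; the keys are the ones read in the
    # branches, while the values and the column names ("title", "body") are purely illustrative:
    #
    #     config = {
    #         "dynamic_idf_reweighting": False,
    #         "use_TF": True,
    #         "use_IDF": True,
    #         "normalize_query": True,
    #         "similarity_weight": {"syntax_weight": 0.5, "semantic_weight": 0.5},
    #         "column_weights": {"title": 0.7, "body": 0.3},
    #         "lower": True,
    #     }
    #     engine.update_config(config)   # `engine` being an already constructed SearchEngine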
def find(self, query, doc_ids=[], columns=[], filtering_options={}):
processed_query = processing.process_string(query, lower=self.lower)
query_tokens = word_tokenize(processed_query)
if len(query_tokens) == 0:
return f"Unable to process query. Query '{query}' has been reduced to empty string by text processing"
if len(columns) == 0:
columns = self.default_columns
if len(doc_ids) > 0:
token_ids = self.__filter_token_by_doc_ids(doc_ids)
else:
token_ids = self.index.index.values
v = [self.nlp_engine(t).vector for t in query_tokens]
v = np.array([c / np.linalg.norm(c) for c in v])
v = np.nan_to_num(v)
syntax_scores = self.index.loc[token_ids]["token"].apply(syntax_similarity, args=(processed_query,))
semantic_scores = np.matmul(self.matrix[token_ids], v.T)
        semantic_scores = np.max(semantic_scores, axis=1)
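# A hedged usage sketch for SearchEngine.find; the index and documents_df objects are assumed to be
# built elsewhere in the package (documents_df carrying the id and "<column> Norm" columns used in
# __init__), and the column names are assumptions:
#
#     engine = SearchEngine(index, documents_df, columns=["title", "body"])
#     results = engine.find("gradient descent", columns=["title"])
#
# Inside find(), each query token is embedded with nlp_engine and L2-normalized; syntax scores come
# from the module's syntax_similarity helper (defined outside this excerpt), while the semantic
# score of each index token is its maximum dot product with the normalized query token vectors.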
import gym
from scipy.integrate import ode
import numpy as np
import json
from .models import dcmotor_model, converter_models, load_models
from ..dashboard import MotorDashboard
from ..utils import EulerSolver
class _DCMBaseEnv(gym.Env):
"""
**Description:**
An abstract environment for common functions of the DC motors
**Observation:**
Specified by the concrete motor. It is always a concatenation of the state variables, voltages, torque
and next reference values.
**Actions:**
Depending on the converter type the action space may be discrete or continuous
Type: Discrete(2 / 3 / 4)
Num Action: Depend on the converter
1Q Converter: (only positive voltage and positive current)
- 0: transistor block
- 1: positive DC-link voltage applied
2Q Converter: (only positive voltage and both current directions)
- 0: both transistors blocking
- 1: positive DC-link voltage applied
- 2: 0V applied
4Q Converter (both voltage and current directions)
- 0: short circuit with upper transistors, 0V applied
- 1: positive DC-link voltage
- 2: negative DC-link voltage
- 3: short circuit with lower transistors, 0V applied
Type: Box()
Defines the duty cycle for the transistors.\n
[0, 1]: 1Q and 2Q\n
[-1, 1]: 4Q
For an externally excited motor it is a two dimensional box from [-1, 1] or [0, 1]
**Reward:**
The reward is the cumulative squared error (se) or the cumulative absolute error (ae) between the
current value and the current reference of the state variables. Both are also available in a shifted
        form with an added offset of one such that the reward is positive. More details are given below.
The variables are normalised by their maximal values and weighted by the reward_weights.
**Starting State:**
All observations are assigned a random value.
**Episode Termination**:
An episode terminates, when all the steps in the reference have been simulated
or a limit has been violated.
**Attributes:**
+----------------------------+----------------------------------------------------------+
| **Name** | **Description** |
+============================+==========================================================+
| **state_vars** | Names of all the quantities that can be observed |
+----------------------------+----------------------------------------------------------+
| **state_var_positions** | Inverse dict of the state vars. Mapping of key to index. |
+----------------------------+----------------------------------------------------------+
| **limits** | Maximum allowed values of the state variables |
+----------------------------+----------------------------------------------------------+
| **reward_weights** | Ratio of the weight of the state variable for the reward |
+----------------------------+----------------------------------------------------------+
| **on_dashboard** | Flag indicating if the state var is shown on dashboard |
+----------------------------+----------------------------------------------------------+
| **noise_levels** | Percentage of the noise power to the signal power |
+----------------------------+----------------------------------------------------------+
| **zero_refs** | State variables that get a fixed zero reference |
+----------------------------+----------------------------------------------------------+
"""
OMEGA_IDX = 0
MOTOR_IDX = None
# region Properties
@property
def tau(self):
"""
Returns:
            the step size of the environment. Default: 1e-5 for discrete / 1e-4 for continuous action space
"""
return self._tau
@property
def episode_length(self):
"""
Returns:
The length of the current episode
"""
return self._episode_length
@episode_length.setter
def episode_length(self, episode_length):
"""
Set the length of the episode in the environment. Must be larger than the prediction horizon.
"""
self._episode_length = max(self._prediction_horizon + 1, episode_length)
@property
def k(self):
"""
Returns:
The current step in the running episode
"""
return self._k
@property
def limit_observer(self):
return self._limit_observer
@property
def safety_margin(self):
return self._safety_margin
@property
def prediction_horizon(self):
return self._prediction_horizon
@property
def motor_parameter(self):
"""
Returns:
motor parameter with calculated limits
"""
params = self.motor_model.motor_parameter
params['safety_margin'] = self.safety_margin
params['episode_length'] = self._episode_length
params['prediction_horizon'] = self._prediction_horizon
params['tau'] = self._tau
params['limits'] = self._limits.tolist()
return params
@property
def _reward(self):
return self._reward_function
# endregion
def __init__(self, motor_type, state_vars, zero_refs, converter_type, tau, episode_length=10000, load_parameter=None,
motor_parameter=None, reward_weight=(('omega', 1.0),), on_dashboard=('omega',), integrator='euler',
nsteps=1, prediction_horizon=0, interlocking_time=0.0, noise_levels=0.0, reward_fct='swsae',
limit_observer='off', safety_margin=1.3, gamma=0.9, dead_time=True):
"""
Basic setting of all the common motor parameters.
Args:
motor_type: Can be 'dc-series', 'dc-shunt', 'dc-extex' or 'dc-permex'. Set by the child classes.
state_vars: State variables of the DC motor. Set by the child classes.
zero_refs: State variables that get zero references. (E.g. to punish high control power)
motor_parameter: A dict of motor parameters that differ from the default ones. \n
For details look into the dc_motor model.
load_parameter: A dict of load parameters that differ from the default ones. \n
For details look into the load model.
converter_type: The specific converter type.'{disc/cont}-{1Q/2Q/4Q}'. For details look into converter
tau: The step size or sampling time of the environment.
episode_length: The episode length of the environment
reward_weight: Iterable of key/value pairs that specifies how the rewards in the environment
are weighted.
E.g. ::
(('omega', 0.9),('u', 0.1))
on_dashboard: Iterable that specifies the variables on the dashboard.
E.g.::
['omega','u']
integrator: Select which integrator to choose from 'euler', 'dopri5'
nsteps: Maximum allowed number of steps for the integrator.
prediction_horizon: The length of future reference points that are shown to the agents
interlocking_time: interlocking time of the converter
noise_levels: Noise levels of the state variables in percentage of the signal power.
reward_fct: Select the reward function between: (Each one normalised to [0,1] or [-1,0]) \n
'swae': Absolute Error between references and state variables [-1,0] \n
'swse': Squared Error between references and state variables [-1,0]\n
'swsae': Shifted absolute error / 1 + swae [0,1] \n
'swsse': Shifted squared error / 1 + swse [0,1] \n
limit_observer: Select the limit observing function. \n
'off': No limits are observed. Episode goes on. \n
'no_punish': Limits are observed, no punishment term for violation. This function should be used with
shifted reward functions. \n
'const_punish': Limits are observed. Punishment in the form of -1 / (1-gamma) to punish the agent with
the maximum negative reward for the further steps. This function should be used with non shifted reward
functions.
safety_margin: Ratio between maximal and nominal power of the motor parameters.
gamma: Parameter for the punishment of a limit violation. Should equal agents gamma parameter.
"""
self._gamma = gamma
self._safety_margin = safety_margin
self._reward_function, self.reward_range = self._reward_functions(reward_fct)
self._limit_observer = self._limit_observers(limit_observer)
self._tau = tau
self._episode_length = episode_length
self.state_vars = np.array(state_vars)
#: dict(int): Inverse state vars. Dictionary to map state names to positions in the state arrays
self._state_var_positions = {}
for ind, val in enumerate(state_vars):
self._state_var_positions[val] = ind
self._prediction_horizon = max(0, prediction_horizon)
self._zero_refs = zero_refs
#: array(bool): True, if the state variable on the index is a zero_reference. For fast access
self._zero_ref_flags = np.isin(self.state_vars, self._zero_refs)
self.load_model = load_models.Load(load_parameter)
self.motor_model = dcmotor_model.make(motor_type, self.load_model.load, motor_parameter)
self.converter_model = converter_models.Converter.make(converter_type, self._tau, interlocking_time, dead_time)
self._k = 0
self._dashboard = None
self._state = np.zeros(len(state_vars))
self._reference = np.zeros((len(self.state_vars), episode_length + prediction_horizon))
self._reward_weights = np.zeros(len(self._state))
self.reference_vars = np.zeros_like(self.state_vars, dtype=bool)
self._on_dashboard = np.ones_like(self.state_vars, dtype=bool)
if on_dashboard[0] == 'True':
self._on_dashboard *= True
elif on_dashboard[0] == 'False':
self._on_dashboard *= False
else:
self._on_dashboard *= False
for key in on_dashboard:
self._on_dashboard[self._state_var_positions[key]] = True
for key, val in reward_weight:
self._reward_weights[self._state_var_positions[key]] = val
for i in range(len(state_vars)):
if self._reward_weights[i] > 0 and self.state_vars[i] not in self._zero_refs:
self.reference_vars[i] = True
integrators = ['euler', 'dopri5']
assert integrator in integrators, f'Integrator was {integrator}, but has to be in {integrators}'
if integrator == 'euler':
self.system = EulerSolver(self._system_eq, nsteps)
else:
self.system = ode(self._system_eq, self._system_jac).set_integrator(integrator, nsteps=nsteps)
self.integrate = self.system.integrate
self.action_space = self.converter_model.action_space
self._limits = np.zeros(len(self.state_vars))
self._set_limits()
self._set_observation_space()
self._noise_levels = np.zeros(len(state_vars))
if type(noise_levels) is tuple:
for state_var, noise_level in noise_levels:
self._noise_levels[self._state_var_positions[state_var]] = noise_level
else:
self._noise_levels = np.ones(len(self.state_vars)) * noise_levels
self._noise = None
self._resetDashboard = True
def seed(self, seed=None):
"""
Seed the random generators in the environment
Args:
seed: The value to seed the random number generator with
"""
np.random.seed(seed)
def _set_observation_space(self):
"""
Child classes need to write their concrete observation space into self.observation_space here
"""
raise NotImplementedError
def _set_limits(self):
"""
Child classes need to write their concrete limits of the state variables into self._limits here
"""
raise NotImplementedError
def _step_integrate(self, action):
"""
The integration is done for one time period. The converter considers the dead time and interlocking time.
Args:
action: switching state of the converter that should be applied
"""
raise NotImplementedError
def step(self, action):
"""
Clips the action to its limits and performs one step of the environment.
Args:
action: The action from the action space that will be performed on the motor
Returns:
Tuple(array(float), float, bool, dict):
**observation:** The observation from the environment \n
**reward:** The reward for the taken action \n
**bool:** Flag if the episode has ended \n
**info:** An always empty dictionary \n
"""
last_state = np.array(self._state, copy=True)
self._step_integrate(action)
rew = self._reward(self._state/self._limits, self._reference[:, self._k].T)
done, punish = self.limit_observer(self._state)
observation_references = self._reference[self.reference_vars, self._k:self._k + self._prediction_horizon + 1]
# normalize the observation
observation = np.concatenate((
self._state/self._limits + self._noise[:, self._k], observation_references.flatten()
))
self._k += 1
if done == 0: # Check if period is finished
done = self._k == self._episode_length
else:
rew = punish
return observation, rew, done, {}
def _set_initial_value(self):
"""
call self.system.set_initial_value(initial_state, 0.0) to reset the state to initial.
"""
self.system.set_initial_value(self._state[self.MOTOR_IDX], 0.0)
def reset(self):
"""
Resets the environment.
All state variables will be set to a random value in [-nominal value, nominal value].
New references will be generated.
Returns:
The initial observation for the episode
"""
self._k = 0
# Set new state
self._set_initial_state()
# New References
self._generate_references()
# Reset Integrator
self._set_initial_value()
# Reset Dashboard Flag
self._resetDashboard = True
# Generate new gaussian noise for the state variables
self._noise = (
np.sqrt(self._noise_levels/6) / self._safety_margin
* np.random.randn(self._episode_length+1, len(self.state_vars))
).T
# Calculate initial observation
observation_references = self._reference[self.reference_vars, self._k:self._k + self._prediction_horizon+1]
observation = np.concatenate((self._state/self._limits, observation_references.flatten()))
return observation
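    # A typical interaction loop with a concrete subclass (sketch only; the subclass name and the
    # random action are assumptions, the reset/step/render contract is the one implemented above):
    #
    #     env = SomeConcreteDCMEnv(...)        # a subclass implementing the abstract hooks
    #     obs = env.reset()
    #     done = False
    #     while not done:
    #         action = env.action_space.sample()
    #         obs, reward, done, info = env.step(action)
    #         env.render()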
def render(self, mode='human'):
"""
Call this function once a cycle to update the visualization with the current values.
"""
if not self._on_dashboard.any():
return
if self._dashboard is None:
# First Call: No dashboard was initialised before
self._dashboard = MotorDashboard(self.state_vars[self._on_dashboard], self._tau,
self.observation_space.low[:len(self.state_vars)][self._on_dashboard]
* self._limits[self._on_dashboard],
self.observation_space.high[:len(self.state_vars)][self._on_dashboard]
* self._limits[self._on_dashboard],
self._episode_length,
self._safety_margin,
self._reward_weights[self._on_dashboard] > 0)
if self._resetDashboard:
self._resetDashboard = False
self._dashboard.reset((self._reference[self._on_dashboard].T * self._limits[self._on_dashboard]).T)
self._dashboard.step(self._state[self._on_dashboard], self._k) # Update the plot in the dashboard
def close(self):
"""
When the environment is closed the dashboard will also be closed.
This function does not need to be called explicitly.
"""
if self._dashboard is not None:
self._dashboard.close()
def _system_eq(self, t, state, u_in, noise):
"""
The differential equation of the whole system consisting of the converter, load and motor.
This function is called by the integrator.
Args:
t: Current time of the system
state: The current state as a numpy array.
u_in: Applied input voltage
Returns:
The solution of the system. The first derivatives of all the state variables of the system.
"""
t_load = self.load_model.load(state[self.OMEGA_IDX])
return self.motor_model.model(state, t_load, u_in + noise)
def _system_jac(self, t, state):
"""
The Jacobian matrix of the systems equation.
Args:
t: Current time of the system.
state: Current state
Returns:
The solution of the Jacobian matrix for the current state
"""
load_jac = self.load_model.jac(state)
return self.motor_model.jac(state, load_jac)
# region Reference Generation
def _reference_sin(self, bandwidth=20):
"""
Set sinus references for the state variables with a random amplitude, offset and phase shift
Args:
bandwidth: bandwidth of the system
"""
x = np.arange(0, (self._episode_length + self._prediction_horizon))
if self.observation_space.low[0] == 0.0:
amplitude = np.random.rand() / 2
offset = np.random.rand() * (1 - 2*amplitude) + amplitude
else:
amplitude = np.random.rand()
offset = (2 * np.random.rand() - 1) * (1 - amplitude)
t_min, t_max = self._set_time_interval_reference('sin', bandwidth) # specify range for period time
t_s = np.random.rand() * (t_max - t_min) + t_min
phase_shift = 2 * np.pi * np.random.rand()
        self._reference = amplitude * np.sin(2 * np.pi / t_s * x * self.tau + phase_shift)
import numpy as np
from scipy import ndimage, optimize
import pdb
import matplotlib.pyplot as plt
import cv2
import matplotlib.patches as patches
import multiprocessing
import datetime
import json
####################################################
def findMaxRect(data):
'''http://stackoverflow.com/a/30418912/5008845'''
nrows, ncols = data.shape
w = np.zeros(dtype=int, shape=data.shape)
h = np.zeros(dtype=int, shape=data.shape)
skip = 1
area_max = (0, [])
for r in range(nrows):
for c in range(ncols):
if data[r][c] == skip:
continue
if r == 0:
h[r][c] = 1
else:
h[r][c] = h[r - 1][c] + 1
if c == 0:
w[r][c] = 1
else:
w[r][c] = w[r][c - 1] + 1
minw = w[r][c]
for dh in range(h[r][c]):
minw = min(minw, w[r - dh][c])
area = (dh + 1) * minw
if area > area_max[0]:
area_max = (area, [(r - dh, c - minw + 1, r, c)])
return area_max
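# A small worked example for findMaxRect (values checked by hand against the loops above): cells
# equal to 1 are treated as blocked (skip == 1), so for an all-zero 3x4 array the whole array is the
# maximal rectangle:
#
#     area, coords = findMaxRect(np.zeros((3, 4), dtype=int))
#     # area == 12, coords == [(0, 0, 2, 3)]   i.e. (top_row, left_col, bottom_row, right_col)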
########################################################################
def residual(angle, data):
nx, ny = data.shape
M = cv2.getRotationMatrix2D(((nx - 1) / 2, (ny - 1) / 2), angle, 1)
RotData = cv2.warpAffine(data, M, (nx, ny), flags=cv2.INTER_NEAREST, borderValue=1)
rectangle = findMaxRect(RotData)
return 1. / rectangle[0]
########################################################################
def residual_star(args):
return residual(*args)
########################################################################
def get_rectangle_coord(angle, data, flag_out=None):
nx, ny = data.shape
M = cv2.getRotationMatrix2D(((nx - 1) / 2, (ny - 1) / 2), angle, 1)
RotData = cv2.warpAffine(data, M, (nx, ny), flags=cv2.INTER_NEAREST, borderValue=1)
rectangle = findMaxRect(RotData)
if flag_out:
return rectangle[1][0], M, RotData
else:
return rectangle[1][0], M
########################################################################
def findRotMaxRect(data_in, flag_opt=False, flag_parallel=False, nbre_angle=10, flag_out=None, flag_enlarge_img=False,
limit_image_size=300):
'''
    flag_opt : True  -> only nbre_angle angles are tested between 90 and 180 degrees,
                        and an optimization descent is then run on the best fit.
               False -> 100 angles are tested from 90 to 180 degrees.
    flag_parallel : only valid when flag_opt=False; the 100 angles are evaluated with multiprocessing.
    flag_out : angle and rectangle of the rotated image are returned together with the rectangle of
               the original image.
    flag_enlarge_img : the image used in the function is doubled in size to ensure all features stay
                       inside when rotated.
    limit_image_size : controls the number of pixels of the image used in the function.
                       This speeds up the code but can give approximate results if the shape is not simple.
'''
# time_s = datetime.datetime.now()
# make the image square
# ----------------
nx_in, ny_in = data_in.shape
if nx_in != ny_in:
n = max([nx_in, ny_in])
        data_square = np.ones([n, n])
import json
import os
import time
from copy import deepcopy
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from typing import List, Dict
from matplotlib import pyplot as plt
from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, \
LikelihoodFactor, BinaryFactorMixture, KWayFactor
from sampler.NestedSampling import GlobalNestedSampler
from sampler.SimulationBasedSampler import SimulationBasedSampler
from slam.Variables import Variable, VariableType
from slam.FactorGraph import FactorGraph
from slam.BayesTree import BayesTree, BayesTreeNode
import numpy as np
from sampler.sampler_utils import JointFactor
from utils.Functions import sort_pair_lists
from utils.Visualization import plot_2d_samples
from utils.Functions import sample_dict_to_array, array_order_to_dict
class SolverArgs:
def __init__(self,
elimination_method: str = "natural",
posterior_sample_num: int = 500,
local_sample_num: int = 500,
store_clique_samples: bool = False,
local_sampling_method="direct",
adaptive_posterior_sampling=None,
*args, **kwargs
):
# graph-related and tree-related params
self.elimination_method = elimination_method
self.posterior_sample_num = posterior_sample_num
self.store_clique_samples = store_clique_samples
self.local_sampling_method = local_sampling_method
self.local_sample_num = local_sample_num
self.adaptive_posterior_sampling = adaptive_posterior_sampling
def jsonStr(self):
return json.dumps(self.__dict__)
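# A construction sketch for SolverArgs; the values shown simply spell out the defaults of __init__
# above and are not prescriptive:
#
#     args = SolverArgs(elimination_method="natural",
#                       posterior_sample_num=500,
#                       local_sample_num=500,
#                       store_clique_samples=False,
#                       local_sampling_method="direct")
#     print(args.jsonStr())   # serializes the argument dict to JSON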
class CliqueSeparatorFactor(ImplicitPriorFactor):
def sample(self, num_samples: int, **kwargs):
return NotImplementedError("implementation depends on density models")
class ConditionalSampler:
def conditional_sample_given_observation(self, conditional_dim,
obs_samples=None,
sample_number=None):
"""
This method returns samples with the dimension of conditional_dim.
If sample_number is given, samples of the first conditional_dim variables are return.
If obs_samples is given, samples of the first conditional_dim variables after
the dimension of obs_samples will be returned. obs_samples.shape = (sample num, dim)
Note that the dims here are of the vectorized point on manifolds not the dim of manifold.
"""
raise NotImplementedError("Implementation depends on density estimation method.")
class FactorGraphSolver:
"""
This is the abstract class of factor graph solvers.
It mainly works as:
1. the interface for users to define and solve factor graphs.
2. the maintainer of factor graphs and Bayes tree for incremental inference
3. fitting probabilistic models to the working part of factor graph and Bayes tree
4. inference (sampling) on the entire Bayes tree
    The derived class may rely on different probabilistic modeling approaches.
"""
def __init__(self, args: SolverArgs):
"""
Parameters
----------
elimination_method : string
option of heuristics for variable elimination ordering.
TODO: this can be a dynamic parameter when updating Bayes tree
"""
self._args = args
self._physical_graph = FactorGraph()
self._working_graph = FactorGraph()
self._physical_bayes_tree = None
self._working_bayes_tree = None
self._conditional_couplings = {} # map from Bayes tree clique to flows
self._implicit_factors = {} # map from Bayes tree clique to factor
self._samples = {} # map from variable to samples
self._new_nodes = []
self._new_factors = []
self._clique_samples = {} # map from Bayes tree clique to samples
self._clique_true_obs = {} # map from Bayes tree clique to observations which augments flow models
self._clique_density_model = {} # map from Bayes tree clique to flow model
# map from Bayes tree clique to variable pattern; (Separator,Frontal) in reverse elimination order
self._clique_variable_pattern = {}
self._elimination_ordering = []
self._reverse_ordering_map = {}
self._temp_training_loss = {}
def set_args(self, args: SolverArgs):
raise NotImplementedError("Implementation depends on probabilistic modeling approaches.")
@property
def elimination_method(self) -> str:
return self._args.elimination_method
@property
def elimination_ordering(self) -> List[Variable]:
return self._elimination_ordering
@property
def physical_vars(self) -> List[Variable]:
return self._physical_graph.vars
@property
def new_vars(self) -> List[Variable]:
return self._new_nodes
@property
def working_vars(self) -> List[Variable]:
return self._working_graph.vars
@property
def physical_factors(self) -> List[Factor]:
return self._physical_graph.factors
@property
def new_factors(self) -> List[Factor]:
return self._new_factors
@property
def working_factors(self) -> List[Factor]:
return self._working_graph.factors
@property
def working_factor_graph(self) -> FactorGraph:
return self._working_graph
@property
def physical_factor_graph(self) -> FactorGraph:
return self._physical_graph
@property
def working_bayes_tree(self) -> BayesTree:
return self._working_bayes_tree
@property
def physical_bayes_tree(self) -> BayesTree:
return self._physical_bayes_tree
def generate_natural_ordering(self) -> None:
"""
Generate the ordering by which nodes are added
"""
self._elimination_ordering = self._physical_graph.vars + self._new_nodes
def generate_pose_first_ordering(self) -> None:
"""
Generate the ordering by which nodes are added and lmk eliminated later
"""
natural_order = self._physical_graph.vars + self._new_nodes
pose_list = []
lmk_list = []
for node in natural_order:
if node._type == VariableType.Landmark:
lmk_list.append(node)
else:
pose_list.append(node)
self._elimination_ordering = pose_list + lmk_list
def generate_ccolamd_ordering(self) -> None:
"""
"""
physical_graph_ordering = [var for var in self._elimination_ordering if var not in self._working_graph.vars]
working_graph_ordering = self._working_graph.analyze_elimination_ordering(
method="ccolamd", last_vars=
[[var for var in self._working_graph.vars if
var.type == VariableType.Pose][-1]])
self._elimination_ordering = physical_graph_ordering + working_graph_ordering
def generate_ordering(self) -> None:
"""
Generate the ordering by which Bayes tree should be generated
"""
if self._args.elimination_method == "natural":
self.generate_natural_ordering()
elif self._args.elimination_method == "ccolamd":
self.generate_ccolamd_ordering()
elif self._args.elimination_method == "pose_first":
self.generate_pose_first_ordering()
self._reverse_ordering_map = {
var: index for index, var in
enumerate(self._elimination_ordering[::-1])}
# TODO: Add other ordering methods
def add_node(self, var: Variable = None, name: str = None,
dim: int = None) -> "FactorGraphSolver":
"""
Add a new node
The node has not been added to the physical or current factor graphs
:param var:
:param name: used only when variable is not specified
:param dim: used only when variable is not specified
:return: the current problem
"""
if var:
self._new_nodes.append(var)
else:
self._new_nodes.append(Variable(name, dim))
return self
def add_factor(self, factor: Factor) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param factor
:return: the current problem
"""
self._new_factors.append(factor)
return self
def add_prior_factor(self, vars: List[Variable],
distribution: dist.Distribution) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param distribution
:return: the current problem
"""
self._new_factors.append(ExplicitPriorFactor(
vars=vars, distribution=distribution))
return self
def add_likelihood_factor(self, vars: List[Variable],
likelihood: like.LikelihoodBase) -> "FactorGraphSolver":
"""
Add a likelihood factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param likelihood
:return: the current problem
"""
self._new_factors.append(LikelihoodFactor(
vars=vars, log_likelihood=likelihood))
return self
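    # A hedged sketch of the builder-style API above; `solver` stands for a concrete subclass
    # instance and the prior/likelihood objects are placeholders for whatever the surrounding
    # packages provide:
    #
    #     x0 = Variable("x0", 2)
    #     solver.add_node(x0) \
    #           .add_prior_factor([x0], some_prior_distribution) \
    #           .add_likelihood_factor([x0], some_likelihood)
    #     solver.update_physical_and_working_graphs()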
def update_physical_and_working_graphs(self, timer: List[float] = None, device: str = "cpu"
) -> "FactorGraphSolver":
"""
Add all new nodes and factors into the physical factor graph,
retrieve the working factor graph, update Bayes trees
:return: the current problem
"""
start = time.time()
# Determine the affected variables in the physical Bayes tree
old_nodes = set(self.physical_vars)
nodes_of_new_factors = set.union(*[set(factor.vars) for
factor in self._new_factors])
old_nodes_of_new_factors = set.intersection(old_nodes,
nodes_of_new_factors)
# Get the working factor graph
if self._physical_bayes_tree: # if not first step, get sub graph
affected_nodes, sub_bayes_trees = \
self._physical_bayes_tree. \
get_affected_vars_and_partial_bayes_trees(
vars=old_nodes_of_new_factors)
self._working_graph = self._physical_graph.get_sub_factor_graph_with_prior(
variables=affected_nodes,
sub_trees=sub_bayes_trees,
clique_prior_dict=self._implicit_factors)
else:
sub_bayes_trees = set()
for node in self._new_nodes:
self._working_graph.add_node(node)
for factor in self._new_factors:
self._working_graph.add_factor(factor)
        # Get the working Bayes tree
old_ordering = self._elimination_ordering
self.generate_ordering()
self._working_bayes_tree = self._working_graph.get_bayes_tree(
ordering=[var for var in self._elimination_ordering
if var in set(self.working_vars)])
# Update the physical factor graph
for node in self._new_nodes:
self._physical_graph.add_node(node)
for factor in self._new_factors:
self._physical_graph.add_factor(factor)
# Update the physical Bayesian tree
self._physical_bayes_tree = self._working_bayes_tree.__copy__()
self._physical_bayes_tree.append_child_bayes_trees(sub_bayes_trees)
# Delete legacy conditional samplers in the old tree and
# convert the density model w/o separator at leaves to density model w/ separator.
cliques_to_delete = set()
for old_clique in set(self._clique_density_model.keys()).difference(self._physical_bayes_tree.clique_nodes):
for new_clique in self._working_bayes_tree.clique_nodes:
if old_clique.vars == new_clique.vars and [var for var in old_ordering if var in old_clique.vars] == \
[var for var in self._elimination_ordering if var in new_clique.vars]:
# This clique was the root in the old tree but is leaf in the new tree.
# If the ordering of variables remains the same, its density model can be re-used.
# Update the clique to density model dict
self._clique_true_obs[new_clique] = self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
self._clique_variable_pattern[new_clique] = self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
self._clique_samples[new_clique] = self._clique_samples[old_clique]
self._clique_density_model[new_clique] = \
self.root_clique_density_model_to_leaf(old_clique, new_clique, device)
# since new clique will be skipped, related factors shall be eliminated beforehand.
# TODO: update _clique_density_model.keys() in which some clique parents change
# TODO: this currently has no impact on results
# TODO: if we store all models or clique-depend values on cliques, this issue will disappear
new_separator_factor = None
if new_clique.separator:
# extract new factor over separator
separator_var_list = sorted(new_clique.separator, key=lambda x: self._reverse_ordering_map[x])
new_separator_factor = \
self.clique_density_to_separator_factor(separator_var_list,
self._clique_density_model[new_clique],
self._clique_true_obs[old_clique])
self._implicit_factors[new_clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=new_clique,
new_factor=new_separator_factor)
break
cliques_to_delete.add(old_clique)
for old_clique in cliques_to_delete:
del self._clique_density_model[old_clique]
del self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
del self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
del self._clique_samples[old_clique]
# Clear all newly added variables and factors
self._new_nodes = []
self._new_factors = []
end = time.time()
if timer is not None:
timer.append(end - start)
return self
def root_clique_density_model_to_leaf(self,
old_clique: BayesTreeNode,
new_clique: BayesTreeNode,
device) -> "ConditionalSampler":
"""
        When the old clique and the new clique have the same variables but a different division of
        frontal and separator vars, recycle the density model of the old clique and convert it to
        the one for the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
        raise NotImplementedError("implementation depends on density models.")
def fit_tree_density_models(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs):
"""
By the order of Bayes tree, perform local sampling and training
on all cliques
:return:
"""
self._temp_training_loss = {}
clique_ordering = self._working_bayes_tree.clique_ordering()
total_clique_num = len(clique_ordering)
clique_cnt = 1
before_clique_time = time.time()
while clique_ordering:
start_clique_time = time.time()
clique = clique_ordering.pop()
if clique in self._clique_density_model:
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec")
clique_cnt += 1
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
continue
# local sampling
sampler_start = time.time()
local_samples, sample_var_ordering, true_obs = \
self.clique_training_sampler(clique,
num_samples=self._args.local_sample_num,
method=self._args.local_sampling_method)
sampler_end = time.time()
if timer is not None:
timer.append(sampler_end - sampler_start)
self._clique_true_obs[clique] = true_obs
if self._args.store_clique_samples:
self._clique_samples[clique] = local_samples
local_density_model = \
self.fit_clique_density_model(clique=clique,
samples=local_samples,
var_ordering=sample_var_ordering,
timer=timer)
self._clique_density_model[clique] = local_density_model
new_separator_factor = None
if clique.separator:
# extract new factor over separator
separator_list = sorted(clique.separator,
key=lambda x:
self._reverse_ordering_map[x])
new_separator_factor = self.clique_density_to_separator_factor(separator_list,
local_density_model,
true_obs)
self._implicit_factors[clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
new_factor=new_separator_factor)
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
def sample_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
num_samples = self._args.posterior_sample_num
start = time.time()
stack = [self._physical_bayes_tree.root]
samples = {}
while stack:
# Retrieve the working clique
clique = stack.pop()
# Local sampling
frontal_list = sorted(clique.frontal,
key=lambda x: self._reverse_ordering_map[x])
separator_list = sorted(clique.separator,
key=lambda x: self._reverse_ordering_map[x])
clique_density_model = self._clique_density_model[clique]
obs = self._clique_true_obs[clique]
aug_separator_samples = np.zeros(shape=(num_samples, 0))
if len(obs) != 0:
aug_separator_samples = np.tile(obs, (num_samples, 1))
for var in separator_list:
aug_separator_samples = np.hstack((aug_separator_samples,
samples[var]))
if aug_separator_samples.shape[1] != 0:
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
obs_samples=aug_separator_samples)
else: # the root clique
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
sample_number=num_samples)
# Dispatch samples
cur_index = 0
for var in frontal_list:
samples[var] = frontal_samples[:,
cur_index: cur_index + var.dim]
cur_index += var.dim
if clique.children:
for child in clique.children:
stack.append(child)
end = time.time()
if timer is not None:
timer.append(end - start)
return samples
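    # The dictionary returned above maps each Variable to an ndarray of shape
    # (posterior_sample_num, var.dim); cliques are sampled from the root of the Bayes tree downward,
    # so every child clique conditions on separator variables that have already been sampled.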
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
        if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
        if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot_factor_graph(self):
pass
def plot_bayes_tree(self):
pass
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
plot_args=None, check_root_transform=False) -> None:
run_count = 1
while os.path.exists(f"{case_dir}/run{run_count}"):
run_count += 1
os.mkdir(f"{case_dir}/run{run_count}")
run_dir = f"{case_dir}/run{run_count}"
print("create run dir: " + run_dir)
file = open(f"{run_dir}/parameters", "w+")
params = solver._args.jsonStr()
print(params)
file.write(params)
file.close()
num_batches = len(nodes_factors_by_step)
observed_nodes = []
step_timer = []
step_list = []
posterior_sampling_timer = []
fitting_timer = []
mixture_factor2weights = {}
    if plot_args is None:
        plot_args = {}
    show_plot = True
    if "show_plot" in plot_args and not plot_args["show_plot"]:
        show_plot = False
for i in range(num_batches):
step_nodes, step_factors = nodes_factors_by_step[i]
for node in step_nodes:
solver.add_node(node)
for factor in step_factors:
solver.add_factor(factor)
if isinstance(factor, BinaryFactorMixture):
mixture_factor2weights[factor] = []
observed_nodes += step_nodes
step_list.append(i)
step_file_prefix = f"{run_dir}/step{i}"
detailed_timer = []
clique_dim_timer = []
start = time.time()
solver.update_physical_and_working_graphs(timer=detailed_timer)
cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
end = time.time()
step_timer.append(end - start)
print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
f"total time: {sum(step_timer)}")
file = open(f"{step_file_prefix}_ordering", "w+")
file.write(" ".join([var.name for var in solver.elimination_ordering]))
file.close()
file = open(f"{step_file_prefix}_split_timing", "w+")
file.write(" ".join([str(t) for t in detailed_timer]))
file.close()
file = open(f"{step_file_prefix}_step_training_loss", "w+")
last_training_loss = json.dumps(solver._temp_training_loss)
file.write(last_training_loss)
file.close()
posterior_sampling_timer.append(detailed_timer[-1])
fitting_timer.append(sum(detailed_timer[1:-1]))
X = np.hstack([cur_sample[var] for var in solver.elimination_ordering])
np.savetxt(fname=step_file_prefix, X=X)
# check transformation
if check_root_transform:
root_clique = solver.physical_bayes_tree.root
root_clique_model = solver._clique_density_model[root_clique]
y = root_clique_model.prior.sample((3000,))
tx = deepcopy(y)
if hasattr(root_clique_model, "flows"):
for f in root_clique_model.flows[::-1]:
tx = f.inverse_given_separator(tx, None)
y = y.detach().numpy()
tx = tx.detach().numpy()
np.savetxt(fname=step_file_prefix + '_root_normal_data', X=y)
np.savetxt(fname=step_file_prefix + '_root_transformed', X=tx)
plt.figure()
x_sort, tx_sort = sort_pair_lists(tx[:,0], y[:,0])
plt.plot(x_sort, tx_sort)
plt.ylabel("T(x)")
plt.xlabel("x")
plt.savefig(f"{step_file_prefix}_transform.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
# clique dim and timing
np.savetxt(fname=step_file_prefix + '_dim_time', X=np.array(clique_dim_timer))
if traj_plot:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
title=f'Step {i}',
plot_all_meas=False,
plot_meas_give_pose=[var for var in step_nodes if var.type == VariableType.Pose],
rbt_traj_no_samples=True,
truth_R2=True,
truth_SE2=False,
truth_odometry_color='k',
truth_landmark_markersize=10,
truth_landmark_marker='x',
file_name=f"{step_file_prefix}.png",
**plot_args)
else:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
file_name=f"{step_file_prefix}.png", title=f'Step {i}',
**plot_args)
solver.plot2d_mean_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png", **plot_args)
# solver.plot2d_MAP_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png")
file = open(f"{run_dir}/step_timing", "w+")
file.write(" ".join(str(t) for t in step_timer))
file.close()
file = open(f"{run_dir}/step_list", "w+")
file.write(" ".join(str(s) for s in step_list))
file.close()
file = open(f"{run_dir}/posterior_sampling_timer", "w+")
file.write(" ".join(str(t) for t in posterior_sampling_timer))
file.close()
file = open(f"{run_dir}/fitting_timer", "w+")
file.write(" ".join(str(t) for t in fitting_timer))
file.close()
plt.figure()
plt.plot(np.array(step_list)*5+5, step_timer, 'go-', label='Total')
plt.plot(np.array(step_list)*5+5, posterior_sampling_timer, 'ro-', label='Posterior sampling')
plt.plot(np.array(step_list)*5+5, fitting_timer, 'bd-', label='Learning NF')
plt.ylabel(f"Time (sec)")
plt.xlabel(f"Key poses")
plt.legend()
plt.savefig(f"{run_dir}/step_timing.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
if mixture_factor2weights:
# write updated hypothesis weights
hypo_file = open(run_dir + f'/step{i}.hypoweights', 'w+')
plt.figure()
for factor, weights in mixture_factor2weights.items():
hypo_weights = factor.posterior_weights(cur_sample)
line = ' '.join([var.name for var in factor.vars]) + ' : ' + ','.join(
[str(w) for w in hypo_weights])
hypo_file.writelines(line + '\n')
weights.append(hypo_weights)
for i_w in range(len(hypo_weights)):
                plt.plot(np.arange(i + 1 - len(weights), i + 1), np.array(weights)[:, i_w])
import holter_monitor_errors as hme
import holter_monitor_constants as hmc
import numpy as np
import lvm_read as lr
import os.path
from biosppy.signals import ecg
import matplotlib.pyplot as plt
from input_reader import file_path
import array
import sys
import filter_functions as ff
def get_signal_data(fs, window, filename):
""" reads ecg data from an LabView (.lvm) file and ensures proper window length
:param fs: sampling frequency of data
:param window: interval for average processing (seconds)
:return: ecg data array
"""
extension = os.path.splitext(filename)[1]
if extension == ".lvm":
data = read_lvm(filename, "data_2/")['data']
print("Length:", len(data))
seconds = len(data) / fs
if window > seconds:
raise IndexError("Window longer than length of data")
return data
def get_distances(r_peaks, fs):
""" calculates RR Intervals based on R-peak locations
:param r_peaks: data point locations of R-peaks
:param fs: sampling frequency of data
:return: array of RR Interval lengths
"""
distances = [None] * (len(r_peaks) - 1)
r_peak_times = []
for i in range(1, len(r_peaks)):
distances[i - 1] = r_peaks[i] - r_peaks[i - 1]
temp = r_peaks[i] / (fs)
r_peak_times.append(temp)
return distances, r_peak_times
def get_indexes(r_peak_times, window):
""" computes zero-based indexes of windows for RR-Interval averages
:param r_peak_times: data point locations of R-peaks, in seconds
:param window: desired window width, in seconds
:return: array of indexes
"""
indexes = []
multiplier = 1
for i in range(0, len(r_peak_times)):
if r_peak_times[i] >= multiplier*window:
indexes.append(i)
multiplier += 1
return indexes
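# Minimal usage sketch (not part of the original module): shows how
# get_distances and get_indexes fit together. The sampling frequency and the
# evenly spaced R-peak locations below are made-up values for illustration.
def _example_rr_interval_windows():
    """Demonstrate RR-interval extraction followed by 10-second windowing."""
    fs = 250  # assumed sampling frequency in Hz
    r_peaks = np.arange(0, fs * 60, 200)  # hypothetical R-peaks ~0.8 s apart
    distances, r_peak_times = get_distances(r_peaks, fs)
    indexes = get_indexes(r_peak_times, window=10)
    print("number of RR intervals:", len(distances))
    print("first window boundary indexes:", indexes[:5])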
def get_averages(distances, indexes):
""" calculates RR Interval averages for a specific window of time
:param distances: array of RR-Interval widths
:param indexes: zero-based indexes defining the windows of data
:return: array of RR Interval averages
"""
averages = []
averages.append(np.mean(remove_outliers(distances[0:indexes[0]])))
for i in range(1, len(indexes)):
removed_outliers = remove_outliers(distances[indexes[i - 1]:indexes[i]])
average = np.mean(removed_outliers)
averages.append(average)
averages.append(np.mean(distances[indexes[len(indexes) - 1]:]))
return averages
def get_mode(signal):
""" calculates the mode of the amplitude of the original ECG signal
:param signal: the original ECG signal
    :return: most-occurring y-value in the ECG signal
"""
signal = np.array(signal)
    hist = np.histogram(signal)
from math import ceil
import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_equal
from numpy.testing import assert_raises
import keras
# TODO: remove the 3 lines below once the Keras release
# is configured to use keras_preprocessing
import keras_preprocessing
keras_preprocessing.set_keras_submodules(
backend=keras.backend, utils=keras.utils)
from keras_preprocessing import sequence
def test_pad_sequences():
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = sequence.pad_sequences(a, maxlen=3, padding='pre')
assert_allclose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = sequence.pad_sequences(a, maxlen=3, padding='post')
assert_allclose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = sequence.pad_sequences(a, maxlen=2, truncating='pre')
assert_allclose(b, [[0, 1], [1, 2], [2, 3]])
b = sequence.pad_sequences(a, maxlen=2, truncating='post')
assert_allclose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = sequence.pad_sequences(a, maxlen=3, value=1)
assert_allclose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_str():
a = [['1'], ['1', '2'], ['1', '2', '3']]
# test padding
b = sequence.pad_sequences(a, maxlen=3, padding='pre', value='pad', dtype=object)
assert_equal(b, [['pad', 'pad', '1'], ['pad', '1', '2'], ['1', '2', '3']])
b = sequence.pad_sequences(a, maxlen=3, padding='post', value='pad', dtype='<U3')
assert_equal(b, [['1', 'pad', 'pad'], ['1', '2', 'pad'], ['1', '2', '3']])
# test truncating
b = sequence.pad_sequences(a, maxlen=2, truncating='pre', value='pad',
dtype=object)
assert_equal(b, [['pad', '1'], ['1', '2'], ['2', '3']])
b = sequence.pad_sequences(a, maxlen=2, truncating='post', value='pad',
dtype='<U3')
assert_equal(b, [['pad', '1'], ['1', '2'], ['1', '2']])
with pytest.raises(ValueError, match="`dtype` int32 is not compatible with "):
sequence.pad_sequences(a, maxlen=2, truncating='post', value='pad')
def test_pad_sequences_vector():
a = [[[1, 1]],
[[2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]]
# test padding
b = sequence.pad_sequences(a, maxlen=3, padding='pre')
assert_allclose(b, [[[0, 0], [0, 0], [1, 1]],
[[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
b = sequence.pad_sequences(a, maxlen=3, padding='post')
assert_allclose(b, [[[1, 1], [0, 0], [0, 0]],
[[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]]])
# test truncating
b = sequence.pad_sequences(a, maxlen=2, truncating='pre')
assert_allclose(b, [[[0, 0], [1, 1]],
[[2, 1], [2, 2]],
[[3, 2], [3, 3]]])
b = sequence.pad_sequences(a, maxlen=2, truncating='post')
assert_allclose(b, [[[0, 0], [1, 1]],
[[2, 1], [2, 2]],
[[3, 1], [3, 2]]])
# test value
b = sequence.pad_sequences(a, maxlen=3, value=1)
assert_allclose(b, [[[1, 1], [1, 1], [1, 1]],
[[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
def test_make_sampling_table():
a = sequence.make_sampling_table(3)
assert_allclose(a, np.asarray([0.00315225, 0.00315225, 0.00547597]),
rtol=.1)
def test_skipgrams():
# test with no window size and binary labels
couples, labels = sequence.skipgrams(np.arange(3), vocabulary_size=3)
for couple in couples:
assert couple[0] in [0, 1, 2] and couple[1] in [0, 1, 2]
# test window size and categorical labels
couples, labels = sequence.skipgrams(np.arange(5),
vocabulary_size=5,
window_size=1,
categorical=True)
for couple in couples:
assert couple[0] - couple[1] <= 3
for l in labels:
assert len(l) == 2
def test_remove_long_seq():
maxlen = 5
seq = [
[1, 2, 3],
[1, 2, 3, 4, 5, 6],
]
label = ['a', 'b']
new_seq, new_label = sequence._remove_long_seq(maxlen, seq, label)
assert new_seq == [[1, 2, 3]]
assert new_label == ['a']
def test_TimeseriesGenerator_serde():
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
batch_size=2)
json_gen = data_gen.to_json()
recovered_gen = sequence.timeseries_generator_from_json(json_gen)
assert data_gen.batch_size == recovered_gen.batch_size
assert data_gen.end_index == recovered_gen.end_index
assert data_gen.length == recovered_gen.length
assert data_gen.reverse == recovered_gen.reverse
assert data_gen.sampling_rate == recovered_gen.sampling_rate
assert data_gen.shuffle == recovered_gen.shuffle
    assert data_gen.start_index == recovered_gen.start_index
    assert data_gen.stride == recovered_gen.stride
assert (data_gen.data == recovered_gen.data).all()
assert (data_gen.targets == recovered_gen.targets).all()
def test_TimeseriesGenerator():
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
assert (np.allclose(data_gen[0][0],
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]])))
assert (np.allclose(data_gen[0][1],
np.array([[10], [11]])))
assert (np.allclose(data_gen[1][0],
np.array([[[2], [4], [6], [8], [10]],
[[3], [5], [7], [9], [11]]])))
assert (np.allclose(data_gen[1][1],
np.array([[12], [13]])))
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
reverse=True,
batch_size=2)
assert len(data_gen) == 20
assert (np.allclose(data_gen[0][0],
np.array([[[8], [6], [4], [2], [0]],
[[9], [7], [5], [3], [1]]])))
assert (np.allclose(data_gen[0][1],
np.array([[10], [11]])))
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
shuffle=True,
batch_size=1)
batch = data_gen[0]
r = batch[1][0][0]
assert (np.allclose(batch[0],
np.array([[[r - 10],
[r - 8],
[r - 6],
[r - 4],
[r - 2]]])))
assert (np.allclose(batch[1], np.array([[r], ])))
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
stride=2,
batch_size=2)
assert len(data_gen) == 10
assert (np.allclose(data_gen[1][0],
np.array([[[4], [6], [8], [10], [12]],
[[6], [8], [10], [12], [14]]])))
assert (np.allclose(data_gen[1][1],
np.array([[14], [16]])))
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
start_index=10,
end_index=30,
batch_size=2)
assert len(data_gen) == 6
assert (np.allclose(data_gen[0][0],
np.array([[[10], [12], [14], [16], [18]],
[[11], [13], [15], [17], [19]]])))
assert (np.allclose(data_gen[0][1],
np.array([[20], [21]])))
data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)])
targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)])
data_gen = sequence.TimeseriesGenerator(data, targets,
length=10,
sampling_rate=2,
start_index=10,
end_index=30,
batch_size=2)
assert len(data_gen) == 6
assert np.allclose(data_gen[0][0], np.array(
        [np.array(data[10:19:2]), np.array(data[11:20:2])]))
# - <NAME> <<EMAIL>>
"""Miscellaneous Utility functions."""
from glob import glob
import numpy as np
from scipy.signal import correlate as corr
from skimage.io import imread as skimread
from skimage.transform import resize as imresize
def imread(fname, factor=100):
"""Read possibly scaled version of image"""
img = skimread(fname)
if factor < 100:
img = imresize(img, [int(img.shape[0]*factor/100),
int(img.shape[1]*factor/100)], order=3)
img = (img*255).astype(np.uint8)
return img
def getimglist(sdir):
"""Get list of images."""
jpgs = sorted(glob(sdir+'/*.jpg'))
jpegs = sorted(glob(sdir+'/*.jpeg'))
pngs = sorted(glob(sdir+'/*.png'))
if len(jpgs) >= len(jpegs) and len(jpgs) >= len(pngs):
return jpgs
if len(jpegs) >= len(pngs):
return jpegs
return pngs
def visualize(img, mask):
"""Produce a visualization of the segmentation."""
out = np.float32(img)/255
msk = CMAP[mask % CMAP.shape[0], :]
msk[mask == 0, :] = 0.
out = out*0.5 + msk*0.5
return (out*255).astype(np.uint8)
def crop_align(img, imgc):
"""Find crop in img aligned to imgc."""
    if np.amax(img) < np.amax(imgc):
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import csv
import string
"""Load Amazon review data, remove stopwords and punctuation, tokenize sentences
and return text, title and stars of each review
Arguments:
file_path(string): path of the csv file to load
title_index(int): index of column with titles
review_index(int): index of columns with reviews
star_index(int): index of column with number of stars
limit_rows: maximum number of rows to load
Return:
    titles: list of tokenized titles of Amazon reviews
    reviews: list of tokenized full text of Amazon reviews
stars: list of number of stars of Amazon reviews"""
def load_amazon_data(file_path, title_index, review_index, star_index, limit_rows=None):
reviews = []
titles = []
stars = []
stopwords_list = stopwords.words('english')
counter = 1
with open(file_path, 'r', encoding="utf8") as csvfile:
datastore = csv.reader(csvfile, delimiter=',')
next(datastore) # skip header
for row in datastore:
review_tokens = word_tokenize(row[review_index]) # tokenize sentence
review_filtered = [w for w in review_tokens if w not in stopwords_list and w not in string.punctuation]
reviews.append(review_filtered)
title_tokens = word_tokenize(row[title_index]) # tokenize title
title_filtered = [w for w in title_tokens if w not in stopwords_list and w not in string.punctuation]
titles.append(title_filtered)
stars.append(row[star_index])
if limit_rows is not None and counter >= limit_rows: # lazy evaluation
break
counter += 1
return titles, reviews, stars
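# Usage sketch (not part of the original module). The CSV path and the column
# indexes below are placeholders for an arbitrary Amazon review export; the
# NLTK 'stopwords' and 'punkt' corpora must already be downloaded.
def _example_load_reviews():
    """Load a capped number of rows and report how many reviews were read."""
    titles, reviews, stars = load_amazon_data(
        file_path="amazon_reviews.csv",  # hypothetical file name
        title_index=1,                   # hypothetical column layout
        review_index=2,
        star_index=0,
        limit_rows=1000)
    print(len(titles), "titles and", len(reviews), "reviews loaded")
    print("first star rating:", stars[0] if stars else None)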
'''
@author DTrimarchi10 https://github.com/DTrimarchi10/confusion_matrix
This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
percent: If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
other_labels: String with other labels to add below the chart. Default is Empty string.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
other_labels="",
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for _ in range(cf.size)]
if group_names and len(group_names) == cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels, group_counts, group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
# Accuracy is sum of diagonal divided by total observations
        accuracy = np.trace(cf) / float(np.sum(cf))
"""Functions to calculate trajectory features from input trajectory data
This module provides functions to calculate trajectory features based off the
ImageJ plugin TraJClassifier by <NAME>. See details at
https://imagej.net/TraJClassifier.
"""
import math
import struct
import pandas as pd
import numpy as np
import numpy.linalg as LA
import numpy.ma as ma
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import diff_classifier.msd as msd
def unmask_track(track):
"""Removes empty frames from inpute trajectory datset.
Parameters
----------
track : pandas.core.frame.DataFrame
At a minimum, must contain a Frame, Track_ID, X, Y, MSDs, and
Gauss column.
Returns
-------
comp_track : pandas.core.frame.DataFrame
Similar to track, but has all masked components removed.
"""
xpos = ma.masked_invalid(track['X'])
msds = ma.masked_invalid(track['MSDs'])
x_mask = ma.getmask(xpos)
msd_mask = ma.getmask(msds)
comp_frame = ma.compressed(ma.masked_where(msd_mask, track['Frame']))
compid = ma.compressed(ma.masked_where(msd_mask, track['Track_ID']))
comp_x = ma.compressed(ma.masked_where(x_mask, track['X']))
comp_y = ma.compressed(ma.masked_where(x_mask, track['Y']))
comp_msd = ma.compressed(ma.masked_where(msd_mask, track['MSDs']))
comp_gauss = ma.compressed(ma.masked_where(msd_mask, track['Gauss']))
comp_qual = ma.compressed(ma.masked_where(x_mask, track['Quality']))
comp_snr = ma.compressed(ma.masked_where(x_mask, track['SN_Ratio']))
comp_meani = ma.compressed(ma.masked_where(x_mask,
track['Mean_Intensity']))
data1 = {'Frame': comp_frame,
'Track_ID': compid,
'X': comp_x,
'Y': comp_y,
'MSDs': comp_msd,
'Gauss': comp_gauss,
'Quality': comp_qual,
'SN_Ratio': comp_snr,
'Mean_Intensity': comp_meani
}
comp_track = pd.DataFrame(data=data1)
return comp_track
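# Illustrative sketch (not part of the original module): build a tiny track
# containing one invalid (NaN) frame and strip it with unmask_track. The
# column values are arbitrary placeholders that only satisfy the schema above.
def _example_unmask_track():
    frames = 5
    data1 = {'Frame': np.linspace(1, frames, frames),
             'Track_ID': np.ones(frames),
             'X': np.array([1.0, 2.0, np.nan, 4.0, 5.0]),
             'Y': np.array([1.0, 2.0, np.nan, 4.0, 5.0]),
             'MSDs': np.array([0.0, 2.0, np.nan, 18.0, 32.0]),
             'Gauss': np.zeros(frames),
             'Quality': np.ones(frames),
             'SN_Ratio': np.ones(frames),
             'Mean_Intensity': np.ones(frames)}
    dframe = pd.DataFrame(data=data1)
    comp_track = unmask_track(dframe)
    print(comp_track.shape)  # the masked frame is dropped -> (4, 9)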
def alpha_calc(track):
"""Calculates alpha, the exponential fit parameter for MSD data
Parameters
----------
track : pandas.core.frame.DataFrame
At a minimum, must contain a Frames and a MSDs column. The function
msd_calc can be used to generate the correctly formatted pd dataframe.
Returns
-------
alph : numpy.float64
The anomalous exponent derived by fitting MSD values to the function,
<rad**2(n)> = 4*dcoef*(n*delt)**alph
dcoef : numpy.float64
The fitted diffusion coefficient derived by fitting MSD values to the
function above.
Examples
--------
>>> frames = 5
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.linspace(1, frames, frames)+5,
... 'Y': np.linspace(1, frames, frames)+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> alpha_calc(dframe)
(2.0000000000000004, 0.4999999999999999)
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.sin(np.linspace(1, frames, frames)+3),
... 'Y': np.cos(np.linspace(1, frames, frames)+3)}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> alpha_calc(dframe)
(0.023690002018364065, 0.5144436515510022)
"""
ypos = track['MSDs']
xpos = track['Frame']
def msd_alpha(xpos, alph, dcoef):
return 4*dcoef*(xpos**alph)
try:
popt, pcov = curve_fit(msd_alpha, xpos, ypos)
alph = popt[0]
dcoef = popt[1]
except RuntimeError:
print('Optimal parameters not found. Print NaN instead.')
alph = np.nan
dcoef = np.nan
return alph, dcoef
def gyration_tensor(track):
"""Calculates the eigenvalues and eigenvectors of the gyration tensor of the
input trajectory.
Parameters
----------
track : pandas DataFrame
At a minimum, must contain an X and Y column. The function
msd_calc can be used to generate the correctly formatted pd dataframe.
Returns
-------
eig1 : numpy.float64
Dominant eigenvalue of the gyration tensor.
eig2 : numpy.float64
Secondary eigenvalue of the gyration tensor.
eigv1 : numpy.ndarray
Dominant eigenvector of the gyration tensor.
eigv2 : numpy.ndarray
Secondary eigenvector of the gyration tensor.
Examples
--------
>>> frames = 5
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.linspace(1, frames, frames)+5,
... 'Y': np.linspace(1, frames, frames)+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> gyration_tensor(dframe)
(4.0,
4.4408920985006262e-16,
array([ 0.70710678, -0.70710678]),
array([ 0.70710678, 0.70710678]))
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.sin(np.linspace(1, frames, frames)+3),
... 'Y': np.cos(np.linspace(1, frames, frames)+3)}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> gyration_tensor(dframe)
(0.53232560128104522,
0.42766829138901619,
array([ 0.6020119 , -0.79848711]),
array([-0.79848711, -0.6020119 ]))
"""
dframe = track
assert isinstance(dframe, pd.core.frame.DataFrame), "track must be a pandas\
dataframe."
assert isinstance(dframe['X'], pd.core.series.Series), "track must contain\
X column."
assert isinstance(dframe['Y'], pd.core.series.Series), "track must contain\
Y column."
assert dframe.shape[0] > 0, "track must not be empty."
matrixa = np.sum((dframe['X'] - np.mean(
dframe['X']))**2)/dframe['X'].shape[0]
matrixb = np.sum((dframe['Y'] - np.mean(
dframe['Y']))**2)/dframe['Y'].shape[0]
matrixab = np.sum((dframe['X'] - np.mean(
dframe['X']))*(dframe['Y'] - np.mean(
dframe['Y'])))/dframe['X'].shape[0]
eigvals, eigvecs = LA.eig(np.array([[matrixa, matrixab],
[matrixab, matrixb]]))
dom = np.argmax(np.abs(eigvals))
rec = np.argmin(np.abs(eigvals))
eig1 = eigvals[dom]
eig2 = eigvals[rec]
eigv1 = eigvecs[dom]
eigv2 = eigvecs[rec]
return eig1, eig2, eigv1, eigv2
def kurtosis(track):
"""Calculates the kurtosis of input track.
Parameters
----------
track : pandas.core.frame.DataFrame
At a minimum, must contain an X and Y column. The function
msd_calc can be used to generate the correctly formatted pd dataframe.
Returns
-------
kurt : numpy.float64
Kurtosis of the input track. Calculation based on projected 2D
positions on the dominant eigenvector of the radius of gyration tensor.
Examples
--------
>>> frames = 5
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.linspace(1, frames, frames)+5,
... 'Y': np.linspace(1, frames, frames)+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> kurtosis(dframe)
2.5147928994082829
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.sin(np.linspace(1, frames, frames)+3),
... 'Y': np.cos(np.linspace(1, frames, frames)+3)}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> kurtosis(dframe)
1.8515139698652476
"""
dframe = track
assert isinstance(dframe, pd.core.frame.DataFrame), "track must be a pandas\
dataframe."
assert isinstance(dframe['X'], pd.core.series.Series), "track must contain\
X column."
assert isinstance(dframe['Y'], pd.core.series.Series), "track must contain\
Y column."
assert dframe.shape[0] > 0, "track must not be empty."
eig1, eig2, eigv1, eigv2 = gyration_tensor(dframe)
projection = dframe['X']*eigv1[0] + dframe['Y']*eigv1[1]
kurt = np.mean((projection - np.mean(
projection))**4/(np.std(projection)**4))
return kurt
def asymmetry(track):
"""Calculates the asymmetry of the trajectory.
Parameters
----------
track : pandas DataFrame
At a minimum, must contain an X and Y column. The function
msd_calc can be used to generate the correctly formatted pd dataframe.
Returns
-------
eig1 : numpy.float64
Dominant eigenvalue of the gyration tensor.
eig2 : numpy.float64
Secondary eigenvalue of the gyration tensor.
asym1 : numpy.float64
asymmetry of the input track. Equal to 0 for circularly symmetric
tracks, and 1 for linear tracks.
asym2 : numpy.float64
alternate definition of asymmetry. Equal to 1 for circularly
symmetric tracks, and 0 for linear tracks.
asym3 : numpy.float64
alternate definition of asymmetry.
Examples
--------
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.linspace(1, frames, frames)+5,
... 'Y': np.linspace(1, frames, frames)+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> asymmetry(dframe)
(16.5, 0.0, 1.0, 0.0, 0.69314718055994529)
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.sin(np.linspace(1, frames, frames)+3),
... 'Y': np.cos(np.linspace(1, frames, frames)+3)}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> asymmetry(dframe)
(0.53232560128104522,
0.42766829138901619,
0.046430119259539708,
0.80339606128247354,
0.0059602683290953052)
"""
dframe = track
assert isinstance(dframe, pd.core.frame.DataFrame), "track must be a pandas\
dataframe."
assert isinstance(dframe['X'], pd.core.series.Series), "track must contain\
X column."
assert isinstance(dframe['Y'], pd.core.series.Series), "track must contain\
Y column."
assert dframe.shape[0] > 0, "track must not be empty."
eig1, eig2, eigv1, eigv2 = gyration_tensor(track)
asym1 = (eig1**2 - eig2**2)**2/(eig1**2 + eig2**2)**2
asym2 = eig2/eig1
asym3 = -np.log(1-((eig1-eig2)**2)/(2*(eig1+eig2)**2))
return eig1, eig2, asym1, asym2, asym3
def minboundrect(track):
"""Calculates the minimum bounding rectangle of an input trajectory.
Parameters
----------
dframe : pandas.core.frame.DataFrame
At a minimum, must contain an X and Y column. The function
msd_calc can be used to generate the correctly formatted pd dataframe.
Returns
-------
rot_angle : numpy.float64
Angle of rotation of the bounding box.
area : numpy.float64
Area of the bounding box.
width : numpy.float64
Width of the bounding box.
height : numpy.float64
Height of the bounding box.
center_point : numpy.ndarray
Center point of the bounding box.
corner_pts : numpy.ndarray
Corner points of the bounding box.
Examples
--------
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.linspace(1, frames, frames)+5,
... 'Y': np.linspace(1, frames, frames)+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> minboundrect(dframe)
(-2.3561944901923448,
2.8261664256307952e-14,
12.727922061357855,
2.2204460492503131e-15,
array([ 10.5, 8.5]),
array([[ 6., 4.],
[ 15., 13.],
[ 15., 13.],
[ 6., 4.]]))
>>> frames = 10
>>> data1 = {'Frame': np.linspace(1, frames, frames),
... 'X': np.sin(np.linspace(1, frames, frames))+3,
... 'Y': np.cos(np.linspace(1, frames, frames))+3}
>>> dframe = pd.DataFrame(data=data1)
>>> dframe['MSDs'], dframe['Gauss'] = msd_calc(dframe)
>>> minboundrect(dframe)
(0.78318530717958657,
3.6189901131223992,
1.9949899732081091,
1.8140392491811692,
array([ 3.02076903, 2.97913884]),
array([[ 4.3676025 , 3.04013439],
[ 2.95381341, 1.63258851],
[ 1.67393557, 2.9181433 ],
[ 3.08772466, 4.32568917]]))
Notes
-----
Based off of code from the following repo:
https://github.com/dbworth/minimum-area-bounding-rectangle/blob/master/
python/min_bounding_rect.py
"""
dframe = track
assert isinstance(dframe, pd.core.frame.DataFrame), "track must be a pandas\
dataframe."
assert isinstance(dframe['X'], pd.core.series.Series), "track must contain\
X column."
assert isinstance(dframe['Y'], pd.core.series.Series), "track must contain\
Y column."
assert dframe.shape[0] > 0, "track must not be empty."
df2 = np.zeros((dframe.shape[0]+1, 2))
df2[:-1, :] = dframe[['X', 'Y']].values
df2[-1, :] = dframe[['X', 'Y']].values[0, :]
hull_points_2d = df2
edges = np.zeros((len(hull_points_2d)-1, 2))
for i in range(len(edges)):
edge_x = hull_points_2d[i+1, 0] - hull_points_2d[i, 0]
edge_y = hull_points_2d[i+1, 1] - hull_points_2d[i, 1]
edges[i] = [edge_x, edge_y]
edge_angles = np.zeros((len(edges)))
for i in range(len(edge_angles)):
edge_angles[i] = math.atan2(edges[i, 1], edges[i, 0])
edge_angles = np.unique(edge_angles)
start_area = 2 ** (struct.Struct('i').size * 8 - 1) - 1
min_bbox = (0, start_area, 0, 0, 0, 0, 0, 0)
for i in range(len(edge_angles)):
rads = np.array([[math.cos(edge_angles[i]),
math.cos(edge_angles[i]-(math.pi/2))],
[math.cos(edge_angles[i]+(math.pi/2)),
math.cos(edge_angles[i])]])
rot_points = np.dot(rads, np.transpose(hull_points_2d))
min_x = np.nanmin(rot_points[0], axis=0)
max_x = np.nanmax(rot_points[0], axis=0)
min_y = np.nanmin(rot_points[1], axis=0)
max_y = np.nanmax(rot_points[1], axis=0)
width = max_x - min_x
height = max_y - min_y
area = width*height
if area < min_bbox[1]:
min_bbox = (edge_angles[i], area, width, height,
min_x, max_x, min_y, max_y)
angle = min_bbox[0]
rads = np.array([[math.cos(angle), math.cos(angle-(math.pi/2))],
[math.cos(angle+(math.pi/2)), math.cos(angle)]])
min_x = min_bbox[4]
max_x = min_bbox[5]
min_y = min_bbox[6]
max_y = min_bbox[7]
center_x = (min_x + max_x)/2
center_y = (min_y + max_y)/2
center_point = np.dot([center_x, center_y], rads)
    corner_pts = np.zeros((4, 2))
import configparser
import glob
import os
import subprocess
import sys
import netCDF4 as nc
import numpy as np
import matplotlib.path as mpath
from scipy.interpolate import griddata
from plotSurface import plot_surface
from readMRIData import read_intra_op_points
from readMRIData import read_tumor_point
from readMRIData import rotate_points
from readMRIData import move_points
from readMRIData import interpolation
from readMRIData import get_interpolated_path
from readMRIData import get_path
from readMRIVolume import switch_space
from postProcessing import open_surface_temperatures
from postProcessing import tumor_temperatures
from postProcessing import tumor_near_surface_temperatures
from postProcessing import brain_temperatures
from postProcessing import domain_temperatures
from postProcessing import csv_result_temperatures
from postProcessing import vessels_temperatures
from postProcessing import non_vessels_temperatures
from postProcessing import calc_l2_norm
def parse_config_file(params):
print('Parsing {0}.'.format(params['NAME_CONFIGFILE']))
# Create configparser and open file.
config = configparser.ConfigParser()
config.optionxform = str
config.read(params['NAME_CONFIGFILE'])
# Get values from section 'Dimension'.
try:
params['SPACE_DIM'] = config['Dimension'].getint('SPACE_DIM', fallback=3)
except KeyError:
print('* ERROR:', params['NAME_CONFIGFILE'], 'does not contain section \'Dimension\'.')
print(' ', params['NAME_CONFIGFILE'], 'may not be a config file.')
print('Aborting.')
exit()
# Get values from section 'Geometry'.
# Coordinates of first node.
COORD_NODE_FIRST = config['Geometry'].get('COORD_NODE_FIRST')
params['COORD_NODE_FIRST_ENV'] = COORD_NODE_FIRST
COORD_NODE_FIRST = list(map(float, COORD_NODE_FIRST.split('x')))
params['COORD_NODE_FIRST'] = COORD_NODE_FIRST
# Coordinates of last node.
COORD_NODE_LAST = config['Geometry'].get('COORD_NODE_LAST')
params['COORD_NODE_LAST_ENV'] = COORD_NODE_LAST
COORD_NODE_LAST = list(map(float, COORD_NODE_LAST.split('x')))
params['COORD_NODE_LAST'] = COORD_NODE_LAST
# Number of nodes.
N_NODES = config['Geometry'].get('N_NODES')
params['N_NODES_ENV'] = N_NODES
N_NODES = list(map(int, N_NODES.split('x')))
params['N_NODES'] = N_NODES
# Get values from section 'Time'.
params['START_TIME'] = config['Time'].getint('START_TIME', fallback=0)
    params['END_TIME'] = config['Time'].getint('END_TIME', fallback=1)
params['N_TIMESTEPS'] = config['Time'].getint('N_TIMESTEPS', fallback=0)
# Get values from section 'Output'.
params['N_SNAPSHOTS'] = config['Output'].getint('N_SNAPSHOTS')
# Get values from section 'Input'.
params['USE_MRI_FILE'] = config['Input'].getboolean('USE_MRI_FILE',
fallback=False)
params['NAME_REGION_FILE'] = config['Input'].get('NAME_REGION_FILE',
fallback='region')
params['NAME_INITFILE'] = config['Input'].get('NAME_INITFILE',
fallback='init')
params['USE_INITFILE'] = config['Input'].getboolean('USE_INITFILE',
fallback=False)
params['CREATE_INITFILE'] = config['Input'].getboolean('CREATE_INITFILE',
fallback=False)
params['NAME_VESSELS_FILE'] = config['Input'].get('NAME_VESSELS_FILE',
fallback='vessels')
params['CREATE_VESSELS_FILE'] = config['Input'].getboolean('CREATE_VESSELS_FILE',
fallback=True)
params['THRESHOLD'] = config['Input'].getfloat('THRESHOLD',
fallback=0.00001)
params['CHECK_CONV_FIRST_AT_ITER'] = config['Input'].getfloat('CHECK_CONV_FIRST_AT_ITER',
fallback=1)
params['CHECK_CONV_AT_EVERY_N_ITER'] = config['Input'].getfloat('CHECK_CONV_AT_EVERY_N_ITER',
fallback=1)
# Get values from section 'MRI'.
mri_case = config['MRI'].get('CASE', fallback='')
params['MRI_DATA_CASE'] = mri_case.split('_')[0]
if params['MRI_DATA_CASE'] != '':
mri_folder = glob.glob(params['MRI_DATA_CASE'] + '*/')
if len(mri_folder) == 0:
print('* ERROR: Folder for case', params['MRI_DATA_CASE'], 'does not exist.')
print('Aborting.')
exit()
params['MRI_DATA_FOLDER'] = mri_folder[0]
else:
params['MRI_DATA_FOLDER'] = ''
params['USE_VESSELS_SEGMENTATION'] = config['MRI'].getboolean('USE_VESSELS_SEGMENTATION',
fallback=False)
VARIABLES_VESSELS = config['MRI'].get('VARIABLES_VESSELS', fallback=list())
if len(VARIABLES_VESSELS) > 0:
params['VARIABLES_VESSELS'] = list(VARIABLES_VESSELS.split(' '))
else:
params['VARIABLES_VESSELS'] = VARIABLES_VESSELS
VALUES_VESSELS = config['MRI'].get('VALUES_VESSELS', fallback=list())
if len(VALUES_VESSELS) > 0:
params['VALUES_VESSELS'] = list(map(float, VALUES_VESSELS.split(' ')))
else:
params['VALUES_VESSELS'] = VALUES_VESSELS
VALUES_NON_VESSELS = config['MRI'].get('VALUES_NON_VESSELS', fallback=list())
    if len(VALUES_NON_VESSELS) > 0:
params['VALUES_NON_VESSELS'] = list(map(float, VALUES_NON_VESSELS.split(' ')))
else:
params['VALUES_NON_VESSELS'] = VALUES_NON_VESSELS
params['VESSELS_DEPTH'] = config['MRI'].getint('DEPTH', fallback=1)
# Get values from section 'Brain'.
brain = dict(config.items('Brain'))
for key in brain:
brain[key] = float(brain[key])
params['BRAIN'] = brain
if params['USE_VESSELS_SEGMENTATION'] == True:
vessels = dict(config.items('Brain'))
for key in vessels:
vessels[key] = float(vessels[key])
params['VESSELS'] = vessels
non_vessels = dict(config.items('Brain'))
for key in non_vessels:
non_vessels[key] = float(non_vessels[key])
params['NON_VESSELS'] = non_vessels
# Get values from section 'Tumor'.
tumor = dict(config.items('Tumor'))
for key in tumor:
tumor[key] = float(tumor[key])
params['TUMOR'] = tumor
# Get values from section 'Parameters'.
parameters = dict(config.items('Parameters'))
for key in parameters:
parameters[key] = float(parameters[key])
try:
parameters['DIAMETER'] = 2.0 * parameters['RADIUS']
except KeyError:
pass
params['PARAMETERS'] = parameters
# PyMC section.
try:
params['ITERATIONS'] = config['PyMC'].getint('ITERATIONS', fallback=5)
params['BURNS'] = config['PyMC'].getint('BURNS', fallback=1)
params['T_NORMAL'] = config['PyMC'].getfloat('T_NORMAL', fallback=-1.0)
params['T_TUMOR'] = config['PyMC'].getfloat('T_TUMOR', fallback=-1.0)
params['T_VESSEL'] = config['PyMC'].getfloat('T_VESSEL', fallback=-1.0)
except KeyError:
params['T_NORMAL'] = -1.0
params['T_TUMOR'] = -1.0
params['T_VESSEL'] = -1.0
print('Done.')
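# Sketch of a minimal config file (not part of the original module) that
# parse_config_file() will accept. All numeric values are placeholders chosen
# only to exercise the parser; they are not calibrated tissue properties, and
# the later checks (executable, MRI folders) are independent of this sketch.
EXAMPLE_CONFIG = """
[Dimension]
SPACE_DIM = 3

[Geometry]
COORD_NODE_FIRST = 0.0x0.0x0.0
COORD_NODE_LAST = 0.1x0.1x0.05
N_NODES = 16x16x8

[Time]
START_TIME = 0
END_TIME = 1
N_TIMESTEPS = 0

[Output]
N_SNAPSHOTS = 1

[Input]
USE_INITFILE = False
CREATE_INITFILE = False

[MRI]
CASE =

[Brain]
RHO = 1040.0
C = 3650.0
LAMBDA = 0.55
RHO_BLOOD = 1052.0
C_BLOOD = 3800.0
OMEGA = 0.004
T = 37.0
Q = 25000.0
T_BLOOD = 37.0

[Tumor]
RHO = 1040.0
C = 3650.0
LAMBDA = 0.55
RHO_BLOOD = 1052.0
C_BLOOD = 3800.0
OMEGA = 0.008
T = 37.0
Q = 25000.0
T_BLOOD = 37.0

[Parameters]
H = 10.0
EPSILON = 0.98
DEPTH = 0.02
RADIUS = 0.005
"""

def _example_parse_configfile(name='example.ini'):
    """Write the sketch above to disk and run parse_config_file() on it."""
    with open(name, 'w') as configfile:
        configfile.write(EXAMPLE_CONFIG)
    params = {'NAME_CONFIGFILE': name}
    parse_config_file(params)
    return params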
def check_variables(params):
print('Checking variables.')
# Check if dimension makes sense and
# some functions and variables only work for dimension 1, 2 or 3.
SPACE_DIM = params['SPACE_DIM']
if SPACE_DIM != 3:
print('* ERROR: SPACE_DIM is {0}.'.format(SPACE_DIM))
print(' SPACE_DIM must be 3.')
print('Aborting.')
exit()
# Check if there are enough coordinates for first node.
DIM_COORD_NODE_FIRST = len(params['COORD_NODE_FIRST'])
if DIM_COORD_NODE_FIRST != SPACE_DIM:
print('* ERROR: Dimension of COORD_NODE_FIRST has to be {0}.'.format(SPACE_DIM))
print(' Dimension of COORD_NODE_FIRST is {0}.'.format(DIM_COORD_NODE_FIRST))
print('Aborting.')
exit()
# Check if there are enough coordinates for last node.
DIM_COORD_NODE_LAST = len(params['COORD_NODE_LAST'])
if DIM_COORD_NODE_LAST != SPACE_DIM:
print('* ERROR: Dimension of COORD_NODE_LAST has to be {0}.'.format(SPACE_DIM))
print(' Dimension of COORD_NODE_LAST is {0}.'.format(DIM_COORD_NODE_LAST))
print('Aborting.')
exit()
# Check if there are enough number of nodes.
DIM_N_NODES = len(params['N_NODES'])
if DIM_N_NODES != SPACE_DIM:
print('* ERROR: Dimension of N_NODES has to be {0}.'.format(SPACE_DIM))
print(' Dimension of N_NODES is {0}.'.format(DIM_N_NODES))
print('Aborting.')
exit()
# Check if END_TIME is after START_TIME.
START_TIME = params['START_TIME']
END_TIME = params['END_TIME']
if END_TIME < START_TIME:
print('* ERROR: END_TIME is smaller than START_TIME.')
print(' END_TIME must be greater than START_TIME.')
print('Aborting.')
exit()
# Check if threshold is positive.
if params['THRESHOLD'] < 0.0:
print('* WARNING: THRESHOLD < 0.0.')
params['THRESHOLD'] = abs(params['THRESHOLD'])
print(' THRESHOLD was set to abs(THRESHOLD).')
# Check if combinations of USE_INITFILE and CREATE_INITFILE makes sense.
if params['USE_INITFILE'] == True and params['CREATE_INITFILE'] == False:
if os.path.isfile(params['NAME_INITFILE'] + '.nc') == False:
print('* ERROR: USE_INITFILE = True and CREATE_INITFILE = False,',
'but', params['NAME_INITFILE'] + '.nc', 'does not exist.')
print('Aborting.')
exit()
if params['USE_INITFILE'] == False and params['CREATE_INITFILE'] == True:
print('* WARNING: CREATE_INITFILE = True, but USE_INITFILE = False.')
# Check CHECK_CONV parameters.
if params['CHECK_CONV_FIRST_AT_ITER'] < 0:
print('* WARNING: CHECK_CONV_FIRST_AT_ITER < 0.')
params['CHECK_CONV_FIRST_AT_ITER'] = abs(params['CHECK_CONV_FIRST_AT_ITER'])
print(' CHECK_CONV_FIRST_AT_ITER set to',
'abs(CHECK_CONV_FIRST_AT_ITER).')
if params['CHECK_CONV_AT_EVERY_N_ITER'] < 0:
print('* WARNING: CHECK_CONV_AT_EVERY_N_ITER < 0.')
params['CHECK_CONV_AT_EVERY_N_ITER'] = abs(params['CHECK_CONV_AT_EVERY_N_ITER'])
print(' CHECK_CONV_AT_EVERY_N_ITER set to',
'abs(CHECK_CONV_AT_EVERY_N_ITER).')
if params['CHECK_CONV_FIRST_AT_ITER'] < 1:
print('* WARNING: CHECK_CONV_FIRST_AT_ITER < 1.')
        print('          CHECK_CONV_FIRST_AT_ITER is assumed to be a ratio.')
if params['CHECK_CONV_AT_EVERY_N_ITER'] < 1:
print('* WARNING: CHECK_CONV_AT_EVERY_N_ITER < 1.')
        print('          CHECK_CONV_AT_EVERY_N_ITER is assumed to be a ratio.')
# Check if executable exists.
NAME_EXECUTABLE = os.path.basename(os.getcwd()) \
+ str(params['SPACE_DIM']) + 'D'
if os.path.isfile(NAME_EXECUTABLE) == False:
print(NAME_EXECUTABLE, 'does not exist.')
print('Aborting.')
exit()
params['NAME_EXECUTABLE'] = NAME_EXECUTABLE
# Check if MRI data exist.
# Check if path to folder (i.e. results) is provided,
# and if folder does contain fiducials.csv.
folder = params['MRI_DATA_FOLDER']
if folder != '':
if os.path.isdir(folder) == True:
tmp1 = os.path.join(folder, 'fiducials.csv')
tmp2 = os.path.join(folder, 'OpenIGTLink.fcsv')
if os.path.isfile(tmp1) != True and os.path.isfile(tmp2) != True:
print('* ERROR:', folder, 'does not contain fiducials.csv',
'or OpenIGTLink.fcsv.')
print('Aborting.')
exit()
else:
print('* ERROR:', folder, 'does not exist.')
print('Aborting.')
exit()
if params['USE_VESSELS_SEGMENTATION'] == True:
vessels_seg_path = os.path.join(folder, 'vessels_segmentation.csv')
if os.path.isfile(vessels_seg_path) != True:
print('* ERROR:', vessels_seg_path, 'does not exist.')
print('Aborting.')
exit()
# Check if file for vessels exist if none shall be created.
if params['USE_VESSELS_SEGMENTATION'] == True and params['CREATE_VESSELS_FILE'] == False:
if os.path.isfile(params['NAME_VESSELS_FILE'] + '.nc') == False:
print('* ERROR: File for vessels does not exist.')
print('Aborting.')
exit()
# Check if names specified in VARIABLES for vessels are
# variables known in ScaFES.
names = ['rho', 'c', 'lambda', 'rho_blood', 'c_blood', 'omega', 'T_blood', \
'q', 'T']
for var in params['VARIABLES_VESSELS']:
if var not in names:
print('* ERROR:', var, 'in VARIABLES_VESSELS not known.')
print('Aborting.')
exit()
if params['VESSELS_DEPTH'] > params['N_NODES'][2]:
print('* WARNING: Depth for vessel segmentation is bigger than nNodes_2.')
print(' VESSELS_DEPTH was set to {0}.'.format(params['N_NODES'][2]))
params['VESSELS_DEPTH'] = params['N_NODES'][2]
if len(params['VARIABLES_VESSELS']) != len(params['VALUES_VESSELS']):
print('* ERROR: length of VARIABLES_VESSELS does not match length of',
'VALUES_VESSELS.')
print('Aborting.')
exit()
if len(params['VARIABLES_VESSELS']) != len(params['VALUES_NON_VESSELS']):
print('* ERROR: length of VARIABLES_VESSELS does not match length of',
'VALUES_NON_VESSELS.')
print('Aborting.')
exit()
print('Done.')
def calc_delta_time_helper(params, material, parameters):
RHO = material['RHO']
C = material['C']
LAMBDA = material['LAMBDA']
RHO_BLOOD = material['RHO_BLOOD']
C_BLOOD = material['C_BLOOD']
OMEGA = material['OMEGA']
T_I = material['T']
Q = material['Q']
H = parameters['H']
EPSILON = parameters['EPSILON']
GRIDSIZE = params['GRIDSIZE']
SPACE_DIM = params['SPACE_DIM']
SIGMA = 5.670367e-8
T_MAX = T_I + Q/(RHO_BLOOD*C_BLOOD*OMEGA)
# Pennes Bioheat Equation.
tmp = 0
for dim in range(0, SPACE_DIM):
tmp += (2.0/(GRIDSIZE[dim]*GRIDSIZE[dim])) * (LAMBDA/(RHO*C))
# Inner nodes.
tmp += ((RHO_BLOOD*C_BLOOD)/(RHO*C)) * OMEGA
if tmp != 0:
DELTA_TIME = 1.0/tmp
else:
# If time is infinity,
# it will later not be considered for min(delta_time).
DELTA_TIME = float('Inf')
# Border with convection and thermal radiation:
# Convection.
tmp += 2.0*(1.0/GRIDSIZE[SPACE_DIM-1]) * (H/(RHO*C))
# Thermal radiation.
tmp += 2.0 * (1.0/GRIDSIZE[SPACE_DIM-1]) \
* ((EPSILON*SIGMA)/(RHO*C)) \
* ((T_MAX + 273.15)**3)
if tmp != 0:
DELTA_TIME_BC = 1.0/tmp
else:
# If time is infinity,
# it will later not be considered for min(delta_time).
DELTA_TIME_BC = float('Inf')
return DELTA_TIME, DELTA_TIME_BC
def calc_delta_time_inner_nodes(params, material, parameters):
tmp,_ = calc_delta_time_helper(params, material, parameters)
return tmp
def calc_delta_time_boundary_condition(params, material, parameters):
_,tmp = calc_delta_time_helper(params, material, parameters)
return tmp
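# Illustrative sketch (not part of the original module): evaluate the explicit
# time-step bounds computed above for one set of placeholder tissue values on
# a 1 mm grid. The numbers are assumptions for demonstration only.
def _example_stability_bounds():
    params = {'SPACE_DIM': 3, 'GRIDSIZE': [0.001, 0.001, 0.001]}
    material = {'RHO': 1040.0, 'C': 3650.0, 'LAMBDA': 0.55,
                'RHO_BLOOD': 1052.0, 'C_BLOOD': 3800.0, 'OMEGA': 0.004,
                'T': 37.0, 'Q': 25000.0}
    parameters = {'H': 10.0, 'EPSILON': 0.98}
    dt_inner = calc_delta_time_inner_nodes(params, material, parameters)
    dt_boundary = calc_delta_time_boundary_condition(params, material, parameters)
    # Any DELTA_TIME used by the solver must stay below both bounds.
    print('inner-node bound: {0:.4e} s'.format(dt_inner))
    print('boundary bound:   {0:.4e} s'.format(dt_boundary))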
def calc_variables(params):
print('Calculating variables.')
# Calculate gridsize in each dimension.
GRIDSIZE = []
for dim in range(0, params['SPACE_DIM']):
GRIDSIZE.append((params['COORD_NODE_LAST'][dim] \
- params['COORD_NODE_FIRST'][dim])
/ (params['N_NODES'][dim]-1))
params['GRIDSIZE'] = GRIDSIZE
# Create parameter collection for vessels.
if params['USE_VESSELS_SEGMENTATION'] == True:
VARIABLES_VESSELS = params['VARIABLES_VESSELS']
for NAME_VARIABLE in params['VARIABLES_VESSELS']:
if NAME_VARIABLE.upper() in params['VESSELS'].keys():
params['VESSELS'][NAME_VARIABLE.upper()] = params['VALUES_VESSELS'][VARIABLES_VESSELS.index(NAME_VARIABLE)]
if NAME_VARIABLE.upper() in params['NON_VESSELS'].keys():
params['NON_VESSELS'][NAME_VARIABLE.upper()] = params['VALUES_NON_VESSELS'][VARIABLES_VESSELS.index(NAME_VARIABLE)]
# Calculate delta time.
if params['N_TIMESTEPS'] < 1:
print('* WARNING: N_TIMESTEPS not specified.')
print(' Calculate N_TIMESTEPS from stability criterion.')
BRAIN = calc_delta_time_inner_nodes(params, params['BRAIN'],
params['PARAMETERS'])
BRAIN_BC = calc_delta_time_boundary_condition(params, params['BRAIN'],
params['PARAMETERS'])
TUMOR = calc_delta_time_inner_nodes(params, params['TUMOR'],
params['PARAMETERS'])
TUMOR_BC = calc_delta_time_boundary_condition(params, params['TUMOR'],
params['PARAMETERS'])
if params['USE_VESSELS_SEGMENTATION'] == True:
VESSELS = calc_delta_time_inner_nodes(params, params['VESSELS'],
params['PARAMETERS'])
VESSELS_BC = calc_delta_time_boundary_condition(params,
params['VESSELS'],
params['PARAMETERS'])
NON_VESSELS = calc_delta_time_inner_nodes(params, params['NON_VESSELS'],
params['PARAMETERS'])
NON_VESSELS_BC = calc_delta_time_boundary_condition(params,
params['NON_VESSELS'],
params['PARAMETERS'])
else:
VESSELS = float('Inf')
VESSELS_BC = float('Inf')
NON_VESSELS = float('Inf')
NON_VESSELS_BC = float('Inf')
# Get minimum for calculation of timesteps.
DELTA_TIME_MIN = min((BRAIN, BRAIN_BC, TUMOR, TUMOR_BC,
VESSELS, VESSELS_BC, NON_VESSELS, NON_VESSELS_BC))
# Add five percent for safety reasons.
params['N_TIMESTEPS'] = int(((params['END_TIME'] \
- params['START_TIME']) \
/ DELTA_TIME_MIN) * 1.05) + 1
# Final calculation for delta time.
params['DELTA_TIME'] = (params['END_TIME'] - params['START_TIME']) \
/ params['N_TIMESTEPS']
# Calculate location of tumor center.
TUMOR_CENTER = []
TUMOR_CENTER.append((params['COORD_NODE_LAST'][0] \
+ params['COORD_NODE_FIRST'][0]) / 2.0)
TUMOR_CENTER.append((params['COORD_NODE_LAST'][1] \
+ params['COORD_NODE_FIRST'][1]) / 2.0)
TUMOR_CENTER.append(params['COORD_NODE_LAST'][2]
- params['PARAMETERS']['DEPTH'])
params['TUMOR_CENTER'] = TUMOR_CENTER
# Calc CHECK_CONV parameters if they are a ratio.
if params['CHECK_CONV_FIRST_AT_ITER'] < 1:
params['CHECK_CONV_FIRST_AT_ITER'] = params['CHECK_CONV_FIRST_AT_ITER'] \
* params['N_TIMESTEPS']
params['CHECK_CONV_FIRST_AT_ITER'] = int(params['CHECK_CONV_FIRST_AT_ITER'])
if params['CHECK_CONV_AT_EVERY_N_ITER'] < 1:
params['CHECK_CONV_AT_EVERY_N_ITER'] = params['CHECK_CONV_AT_EVERY_N_ITER'] \
* params['N_TIMESTEPS']
params['CHECK_CONV_AT_EVERY_N_ITER'] = int(params['CHECK_CONV_AT_EVERY_N_ITER'])
# Check if number of snapshots is possible.
if params['N_SNAPSHOTS'] > params['N_TIMESTEPS']:
print('* WARNING: N_SNAPSHOTS was bigger than N_TIMESTEPS.')
params['N_SNAPSHOTS'] = params['N_TIMESTEPS']
print(' N_SNAPSHOTS was set to N_TIMESTEPS.')
print('Done.')
def check_stability(params):
print('Checking stability.')
BRAIN = calc_delta_time_inner_nodes(params, params['BRAIN'],
params['PARAMETERS'])
BRAIN_BC = calc_delta_time_boundary_condition(params, params['BRAIN'],
params['PARAMETERS'])
TUMOR = calc_delta_time_inner_nodes(params, params['TUMOR'],
params['PARAMETERS'])
TUMOR_BC = calc_delta_time_boundary_condition(params, params['TUMOR'],
params['PARAMETERS'])
if params['USE_VESSELS_SEGMENTATION'] == True:
VESSELS = calc_delta_time_inner_nodes(params, params['VESSELS'],
params['PARAMETERS'])
VESSELS_BC = calc_delta_time_boundary_condition(params,
params['VESSELS'],
params['PARAMETERS'])
NON_VESSELS = calc_delta_time_inner_nodes(params, params['NON_VESSELS'],
params['PARAMETERS'])
NON_VESSELS_BC = calc_delta_time_boundary_condition(params,
params['NON_VESSELS'],
params['PARAMETERS'])
else:
VESSELS = float('Inf')
VESSELS_BC = float('Inf')
NON_VESSELS = float('Inf')
NON_VESSELS_BC = float('Inf')
# Get minimum for calculation of timesteps.
DELTA_TIME_MIN = min((BRAIN, BRAIN_BC, TUMOR, TUMOR_BC,
VESSELS, VESSELS_BC, NON_VESSELS, NON_VESSELS_BC))
DELTA_TIME = params['DELTA_TIME']
# Abort simulation if stability is not fulfilled.
if DELTA_TIME > BRAIN:
print('* ERROR: Stability not fulfilled in healthy brain region.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
BRAIN))
print('Aborting.')
exit()
if DELTA_TIME > TUMOR:
print('* ERROR: Stability not fulfilled in tumor region.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
TUMOR))
print('Aborting.')
exit()
if DELTA_TIME > BRAIN_BC:
        print('* ERROR: Stability not fulfilled in healthy brain region at \
border with convection and thermal radiation.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
BRAIN_BC))
print('Aborting.')
exit()
if DELTA_TIME > TUMOR_BC:
print('* ERROR: Stability not fulfilled in tumor region at border \
with convection and thermal radiation.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
TUMOR_BC))
print('Aborting.')
exit()
if params['USE_VESSELS_SEGMENTATION'] == True:
if DELTA_TIME > VESSELS:
print('* ERROR: Stability not fulfilled in vessels region.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
VESSELS))
print('Aborting.')
exit()
if DELTA_TIME > NON_VESSELS:
print('* ERROR: Stability not fulfilled in non-vessels region.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
NON_VESSELS))
print('Aborting.')
exit()
if DELTA_TIME > VESSELS_BC:
print('* ERROR: Stability not fulfilled in vessels region at \
border with convection and thermal radiation.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
VESSELS_BC))
print('Aborting.')
exit()
if DELTA_TIME > NON_VESSELS_BC:
print('* ERROR: Stability not fulfilled in non-vessels region at \
border with convection and thermal radiation.')
print(' DELTA_TIME = {0}, but has to be DELTA_TIME < {1}.'.format(DELTA_TIME,
NON_VESSELS_BC))
print('Aborting.')
exit()
print('Done.')
def create_region_array(params, nc_file, BRAIN_VALUE, TUMOR_VALUE,
NAME_VARIABLE):
RADIUS = params['PARAMETERS']['DIAMETER']/2
# Get file/grid dimensions.
dim0, dim1, dim2 = params['N_NODES']
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
# Get tumor center location.
TUMOR_CENTER = params['TUMOR_CENTER']
num_elem = dim0 * dim1 * dim2
values_array = BRAIN_VALUE \
* np.ones(num_elem, dtype=int).reshape(dim2, dim1, dim0)
# Iterate through array.
for elem_z in range(0, values_array.shape[0]):
for elem_y in range(0, values_array.shape[1]):
for elem_x in range(0, values_array.shape[2]):
# Calculate location of current node.
x = (elem_x * params['GRIDSIZE'][0]) + COORD_NODE_FIRST[0]
y = (elem_y * params['GRIDSIZE'][1]) + COORD_NODE_FIRST[1]
z = (elem_z * params['GRIDSIZE'][2]) + COORD_NODE_FIRST[2]
# Calculate distance (squared) to tumor center.
distance = (x - TUMOR_CENTER[0]) * (x - TUMOR_CENTER[0])
distance += (y - TUMOR_CENTER[1]) * (y - TUMOR_CENTER[1])
distance += (z - TUMOR_CENTER[2]) * (z - TUMOR_CENTER[2])
# Check if current point is inside tumor.
# If yes, set value to tumor specific value
if distance <= RADIUS*RADIUS:
values_array[elem_z, elem_y, elem_x] = TUMOR_VALUE
# Create netCDF variable.
nNodes = []
nNodes.append('time')
for dim in range(len(values_array.shape), 0, -1):
nNodes.append('nNodes_' + str(dim-1))
init_values = nc_file.createVariable(NAME_VARIABLE, 'i', nNodes)
# Write NumPy Array to file.
init_values[0,] = values_array
def create_region_file(params):
filepath = params['NAME_REGION_FILE'] + '.nc'
SPACE_DIM = params['SPACE_DIM']
print('Creating {0}.'.format(filepath))
# Delete old region file.
if os.path.isfile(filepath) == True:
os.remove(filepath)
nc_file = nc.Dataset(filepath, 'w', format='NETCDF3_CLASSIC')
time = nc_file.createDimension('time')
for dim in range(0, SPACE_DIM):
nNodes = nc_file.createDimension('nNodes_' + str(dim),
params['N_NODES'][dim])
# 0 means brain, 1 means tumor.
create_region_array(params, nc_file, 0, 1, 'region')
nc_file.close()
print('Done.')
def write_values_to_file(nc_file, values_array, NAME_VARIABLE):
# Create netCDF variable.
nNodes = []
nNodes.append('time')
for dim in range(len(values_array.shape), 0, -1):
nNodes.append('nNodes_' + str(dim-1))
init_values = nc_file.createVariable(NAME_VARIABLE, 'f8', nNodes)
# Write NumPy Array to file.
init_values[0,] = values_array
def create_vessels_array(params, surface):
print('Creating {0}.nc.'.format(params['NAME_VESSELS_FILE']))
vessels_small = read_vessels_segmentation(params)
dim0, dim1, dim2 = params['N_NODES']
num_elem = dim0 * dim1 * dim2
# Special Case: No trepanation domain is set,
# but vessel segmentation is read.
if np.count_nonzero(surface) == 0:
print('* WARNING: No trepanation area is set, but vessel segmentation is read.')
print(' Vessels can only be created in trepanation area.')
print(' File will contain no vessels.')
surface[-1,:,:] = 0
# Normal case: trepanation domain is set.
# - 1 = grid node outside of trepanation domain
# 0 = grid node inside trepanation domain, no vessel
# 1 = grid node is vessel inside trepanation domain
vessels_big = np.ones(dim1*dim0).reshape(dim1, dim0)
vessels_big *= -1.0
x_min = params['surface_cmin']
x_max = params['surface_cmax']
y_min = params['surface_rmin']
y_max = params['surface_rmax']
depth = params['VESSELS_DEPTH']
surface = surface[-1,:,:]
vessels_tmp = np.zeros(dim1*dim0).reshape(dim1, dim0)
vessels_tmp[y_min:y_max+1,x_min:x_max+1] = vessels_small[:,:]
vessels_big = np.where(surface == 1, vessels_tmp, vessels_big)
vessels_big = np.repeat(vessels_big[np.newaxis,:,:], depth, axis=0)
vessels = np.ones(dim2*dim1*dim0).reshape(dim2, dim1, dim0)
vessels *= -1.0
vessels[-depth:,:,:] = vessels_big
# Create vessels file.
filepath = params['NAME_VESSELS_FILE'] + '.nc'
nc_file = nc.Dataset(filepath, 'w', format='NETCDF3_CLASSIC')
time = nc_file.createDimension('time')
for dim in range(0, params['SPACE_DIM']):
nNodes = nc_file.createDimension('nNodes_' + str(dim),
params['N_NODES'][dim])
write_values_to_file(nc_file, vessels, 'vessels')
nc_file.close()
    print('Done.')
return vessels
def create_init_array(params, nc_file, region, BRAIN_VALUE, TUMOR_VALUE,
NAME_VARIABLE, vessels, surface):
dim0, dim1, dim2 = params['N_NODES']
num_elem = dim0 * dim1 * dim2
values_array = BRAIN_VALUE * np.ones(num_elem).reshape(dim2, dim1, dim0)
if params['USE_VESSELS_SEGMENTATION'] == True:
VARIABLES_VESSELS = params['VARIABLES_VESSELS']
if NAME_VARIABLE in VARIABLES_VESSELS:
VALUE_VESSEL = params['VALUES_VESSELS'][VARIABLES_VESSELS.index(NAME_VARIABLE)]
VALUE_NON_VESSEL = params['VALUES_NON_VESSELS'][VARIABLES_VESSELS.index(NAME_VARIABLE)]
values_array = np.where(vessels == 1, VALUE_VESSEL, values_array)
values_array = np.where(vessels == 0, VALUE_NON_VESSEL, values_array)
values_array = np.where(region == 1, TUMOR_VALUE, values_array)
write_values_to_file(nc_file, values_array, NAME_VARIABLE)
def create_surface_array(params, nc_file, BRAIN_VALUE, TUMOR_VALUE,
NAME_VARIABLE):
RADIUS = (params['PARAMETERS']['DIAMETER'] \
* params['PARAMETERS']['HOLE_FACTOR'])/2
# Get file/grid dimensions.
dim0, dim1, dim2 = params['N_NODES']
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
# Get tumor center location.
TUMOR_CENTER = params['TUMOR_CENTER']
# Resize array.
num_elem = dim0 * dim1 * dim2
values_array = BRAIN_VALUE \
* np.ones(num_elem, dtype=int).reshape(dim2, dim1, dim0)
# Iterate through array.
for elem_y in range(0, values_array.shape[1]):
for elem_x in range(0, values_array.shape[2]):
# Calculate location of current node.
x = (elem_x * params['GRIDSIZE'][0]) + COORD_NODE_FIRST[0]
y = (elem_y * params['GRIDSIZE'][1]) + COORD_NODE_FIRST[1]
# Calculate distance (squared) to tumor center.
distance = (x - TUMOR_CENTER[0]) * (x - TUMOR_CENTER[0])
distance += (y - TUMOR_CENTER[1]) * (y - TUMOR_CENTER[1])
# Check if current point is inside tumor.
# If yes, set value to tumor specific value
if distance <= RADIUS*RADIUS:
values_array[-1, elem_y, elem_x] = TUMOR_VALUE
# Create netCDF variable.
nNodes = []
nNodes.append('time')
for dim in range(len(values_array.shape), 0, -1):
nNodes.append('nNodes_' + str(dim-1))
init_values = nc_file.createVariable(NAME_VARIABLE, 'i', nNodes)
# Write NumPy Array to file.
init_values[0,] = values_array
# Bounding box for trepanation domain.
rows = np.any(values_array[-1,:,:], axis=1)
    cols = np.any(values_array[-1,:,:], axis=0)
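    # NOTE: the original file is truncated here. A plausible completion
    # (hedged guess, inferred from how surface_rmin/rmax/cmin/cmax are read
    # back in create_vessels_array above) is to store the bounding box:
    params['surface_rmin'], params['surface_rmax'] = np.where(rows)[0][[0, -1]]
    params['surface_cmin'], params['surface_cmax'] = np.where(cols)[0][[0, -1]]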
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import proxmin
from proxmin.utils import Traceback
from functools import partial
import logging
logging.basicConfig()
logger = logging.getLogger("proxmin")
logger.setLevel(logging.INFO)
def generateComponent(size, pos, dim):
"""Creates 2D Gaussian component"""
x = np.arange(dim)
c = np.exp(
-((x - pos[0])[:, None] ** 2 + (x - pos[1])[None, :] ** 2) / (2 * size ** 2)
)
return c.flatten() / c.sum()
def generateAmplitude(flux, dim):
"""Creates normalized SED"""
return flux * np.random.dirichlet(np.ones(dim))
def add_noise(Y, sky):
"""Adds Poisson noise to Y"""
Y += sky[:, None]
Y = np.random.poisson(Y).astype("float64")
Y -= sky[:, None]
return Y
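# Hedged usage sketch (not part of the original example; the sizes, fluxes and
# positions below are invented for illustration): assemble a tiny noisy
# multi-band scene Y = A @ S from the helpers above.
def _demo_mock_scene(n_bands=5, n_comp=2, dim=20, seed=0):
    np.random.seed(seed)
    S = np.array([generateComponent(2.0, np.random.uniform(5, 15, 2), dim)
                  for _ in range(n_comp)])              # (n_comp, dim*dim)
    A = np.array([generateAmplitude(100.0, n_bands)
                  for _ in range(n_comp)]).T            # (n_bands, n_comp)
    Y = add_noise(A @ S, sky=np.ones(n_bands))          # Poisson-noisy data
    return Y, A, S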
def plotLoss(trace, Y, W, ax=None, label=None, plot_max=None):
# convergence plot from traceback
loss = []
    for At, St in trace.trace:
loss.append(proxmin.nmf.log_likelihood(At, St, Y=Y, W=W))
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(loss, label=label)
def plotData(Y, A, S):
c, n = Y.shape
    nx = ny = int(np.sqrt(n))
# reasonable mapping from 5 observed bands to rgb channels
filter_weights = np.zeros((3, c))
filter_weights[0, 4] = 1
filter_weights[0, 3] = 0.667
filter_weights[1, 3] = 0.333
filter_weights[1, 2] = 1
filter_weights[1, 1] = 0.333
filter_weights[2, 1] = 0.667
filter_weights[2, 0] = 1
filter_weights /= 1.667
rgb = np.dot(filter_weights, Y)
try:
from astropy.visualization import make_lupton_rgb
Q = 1
stretch = Y.max() / 2
fig = plt.figure(figsize=(9, 3))
ax0 = fig.add_axes([0, 0, 0.33, 1], frameon=False)
ax1 = fig.add_axes([0.333, 0, 0.33, 1], frameon=False)
ax2 = fig.add_axes([0.666, 0, 0.33, 1], frameon=False)
ax0.imshow(
make_lupton_rgb(
*np.split(rgb, 3, axis=0)[::-1], Q=Q, stretch=stretch
).reshape(ny, nx, 3)
)
        best_Y = np.dot(A, S)
##### For testing the original keras model, which is saved in .hdf5 format.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import numpy as np
import h5py
import scipy.io
import pandas as pd
import librosa
import soundfile as sound
import keras
import tensorflow
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import sys
sys.path.append("..")
from utils import *
from funcs import *
import tensorflow as tf
# from tensorflow import ConfigProto
# from tensorflow import InteractiveSession
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.InteractiveSession(config=config)
val_csv = 'data_2020/evaluation_setup/fold1_evaluate.csv'
feat_path = 'features/logmel128_scaled_d_dd/'
model_path = '../pretrained_models/smallfcnn-model-0.9618.hdf5'
num_freq_bin = 128
num_classes = 3
data_val, y_val = load_data_2020(feat_path, val_csv, num_freq_bin, 'logmel')
y_val_onehot = keras.utils.to_categorical(y_val, num_classes)
print(data_val.shape)
print(y_val.shape)
best_model = keras.models.load_model(model_path)
preds = best_model.predict(data_val)
y_pred_val = np.argmax(preds,axis=1)
over_loss = log_loss(y_val_onehot, preds)
overall_acc = np.sum(y_pred_val==y_val) / data_val.shape[0]
print(y_val_onehot.shape, preds.shape)
np.set_printoptions(precision=3)
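# The original file is truncated here. A plausible continuation (hedged; the
# exact reporting format is an assumption) summarizes the evaluation, using
# only quantities computed above:
print("Overall log loss: {:.4f}".format(over_loss))
print("Overall accuracy: {:.4f}".format(overall_acc))
print(confusion_matrix(y_val, y_pred_val))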
import numpy as np
import cvxpy as cp
import time
def rho(x):
return 1/(2*x) * (np.sqrt((x+1)**2 - 4 * np.exp(1/(2*x)-1/2)*x**(3/2)) + x - 1)
def bisection_algorithm(f, a, b, y, margin=.00001,direction="right"):
count = 0
while count <= 15:
c = (a + b) / 2
y_c = f(c)
if abs(y_c - y) < margin:
return c
if direction=="right":
if y < y_c:
b = c
else:
a = c
else:
if y < y_c:
a = c
else:
b = c
count+=1
p = float(direction=="right")
return p * b + (1 - p) * a
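# Hedged example (not in the original): the helper above solves f(c) ~= y for
# a monotone f on [a, b]; e.g. inverting x**2 = 2 on [0, 2].
def _demo_bisection():
    root = bisection_algorithm(lambda x: x * x, 0.0, 2.0, 2.0, margin=1e-3)
    return root  # approximately sqrt(2) ~= 1.414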
def linear_search_leftright(f, a, b, y, step=.01):
s=a
found=False
while s<= b and f(s) > y:
s+=step
if f(min(s,b))<=y:
found=True
return min(s,b), found
def linear_search_rightleft(f, a, b, y, step=.01):
s=b
found=False
while s>= a and f(s) > y:
s-=step
if f(max(s,a))<=y:
found=True
return max(s,a), found
def log_supermartingale_value_slow(v,wmax,b0,b1,A0,A1,A2):
# the argument of the exponential
mu = 1
alpha = 2 # also a free parameter that need be no less than 1
# Build the relevant matrices
I = np.identity(2)
J = np.array([1,1,1])
# TODO check if we need to use wmin below
Cv = np.array([[-1,wmax-1,wmax-1],[-v,-v,wmax-v]])
S = b0 + b1 * v
Q = A0 + A1 * v + A2 * v**2
gamma = cp.Variable(3)
Stilde = S + Cv @ gamma
    Sigma = np.linalg.inv(pow(mu,-2) * alpha * I + alpha*Q) # TODO Make this more efficient via Sherman-Morrison
cost = cp.quad_form(Stilde, Sigma)/2 + rho(alpha)*gamma.T @ J
prob = cp.Problem(cp.Minimize(cost),[0<=gamma])
prob.solve()
#print(f'gamma={gamma.value}')
return prob.value - np.log(np.sqrt(np.linalg.det(I + pow(mu,2)*Q))) #TODO use the matrix determinant lemma for efficiency
def log_supermartingale_value(v,wmax,b0,b1,A0,A1,A2):
    # This function is an efficient version of log_supermartingale_value_slow:
    # we avoid calling a solver and enumerate the KKT cases instead.
mu = 1
    alpha = 2 # also a free parameter that needs to be no less than 1. TODO change the name
# Build the relevant matrices
I = np.identity(2)
J = np.array([1,1])
    gammasols = [np.zeros(3)] # Adding the all-zeros feasible solution
# Building S and Q matrices from the sufficient statistics
S = b0 + b1 * v
Q = A0 + A1 * v + A2*v**2
Sigmainv = pow(mu,-2) * alpha * I + alpha*Q
    Sigma = np.linalg.inv(Sigmainv) # TODO consider Sherman-Morrison
Den = np.sqrt(np.linalg.det(I + pow(mu,2)*Q))
## Considering cases
# Case 1: gamma_1 = 0
gamma = np.zeros(3)
Cv = np.array([[wmax-1,wmax-1],
[-v,wmax-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (-rho(alpha) * Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[1]=tmp[0]
gamma[2]=tmp[1]
        # Check feasibility
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 2: gamma_2 = 0
gamma = np.zeros(3)
Cv = np.array([[-1,wmax-1],
[-v,wmax-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (-rho(alpha) * Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[0]=tmp[0]
gamma[2]=tmp[1]
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 3: gamma_3 = 0
gamma = np.zeros(3)
Cv = np.array([[-1,wmax-1],
[-v,-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (-rho(alpha) * Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[0]=tmp[0]
gamma[1]=tmp[1]
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 4: (gamma_1,gamma_2) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[wmax-1],
[wmax-v]])
adding=False
den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[2]=(-rho(alpha) - Cv.T @ (Sigma @ S))/den
if adding and gamma[2]>=0:
gammasols.append(gamma)
# Case 5: (gamma_1,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[wmax-1],
[-v]])
adding=False
den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[1]=(-rho(alpha) - Cv.T @ (Sigma @ S))/den
if adding and gamma[1]>=0:
gammasols.append(gamma)
# Case 6: (gamma_2,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[-1],
[-v]])
adding=False
den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[0]=(-rho(alpha) - Cv.T @ (Sigma @ S))/den
if adding and gamma[0]>=0:
gammasols.append(gamma)
## Checking the best solution
J = np.array([1,1,1])
Cv = np.array([[-1,wmax-1,wmax-1],
[-v,-v,wmax-v]])
optval = -1
#optgamma = [-1,-1,-1]
for gamma in gammasols:
Stilde = S + Cv @ gamma
arg = Stilde.T @ (Sigma @ Stilde)/2 + rho(alpha)*gamma.T @ J
val = arg - np.log(Den)
if optval<0 or optval>val:
optval=val
#optgamma = gamma
#print(f'gamma={optgamma}')
return optval
def log_supermartingale_value_1d(v,wmax,b0,b1,A0,A1,A2):
    # This function is an efficient 1-D analogue of log_supermartingale_value_slow:
    # we avoid calling a solver and enumerate the KKT cases instead.
mu = 1
    alpha = 2 # also a free parameter that needs to be no less than 1. TODO change the name
    gammasols = [np.zeros(3)] # Adding the all-zeros feasible solution
# Building S and Q matrices from the sufficient statistics
S = b0 + b1 * v
Q = A0 + A1 * v + A2*v**2
Sigmainv = pow(mu,-2) * alpha + alpha*Q
Sigma =1/Sigmainv
Den = np.sqrt(1 + pow(mu,2)*Q)
## Considering cases
# Case 1: (gamma_1,gamma_2) = (0,0)
gamma = np.zeros(3)
Cv = wmax-v
adding=False
den = Cv * Sigma * Cv
if den != 0:
adding=True
gamma[2]=(-rho(alpha) - Cv * Sigma * S)/den
if adding and gamma[2]>=0:
gammasols.append(gamma)
# Case 2: (gamma_1,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = -v
adding=False
den = Cv * Sigma * Cv
if den != 0:
adding=True
gamma[1]=(-rho(alpha) - Cv * Sigma * S)/den
if adding and gamma[1]>=0:
gammasols.append(gamma)
# Case 3: (gamma_2,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = -v
adding=False
den = Cv * Sigma * Cv
if den != 0:
adding=True
gamma[0]=(-rho(alpha) - Cv * Sigma * S)/den
if adding and gamma[0]>=0:
gammasols.append(gamma)
## Checking the best solution
J = np.array([1,1,1])
Cv = np.array([-v,-v,wmax-v])
optval = -1
for gamma in gammasols:
Stilde = S + Cv @ gamma
arg = Stilde * Sigma * Stilde/2 + rho(alpha)*gamma.T @ J
val = arg - np.log(Den)
if optval<0 or optval>val:
optval=val
return optval
def martingale_value_lowerbound(v,wmax,b0,b1,A0,A1,A2,t):
eps = 0.001
# Build the relevant matrices
I = np.identity(2)
J = np.array([1,1])
    gammasols = [np.zeros(3)] # Adding the all-zeros feasible solution
# Building S and Q matrices from the sufficient statistics
S = b0 + b1 * v
Q = A0 + A1 * v + A2*v**2
Sigmainv = eps * I + Q
    Sigma = np.linalg.inv(Sigmainv) # TODO consider Sherman-Morrison
Den = 1 #(np.exp(1)*t+np.exp(1))
## Considering cases
# Case 1: gamma_1 = 0
gamma = np.zeros(3)
Cv = np.array([[wmax-1,wmax-1],
[-v,wmax-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (-Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[1]=tmp[0]
gamma[2]=tmp[1]
        # Check feasibility
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 2: gamma_2 = 0
gamma = np.zeros(3)
Cv = np.array([[-1,wmax-1],
[-v,wmax-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (- Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[0]=tmp[0]
gamma[2]=tmp[1]
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 3: gamma_3 = 0
gamma = np.zeros(3)
Cv = np.array([[-1,wmax-1],
[-v,-v]])
if np.linalg.det(Cv)!=0:
tmp=np.linalg.inv(Cv) @ (-Sigmainv @ np.linalg.inv(Cv.T) @ J - S)
gamma[0]=tmp[0]
gamma[1]=tmp[1]
if gamma[0]>=0 and gamma[1]>=0 and gamma[2]>=0:
gammasols.append(gamma)
# Case 4: (gamma_1,gamma_2) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[wmax-1],
[wmax-v]])
adding=False
den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[2]=(-1 - Cv.T @ (Sigma @ S))/den
if adding and gamma[2]>=0:
gammasols.append(gamma)
# Case 5: (gamma_1,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[wmax-1],
[-v]])
    adding=False
    den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[1]=(-1 - Cv.T @ (Sigma @ S))/den
if adding and gamma[1]>=0:
gammasols.append(gamma)
# Case 6: (gamma_2,gamma_3) = (0,0)
gamma = np.zeros(3)
Cv = np.array([[-1],
[-v]])
adding=False
den = Cv.T @ (Sigma @ Cv)
if den != 0:
adding=True
gamma[0]=(-1 - Cv.T @ (Sigma @ S))/den
if adding and gamma[0]>=0:
gammasols.append(gamma)
## Checking the best solution
J = np.array([1,1,1])
Cv = np.array([[-1,wmax-1,wmax-1],
[-v,-v,wmax-v]])
optval = -1
for gamma in gammasols:
Stilde = S + Cv @ gamma
arg = min(20, Stilde.T @ (Sigma @ Stilde)/(4 * (4*np.log(2)-2)) + (1/2) * gamma.T @ J)
val = np.exp(arg)/Den
if optval<0 or optval>val:
optval=val
return optval
# def martingale_value_lowerbound_slow(v,wmax,b0,b1,A0,A1,A2,t):
# # the argument of the exponential
# eps = 0.001
# #Build the relevant matrices
# I = np.identity(2)
# J = np.array([1,1,1])
# #TODO check if we need to use wmin below
# Cv = np.array([[-1,wmax-1,wmax-1],[-v,-v,wmax-v]])
# S = b0 + b1 * v
# Q = A0 + A1 * v + A2 * v**2
# gamma = cp.Variable(3)
# Stilde = S + Cv @ gamma
# Sigma = np.linalg.inv(eps * I + Q) #TODO Make this more efficient via <NAME>
# cost = cp.quad_form(Stilde, Sigma)/(4*(4*np.log(2)-2)) + gamma.T @ J/2
# prob = cp.Problem(cp.Minimize(cost),[0<=gamma])
# prob.solve()
# return np.exp(prob.value) #Denominator equal to 1
def cs_via_supermartingale(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = np.zeros(2)
b1 = np.zeros(2)
A0 = np.zeros((2,2))
A1 = np.zeros((2,2))
A2 = np.zeros((2,2))
lb = np.zeros(T)
ub = np.zeros(T)
prev_lb = 0.
prev_ub = 1.
for t in range(T):
wt = data[t,0]
rt = data[t,1]
b0 += [wt-1,wt*rt]
b1 += [0,-1]
A0 += [[(wt-1)**2, (wt-1)*wt*rt],
[(wt-1) * wt * rt,(wt * rt)**2]]
A1 += [[0, -(wt-1)],
[-(wt-1),-2 * wt * rt]]
A2 += [[0,0],[0,1]]
logmartval = lambda v: log_supermartingale_value(v,wmax,b0,b1,A0,A1,A2)
# Root finding
stepsize = (prev_ub-prev_lb) * 0.01
tmpv_lb, found_lb = linear_search_leftright(logmartval,prev_lb,prev_ub,-np.log(alpha),step=stepsize)
tmpv_ub, found_ub = linear_search_rightleft(logmartval,prev_lb,prev_ub,-np.log(alpha),step=stepsize)
if not found_lb:
lb[t]=prev_lb
else:
margin_lb = (tmpv_lb - prev_lb)*.01
lb[t] = bisection_algorithm(logmartval,prev_lb,tmpv_lb,-np.log(alpha),margin=margin_lb,direction="left")
prev_lb = lb[t]
if not found_ub:
            ub[t]=prev_ub
else:
margin_ub = (prev_ub - tmpv_ub)*.01
ub[t] = bisection_algorithm(logmartval,tmpv_ub,prev_ub,-np.log(alpha),margin=margin_ub)
prev_ub = ub[t]
return lb, ub
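# Hedged usage sketch (not part of the original): run the confidence sequence
# on synthetic logged bandit data. The weights and rewards below are invented
# for illustration only; in practice w are importance weights and r lies in [0, 1].
def _demo_cs_via_supermartingale(T=200, alpha=0.05, seed=0):
    rng = np.random.RandomState(seed)
    w = rng.uniform(0.5, 2.0, size=T)                  # synthetic weights
    r = rng.binomial(1, 0.3, size=T).astype(float)     # bounded rewards
    data = np.column_stack([w, r])
    lb, ub = cs_via_supermartingale(data, wmin=0.5, wmax=2.0, alpha=alpha)
    return lb[-1], ub[-1]                              # final interval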
def cs_via_supermartingale_1d(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = 0
b1 = 0
A0 = 0
A1 = 0
A2 = 0
lb = np.zeros(T)
ub = np.zeros(T)
prev_lb = 0.
prev_ub = 1.
for t in range(T):
wt = data[t,0]
rt = data[t,1]
b0 += wt*rt
b1 += -1
A0 += (wt * rt)**2
A1 += -2 * wt * rt
A2 += 1
logmartval = lambda v: log_supermartingale_value_1d(v,wmax,b0,b1,A0,A1,A2)
# Root finding
stepsize = (prev_ub-prev_lb) * 0.01
tmpv_lb, found_lb = linear_search_leftright(logmartval,prev_lb,prev_ub,-np.log(alpha),step=stepsize)
tmpv_ub, found_ub = linear_search_rightleft(logmartval,prev_lb,prev_ub,-np.log(alpha),step=stepsize)
if not found_lb:
lb[t]=prev_lb
else:
margin_lb = (tmpv_lb - prev_lb)*.01
lb[t] = bisection_algorithm(logmartval,prev_lb,tmpv_lb,-np.log(alpha),margin=margin_lb,direction="left")
prev_lb = lb[t]
if not found_ub:
            ub[t]=prev_ub
else:
margin_ub = (prev_ub - tmpv_ub)*.01
ub[t] = bisection_algorithm(logmartval,tmpv_ub,prev_ub,-np.log(alpha),margin=margin_ub)
prev_ub = ub[t]
return lb, ub
def cs_via_supermartingale_debug(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = np.zeros(2)
b1 = np.zeros(2)
A0 = np.zeros((2,2))
A1 = np.zeros((2,2))
A2 = np.zeros((2,2))
w = data[:T,0]
r = data[:T,1]
b0 = np.array([np.sum(w)-T,np.dot(w,r)])
b1 = np.array([0,-T])
A0 = np.array([[np.dot(w-1, w-1), np.dot(w-1, w*r)],
[np.dot(w-1, w*r), np.dot(w*r, w*r)]])
A1 = np.array([[0, -np.sum(w)+T],
[-np.sum(w)+T,-2 * np.dot(w,r)]])
A2 = np.array([[0,0],[0,T]])
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
logmartval = lambda v: log_supermartingale_value(v,wmax,b0,b1,A0,A1,A2)
# Root finding
tmpv, found = linear_search_leftright(logmartval,0,1,-np.log(2 * alpha),step=0.001)
#print(tmpv)
#tmpv = bisection_algorithm(martval,0,1,1/(2 * alpha),margin=0.000001)
if not found:
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
#print('tmpv=0')
return np.array([0.] * T), np.array([1.] * T)
lb = bisection_algorithm(logmartval,0,tmpv,-np.log(alpha),margin=0.000001,direction="left")
ub = bisection_algorithm(logmartval,tmpv,1,-np.log(alpha),margin=0.000001)
#print(f'tmpv not eq 1: lb={lb}, up={ub}\n')
return np.array([float(lb)] * T), np.array([float(ub)] * T)
def cs_via_supermartingale_1d_debug(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = np.zeros(2)
b1 = np.zeros(2)
A0 = np.zeros((2,2))
A1 = np.zeros((2,2))
A2 = np.zeros((2,2))
w = data[:,0]
r = data[:,1]
b0 = np.dot(w,r)
b1 = -T
A0 = np.dot(w*r, w*r)
A1 = -2 * np.dot(w,r)
A2 = T
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
martval = lambda v: log_supermartingale_value_1d(v,wmax,b0,b1,A0,A1,A2)
# Root finding
    tmpv, found = linear_search_leftright(martval,0,1,-np.log(2 * alpha),step=0.001)
#print(tmpv)
#tmpv = bisection_algorithm(martval,0,1,1/(2 * alpha),margin=0.000001)
if not found:
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
#print('tmpv=0')
return np.array([0.] * T), np.array([1.] * T)
    lb = bisection_algorithm(martval,0,tmpv,-np.log(alpha),margin=0.000001,direction="left")
    ub = bisection_algorithm(martval,tmpv,1,-np.log(alpha),margin=0.000001)
#print(f'tmpv not eq 1: lb={lb}, up={ub}\n')
return np.array([float(lb)] * T), np.array([float(ub)] * T)
def cs_via_EWA(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = np.zeros(2)
b1 = np.zeros(2)
A0 = np.zeros((2,2))
A1 = np.zeros((2,2))
A2 = np.zeros((2,2))
lb = np.zeros(T)
ub = np.zeros(T)
for t in range(T):
wt = data[t,0]
rt = data[t,1]
b0 += [wt-1,wt*rt]
b1 += [0,-1]
A0 += [[(wt-1)**2, (wt-1)*wt*rt],
[(wt-1) * wt * rt,(wt * rt)**2]]
A1 += [[0, -(wt-1)],
[-(wt-1),-2 * wt * rt]]
A2 += [[0,0],[0,1]]
martval = lambda v: martingale_value_lowerbound(v,wmax,b0,b1,A0,A1,A2,t)
# Root finding
tmpv, found = linear_search_leftright(martval,0,1,1/(2 * alpha),step=0.001)
#tmpv = bisection_algorithm(martval,0,1,1/(2 * alpha),margin=0.000001)
if not found:
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
#print(f'tmpv={tmpv}')
lb[t]=0.
ub[t]=1.
continue
lb[t] = bisection_algorithm(martval,0,tmpv,1/alpha,margin=0.000001,direction="left")
ub[t] = bisection_algorithm(martval,tmpv,1,1/alpha,margin=0.000001)
return lb, ub
def cs_via_EWA_debug(data, wmin, wmax, alpha):
# Assume data is of type np.array((t,2)), where (w,r)=(wr[:,0],wr[:,1]).
# TODO we want to allow for different policies eventually
T = len(data)
# Initialize
b0 = np.zeros(2)
b1 = np.zeros(2)
A0 = np.zeros((2,2))
A1 = np.zeros((2,2))
A2 = np.zeros((2,2))
#t0 = time.time()
w = data[:,0]
r = data[:,1]
b0 = np.array([np.sum(w)-T,np.dot(w,r)])
b1 = np.array([0,-T])
A0 = np.array([[np.dot(w-1, w-1), np.dot(w-1, w*r)],
[np.dot(w-1, w*r), np.dot(w*r, w*r)]])
A1 = np.array([[0, -np.sum(w)+T],
[-np.sum(w)+T,-2 * np.dot(w,r)]])
A2 = np.array([[0,0],[0,T]])
#print(f'Time to construct matrices is {time.time()-t0} seconds')
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
martval = lambda v: martingale_value_lowerbound(v,wmax,b0,b1,A0,A1,A2,T)
# Root finding
#t0 = time.time()
tmpv, found = linear_search_leftright(martval,0,1,1/(2 * alpha),step=0.001)
#tmpv = bisection_algorithm(martval,0,1,1/(2 * alpha),margin=0.000001)
if not found:
#print(f'b0={b0},\nb1={b1},\nA0={A0},\nA1={A1},\nA2={A2}\n\n')
#print(f'tmpv={tmpv}')
        return np.array([0.] * T), np.array([1.] * T)
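    # The original file is truncated here. By analogy with
    # cs_via_supermartingale_debug above, a plausible (hedged) continuation is:
    lb = bisection_algorithm(martval, 0, tmpv, 1/alpha, margin=0.000001, direction="left")
    ub = bisection_algorithm(martval, tmpv, 1, 1/alpha, margin=0.000001)
    return np.array([float(lb)] * T), np.array([float(ub)] * T)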
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics
from BELM.belm import BELM
def plm_train(data, target, label, n, s1, s2, c, acc=None):
""" Progressive learning implementation"""
gamma = 0.01 + 1 * 0.005
nnet4 = []
var = s2
train_data = []
train_target = []
train_label = []
real_train_label = []
for n_c in range(0, c):
# yxf
num_node = []
error = []
nn_optimal = []
p_max = -1
s2 = var
for nn in range(0, n):
# wsn
for n_s1 in range(0, s1):
if nn == 0:
index = np.random.permutation(data.shape[0])
X_test = data[index]
Y_test = target[index]
L_test = label[index]
X_train = X_test[:5, :]
Y_train = Y_test[:5, :]
for n_s2 in range(0, s2):
belm = BELM(X_train.shape[1], Y_train.shape[1], precision="single")
belm.add_neurons(5, 'sigm')
belm.train(X_train[:5, :], Y_train[:5, :])
yhat = belm.predict(X_test)
v = np.abs(Y_test - yhat)
v = np.where(v > gamma, 0, v)
v = np.where(v > 0, 1, v)
num_node.append(np.sum(v))
error.append(belm.error(Y_test, yhat))
# print(num_node)
if max(num_node) > p_max:
p_max = max(num_node)
e1 = error[num_node.index(max(num_node))]
nnet1 = belm
v1 = v
# yhat1 = yhat
index1 = index
# data1=[y phi]
# data = []
nn_optimal.append((max(num_node), error[num_node.index(max(num_node))]))
Y_test = target[index1]
X_test = data[index1]
L_test = label[index1]
new_ind = np.where(v1 == 1)[0]
Y_train = Y_test[new_ind]
X_train = X_test[new_ind]
L_train = L_test[new_ind]
s2 = 1
nnet4.append(nnet1)
if len(train_data) == 0:
train_data = X_train
train_target = Y_train
real_train_label = L_train
train_label = np.full_like(L_train, n_c + 1)
else:
train_data = np.vstack((train_data, X_train))
train_target = np.vstack((train_target, Y_train))
real_train_label = np.vstack((real_train_label, L_train))
train_label = np.vstack((train_label, np.full_like(L_train, n_c + 1)))
# removing data points of the first cluster
# only data points where the labels are wrongly identified are selected
new_ind = np.where(v1 == 0)[0]
data = data[new_ind]
target = target[new_ind]
label = label[new_ind]
return train_data, train_target, train_label, real_train_label, nnet4
def plm_test(train_dat, train_lab, test_dat, test_tar, test_lab, nn, c):
# SVM classifier
clf = svm.SVC()
clf.fit(train_dat, train_lab.ravel())
predicted = clf.predict(test_dat)
svm_acc = metrics.accuracy_score(test_lab, predicted)
# print("SVM Accuracy: ", metrics.accuracy_score(test_lab, predicted))
# error = []
final_tar = []
final_pred = []
for n_c in range(0, c):
r_ind = np.where(test_lab == n_c + 1)[0]
# p_ind = np.where(predicted == n_c + 1)[0]
tmp_dat = test_dat[r_ind]
tmp_tar = test_tar[r_ind]
# tmp_lab = test_lab[ind]
test_pred = nn[n_c].predict(tmp_dat)
# error.append(nn[n_c].error(tmp_tar, test_pred))
if n_c == 0:
final_tar = tmp_tar
final_pred = test_pred
else:
final_tar = np.vstack((final_tar, tmp_tar))
final_pred = np.vstack((final_pred, test_pred))
return np.mean((final_pred - final_tar) ** 2), svm_acc
def pelm(data, target, m, n=5, p=10, s=10, epochs=20):
X_train, X_test, Y_train, Y_test = train_test_split(data, target, test_size=0.3)
L_train = Y_train[:, -1].reshape(-1, 1)
L_test = Y_test[:, -1].reshape(-1, 1)
Y_test = Y_test[:, 0].reshape(-1, 1)
Y_train = Y_train[:, 0].reshape(-1, 1)
from time import time
start_time = time()
testing_error = []
for i in range(0, epochs):
        d, t, l, rl, net = plm_train(X_train, Y_train, L_train, n, p, s, m)
e, svm_acc = plm_test(d, l, X_test, Y_test, L_test, net, m)
testing_error.append(e)
print("Execution time: ", time() - start_time, " secs")
print("Min error: ", np.min(testing_error))
print("Mean error: ", | np.mean(testing_error) | numpy.mean |
###############################################################################
# Reader for CINE files produced by Vision Research Phantom Software
# Author: <NAME>
# <EMAIL>
# Modified by <NAME> (<EMAIL>)
# Added to PIMS by <NAME> (<EMAIL>)
# Modified by <NAME>
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from pims.frame import Frame
from pims.base_frames import FramesSequence, index_attr
from pims.utils.misc import FileLocker
import time
import struct
import numpy as np
from numpy import array, frombuffer, where
from threading import Lock
import datetime
import hashlib
import sys
import warnings
from collections.abc import Iterable
__all__ = ('Cine', )
# '<' for little endian (cine documentation)
def _build_struct(dtype):
return struct.Struct(str("<" + dtype))
FRACTION_MASK = (2**32-1)
MAX_INT = 2**32
# Harmonized/simplified cine file data types with Python struct doc
UINT8 = 'B'
CHAR = 'b'
UINT16 = 'H'
INT16 = 'h'
BOOL = 'i'
UINT32 = 'I'
INT32 = 'i'
INT64 = 'q'
FLOAT = 'f'
DOUBLE = 'd'
TIME64 = 'Q'
RECT = '4i'
WBGAIN = '2f'
IMFILTER = '28i'
# TODO: get correct format for TrigTC
TC = '8s'
CFA_NONE = 0 # gray sensor
CFA_VRI = 1 # gbrg/rggb sensor
CFA_VRIV6 = 2 # bggr/grbg sensor
CFA_BAYER = 3 # gb/rg sensor
CFA_BAYERFLIP = 4 #rg/gb sensor
TAGGED_FIELDS = {
1000: ('ang_dig_sigs', ''),
1001: ('image_time_total', TIME64),
1002: ('image_time_only', TIME64),
1003: ('exposure_only', UINT32),
1004: ('range_data', ''),
1005: ('binsig', ''),
1006: ('anasig', ''),
1007: ('time_code', '')}
HEADER_FIELDS = [
('type', '2s'),
('header_size', UINT16),
('compression', UINT16),
('version', UINT16),
('first_movie_image', INT32),
('total_image_count', UINT32),
('first_image_no', INT32),
('image_count', UINT32),
# Offsets of following sections
('off_image_header', UINT32),
('off_setup', UINT32),
('off_image_offsets', UINT32),
('trigger_time', TIME64),
]
BITMAP_INFO_FIELDS = [
('bi_size', UINT32),
('bi_width', INT32),
('bi_height', INT32),
('bi_planes', UINT16),
('bi_bit_count', UINT16),
('bi_compression', UINT32),
('bi_image_size', UINT32),
('bi_x_pels_per_meter', INT32),
('bi_y_pels_per_meter', INT32),
('bi_clr_used', UINT32),
('bi_clr_important', UINT32),
]
SETUP_FIELDS = [
('frame_rate_16', UINT16),
('shutter_16', UINT16),
('post_trigger_16', UINT16),
('frame_delay_16', UINT16),
('aspect_ratio', UINT16),
('contrast_16', UINT16),
('bright_16', UINT16),
('rotate_16', UINT8),
('time_annotation', UINT8),
('trig_cine', UINT8),
('trig_frame', UINT8),
('shutter_on', UINT8),
('description_old', '121s'),
('mark', '2s'),
('length', UINT16),
('binning', UINT16),
('sig_option', UINT16),
('bin_channels', INT16),
('samples_per_image', UINT8),
] + [('bin_name{:d}'.format(i), '11s') for i in range(8)] + [
('ana_option', UINT16),
('ana_channels', INT16),
('res_6', UINT8),
('ana_board', UINT8),
] + [('ch_option{:d}'.format(i), INT16) for i in range(8)] + [
] + [('ana_gain{:d}'.format(i), FLOAT) for i in range(8)] + [
] + [('ana_unit{:d}'.format(i), '6s') for i in range(8)] + [
] + [('ana_name{:d}'.format(i), '11s') for i in range(8)] + [
('i_first_image', INT32),
('dw_image_count', UINT32),
('n_q_factor', INT16),
('w_cine_file_type', UINT16),
] + [('sz_cine_path{:d}'.format(i), '65s') for i in range(4)] + [
('b_mains_freq', UINT16),
('b_time_code', UINT8),
('b_priority', UINT8),
('w_leap_sec_dy', UINT16),
('d_delay_tc', DOUBLE),
('d_delay_pps', DOUBLE),
('gen_bits', UINT16),
('res_1', INT32),
('res_2', INT32),
('res_3', INT32),
('im_width', UINT16),
('im_height', UINT16),
('edr_shutter_16', UINT16),
('serial', UINT32),
('saturation', INT32),
('res_5', UINT8),
('auto_exposure', UINT32),
('b_flip_h', BOOL),
('b_flip_v', BOOL),
('grid', UINT32),
('frame_rate', UINT32),
('shutter', UINT32),
('edr_shutter', UINT32),
('post_trigger', UINT32),
('frame_delay', UINT32),
('b_enable_color', BOOL),
('camera_version', UINT32),
('firmware_version', UINT32),
('software_version', UINT32),
('recording_time_zone', INT32),
('cfa', UINT32),
('bright', INT32),
('contrast', INT32),
('gamma', INT32),
('res_21', UINT32),
('auto_exp_level', UINT32),
('auto_exp_speed', UINT32),
('auto_exp_rect', RECT),
('wb_gain', '8f'),
('rotate', INT32),
('wb_view', WBGAIN),
('real_bpp', UINT32),
('conv_8_min', UINT32),
('conv_8_max', UINT32),
('filter_code', INT32),
('filter_param', INT32),
('uf', IMFILTER),
('black_cal_sver', UINT32),
('white_cal_sver', UINT32),
('gray_cal_sver', UINT32),
('b_stamp_time', BOOL),
('sound_dest', UINT32),
('frp_steps', UINT32),
] + [('frp_img_nr{:d}'.format(i), INT32) for i in range(16)] + [
] + [('frp_rate{:d}'.format(i), UINT32) for i in range(16)] + [
] + [('frp_exp{:d}'.format(i), UINT32) for i in range(16)] + [
('mc_cnt', INT32),
] + [('mc_percent{:d}'.format(i), FLOAT) for i in range(64)] + [
('ci_calib', UINT32),
('calib_width', UINT32),
('calib_height', UINT32),
('calib_rate', UINT32),
('calib_exp', UINT32),
('calib_edr', UINT32),
('calib_temp', UINT32),
] + [('header_serial{:d}'.format(i), UINT32) for i in range(4)] + [
('range_code', UINT32),
('range_size', UINT32),
('decimation', UINT32),
('master_serial', UINT32),
('sensor', UINT32),
('shutter_ns', UINT32),
('edr_shutter_ns', UINT32),
('frame_delay_ns', UINT32),
('im_pos_xacq', UINT32),
('im_pos_yacq', UINT32),
('im_width_acq', UINT32),
('im_height_acq', UINT32),
('description', '4096s'),
('rising_edge', BOOL),
('filter_time', UINT32),
('long_ready', BOOL),
('shutter_off', BOOL),
('res_4', '16s'),
('b_meta_WB', BOOL),
('hue', INT32),
('black_level', INT32),
('white_level', INT32),
('lens_description', '256s'),
('lens_aperture', FLOAT),
('lens_focus_distance', FLOAT),
('lens_focal_length', FLOAT),
('f_offset', FLOAT),
('f_gain', FLOAT),
('f_saturation', FLOAT),
('f_hue', FLOAT),
('f_gamma', FLOAT),
('f_gamma_R', FLOAT),
('f_gamma_B', FLOAT),
('f_flare', FLOAT),
('f_pedestal_R', FLOAT),
('f_pedestal_G', FLOAT),
('f_pedestal_B', FLOAT),
('f_chroma', FLOAT),
('tone_label', '256s'),
('tone_points', INT32),
('f_tone', ''.join(32*['2f'])),
('user_matrix_label', '256s'),
('enable_matrices', BOOL),
('f_user_matrix', '9'+FLOAT),
('enable_crop', BOOL),
('crop_left_top_right_bottom', '4i'),
('enable_resample', BOOL),
('resample_width', UINT32),
('resample_height', UINT32),
('f_gain16_8', FLOAT),
('frp_shape', '16'+UINT32),
('trig_TC', TC),
('f_pb_rate', FLOAT),
('f_tc_rate', FLOAT),
('cine_name', '256s')
]
#from VR doc: This field is maintained for compatibility with old versions but
#a new field was added for that information. The new field can be larger or may
#have a different measurement unit.
UPDATED_FIELDS = {
'frame_rate_16': 'frame_rate',
'shutter_16': 'shutter_ns',
'post_trigger_16': 'post_trigger',
'frame_delay_16': 'frame_delay_ns',
'edr_shutter_16': 'edr_shutter_ns',
'saturation': 'f_saturation',
'shutter': 'shutter_ns',
'edr_shutter': 'edr_shutter_ns',
'frame_delay': 'frame_delay_ns',
'bright': 'f_offset',
'contrast': 'f_gain',
'gamma': 'f_gamma',
'conv_8_max': 'f_gain16_8',
'hue': 'f_hue',
}
#from VR doc: to be ignored, not used anymore
TO_BE_IGNORED_FIELDS = {
'contrast_16': 'res_7',
'bright_16': 'res_8',
'rotate_16': 'res_9',
'time_annotation': 'res_10',
'trig_cine': 'res_11',
'shutter_on': 'res_12',
'binning': 'res_13',
'b_mains_freq': 'res_14',
'b_time_code': 'res_15',
'b_priority': 'res_16',
'w_leap_sec_dy': 'res_17',
'd_delay_tc': 'res_18',
'd_delay_pps': 'res_19',
'gen_bits': 'res_20',
'conv_8_min': '',
}
# from VR doc: last setup field appearing in software version
# TODO: keep up-to-date with newer and more precise doc, if available
END_OF_SETUP = {
551: 'software_version',
552: 'recording_time_zone',
578: 'rotate',
605: 'b_stamp_time',
606: 'mc_percent63',
    607: 'header_serial3',
614: 'decimation',
624: 'master_serial',
625: 'sensor',
631: 'frame_delay_ns',
637: 'description',
671: 'hue',
691: 'lens_focal_length',
693: 'f_gain16_8',
701: 'f_tc_rate',
702: 'cine_name',
}
class Cine(FramesSequence):
"""Read cine files
Read cine files, the out put from Vision Research high-speed phantom
cameras. Support uncompressed monochrome and color files.
Nominally thread-safe, but this assertion is not tested.
Parameters
----------
filename : string
Path to cine (or chd) file.
Notes
-----
For a .chd file, this class only reads the header, not the images.
"""
# TODO: Unit tests using a small sample cine file.
@classmethod
def class_exts(cls):
return {'cine'} | super(Cine, cls).class_exts()
propagate_attrs = ['frame_shape', 'pixel_type', 'filename', 'frame_rate',
'get_fps', 'compression', 'cfa', 'off_set']
def __init__(self, filename):
py_ver = sys.version_info
super(Cine, self).__init__()
self.f = open(filename, 'rb')
self._filename = filename
### HEADER
self.header_dict = self._read_header(HEADER_FIELDS)
self.bitmapinfo_dict = self._read_header(BITMAP_INFO_FIELDS,
self.off_image_header)
self.setup_fields_dict = self._read_header(SETUP_FIELDS, self.off_setup)
self.setup_fields_dict = self.clean_setup_dict()
self._width = self.bitmapinfo_dict['bi_width']
self._height = self.bitmapinfo_dict['bi_height']
self._pixel_count = self._width * self._height
# Allows Cine object to be accessed from multiple threads!
self.file_lock = Lock()
self._hash = None
self._im_sz = (self._width, self._height)
# sort out the data type by reading the meta-data
if self.bitmapinfo_dict['bi_bit_count'] in (8, 24):
self._data_type = 'u1'
else:
self._data_type = 'u2'
self.tagged_blocks = self._read_tagged_blocks()
self.frame_time_stamps = self.tagged_blocks['image_time_only']
self.all_exposures = self.tagged_blocks['exposure_only']
self.stack_meta_data = dict()
self.stack_meta_data.update(self.bitmapinfo_dict)
self.stack_meta_data.update({k: self.setup_fields_dict[k]
for k in set(('trig_frame',
'gamma',
'frame_rate',
'shutter_ns'
)
)
})
self.stack_meta_data.update({k: self.header_dict[k]
for k in set(('first_image_no',
'image_count',
'total_image_count',
'first_movie_image'
)
)
})
self.stack_meta_data['trigger_time'] = self.trigger_time
### IMAGES
# Move to images offset to test EOF...
self.f.seek(self.off_image_offsets)
if self.f.read(1) != b'':
# ... If no, read images
self.image_locations = self._unpack('%dQ' % self.image_count,
self.off_image_offsets)
if type(self.image_locations) not in (list, tuple):
self.image_locations = [self.image_locations]
# TODO: add support for reading sequence within the same framework, when data
# has been saved in another format (.tif, image sequence, etc)
def clean_setup_dict(self):
r"""Clean setup dictionary by removing newer fields, when compared to the
software version, and trailing null character b'\x00' in entries.
Notes
-----
The method is called after building the setup from the raw cine header.
It can be overridden to match more specific purposes (e.g. filtering
out TO_BE_IGNORED_ and UPDATED_FIELDS).
See also
--------
`Vision Research Phantom documentation <http://phantomhighspeed-knowledge.force.com/servlet/fileField?id=0BE1N000000kD2i>`_
"""
setup = self.setup_fields_dict.copy()
# End setup at correct field (according to doc)
versions = sorted(END_OF_SETUP.keys())
fields = [v[0] for v in SETUP_FIELDS]
v = setup['software_version']
# Get next field where setup is known to have ended, according to VR
try:
v_up = versions[sorted(where(array(versions) >= v)[0])[0]]
last_field = END_OF_SETUP[v_up]
for k in fields[fields.index(last_field)+1:]:
del setup[k]
except IndexError:
# Or go to the end (waiting for updated documentation)
pass
# Remove blank characters
setup = _convert_null_byte(setup)
# Filter out 'res_' (reserved/obsolete) fields
#k_res = [k for k in setup.keys() if k.startswith('res_')]
#for k in k_res:
# del setup[k]
# Format f_tone properly
if 'f_tone' in setup.keys():
tone = setup['f_tone']
setup['f_tone'] = tuple((tone[2*k], tone[2*k+1])\
for k in range(setup['tone_points']))
return setup
@property
def filename(self):
return self._filename
@property
def frame_rate(self):
"""Frame rate (setting in Phantom PCC software) (Hz).
May differ from computed average one.
"""
return self.setup_fields_dict['frame_rate']
@property
def frame_rate_avg(self):
"""Actual frame rate, averaged on frame timestamps (Hz)."""
return self.get_frame_rate_avg()
# use properties for things that should not be changeable
@property
def cfa(self):
return self.setup_fields_dict['cfa']
@property
def compression(self):
return self.header_dict['compression']
@property
def pixel_type(self):
return np.dtype(self._data_type)
# TODO: what is this field??? (baneel)
@property
def off_set(self):
return self.header_dict['offset']
@property
def setup_length(self):
return self.setup_fields_dict['length']
@property
def off_image_offsets(self):
return self.header_dict['off_image_offsets']
@property
def off_image_header(self):
return self.header_dict['off_image_header']
@property
def off_setup(self):
return self.header_dict['off_setup']
@property
def image_count(self):
return self.header_dict['image_count']
@property
def frame_shape(self):
return self._im_sz
@property
def shape(self):
"""Shape of virtual np.array containing images."""
W, H = self.frame_shape
return self.len(), H, W
def get_frame(self, j):
md = dict()
md['exposure'] = self.all_exposures[j]
ts, sec_frac = self.frame_time_stamps[j]
md['frame_time'] = {'datetime': ts,
'second_fraction': sec_frac,
'time_to_trigger': self.get_time_to_trigger(j),
}
return Frame(self._get_frame(j), frame_no=j, metadata=md)
def _unpack(self, fs, offset=None):
if offset is not None:
self.f.seek(offset)
s = _build_struct(fs)
vals = s.unpack(self.f.read(s.size))
if len(vals) == 1:
return vals[0]
else:
return vals
def _read_tagged_blocks(self):
"""Reads the tagged block meta-data from the header."""
tmp_dict = dict()
if not self.off_setup + self.setup_length < self.off_image_offsets:
return
next_tag_exists = True
next_tag_offset = 0
while next_tag_exists:
block_size, next_tag_exists = self._read_tag_block(next_tag_offset,
tmp_dict)
next_tag_offset += block_size
return tmp_dict
def _read_tag_block(self, off_set, accum_dict):
'''
Internal helper-function for reading the tagged blocks.
'''
with FileLocker(self.file_lock):
self.f.seek(self.off_setup + self.setup_length + off_set)
block_size = self._unpack(UINT32)
b_type = self._unpack(UINT16)
more_tags = self._unpack(UINT16)
if b_type == 1004:
# docs say to ignore range data it seems to be a poison flag,
# if see this, give up tag parsing
return block_size, 0
try:
d_name, d_type = TAGGED_FIELDS[b_type]
except KeyError:
return block_size, more_tags
if d_type == '':
# print "can't deal with <" + d_name + "> tagged data"
return block_size, more_tags
s_tmp = _build_struct(d_type)
if (block_size-8) % s_tmp.size != 0:
# print 'something is wrong with your data types'
return block_size, more_tags
d_count = (block_size-8)//(s_tmp.size)
data = self._unpack('%d' % d_count + d_type)
if not isinstance(data, tuple):
# fix up data due to design choice in self.unpack
data = (data, )
# parse time
if b_type == 1002 or b_type == 1001:
data = [(datetime.datetime.fromtimestamp(d >> 32),
(FRACTION_MASK & d)/MAX_INT) for d in data]
# convert exposure to seconds
if b_type == 1003:
data = [d/(MAX_INT) for d in data]
accum_dict[d_name] = data
return block_size, more_tags
def _read_header(self, fields, offset=0):
self.f.seek(offset)
tmp = dict()
for name, format in fields:
val = self._unpack(format)
tmp[name] = val
return tmp
def _get_frame(self, number):
with FileLocker(self.file_lock):
# get basic information about the frame we want
image_start = self.image_locations[number]
annotation_size = self._unpack(UINT32, image_start)
# this is not used, but is needed to advance the point in the file
annotation = self._unpack('%db' % (annotation_size - 8))
image_size = self._unpack(UINT32)
cfa = self.cfa
compression = self.compression
# sort out data type looking at the cached version
data_type = self._data_type
# actual bit per pixel
actual_bits = image_size * 8 // (self._pixel_count)
# so this seem wrong as 10 or 12 bits won't fit in 'u1'
# but I (TAC) may not understand and don't have a packed file
# (which the docs seem to imply don't exist) to test on so
# I am leaving it. good luck.
if actual_bits in (10, 12):
data_type = 'u1'
# move the file to the right point in the file
self.f.seek(image_start + annotation_size)
# suck the data out of the file and shove into linear
# numpy array
frame = frombuffer(self.f.read(image_size), data_type)
# if mono-camera
if cfa == CFA_NONE:
if compression != 0:
raise ValueError("Can not deal with compressed files\n" +
"compression level: " +
"{}".format(compression))
# we are working with a monochrome camera
# un-pack packed data
if (actual_bits == 10):
frame = _ten2sixteen(frame)
elif (actual_bits == 12):
frame = _twelve2sixteen(frame)
elif (actual_bits % 8):
raise ValueError('Data should be byte aligned, ' +
'or 10 or 12 bit packed (appears to be' +
' %dbits/pixel?!)' % actual_bits)
# re-shape to an array
# flip the rows
frame = frame.reshape(self._height, self._width)[::-1]
if actual_bits in (10, 12):
frame = frame[::-1, :]
# Don't know why it works this way, but it does...
# else, some sort of color layout
else:
if compression == 0:
# and re-order so color is RGB (naively saves as BGR)
frame = frame.reshape(self._height, self._width,
3)[::-1, :, ::-1]
elif compression == 2:
raise ValueError("Can not process un-interpolated movies")
else:
raise ValueError("Should never hit this, " +
"you have an un-documented file\n" +
"compression level: " +
"{}".format(compression))
return frame
def __len__(self):
return self.image_count
len = __len__
@index_attr
def get_time(self, i):
"""Return the time of frame i in seconds, relative to first frame."""
warnings.warn("This is not guaranteed to be the actual time. "\
+"See self.get_time_to_trigger(i) method.",
category=PendingDeprecationWarning)
return float(i) / self.frame_rate
@index_attr
def get_time_to_trigger(self, i):
"""Get actual time (s) of frame i, relative to trigger."""
ti = self.frame_time_stamps[i]
ti = ti[0].timestamp() + ti[1]
tt= self.trigger_time
tt = tt['datetime'].timestamp() + tt['second_fraction']
return ti - tt
def get_frame_rate_avg(self, error_tol=1e-3):
"""Compute mean frame rate (Hz), on the basis of frame time stamps.
Parameters
----------
error_tol : float, optional.
Tolerance on relative error (standard deviation/mean),
above which a warning is raised.
Returns
-------
fps : float.
Actual mean frame rate, based on the frames time stamps.
"""
times = np.r_[[self.get_time_to_trigger(i) for i in range(self.len())]]
freqs = 1 / np.diff(times)
fps, std = freqs.mean(), freqs.std()
error = std / fps
if error > error_tol:
warnings.warn('Relative precision on the average frame rate is '\
+'{:.2f}%.'.format(1e2*error))
return fps
def get_fps(self):
"""Get frame rate (setting in Phantom PCC software) (Hz).
May differ from computed average one.
See also
--------
PCC setting (all fields refer to the same value)
self.frame_rate
self.setup_fields_dict['frame_rate']
Computed average
self.frame_rate_avg
self.get_frame_rate_avg()
"""
return self.frame_rate
def close(self):
self.f.close()
def __unicode__(self):
return self.filename
def __str__(self):
        return self.__unicode__()
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Source: {filename}
Length: {count} frames
Frame Shape: {frame_shape!r}
Pixel Datatype: {dtype}""".format(frame_shape=self.frame_shape,
count=len(self),
filename=self.filename,
dtype=self.pixel_type)
@property
def trigger_time(self):
'''Returns the time of the trigger, tuple of (datatime_object,
fraction_in_s)'''
trigger_time = self.header_dict['trigger_time']
ts, sf = (datetime.datetime.fromtimestamp(trigger_time >> 32),
float(FRACTION_MASK & trigger_time)/(MAX_INT))
return {'datetime': ts, 'second_fraction': sf}
@property
def hash(self):
if self._hash is None:
self._hash_fun()
return self._hash
def __hash__(self):
return int(self.hash, base=16)
def _hash_fun(self):
"""Generates the md5 hash of the header of the file. Here the
header is defined as everything before the first image starts.
This includes all of the meta-data (including the plethora of
time stamps) so this will be unique.
"""
# get the file lock (so we don't screw up any other reads)
with FileLocker(self.file_lock):
self.f.seek(0)
max_loc = self.image_locations[0]
md5 = hashlib.md5()
chunk_size = 128*md5.block_size
chunk_count = (max_loc//chunk_size) + 1
for j in range(chunk_count):
md5.update(self.f.read(128*md5.block_size))
self._hash = md5.hexdigest()
def __eq__(self, other):
return self.hash == other.hash
def __ne__(self, other):
return not self == other
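# Hedged usage sketch (not part of the original reader); the file path below is
# a placeholder:
def _demo_read_cine(path='movie.cine'):
    cine = Cine(path)
    print(cine.frame_shape, cine.pixel_type, cine.frame_rate)
    frame0 = cine.get_frame(0)   # pims Frame carrying exposure/time metadata
    print(frame0.metadata['frame_time']['time_to_trigger'])
    cine.close()
    return frame0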
# Should be divisible by 3, 4 and 5! This seems to be near-optimal.
CHUNK_SIZE = 6 * 10 ** 5
def _ten2sixteen(a):
"""Convert array of 10bit uints to array of 16bit uints."""
b = np.zeros(a.size//5*4, dtype='u2')
for j in range(0, len(a), CHUNK_SIZE):
(a0, a1, a2, a3, a4) = [a[j+i:j+CHUNK_SIZE:5].astype('u2')
for i in range(5)]
k = j//5 * 4
k2 = k + CHUNK_SIZE//5 * 4
b[k+0:k2:4] = ((a0 & 0b11111111) << 2) + ((a1 & 0b11000000) >> 6)
b[k+1:k2:4] = ((a1 & 0b00111111) << 4) + ((a2 & 0b11110000) >> 4)
b[k+2:k2:4] = ((a2 & 0b00001111) << 6) + ((a3 & 0b11111100) >> 2)
b[k+3:k2:4] = ((a3 & 0b00000011) << 8) + ((a4 & 0b11111111) >> 0)
return b
def _sixteen2ten(b):
"""Convert array of 16bit uints to array of 10bit uints."""
    a = np.zeros(b.size//4*5, dtype='u1')
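    # The original file is truncated here. A plausible completion (hedged),
    # mirroring the chunked bit layout of _ten2sixteen above, is:
    for j in range(0, len(b), CHUNK_SIZE):
        (b0, b1, b2, b3) = [b[j+i:j+CHUNK_SIZE:4].astype('u2') for i in range(4)]
        k = j//4 * 5
        k2 = k + CHUNK_SIZE//4 * 5
        a[k+0:k2:5] = (b0 & 0b1111111100) >> 2
        a[k+1:k2:5] = ((b0 & 0b0000000011) << 6) + ((b1 & 0b1111110000) >> 4)
        a[k+2:k2:5] = ((b1 & 0b0000001111) << 4) + ((b2 & 0b1111000000) >> 6)
        a[k+3:k2:5] = ((b2 & 0b0000111111) << 2) + ((b3 & 0b1100000000) >> 8)
        a[k+4:k2:5] = b3 & 0b0011111111
    return a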
from __future__ import division, print_function
import os, types
import numpy as np
import vtk
from vtk.util.numpy_support import numpy_to_vtk
from vtk.util.numpy_support import vtk_to_numpy
import vtkplotter.colors as colors
##############################################################################
vtkMV = vtk.vtkVersion().GetVTKMajorVersion() > 5
def add_actor(f): #decorator
def wrapper(*args, **kwargs):
actor = f(*args, **kwargs)
args[0].actors.append(actor)
return actor
return wrapper
def setInput(vtkobj, p, port=0):
if isinstance(p, vtk.vtkAlgorithmOutput):
vtkobj.SetInputConnection(port, p) # passing port
return
if vtkMV: vtkobj.SetInputData(p)
else: vtkobj.SetInput(p)
def isSequence(arg):
if hasattr(arg, "strip"): return False
if hasattr(arg, "__getslice__"): return True
if hasattr(arg, "__iter__"): return True
return False
def arange(start,stop, step=1):
return np.arange(start, stop, step)
def vector(x, y=None, z=0.):
if y is None: #assume x is already [x,y,z]
return np.array(x, dtype=np.float64)
return np.array([x,y,z], dtype=np.float64)
def mag(z):
if isinstance(z[0], np.ndarray):
return np.array(list(map(np.linalg.norm, z)))
else:
return np.linalg.norm(z)
def mag2(z):
return np.dot(z,z)
def norm(v):
if isinstance(v[0], np.ndarray):
return np.divide(v, mag(v)[:,None])
else:
return v/mag(v)
def to_precision(x, p):
"""
Returns a string representation of x formatted with a precision of p
Based on the webkit javascript implementation taken from here:
https://code.google.com/p/webkit-mirror/source/browse/JavaScriptCore/kjs/number_object.cpp
Implemented in https://github.com/randlet/to-precision
"""
import math
x = float(x)
if x == 0.: return "0." + "0"*(p-1)
out = []
if x < 0:
out.append("-")
x = -x
e = int(math.log10(x))
tens = math.pow(10, e - p + 1)
n = math.floor(x/tens)
if n < math.pow(10, p - 1):
e = e -1
tens = math.pow(10, e - p+1)
n = math.floor(x / tens)
if abs((n + 1.) * tens - x) <= abs(n * tens -x): n = n + 1
if n >= math.pow(10,p):
n = n / 10.
e = e + 1
m = "%.*g" % (p, n)
if e < -2 or e >= p:
out.append(m[0])
if p > 1:
out.append(".")
out.extend(m[1:p])
out.append('e')
if e > 0:
out.append("+")
out.append(str(e))
elif e == (p -1):
out.append(m)
elif e >= 0:
out.append(m[:e+1])
if e+1 < len(m):
out.append(".")
out.extend(m[e+1:])
else:
out.append("0.")
out.extend(["0"]*-(e+1))
out.append(m)
return "".join(out)
#########################################################################
def makeActor(poly, c='gold', alpha=0.5,
wire=False, bc=None, edges=False, legend=None, texture=None):
'''
Return a vtkActor from an input vtkPolyData, optional args:
c, color in RGB format, hex, symbol or name
alpha, transparency (0=invisible)
wire, show surface as wireframe
bc, backface color of internal surface
edges, show edges as line on top of surface
legend optional string
texture jpg file name of surface texture, eg. 'metalfloor1'
'''
clp = vtk.vtkCleanPolyData()
setInput(clp, poly)
clp.Update()
pdnorm = vtk.vtkPolyDataNormals()
setInput(pdnorm, clp.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
# check if color string contains a float, in this case ignore alpha
if alpha is None: alpha=0.5
al = colors.getAlpha(c)
if al: alpha = al
setInput(mapper, pdnorm.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
prp = actor.GetProperty()
#########################################################################
    ### On some vtk versions/platforms points are rendered as ugly squares;
    ### in such a case make sure the following line is enabled:
if vtk.vtkVersion().GetVTKMajorVersion()>6: prp.RenderPointsAsSpheresOn()
#########################################################################
if c is None:
mapper.ScalarVisibilityOn()
else:
mapper.ScalarVisibilityOff()
c = colors.getColor(c)
prp.SetColor(c)
prp.SetOpacity(alpha)
prp.SetSpecular(0.1)
prp.SetSpecularColor(c)
prp.SetSpecularPower(1)
prp.SetAmbient(0.1)
prp.SetAmbientColor(c)
prp.SetDiffuse(1)
prp.SetDiffuseColor(c)
if edges: prp.EdgeVisibilityOn()
if wire: prp.SetRepresentationToWireframe()
if texture:
mapper.ScalarVisibilityOff()
assignTexture(actor, texture)
if bc: # defines a specific color for the backface
backProp = vtk.vtkProperty()
backProp.SetDiffuseColor(colors.getColor(bc))
backProp.SetOpacity(alpha)
actor.SetBackfaceProperty(backProp)
assignPhysicsMethods(actor)
assignConvenienceMethods(actor, legend)
return actor
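# Hedged usage sketch (not part of the original module): wrap the polydata of a
# vtkSphereSource in an actor via the helper above.
def _demo_make_sphere_actor():
    src = vtk.vtkSphereSource()
    src.Update()
    return makeActor(src.GetOutput(), c='gold', alpha=0.8, edges=True)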
def makeAssembly(actors, legend=None):
'''Group many actors as a single new actor'''
assembly = vtk.vtkAssembly()
for a in actors: assembly.AddPart(a)
setattr(assembly, 'legend', legend)
assignPhysicsMethods(assembly)
assignConvenienceMethods(assembly, legend)
if hasattr(actors[0], 'base'):
setattr(assembly, 'base', actors[0].base)
setattr(assembly, 'top', actors[0].top)
return assembly
def assignTexture(actor, name, scale=1, falsecolors=False, mapTo=1):
'''Assign a texture to actor from file or name in /textures directory'''
if mapTo == 1: tmapper = vtk.vtkTextureMapToCylinder()
elif mapTo == 2: tmapper = vtk.vtkTextureMapToSphere()
elif mapTo == 3: tmapper = vtk.vtkTextureMapToPlane()
setInput(tmapper, polydata(actor))
if mapTo == 1: tmapper.PreventSeamOn()
xform = vtk.vtkTransformTextureCoords()
xform.SetInputConnection(tmapper.GetOutputPort())
xform.SetScale(scale,scale,scale)
if mapTo == 1: xform.FlipSOn()
xform.Update()
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(xform.GetOutputPort())
mapper.ScalarVisibilityOff()
cdir = os.path.dirname(__file__)
if cdir == '': cdir = '.'
fn = cdir + '/textures/' + name + ".jpg"
if os.path.exists(name):
fn = name
elif not os.path.exists(fn):
colors.printc(('Texture', name, 'not found in', cdir+'/textures'), 'r')
colors.printc('Available textures:', c='m', end=' ')
for ff in os.listdir(cdir + '/textures'):
colors.printc(ff.split('.')[0], end=' ', c='m')
print()
return
jpgReader = vtk.vtkJPEGReader()
jpgReader.SetFileName(fn)
atext = vtk.vtkTexture()
atext.RepeatOn()
atext.EdgeClampOff()
atext.InterpolateOn()
if falsecolors: atext.MapColorScalarsThroughLookupTableOn()
atext.SetInputConnection(jpgReader.GetOutputPort())
actor.GetProperty().SetColor(1,1,1)
actor.SetMapper(mapper)
actor.SetTexture(atext)
# ###########################################################################
def assignConvenienceMethods(actor, legend):
if not hasattr(actor, 'legend'):
setattr(actor, 'legend', legend)
def _fclone(self, c=None, alpha=None, wire=False, bc=None,
edges=False, legend=None, texture=None, rebuild=True):
return clone(self, c, alpha, wire, bc, edges, legend, texture, rebuild)
actor.clone = types.MethodType( _fclone, actor )
def _fpoint(self, i, p=None):
if p is None :
poly = polydata(self, True, 0)
p = [0,0,0]
poly.GetPoints().GetPoint(i, p)
return np.array(p)
else:
poly = polydata(self, False, 0)
poly.GetPoints().SetPoint(i, p)
TI = vtk.vtkTransform()
actor.SetUserMatrix(TI.GetMatrix()) # reset
return self
actor.point = types.MethodType( _fpoint, actor )
def _fN(self, index=0):
return polydata(self, False, index).GetNumberOfPoints()
actor.N = types.MethodType( _fN, actor )
def _fnormalize(self): return normalize(self)
actor.normalize = types.MethodType( _fnormalize, actor )
def _fshrink(self, fraction=0.85): return shrink(self, fraction)
actor.shrink = types.MethodType( _fshrink, actor )
def _fcutPlane(self, origin=(0,0,0), normal=(1,0,0), showcut=False):
return cutPlane(self, origin, normal, showcut)
actor.cutPlane = types.MethodType( _fcutPlane, actor )
def _fcutterw(self): return cutterWidget(self)
actor.cutterWidget = types.MethodType( _fcutterw, actor )
def _fpolydata(self, rebuild=True, index=0):
return polydata(self, rebuild, index)
actor.polydata = types.MethodType( _fpolydata, actor )
def _fcoordinates(self, rebuild=True):
return coordinates(self, rebuild)
actor.coordinates = types.MethodType( _fcoordinates, actor )
def _fxbounds(self):
b = polydata(actor, True).GetBounds()
return (b[0],b[1])
actor.xbounds = types.MethodType( _fxbounds, actor )
def _fybounds(self):
b = polydata(actor, True).GetBounds()
return (b[2],b[3])
actor.ybounds = types.MethodType( _fybounds, actor )
def _fzbounds(self):
b = polydata(actor, True).GetBounds()
return (b[4],b[5])
actor.zbounds = types.MethodType( _fzbounds, actor )
def _fnormalAt(self, index):
normals = polydata(self, True).GetPointData().GetNormals()
return np.array(normals.GetTuple(index))
actor.normalAt = types.MethodType( _fnormalAt, actor )
def _fnormals(self):
vtknormals = polydata(self, True).GetPointData().GetNormals()
as_numpy = vtk_to_numpy(vtknormals)
return as_numpy
actor.normals = types.MethodType( _fnormals, actor )
def _fstretch(self, startpt, endpt):
return stretch(self, startpt, endpt)
actor.stretch = types.MethodType( _fstretch, actor)
def _fsubdivide(self, N=1, method=0, legend=None):
return subdivide(self, N, method, legend)
actor.subdivide = types.MethodType( _fsubdivide, actor)
def _fdecimate(self, fraction=0.5, N=None, verbose=True, boundaries=True):
return decimate(self, fraction, N, verbose, boundaries)
actor.decimate = types.MethodType( _fdecimate, actor)
def _fcolor(self, c=None):
if c is not None:
self.GetProperty().SetColor(colors.getColor(c))
return self
else:
return np.array(self.GetProperty().GetColor())
actor.color = types.MethodType( _fcolor, actor)
def _falpha(self, a=None):
if a:
self.GetProperty().SetOpacity(a)
return self
else:
return self.GetProperty().GetOpacity()
actor.alpha = types.MethodType( _falpha, actor)
def _fwire(self, a=True):
if a:
self.GetProperty().SetRepresentationToWireframe()
else:
self.GetProperty().SetRepresentationToSurface()
return self
actor.wire = types.MethodType( _fwire, actor)
def _fclosestPoint(self, pt, N=1, radius=None):
return closestPoint(self, pt, N, radius)
actor.closestPoint = types.MethodType( _fclosestPoint, actor)
def _fintersectWithLine(self, p0, p1):
return intersectWithLine(self, p0,p1)
actor.intersectWithLine = types.MethodType(_fintersectWithLine , actor)
def _fisInside(self, point, tol=0.0001):
return isInside(self, point, tol)
actor.isInside = types.MethodType(_fisInside , actor)
def _finsidePoints(self, points, invert=False, tol=1e-05):
return insidePoints(self, points, invert, tol)
actor.insidePoints = types.MethodType(_finsidePoints , actor)
def _fflipNormals(self):
return flipNormals(self)
actor.flipNormals = types.MethodType(_fflipNormals , actor)
def _fcellCenters(self):
return cellCenters(self)
actor.cellCenters = types.MethodType(_fcellCenters, actor)
def _fpointScalars(self, scalars, name):
return pointScalars(self, scalars, name)
actor.pointScalars = types.MethodType(_fpointScalars , actor)
def _fpointColors(self, scalars, cmap='jet'):
return pointColors(self, scalars, cmap)
actor.pointColors = types.MethodType(_fpointColors , actor)
def _fcellScalars(self, scalars, name):
return cellScalars(self, scalars, name)
actor.cellScalars = types.MethodType(_fcellScalars , actor)
def _fcellColors(self, scalars, cmap='jet'):
return cellColors(self, scalars, cmap)
actor.cellColors = types.MethodType(_fcellColors , actor)
def _fscalars(self, name):
return scalars(self, name)
actor.scalars = types.MethodType(_fscalars , actor)
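# Hedged usage note: because every setter above returns the actor itself, the
# attached methods are expected to allow a fluent, chainable style on any
# actor produced by makeActor(), e.g.
#   actor.color('blue').alpha(0.5).wire(True)
#   pts = actor.coordinates()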
# ###########################################################################
def assignPhysicsMethods(actor):
def _fpos(self, p=None):
if p is None:
return np.array(self.GetPosition())
self.SetPosition(p)
return self # return itself to concatenate methods
actor.pos = types.MethodType( _fpos, actor )
def _faddpos(self, dp):
self.SetPosition(np.array(self.GetPosition()) +dp )
return self
actor.addpos = types.MethodType( _faddpos, actor )
def _fpx(self, px=None): # X
_pos = self.GetPosition()
if px is None:
return _pos[0]
newp = [px, _pos[1], _pos[2]]
self.SetPosition(newp)
return self
actor.x = types.MethodType( _fpx, actor )
def _fpy(self, py=None): # Y
_pos = self.GetPosition()
if py is None:
return _pos[1]
newp = [_pos[0], py, _pos[2]]
self.SetPosition(newp)
return self
actor.y = types.MethodType( _fpy, actor )
def _fpz(self, pz=None): # Z
_pos = self.GetPosition()
if pz is None:
return _pos[2]
newp = [_pos[0], _pos[1], pz]
self.SetPosition(newp)
return self
actor.z = types.MethodType( _fpz, actor )
def _fscale(self, p=None):
if p is None:
return np.array(self.GetScale())
self.SetScale(p)
return self # return itself to concatenate methods
actor.scale = types.MethodType( _fscale, actor )
    def _frotate(self, angle, axis, axis_point=[0,0,0], rad=False):
        # unit conversion is handled inside rotate(); converting here as well
        # would apply the radians-to-degrees factor twice
        return rotate(self, angle, axis, axis_point, rad)
    actor.rotate = types.MethodType( _frotate, actor )
    def _frotateX(self, angle, axis_point=[0,0,0], rad=False):
        return rotate(self, angle, [1,0,0], axis_point, rad)
    actor.rotateX = types.MethodType( _frotateX, actor )
    def _frotateY(self, angle, axis_point=[0,0,0], rad=False):
        return rotate(self, angle, [0,1,0], axis_point, rad)
    actor.rotateY = types.MethodType( _frotateY, actor )
    def _frotateZ(self, angle, axis_point=[0,0,0], rad=False):
        return rotate(self, angle, [0,0,1], axis_point, rad)
    actor.rotateZ = types.MethodType( _frotateZ, actor )
def _forientation(self, newaxis=None, rotation=0):
return orientation(self, newaxis, rotation)
actor.orientation = types.MethodType( _forientation, actor )
def _fcenterOfMass(self): return centerOfMass(self)
actor.centerOfMass = types.MethodType(_fcenterOfMass, actor)
def _fvolume(self): return volume(self)
actor.volume = types.MethodType(_fvolume, actor)
def _farea(self): return area(self)
actor.area = types.MethodType(_farea, actor)
def _fdiagonalSize(self): return diagonalSize(self)
actor.diagonalSize = types.MethodType(_fdiagonalSize, actor)
#########################################################
def clone(actor, c=None, alpha=None, wire=False, bc=None,
edges=False, legend=None, texture=None, rebuild=True):
'''
Clone a vtkActor.
If rebuild is True build its polydata in its current position in space
'''
poly = polydata(actor, rebuild)
if not poly.GetNumberOfPoints():
colors.printc('Limitation: cannot clone textured obj. Returning input.',1)
return actor
polyCopy = vtk.vtkPolyData()
polyCopy.DeepCopy(poly)
if legend is True and hasattr(actor, 'legend'): legend = actor.legend
if alpha is None: alpha = actor.GetProperty().GetOpacity()
if c is None: c = actor.GetProperty().GetColor()
if texture is None and hasattr(actor, 'texture'): texture = actor.texture
cact = makeActor(polyCopy, c, alpha, wire, bc, edges, legend, texture)
cact.GetProperty().SetPointSize(actor.GetProperty().GetPointSize())
return cact
def flipNormals(actor): # N.B. input argument gets modified
rs = vtk.vtkReverseSense()
setInput(rs, polydata(actor, True))
rs.ReverseNormalsOn()
rs.Update()
poly = rs.GetOutput()
mapper = actor.GetMapper()
setInput(mapper, poly)
mapper.Update()
actor.Modified()
if hasattr(actor, 'poly'): actor.poly=poly
return actor # return same obj for concatenation
def normalize(actor): # N.B. input argument gets modified
'''
    Shift the actor's center of mass to the origin and scale its average size to unity.
'''
cm = centerOfMass(actor)
coords = coordinates(actor)
if not len(coords) : return
pts = coords - cm
xyz2 = np.sum(pts * pts, axis=0)
scale = 1/np.sqrt(np.sum(xyz2)/len(pts))
t = vtk.vtkTransform()
t.Scale(scale, scale, scale)
t.Translate(-cm)
tf = vtk.vtkTransformPolyDataFilter()
setInput(tf, actor.GetMapper().GetInput())
tf.SetTransform(t)
tf.Update()
mapper = actor.GetMapper()
setInput(mapper, tf.GetOutput())
mapper.Update()
actor.Modified()
if hasattr(actor, 'poly'): actor.poly=tf.GetOutput()
return actor # return same obj for concatenation
def rotate(actor, angle, axis, axis_point=[0,0,0], rad=False):
'''Rotate an actor around an arbitrary axis passing through axis_point'''
anglerad = angle
if not rad: anglerad = angle/57.3
axis = norm(axis)
a = np.cos(anglerad / 2)
b, c, d = -axis * np.sin(anglerad / 2)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
rv = np.dot(R, actor.GetPosition()-np.array(axis_point)) + axis_point
if rad: angle *= 57.3
# this vtk method only rotates in the origin of the actor:
actor.RotateWXYZ(angle, axis[0], axis[1], axis[2] )
actor.SetPosition(rv)
return actor
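# --- Hedged worked example ---------------------------------------------------
# The matrix R built inside rotate() is the standard quaternion-derived
# rotation matrix. A numpy-only check of that construction (no vtk actor
# needed); for the defaults below the result is approximately [0, 1, 0]:
def _example_rotation_matrix(angle_deg=90, axis=(0, 0, 1), point=(1, 0, 0)):
    anglerad = np.deg2rad(angle_deg)
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    a = np.cos(anglerad / 2)
    b, c, d = -axis * np.sin(anglerad / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                  [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                  [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
    return R.dot(point)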
def orientation(actor, newaxis=None, rotation=0):
'''
Set/Get actor orientation.
    If rotation != 0, rotate the actor around newaxis by that amount (in degrees).
'''
initaxis = norm(actor.top - actor.base)
if newaxis is None: return initaxis
newaxis = norm(newaxis)
TI = vtk.vtkTransform()
actor.SetUserMatrix(TI.GetMatrix()) # reset
pos = np.array(actor.GetPosition())
crossvec = np.cross(initaxis, newaxis)
angle = np.arccos(np.dot(initaxis, newaxis))
T = vtk.vtkTransform()
T.PostMultiply()
T.Translate(-pos)
if rotation: T.RotateWXYZ(rotation, initaxis)
T.RotateWXYZ(angle*57.3, crossvec)
T.Translate(pos)
actor.SetUserMatrix(T.GetMatrix())
return actor
############################################################################
def shrink(actor, fraction=0.85): # N.B. input argument gets modified
'''Shrink the triangle polydata in the representation of actor'''
poly = polydata(actor, True)
shrink = vtk.vtkShrinkPolyData()
setInput(shrink, poly)
shrink.SetShrinkFactor(fraction)
shrink.Update()
mapper = actor.GetMapper()
setInput(mapper, shrink.GetOutput())
mapper.Update()
actor.Modified()
return actor # return same obj for concatenation
def stretch(actor, q1, q2):
'''Stretch actor between points q1 and q2'''
if not hasattr(actor, 'base'):
colors.printc('Please define vectors actor.base and actor.top at creation. Exit.','r')
exit(0)
TI = vtk.vtkTransform()
actor.SetUserMatrix(TI.GetMatrix()) # reset
p1, p2 = actor.base, actor.top
q1,q2,z = np.array(q1), np.array(q2), np.array([0,0,1])
plength = np.linalg.norm(p2-p1)
qlength = np.linalg.norm(q2-q1)
T = vtk.vtkTransform()
T.PostMultiply()
T.Translate(-p1)
cosa = np.dot(p2-p1, z)/plength
n = np.cross(p2-p1, z)
T.RotateWXYZ(np.arccos(cosa)*57.3, n)
T.Scale(1,1, qlength/plength)
cosa = np.dot(q2-q1, z)/qlength
n = np.cross(q2-q1, z)
T.RotateWXYZ(-np.arccos(cosa)*57.3, n)
T.Translate(q1)
actor.SetUserMatrix(T.GetMatrix())
return actor
def cutPlane(actor, origin=(0,0,0), normal=(1,0,0), showcut=False):
'''
Takes actor and cuts it with the plane defined by a point
and a normal.
    If showcut is True, the cut-away part is shown as a thin wireframe.
'''
plane = vtk.vtkPlane()
plane.SetOrigin(origin)
plane.SetNormal(normal)
poly = polydata(actor)
clipper = vtk.vtkClipPolyData()
setInput(clipper, poly)
clipper.SetClipFunction(plane)
clipper.GenerateClippedOutputOn()
clipper.SetValue(0.)
clipper.Update()
if hasattr(actor, 'GetProperty'):
alpha = actor.GetProperty().GetOpacity()
c = actor.GetProperty().GetColor()
bf = actor.GetBackfaceProperty()
else:
alpha=1
c='gold'
bf=None
leg = None
if hasattr(actor, 'legend'): leg = actor.legend
clipActor = makeActor(clipper.GetOutput(),c=c,alpha=alpha, legend=leg)
clipActor.SetBackfaceProperty(bf)
acts = [clipActor]
if showcut:
cpoly = clipper.GetClippedOutput()
restActor = makeActor(cpoly, c=c, alpha=0.05, wire=1)
acts.append(restActor)
if len(acts)>1:
asse = makeAssembly(acts)
return asse
else:
return clipActor
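# Hedged usage sketch: cutting an actor in half and keeping the discarded
# half as a faint wireframe, e.g.
#   halves = cutPlane(some_actor, origin=(0, 0, 0), normal=(1, 0, 0), showcut=True)
# returns a vtkAssembly when showcut=True, otherwise the single clipped actor.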
def mergeActors(actors, c=None, alpha=1,
wire=False, bc=None, edges=False, legend=None, texture=None):
'''
Build a new actor formed by the fusion of the polydata of the input objects.
Similar to makeAssembly, but in this case the input objects become a single mesh.
'''
polylns = vtk.vtkAppendPolyData()
for a in actors:
polylns.AddInputData(polydata(a, True))
polylns.Update()
actor = makeActor(polylns.GetOutput(),
c, alpha, wire, bc, edges, legend, texture)
return actor
#########################################################
# Useful Functions
#########################################################
def isInside(actor, point, tol=0.0001):
"""Return True if point is inside a polydata closed surface"""
poly = polydata(actor, True)
points = vtk.vtkPoints()
points.InsertNextPoint(point)
pointsPolydata = vtk.vtkPolyData()
pointsPolydata.SetPoints(points)
sep = vtk.vtkSelectEnclosedPoints()
sep.SetTolerance(tol)
sep.CheckSurfaceOff()
setInput(sep, pointsPolydata)
if vtkMV: sep.SetSurfaceData(poly)
else: sep.SetSurface(poly)
sep.Update()
return sep.IsInside(0)
def insidePoints(actor, points, invert=False, tol=1e-05):
"""Return list of points that are inside a polydata closed surface"""
poly = polydata(actor, True)
# check if the stl file is closed
featureEdge = vtk.vtkFeatureEdges()
featureEdge.FeatureEdgesOff()
featureEdge.BoundaryEdgesOn()
featureEdge.NonManifoldEdgesOn()
setInput(featureEdge, poly)
featureEdge.Update()
openEdges = featureEdge.GetOutput().GetNumberOfCells()
if openEdges != 0:
colors.printc("Warning: polydata is not a closed surface",5)
vpoints = vtk.vtkPoints()
for p in points: vpoints.InsertNextPoint(p)
pointsPolydata = vtk.vtkPolyData()
pointsPolydata.SetPoints(vpoints)
sep = vtk.vtkSelectEnclosedPoints()
sep.SetTolerance(tol)
setInput(sep, pointsPolydata)
if vtkMV: sep.SetSurfaceData(poly)
else: sep.SetSurface(poly)
sep.Update()
mask1, mask2 = [], []
for i,p in enumerate(points):
if sep.IsInside(i) :
mask1.append(p)
else:
mask2.append(p)
if invert:
return mask2
else:
return mask1
def pointIsInTriangle(p, p1,p2,p3):
'''
Return True if a point is inside (or above/below) a triangle
defined by 3 points in space.
'''
p = np.array(p)
u = np.array(p2) - p1
v = np.array(p3) - p1
n = np.cross(u,v)
w = p - p1
ln= np.dot(n,n)
if not ln: return True #degenerate triangle
gamma = ( np.dot(np.cross(u,w), n) )/ ln
beta = ( np.dot(np.cross(w,v), n) )/ ln
alpha = 1-gamma-beta
if 0<alpha<1 and 0<beta<1 and 0<gamma<1: return True
return False
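# Hedged sanity check of the barycentric test above (pure numpy, no vtk):
def _example_point_in_triangle():
    tri = ([0, 0, 0], [1, 0, 0], [0, 1, 0])
    assert pointIsInTriangle([0.25, 0.25, 0], *tri)        # inside
    assert not pointIsInTriangle([2.0, 2.0, 0], *tri)      # outside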
def fillHoles(actor, size=None, legend=None): # not tested properly
fh = vtk.vtkFillHolesFilter()
if not size:
mb = maxBoundSize(actor)
size = mb/20
fh.SetHoleSize(size)
poly = polydata(actor)
setInput(fh, poly)
fh.Update()
fpoly = fh.GetOutput()
factor = makeActor(fpoly, legend=legend)
factor.SetProperty(actor.GetProperty())
return factor
def cellCenters(actor):
'''Get the list of cell centers of the mesh surface'''
vcen = vtk.vtkCellCenters()
setInput(vcen, polydata(actor, True))
vcen.Update()
return coordinates(vcen.GetOutput())
def isIdentity(M, tol=1e-06):
'''Check if vtkMatrix4x4 is Identity'''
for i in [0,1,2,3]:
for j in [0,1,2,3]:
e = M.GetElement(i,j)
if i==j:
if np.abs(e-1) > tol: return False
elif np.abs(e) > tol: return False
return True
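# Hedged usage sketch: a fresh vtkMatrix4x4 is initialised to the identity,
# so isIdentity() should flip to False once any element is changed:
def _example_is_identity():
    M = vtk.vtkMatrix4x4()
    assert isIdentity(M)
    M.SetElement(0, 3, 2.5)   # add a translation along x
    assert not isIdentity(M)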
def cleanPolydata(actor, tol=None):
'''
Clean actor's polydata.
    The tol parameter defines how close two points may be before they are
    merged, expressed as a fraction of the bounding-box length.
'''
poly = polydata(actor, False)
cleanPolyData = vtk.vtkCleanPolyData()
setInput(cleanPolyData, poly)
if tol: cleanPolyData.SetTolerance(tol)
cleanPolyData.PointMergingOn()
cleanPolyData.Update()
mapper = actor.GetMapper()
setInput(mapper, cleanPolyData.GetOutput())
mapper.Update()
actor.Modified()
if hasattr(actor, 'poly'): actor.poly = cleanPolyData.GetOutput()
return actor # NB: polydata is being changed
#################################################################### get stuff
def polydata(obj, rebuild=True, index=0):
'''
Returns the vtkPolyData of a vtkActor or vtkAssembly.
If rebuild=True returns a copy of polydata
that corresponds to the current actor's position in space.
If a vtkAssembly is passed, return the polydata of component index.
'''
if isinstance(obj, vtk.vtkActor):
if not rebuild:
if hasattr(obj, 'poly') :
if obj.poly: return obj.poly
else:
setattr(obj, 'poly', None)
obj.poly = obj.GetMapper().GetInput() #cache it for speed
return obj.poly
M = obj.GetMatrix()
if isIdentity(M):
if hasattr(obj, 'poly') :
if obj.poly: return obj.poly
else:
setattr(obj, 'poly', None)
obj.poly = obj.GetMapper().GetInput() #cache it for speed
return obj.poly
# if identity return the original polydata
# otherwise make a copy that corresponds to
# the actual position in space of the actor
transform = vtk.vtkTransform()
transform.SetMatrix(M)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(transform)
if vtkMV: tp.SetInputData(obj.GetMapper().GetInput())
else: tp.SetInput(obj.GetMapper().GetInput())
tp.Update()
return tp.GetOutput()
elif isinstance(obj, vtk.vtkAssembly):
cl = vtk.vtkPropCollection()
obj.GetActors(cl)
cl.InitTraversal()
for i in range(index+1):
act = vtk.vtkActor.SafeDownCast(cl.GetNextProp())
pd = act.GetMapper().GetInput() #not optimized
if not rebuild: return pd
M = act.GetMatrix()
if isIdentity(M): return pd
# if identity return the original polydata
# otherwise make a copy that corresponds to
# the actual position in space of the actor
transform = vtk.vtkTransform()
transform.SetMatrix(M)
tp = vtk.vtkTransformPolyDataFilter()
tp.SetTransform(transform)
if vtkMV: tp.SetInputData(pd)
else: tp.SetInput(pd)
tp.Update()
return tp.GetOutput()
elif isinstance(obj, vtk.vtkPolyData): return obj
elif isinstance(obj, vtk.vtkActor2D): return obj.GetMapper().GetInput()
elif isinstance(obj, vtk.vtkImageActor): return obj.GetMapper().GetInput()
elif obj is None: return None
colors.printc("Fatal Error in polydata(): ", 'r', end='')
colors.printc(("input is neither a vtkActor nor vtkAssembly.", [obj]), 'r')
exit(1)
def coordinates(actor, rebuild=True):
"""Return a merged list of coordinates of actors or polys"""
pts = []
poly = polydata(actor, rebuild)
for j in range(poly.GetNumberOfPoints()):
p = [0, 0, 0]
poly.GetPoint(j, p)
pts.append(p)
return np.array(pts)
def xbounds(actor):
    '''Get the actor bounds [xmin, xmax]'''
b = polydata(actor, True).GetBounds()
return (b[0],b[1])
def ybounds(actor):
    '''Get the actor bounds [ymin, ymax]'''
b = polydata(actor, True).GetBounds()
return (b[2],b[3])
def zbounds(actor):
    '''Get the actor bounds [zmin, zmax]'''
b = polydata(actor, True).GetBounds()
return (b[4],b[5])
def centerOfMass(actor):
'''Get the Center of Mass of the actor'''
if vtkMV: #faster
cmf = vtk.vtkCenterOfMass()
setInput(cmf, polydata(actor, True))
cmf.Update()
c = cmf.GetCenter()
return np.array(c)
else:
pts = coordinates(actor, True)
if not len(pts): return np.array([0,0,0])
return np.mean(pts, axis=0)
def volume(actor):
'''Get the volume occupied by actor'''
mass = vtk.vtkMassProperties()
setInput(mass, polydata(actor))
mass.Update()
return mass.GetVolume()
def area(actor):
'''Get the surface area of actor'''
mass = vtk.vtkMassProperties()
setInput(mass, polydata(actor))
mass.Update()
return mass.GetSurfaceArea()
def averageSize(actor):
cm = centerOfMass(actor)
coords = coordinates(actor, True)
if not len(coords) : return
pts = coords - cm
xyz2 = np.sum(pts * pts, axis=0)
    return np.sqrt(np.sum(xyz2) / len(pts))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Friday Feb 20 2020
This code was implemented by
<NAME>, <NAME> and <NAME>
"""
import argparse
import math
import os
from decimal import Decimal
from monte_carlo import monte_carlo
import matplotlib.pyplot as plt
import matplotlib.lines as ls
import colorsys
import numpy as np
import scipy.stats as stats
import tqdm
from collections import defaultdict
import multiprocessing
from Binomial_tree import BinTreeOption, BlackScholes
import pickle
def plot_wiener_process(T,K, S0, r, sigma, steps,save_plot=False):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
:param save_plot: to save the plot
:return: returns a plot of a simulated stock movement
"""
mc=monte_carlo(steps, T, S0, sigma, r, K)
mc.wiener_method()
plt.figure(figsize=(10, 7))
    days = np.linspace(1, mc.T * 365, mc.steps)  # x-axis in days spanning the total period T
    plt.plot(days, mc.wiener_price_path)
plt.xlabel("Days",fontsize=18,fontweight='bold')
plt.ylabel("Stock price",fontsize=18,fontweight='bold')
plt.tick_params(labelsize='18')
#plt.title("Stock price simulated based on the Wiener process",fontsize=17,fontweight='bold')
if save_plot:
plt.savefig("figures/"+"wiener_process",dpi=300)
plt.show()
plt.close()
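def _example_gbm_path(T=1.0, S0=100.0, r=0.06, sigma=0.2, steps=365):
    """Hedged, self-contained sketch of a geometric-Brownian-motion price
    path, i.e. roughly what monte_carlo.wiener_method is assumed to build
    internally. Not part of the original assignment code."""
    dt = T / steps
    z = np.random.standard_normal(steps)
    log_returns = (r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z
    return S0 * np.exp(np.cumsum(log_returns))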
def worker_pay_off_euler_direct(object):
np.random.seed()
object.euler_integration_method()
pay_off_array = np.max([(object.K - object.euler_integration), 0])
return pay_off_array
def worker_pay_off_euler_sim(object):
np.random.seed()
object.euler_integration_method(generate_path=True)
pay_off_array = np.max([(object.K - object.euler_price_path[-1]), 0])
return pay_off_array
def diff_monte_carlo_process(T, S0, K, r, sigma, steps,samples,save_plot=False):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
    :param samples: list of Monte Carlo repetition counts to evaluate
    :param save_plot: to save the plot
    :return: plots the Monte Carlo price estimate and its standard error against the Black-Scholes price
"""
different_mc_rep = samples
increments = len(samples)
    # mc_pricing will be a dict of lists containing (price, standard error) tuples
mc_pricing = defaultdict(list)
for repetition in tqdm.tqdm(different_mc_rep):
mc_list = [monte_carlo(steps, T, S0, sigma, r, K) for i in range(repetition)]
num_core = 3
pool = multiprocessing.Pool(num_core)
pay_off_list = pool.map(worker_pay_off_euler_direct, ((mc) for mc in mc_list))
pool.close()
pool.join()
mean_pay_off = np.mean([pay_off for pay_off in pay_off_list])
std_pay_off = np.std([pay_off for pay_off in pay_off_list])/np.sqrt(repetition)
mc_pricing['euler_integration'].append((np.exp(-r*T)*mean_pay_off ,std_pay_off))
bs = BlackScholes(T, S0, K, r, sigma)
bs_solution=np.ones(increments)*bs.put_price()
print(bs.put_price())
for i in range(len(different_mc_rep)):
print("Number of samples: ", different_mc_rep[i]," Mean :", mc_pricing['euler_integration'][i][0], " Variance :", mc_pricing['euler_integration'][i][1])
fig, axs = plt.subplots(2,figsize=(10, 7))
axs[0].plot(different_mc_rep, [i[0] for i in mc_pricing['euler_integration']], color='gray', label='Monte Carlo')
axs[0].plot(different_mc_rep, bs_solution, 'r', label='Black Scholes')
axs[0].legend()
axs[0].set_ylabel("Option Price", fontsize=17)
axs[0].tick_params(labelsize='18')
axs[1].plot(different_mc_rep, [i[1] for i in mc_pricing['euler_integration']], label='Standard error')
axs[1].set_xlabel("Monte Carlo repetition", fontsize=17)
axs[1].legend()
axs[1].set_ylabel("Standard error", fontsize=17)
axs[1].tick_params(labelsize='18')
axs[1].ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
if save_plot:
plt.savefig("figures/" + "mc_euler_integration_diff_MC", dpi=300)
plt.show()
plt.close()
def diff_K_monte_carlo_process(T,different_k , S0, r, sigma, steps, repetition, save_plot=False):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
:param save_plot: to save the plot
    :return: plots the option price versus strike price for Monte Carlo and Black-Scholes
"""
    # mc_pricing will be a dict of lists containing (price, standard error) tuples
mc_pricing = defaultdict(list)
for diff_strike_price in tqdm.tqdm(different_k):
mc_list = [monte_carlo(steps, T, S0, sigma, r, diff_strike_price) for i in range(repetition)]
num_core = 3
pool = multiprocessing.Pool(num_core)
pay_off_list = pool.map(worker_pay_off_euler_direct, ((mc) for mc in mc_list))
pool.close()
pool.join()
mean_pay_off = np.mean([pay_off for pay_off in pay_off_list])
std_pay_off = np.std([pay_off for pay_off in pay_off_list])/np.sqrt(repetition)
mc_pricing['euler_integration'].append((np.exp(-r*T)*mean_pay_off,std_pay_off))
bs_list= []
for k in different_k:
bs = BlackScholes(T, S0, k, r, sigma)
bs_list.append(bs.put_price())
fig, axs = plt.subplots(2,figsize=(10, 7))
axs[0].plot(different_k,[i[0] for i in mc_pricing['euler_integration']],linestyle='--',linewidth=3,
color='gray', label='Monte Carlo')
axs[0].plot(different_k, bs_list, 'r', label='Black Scholes')
axs[0].legend()
axs[0].set_ylabel("Option Price",fontsize=17)
axs[0].tick_params(labelsize='18')
axs[1].plot(different_k,[i[1] for i in mc_pricing['euler_integration']],label='Standard error')
axs[1].set_xlabel("Strike price K", fontsize=17)
axs[1].legend()
axs[1].set_ylabel("Standard error", fontsize=17)
axs[1].tick_params(labelsize='18')
axs[1].ticklabel_format(axis="y", style="sci",scilimits=(0,0))
if save_plot:
plt.savefig("figures/" + "mc_euler_integration_diff_K", dpi=300)
plt.show()
plt.close()
def diff_sigma_monte_carlo_process(T,K , S0, r, different_sigma, steps, repetition, save_plot=False):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
:param save_plot: to save the plot
    :return: plots the option price versus volatility for Monte Carlo and Black-Scholes
"""
    # mc_pricing will be a dict of lists containing (price, standard error) tuples
mc_pricing = defaultdict(list)
for sigma in tqdm.tqdm(different_sigma):
mc_list = [monte_carlo(steps, T, S0, sigma, r, K) for i in range(repetition)]
num_core = 3
pool = multiprocessing.Pool(num_core)
pay_off_list = pool.map(worker_pay_off_euler_direct, ((mc) for mc in mc_list))
pool.close()
pool.join()
mean_pay_off = np.mean([pay_off for pay_off in pay_off_list])
std_pay_off = np.std([pay_off for pay_off in pay_off_list])/np.sqrt(repetition)
mc_pricing['euler_integration'].append((np.exp(-r*T)*mean_pay_off,std_pay_off))
bs_list = []
for s in different_sigma:
bs = BlackScholes(T, S0, K, r, s)
bs_list.append(bs.put_price())
fig, axs = plt.subplots(2,figsize=(10, 7))
axs[0].plot(different_sigma,[i[0] for i in mc_pricing['euler_integration']],linestyle='--',linewidth=3,
color='gray', label='Monte Carlo')
axs[0].plot(different_sigma, bs_list, 'r', label='Black Scholes')
axs[0].legend()
axs[0].set_ylabel("Option Price",fontsize=18)
axs[0].tick_params(labelsize='18')
axs[1].plot(different_sigma,[i[1] for i in mc_pricing['euler_integration']],label='Standard error')
axs[1].set_xlabel("Volatility", fontsize=18)
axs[1].legend()
axs[1].set_ylabel("Standard error", fontsize=18)
axs[1].tick_params(labelsize='18')
axs[1].ticklabel_format(axis="y", style="sci",scilimits=(0,0))
if save_plot:
plt.savefig("figures/" + "mc_euler_integration_diff_sigma", dpi=300)
plt.show()
plt.close()
def milstein_process(T, S0, K, r, sigma, steps,save_plot=False):
"""
:param T: Period
:param S0: Stock price at spot time
:param K: Strike price
:param r: interest rate
:param sigma: volatility
:param steps: number of steps
:param save_plot: to save the plot
:return: returns a plot of a simulated stock movement
"""
mc = monte_carlo(steps, T, S0, sigma, r, K)
price_path=mc.milstein_method()
plt.figure()
    plt.plot(np.linspace(1, mc.T * 365, mc.steps), price_path)
from __future__ import print_function
import mxnet as mx
import logging
import os
import time
def _get_lr_scheduler(args, adv=False):
lr = args.lr
if adv:
lr *= args.adv_lr_scale
if 'lr_factor' not in args or args.lr_factor >= 1:
return (lr, None)
epoch_size = args.num_examples // args.batch_size
# if 'dist' in args.kv_store:
# epoch_size //= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))
steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
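# Hedged worked example of the schedule above: with lr=0.1, lr_factor=0.1,
# lr_step_epochs='30,60', num_examples=128000, batch_size=128 and
# load_epoch=35, epoch_size is 1000 batches, training resumes with lr=0.01
# (the step at epoch 30 has already been applied) and the remaining drop is
# scheduled 25 epochs, i.e. 25000 batches, later.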
def _load_model(args):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
# symD = mx.sym.load('%s-symbol.json' % model_prefix)
softmaxD = mx.sym.load('%s-symbol-softmax.json' % model_prefix)
symAdv = None
# symAdv = mx.sym.load('%s-adv-symbol.json' % model_prefix)
param_file = '%s-%04d.params' % (model_prefix, args.load_epoch)
adv_param_file = '%s-adv-%04d.params' % (model_prefix, args.load_epoch)
logging.info('Load model from %s and %s', param_file, adv_param_file)
return (softmaxD, symAdv, param_file, adv_param_file)
def _save_model(args, epoch, netD, netAdv, symD, symAdv, softmax=None):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
model_prefix = args.model_prefix
symD.save('%s-symbol.json' % model_prefix)
# symAdv.save('%s-adv-symbol.json' % model_prefix)
if softmax:
softmax.save('%s-symbol-softmax.json' % model_prefix)
param_name = '%s-%04d.params' % (model_prefix, epoch)
netD.save_params(param_name)
logging.info('Saving model parameter to %s' % param_name)
adv_param_name = '%s-adv-%04d.params' % (model_prefix, epoch)
netAdv.save_params(adv_param_name)
logging.info('Saving adversarial net parameter to %s' % adv_param_name)
def _get_adversarial_weight(args, epoch=None, batch=None):
if epoch is None or epoch >= args.adv_warmup_epochs:
return float(args.adv_max_weight)
else:
wgt = float(args.adv_max_weight) / args.adv_warmup_epochs * (epoch + 1)
if batch is None or batch >= args.adv_warmup_batches:
return wgt
else:
return wgt / args.adv_warmup_batches * batch
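def _example_adversarial_weight_schedule():
    """Hedged worked example of the warm-up schedule above; the argparse
    namespace is faked and uses adv_max_weight=50, adv_warmup_epochs=5 and
    adv_warmup_batches=100 purely for illustration."""
    import argparse
    args = argparse.Namespace(adv_max_weight=50., adv_warmup_epochs=5,
                              adv_warmup_batches=100)
    assert abs(_get_adversarial_weight(args, epoch=0, batch=10) - 1.0) < 1e-9
    assert abs(_get_adversarial_weight(args, epoch=0, batch=150) - 10.0) < 1e-9
    assert abs(_get_adversarial_weight(args, epoch=2, batch=150) - 30.0) < 1e-9
    assert abs(_get_adversarial_weight(args, epoch=7) - 50.0) < 1e-9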
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, required by some networks such as resnet')
train.add_argument('--gpus', type=str, default='0',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--gpus-work-load', type=str, default=None,
help='list of gpus workload')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=500,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', action='store_true', default=False,
help='test reading speed without training')
train.add_argument('--make-plots', action='store_true', default=False,
                       help='make control plots without training')
train.add_argument('--predict', action='store_true', default=False,
help='run prediction instead of training')
train.add_argument('--predict-output', type=str,
help='predict output')
train.add_argument('--adv-max-weight', type=float, default=50.,
help='max weight of adversarial loss')
train.add_argument('--adv-warmup-epochs', type=int, default=1,
                       help='num. epochs taken to reach max weight for the adversarial loss')
train.add_argument('--adv-warmup-batches', type=int, default=100,
                       help='num. batches taken to reach max weight for the adversarial loss')
train.add_argument('--adv-qcd-start-label', type=int, default=11,
help='qcd start label')
train.add_argument('--adv-lr-scale', type=float, default=1., # lr=0.001 seems good
help='ratio of adv. lr to classifier lr')
train.add_argument('--adv-mass-max', type=float, default=250.,
help='max fatjet mass')
train.add_argument('--adv-mass-nbins', type=int, default=50,
help='nbins for fatjet mass')
train.add_argument('--adv-train-interval', type=int, default=100,
help='adv-to-classifier training times ratio')
train.add_argument('--clip-gradient', type=float, default=None,
help='grad clipping')
return train
class dummyKV:
def __init__(self):
self.rank = 0
def fit(args, symbol, data_loader, **kwargs):
"""
train a model
args : argparse returns
network : the symbol definition of the nerual network
data_loader : function that returns the train and val data iterators
"""
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
if len(devs) == 1:
devs = devs[0]
# logging
head = '%(asctime)-15s Node[0] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args)
if args.test_io:
for i_epoch in range(args.num_epochs):
train.reset()
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Epoch [%d]/Batch [%d]\tSpeed: %.2f samples/sec' % (
i_epoch, i, args.disp_batches * args.batch_size / (time.time() - tic)))
tic = time.time()
return
if args.make_plots:
import numpy as np
from common.util import to_categorical, plotHist
X_pieces = []
y_pieces = []
tic = time.time()
for i, batch in enumerate(train):
for data, label in zip(batch.data, batch.label):
X_pieces.append(data[0].asnumpy())
y_pieces.append(label[0].asnumpy())
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
i, args.disp_batches * args.batch_size / (time.time() - tic)))
tic = time.time()
X = np.concatenate(X_pieces).reshape((-1, train.provide_data[0][1][1]))
y_tmp = np.concatenate(y_pieces)
y = np.zeros(len(y_tmp), dtype=np.int)
y[y_tmp <= 3] = 1
y[np.logical_and(y_tmp >= 4, y_tmp <= 5)] = 2
y[np.logical_and(y_tmp >= 6, y_tmp <= 8)] = 3
        y[np.logical_and(y_tmp >= 9, y_tmp <= 10)] = 4
import numpy as np
import unittest
from src.davil import nutil
class TestNumpyUtils(unittest.TestCase):
def test_copy_to_from_subarray_with_mask(self):
sub = np.reshape(np.arange(1, 10), (3, 3))
mask = np.array([[1, 0, 1],
[0, 1, 0],
[0, 1, 1]])
ref = np.array([[1, 2, 3, 4, 5],
[6, 1, 8, 3, 10],
[11, 12, 5, 14, 15],
[16, 17, 8, 9, 20],
[21, 22, 23, 24, 25]])
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (1, 1), pivot='top_left', subarray_mask=mask)
np.testing.assert_array_equal(arr, ref)
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (2, 2), pivot='center', subarray_mask=mask)
np.testing.assert_array_equal(arr, ref)
def test_copy_to_from_subarray_2d(self):
sub = np.reshape(np.arange(1, 10), (3, 3))
ref = np.array([[1, 2, 3, 4, 5],
[6, 1, 2, 3, 10],
[11, 4, 5, 6, 15],
[16, 7, 8, 9, 20],
[21, 22, 23, 24, 25]])
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (1, 1), pivot='top_left')
np.testing.assert_array_equal(arr, ref)
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (2, 2), pivot='center')
np.testing.assert_array_equal(arr, ref)
def test_copy_to_from_subarray_2d_out_of_bounds(self):
sub = np.reshape(np.arange(1, 10), (3, 3))
ref1 = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 1, 2],
[21, 22, 23, 4, 5]])
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (3, 3), pivot='top_left')
np.testing.assert_array_equal(arr, ref1)
ref2 = np.array([[5, 6, 3, 4, 5],
[8, 9, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]])
arr = np.reshape(np.arange(1, 26), (5, 5))
nutil.copy_to_from_subarray(arr, sub, (0, 0), pivot='center')
np.testing.assert_array_equal(arr, ref2)
def test_copy_to_from_subarray_3d(self):
sub0 = np.reshape(np.arange(1, 10), (3, 3))
sub1 = np.reshape(np.arange(10, 19), (3, 3))
        sub = np.stack([sub0, sub1], axis=2)
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
#
# This file is part of the Flask-Plots Project
# https://github.com/juniors90/Flask-Plots/
# Copyright (c) 2021, <NAME>
# License:
# MIT
# Full Text:
# https://github.com/juniors90/Flask-Plots/blob/master/LICENSE
#
# =====================================================================
# TESTS
# =====================================================================
from matplotlib.testing.decorators import check_figures_equal
import numpy as np
class TestPlots:
    x = np.random.normal(size=100)
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def _eps(arr):
if ia.is_np_array(arr) and arr.dtype.kind == "f":
return np.finfo(arr.dtype).eps
return 1e-4
class Test_handle_continuous_param(unittest.TestCase):
def test_value_range_is_none(self):
result = iap.handle_continuous_param(
1, "[test1]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
result = iap.handle_continuous_param(
1, "[test1b]",
value_range=(None, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_stochastic_parameter(self):
result = iap.handle_continuous_param(
iap.Deterministic(1), "[test2]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_integers(self):
result = iap.handle_continuous_param(
1, "[test3]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range(self):
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test4]",
value_range=(2, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test4]" in str(context.exception))
def test_param_is_inside_value_range_and_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_continuous_param(
1, "[test5]",
value_range=(None, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test6]",
value_range=(None, 0),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test6]" in str(context.exception))
def test_param_is_inside_value_range_and_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_continuous_param(
1, "[test7]",
value_range=(-1, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test8]",
value_range=(2, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test8]" in str(context.exception))
def test_tuple_as_value_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test9]",
value_range=None,
tuple_to_uniform=False,
list_to_choice=True)
self.assertTrue("[test9]" in str(context.exception))
def test_tuple_as_value_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_continuous_param(
(1, 2), "[test10]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_continuous_param(
(1, 2), "[test11]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test12]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test12]" in str(context.exception))
def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test13]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test13]" in str(context.exception))
def test_list_as_value_but_no_lists_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2, 3], "[test14]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=False)
self.assertTrue("[test14]" in str(context.exception))
def test_list_as_value_and_lists_allowed(self):
# list as value and list allowed
result = iap.handle_continuous_param(
[1, 2, 3], "[test15]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_list_value_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test16]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test16]" in str(context.exception))
def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test17]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_and_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_continuous_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test19]",
value_range=False,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_discrete_param(unittest.TestCase):
def test_float_value_inside_value_range_but_no_floats_allowed(self):
# float value without value range when no float value is allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1.5, "[test0]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.assertTrue("[test0]" in str(context.exception))
def test_value_range_is_none(self):
# value without value range
result = iap.handle_discrete_param(
1, "[test1]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
# value without value range as (None, None)
result = iap.handle_discrete_param(
1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_is_stochastic_parameter(self):
# stochastic parameter
result = iap.handle_discrete_param(
iap.Deterministic(1), "[test2]", value_range=None,
tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_inside_value_range(self):
# value within value range
result = iap.handle_discrete_param(
1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range(self):
# value outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test4]" in str(context.exception))
def test_value_inside_value_range_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_discrete_param(
1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test6]" in str(context.exception))
def test_value_inside_value_range_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_discrete_param(
1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test8]" in str(context.exception))
def test_value_is_tuple_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test9]" in str(context.exception))
def test_value_is_tuple_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_discrete_param(
(1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_discrete_param(
(1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
# tuple as value and tuple allowed and tuple within value range with
# allow_floats=False
result = iap.handle_discrete_param(
(1, 2), "[test11b]", value_range=(0, 10),
tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test12]" in str(context.exception))
def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test13]" in str(context.exception))
def test_value_list_but_not_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True,
list_to_choice=False, allow_floats=True)
self.assertTrue("[test14]" in str(context.exception))
def test_value_list_and_allowed(self):
# list as value and list allowed
result = iap.handle_discrete_param(
[1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_value_list_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test16]" in str(context.exception))
def test_value_list_and_allowed_and_fully_outside_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_discrete_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test19]", value_range=False, tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
def test_arg_is_all(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
ia.ALL, "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == valid_values
def test_arg_is_valid_str(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
"class1", "foo", valid_values)
assert isinstance(param, iap.Deterministic)
assert param.value == "class1"
def test_arg_is_invalid_str(self):
valid_values = ["class1", "class2"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
"class3", "foo", valid_values)
expected = (
"Expected parameter 'foo' to be one of: class1, class2. "
"Got: class3.")
assert expected == str(ctx.exception)
def test_arg_is_valid_list(self):
valid_values = ["class1", "class2", "class3"]
param = iap.handle_categorical_string_param(
["class1", "class3"], "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == ["class1", "class3"]
def test_arg_is_list_with_invalid_types(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", False], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"strings, got types: str, bool."
)
assert expected in str(ctx.exception)
def test_arg_is_invalid_list(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", "class4"], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"the following allowed strings: class1, class2, class3. "
"Got strings: class1, class4."
)
assert expected in str(ctx.exception)
def test_arg_is_stochastic_param(self):
param = iap.Deterministic("class1")
param_out = iap.handle_categorical_string_param(
param, "foo", ["class1"])
assert param_out is param
def test_arg_is_invalid_datatype(self):
with self.assertRaises(Exception) as ctx:
_ = iap.handle_categorical_string_param(
False, "foo", ["class1"])
expected = "Expected parameter 'foo' to be imgaug.ALL"
assert expected in str(ctx.exception)
class Test_handle_probability_param(unittest.TestCase):
def test_bool_like_values(self):
for val in [True, False, 0, 1, 0.0, 1.0]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
def test_float_probabilities(self):
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
def test_probability_is_stochastic_parameter(self):
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
def test_probability_has_bad_datatype(self):
with self.assertRaises(Exception) as context:
_p = iap.handle_probability_param("test", "[test4]")
self.assertTrue("Expected " in str(context.exception))
def test_probability_is_negative(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(-0.01, "[test5]")
def test_probability_is_above_100_percent(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(1.01, "[test6]")
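# Hedged usage note (not an original test): inside augmenters this helper is
# expected to be used roughly as
#   self.p = iap.handle_probability_param(p, "p")
# so that bools, floats in [0, 1] and StochasticParameter instances are all
# normalised to a parameter whose draw_samples() yields 0/1 decisions.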
class Test_force_np_float_dtype(unittest.TestCase):
def test_common_dtypes(self):
dtypes = [
("float16", "float16"),
("float32", "float32"),
("float64", "float64"),
("uint8", "float64"),
("int32", "float64")
]
for dtype_in, expected in dtypes:
with self.subTest(dtype_in=dtype_in):
arr = np.zeros((1,), dtype=dtype_in)
observed = iap.force_np_float_dtype(arr).dtype
assert observed.name == expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
def test_float16_float32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float32"
def test_float16_int32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float64"
def test_int32_float16(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float16"
def test_int32_uint8(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float64"
class Test_draw_distributions_grid(unittest.TestCase):
def setUp(self):
reseed()
def test_basic_functionality(self):
params = [mock.Mock(), mock.Mock()]
params[0].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
params[1].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
draw_grid_mock = mock.Mock()
draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock):
grid_observed = iap.draw_distributions_grid(
params, rows=2, cols=3, graph_sizes=(20, 21),
sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])
assert grid_observed.shape == (4, 3, 2)
assert params[0].draw_distribution_graph.call_count == 1
assert params[1].draw_distribution_graph.call_count == 1
assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
assert params[0].draw_distribution_graph.call_args[1]["title"] == "A"
assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
assert params[1].draw_distribution_graph.call_args[1]["title"] == "B"
assert draw_grid_mock.call_count == 1
assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3)
assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3)
assert draw_grid_mock.call_args[1]["rows"] == 2
assert draw_grid_mock.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
def test_basic_functionality(self):
# this test is very rough as we get a not-very-well-defined image out
# of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,),
bins=100)
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
graph_img_title = param.draw_distribution_graph(title="test",
size=(10000,),
bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
assert nb_white > 0.1 * nb_all
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
class TestStochasticParameter(unittest.TestCase):
def setUp(self):
reseed()
def test_copy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.copy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] == param.other_param.a[0]
def test_deepcopy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.deepcopy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] != param.other_param.a[0]
class TestStochasticParameterOperators(unittest.TestCase):
def setUp(self):
reseed()
    def test_multiply_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 * param2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert param3.val == param2
def test_multiply_stochastic_param_with_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 * 2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_multiply_integer_with_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 * param1
assert isinstance(param3, iap.Multiply)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_multiply_string_with_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" * param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_multiply_stochastic_param_with_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 * "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_params(self):
# Divide (__truediv__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 / param2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_divide_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 / 2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_divide_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 / param1
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_divide_string_by_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" / param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 / "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_div_stochastic_params(self):
# Divide (__div__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1.__div__(param2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_div_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1.__div__(2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_div_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__div__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_rdiv_stochastic_param_by_integer(self):
# Divide (__rdiv__)
param1 = iap.Normal(0, 1)
param3 = param1.__rdiv__(2)
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_rdiv_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__rdiv__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_stochastic_params(self):
# Divide (__floordiv__)
param1_int = iap.DiscreteUniform(0, 10)
param2_int = iap.Choice([1, 2])
param3 = param1_int // param2_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert param3.other_param.val == param2_int
def test_floordiv_symbol_stochastic_param_by_integer(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = param1_int // 2
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert isinstance(param3.other_param.val, iap.Deterministic)
assert param3.other_param.val.value == 2
def test_floordiv_symbol_integer_by_stochastic_param(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = 2 // param1_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert isinstance(param3.other_param.other_param, iap.Deterministic)
assert param3.other_param.other_param.value == 2
assert param3.other_param.val == param1_int
def test_floordiv_symbol_string_by_stochastic_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = "test" // param1_int
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = param1_int // "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 + param2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert param3.val == param2
def test_add_integer_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 + 2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_add_stochastic_param_to_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 + param1
assert isinstance(param3, iap.Add)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_add_stochastic_param_to_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" + param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_string_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 + "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 - param2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert param3.val == param2
def test_subtract_integer_from_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 - 2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_subtract_stochastic_param_from_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 - param1
assert isinstance(param3, iap.Subtract)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_subtract_stochastic_param_from_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" - param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_string_from_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 - "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 ** param2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
def test_exponentiate_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_exponentiate_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_exponentiate_string_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" ** param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_param_by_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 ** "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p_is_zero(self):
param = iap.Binomial(0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(int 0))"
)
def test___init___p_is_one(self):
param = iap.Binomial(1.0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(float 1.00000000))"
)
def test_p_is_zero(self):
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_p_is_one(self):
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_p_is_50_percent(self):
param = iap.Binomial(0.5)
sample = param.draw_sample()
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_p_is_list(self):
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (
(0.25 - 0.05 < p < 0.25 + 0.05)
or (0.75 - 0.05 < p < 0.75 + 0.05)
)
def test_p_is_tuple(self):
param = iap.Binomial((0.0, 1.0))
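        # A (min, max) tuple means p is resampled uniformly from that interval for
        # each draw_samples() call, so the observed success rate should vary
        # noticeably between batches.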
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
def test_samples_same_values_for_same_seeds(self):
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestChoice(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Choice([0, 1, 2])
assert (
param.__str__()
== param.__repr__()
== "Choice(a=[0, 1, 2], replace=True, p=None)"
)
def test_value_is_list(self):
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_sampled_values_match_expected_counts(self):
param = iap.Choice([0, 1, 2])
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_value_is_list_containing_negative_number(self):
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
def test_value_is_list_of_floats(self):
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert (
(
-1.2 - _eps(sample)
< sample <
-1.2 + _eps(sample)
)
or
(
1.7 - _eps(sample)
< sample <
1.7 + _eps(sample)
)
)
assert np.all(
np.logical_or(
np.logical_and(
-1.2 - _eps(sample) < samples,
samples < -1.2 + _eps(sample)
),
np.logical_and(
1.7 - _eps(sample) < samples,
samples < 1.7 + _eps(sample)
)
)
)
def test_value_is_list_of_strings(self):
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
def test_sample_without_replacing(self):
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
def test_non_uniform_probabilities_over_elements(self):
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
def test_list_contains_stochastic_parameter(self):
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_samples_same_values_for_same_seeds(self):
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_value_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice(123)
self.assertTrue(
"Expected a to be an iterable" in str(context.exception))
def test_p_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=123)
self.assertTrue("Expected p to be" in str(context.exception))
def test_value_and_p_have_unequal_lengths(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=[1])
self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.DiscreteUniform(0, 2)
assert (
param.__str__()
== param.__repr__()
== "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
)
def test_bounds_are_ints(self):
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_samples_match_expected_counts(self):
param = iap.DiscreteUniform(0, 2)
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_lower_bound_is_negative(self):
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(samples == -1, samples == 0),
samples == 1
)
)
def test_bounds_are_floats(self):
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_have_wrong_order(self):
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_are_the_same(self):
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
def test_samples_same_values_for_same_seeds(self):
        param = iap.DiscreteUniform(-1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Poisson(1)
assert (
param.__str__()
== param.__repr__()
== "Poisson(Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Poisson(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_poisson(self):
param = iap.Poisson(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).poisson(
lam=1, size=(100, 1000))
assert samples.shape == (100, 1000)
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = int(np.sum(samples_direct == i))
count = np.sum(samples == i)
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
def test_samples_same_values_for_same_seeds(self):
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Normal(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Normal(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_normal(self):
param = iap.Normal(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
size=(100, 1000))
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
def test_samples_same_values_for_same_seeds(self):
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestTruncatedNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.TruncatedNormal(0, 1)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(float -inf), "
"high=Deterministic(float inf)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test___init___custom_range(self):
param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(int -100), "
"high=Deterministic(float 50.00000000)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_scale_is_zero(self):
param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
samples = param.draw_samples((100,))
assert np.allclose(samples, 0.5)
def test_scale(self):
param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
def test_loc_is_stochastic_parameter(self):
param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
low=-1000, high=1000)
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((5,))
observed = np.mean(samples)
dist1 = np.abs(-100 - observed)
dist2 = np.abs(100 - observed)
if dist1 < 1:
seen[0] += 1
elif dist2 < 1:
seen[1] += 1
else:
assert False
assert np.isclose(seen[0], 100, rtol=0, atol=20)
assert np.isclose(seen[1], 100, rtol=0, atol=20)
def test_samples_are_within_bounds(self):
param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
samples = param.draw_samples((1000,))
# are all within bounds
assert np.all(samples >= -5.0 - 1e-4)
assert np.all(samples <= 7.5 + 1e-4)
# at least some samples close to bounds
assert np.any(samples <= -4.5)
assert np.any(samples >= 7.0)
# at least some samples close to loc
assert np.any(np.abs(samples) < 0.5)
def test_samples_same_values_for_same_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=1234)
assert np.allclose(samples1, samples2)
def test_samples_different_values_for_different_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=2345)
assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Laplace(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Laplace(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_laplace(self):
param = iap.Laplace(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
def test_scale_is_zero(self):
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - _eps(samples),
samples < 1 + _eps(samples)
))
def test_samples_same_values_for_same_seeds(self):
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ChiSquare(1)
assert (
param.__str__()
== param.__repr__()
== "ChiSquare(df=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.ChiSquare(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_chisquare(self):
param = iap.ChiSquare(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).chisquare(df=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_df_is_stochastic_parameter(self):
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_larger_df_leads_to_more_variance(self):
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
def test_samples_same_values_for_same_seeds(self):
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Weibull(1)
assert (
param.__str__()
== param.__repr__()
== "Weibull(a=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Weibull(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_weibull(self):
param = iap.Weibull(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).weibull(a=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Weibull(iap.Choice([1, 0.5]))
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
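        # The mean of a Weibull distribution with shape a and scale 1 is gamma(1 + 1/a),
        # so each batch mean should cluster around one of these two expected values.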
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
matches_first = (
expected_first - 0.2 * expected_first
< observed <
expected_first + 0.2 * expected_first
)
matches_second = (
expected_second - 0.2 * expected_second
< observed <
expected_second + 0.2 * expected_second
)
if matches_first:
seen[0] += 1
elif matches_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_different_strengths(self):
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = (
scipy.special.gamma(1 + 2/1)
- (scipy.special.gamma(1 + 1/1))**2
)
expected_second = (
scipy.special.gamma(1 + 2/0.5)
- (scipy.special.gamma(1 + 1/0.5))**2
)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.2 * expected_first
< np.var(samples1) <
expected_first + 0.2 * expected_first
)
assert (
expected_second - 0.2 * expected_second
< np.var(samples2) <
expected_second + 0.2 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Uniform(0, 1.0)
assert (
param.__str__()
== param.__repr__()
== "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
)
def test_draw_sample(self):
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10, 5))
assert samples.shape == (10, 5)
assert np.all(
np.logical_and(
0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_via_density_histogram(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert (
density_expected - density_tolerance
< density <
density_expected + density_tolerance
)
def test_negative_value(self):
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_wrong_argument_order(self):
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_integers(self):
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_identical(self):
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Uniform(-1.0, 1.0)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
@classmethod
def _mean(cls, alpha, beta):
return alpha / (alpha + beta)
@classmethod
def _var(cls, alpha, beta):
return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
def setUp(self):
reseed()
def test___init__(self):
param = iap.Beta(0.5, 0.5)
assert (
param.__str__()
== param.__repr__()
== "Beta("
"Deterministic(float 0.50000000), "
"Deterministic(float 0.50000000)"
")"
)
def test_draw_sample(self):
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
assert samples.shape == (100, 1000)
assert np.all(
np.logical_and(
0 - _eps(samples) <= samples,
samples <= 1.0 + _eps(samples)
)
)
def test_via_comparison_to_np_beta(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).beta(
a=0.5, b=0.5, size=(100, 1000))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
expected_first = self._mean(0.5, 0.5)
expected_second = self._mean(2, 0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((10000,))
observed = np.mean(samples)
if expected_first - 0.05 < observed < expected_first + 0.05:
seen[0] += 1
elif expected_second - 0.05 < observed < expected_second + 0.05:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_compare_curves_of_different_arguments(self):
param1 = iap.Beta(2, 2)
param2 = iap.Beta(0.5, 0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = self._var(2, 2)
expected_second = self._var(0.5, 0.5)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.1 * expected_first
< np.var(samples1) <
expected_first + 0.1 * expected_first
)
assert (
expected_second - 0.1 * expected_second
< np.var(samples2) <
expected_second + 0.1 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Beta(0.5, 0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestDeterministic(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
pairs = [
(0, "Deterministic(int 0)"),
(1.0, "Deterministic(float 1.00000000)"),
("test", "Deterministic(test)")
]
for value, expected in pairs:
with self.subTest(value=value):
param = iap.Deterministic(value)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_samples_same_values_for_same_seeds(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0
]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
rs1 = iarandom.RNG(123456)
rs2 = iarandom.RNG(123456)
samples1 = param.draw_samples(20, random_state=rs1)
samples2 = param.draw_samples(20, random_state=rs2)
assert np.array_equal(samples1, samples2)
def test_draw_sample_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 == sample2
def test_draw_sample_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert np.isclose(
sample1, sample2, rtol=0, atol=_eps(sample1))
def test_draw_samples_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.all(samples == value)
def test_draw_samples_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.allclose(samples, value, rtol=0, atol=_eps(samples))
def test_argument_is_stochastic_parameter(self):
seen = [0, 0]
for _ in sm.xrange(200):
param = iap.Deterministic(iap.Choice([0, 1]))
seen[param.value] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_argument_has_invalid_type(self):
with self.assertRaises(Exception) as context:
_ = iap.Deterministic([1, 2, 3])
self.assertTrue(
"Expected StochasticParameter object or number or string"
in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
def setUp(self):
reseed()
def test___init___size_percent(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_percent=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_percent=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test___init___size_px(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_px=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_px=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test_binomial_hwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 1))
uq = np.unique(samples)
assert samples.shape == (8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_binomial_nhwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples_nhwc = param.draw_samples((1, 8, 8, 1))
uq = np.unique(samples_nhwc)
assert samples_nhwc.shape == (1, 8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_draw_samples_with_too_many_dimensions(self):
# (N, H, W, C, something) causing error
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
with self.assertRaises(Exception) as context:
_ = param.draw_samples((1, 8, 8, 1, 1))
self.assertTrue(
"FromLowerResolution can only generate samples of shape"
in str(context.exception)
)
def test_binomial_hw3(self):
# C=3
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 3))
uq = np.unique(samples)
assert samples.shape == (8, 8, 3)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_different_size_px_arguments(self):
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
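        # A smaller size_px means the mask is sampled on a coarser low-resolution grid
        # and then upscaled, so it should yield fewer but larger connected components.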
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_arguments_with_tuple(self):
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_argument_with_stochastic_parameters(self):
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_px_has_invalid_datatype(self):
# bad datatype for size_px
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
self.assertTrue("Expected " in str(context.exception))
def test_min_size(self):
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent(self):
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
#!/usr/bin/env python3
'''A reference implementation of Bloom filter-based Iris-Code indexing.'''
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017 Hochschule Darmstadt"
__license__ = "License Agreement provided by Hochschule Darmstadt(https://github.com/dasec/bloom-filter-iris-indexing/blob/master/hda-license.pdf)"
__version__ = "1.0"
import argparse
import copy
import math
import operator
import sys
from pathlib import Path
from timeit import default_timer as timer
from typing import Tuple, List, Set
import numpy as np
parser = argparse.ArgumentParser(description='Bloom filter-based Iris-Code indexing.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
required = parser.add_argument_group('required named arguments')
required.add_argument('-d', '--directory', action='store', type=Path, required=True, help='directory where the binary templates are stored')
required.add_argument('-n', '--enrolled', action='store', type=int, required=True, help='number of enrolled subjects')
required.add_argument('-bh', '--height', action='store', type=int, required=True, help='filter block height')
required.add_argument('-bw', '--width', action='store', type=int, required=True, help='filter block width')
required.add_argument('-T', '--constructed', action='store', type=int, required=True, help='number of trees constructed')
required.add_argument('-t', '--traversed', action='store', type=int, required=True, help='number of trees traversed')
args = parser.parse_args()
required_python_version = (3, 5)
if (sys.version_info.major, sys.version_info.minor) < required_python_version:
sys.exit("Python {}.{} or newer is required to run this program".format(*required_python_version))
allowed_bf_heights = frozenset(range(8, 13))
allowed_bf_widths = frozenset({8, 16, 32, 64})
class BloomTemplate(object):
'''Represents a Bloom Filter template or a Bloom Filter tree node'''
def __init__(self, bloom_filter_sets: List[Set[int]], source: List[Tuple[str, str, str, str]]):
self.bloom_filter_sets = bloom_filter_sets
self.source = source
def compare(self, other) -> float:
'''Measures dissimilarity between two BloomTemplates'''
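        # Per filter pair: |symmetric difference| / (|set1| + |set2|); the returned
        # score is the mean over all filter pairs (0.0 means identical templates).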
return sum(len(s1 ^ s2) / (len(s1) + len(s2)) for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)) / len(self)
def __add__(self, other):
'''Merge two BloomTemplates by ORing their bloom filter sets'''
return BloomTemplate([s1 | s2 for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)], self.source + [s for s in other.source if s not in self.source])
def __iadd__(self, other):
'''Add (OR) another template to self in-place'''
self.bloom_filter_sets = [s1 | s2 for s1, s2 in zip(self.bloom_filter_sets, other.bloom_filter_sets)]
self.source += (s for s in other.source if s not in self.source)
return self
def __len__(self) -> int:
'''Number of bloom filters in the template'''
return len(self.bloom_filter_sets)
def __getitem__(self, key: int) -> Set[int]:
'''Convenience access for individual bloom filters in the template'''
return self.bloom_filter_sets[key]
def __repr__(self) -> str:
return "Bloom filter template of {}".format(self.source)
# Convenience functions for template source comparison
def is_same_subject(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[0] == o_item[0] for s_item, o_item in zip(self.source, other.source))
def is_same_image(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[1] == o_item[1] for s_item, o_item in zip(self.source, other.source))
def is_same_side(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[2] == o_item[2] for s_item, o_item in zip(self.source, other.source))
def is_same_dataset(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item[3] == o_item[3] for s_item, o_item in zip(self.source, other.source))
def is_same_genuine(self, other) -> bool:
return len(self.source) == len(other.source) and self.is_same_subject(other) and self.is_same_side(other) and self.is_same_dataset(other)
def is_same_source(self, other) -> bool:
return len(self.source) == len(other.source) and all(s_item == o_item for s_item, o_item in zip(self.source, other.source))
def is_multi_source(self) -> bool:
return len(self.source) > 1
@classmethod
def from_binary_template(cls, binary_template: List[List[int]], height: int, width: int, source: List[Tuple[str, str, str, str]]):
'''Creates a BloomTemplate with specified block size from an iris code represented as a 2-dimensional (row x column) array of 0's and 1's. The source is a list of tuples following format: [(subject, image_number, side, dataset), ...]'''
if height not in allowed_bf_heights or width not in allowed_bf_widths:
raise ValueError("Invalid block size: ({}, {})".format(height, width))
binary_template = np.array(binary_template)
bf_sets = []
bf_real = set()
bf_imaginary = set()
for column_number, column in enumerate(binary_template.T):
real_part = ''.join(map(str, column[:height]))
im_part_start = 10 if height <= 10 else len(binary_template) - height
im_part_end = im_part_start + height
imaginary_part = ''.join(map(str, column[im_part_start:im_part_end]))
bf_value_real = int(real_part, 2)
bf_value_imaginary = int(imaginary_part, 2)
bf_real.add(bf_value_real)
bf_imaginary.add(bf_value_imaginary)
if column_number != 0 and (column_number + 1) % width == 0:
bf_sets.append(bf_real)
bf_sets.append(bf_imaginary)
bf_real = set()
bf_imaginary = set()
return BloomTemplate(bf_sets, source)
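# Illustrative sketch (not part of the original pipeline): how a BloomTemplate could be
# built from a toy binary "iris code" matrix. The 20x8 random matrix, the (8, 8) block
# size and the dummy source tuple are arbitrary choices for demonstration; real
# templates come from load_binary_template() below.
def _example_from_binary_template() -> BloomTemplate:
    rng = np.random.RandomState(0)
    toy_code = rng.randint(0, 2, size=(20, 8)).tolist()  # 20 rows x 8 columns of 0/1
    # One (height=8, width=8) block -> two bloom filter sets (real + imaginary part).
    return BloomTemplate.from_binary_template(
        toy_code, height=8, width=8, source=[('0000', '00', 'L', 'Toy')])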
BF_TREE = List[BloomTemplate]
class BloomTreeDb(object):
'''Represents a database of BloomTemplate trees'''
def __init__(self, enrolled: List[BloomTemplate], trees_constructed: int):
def is_power_of2(number: int) -> bool:
'''Check if a number is a power of 2.'''
return number > 0 and (number & (number - 1)) == 0
if not is_power_of2(len(enrolled)) or not is_power_of2(trees_constructed):
raise ValueError("Number of subjects ({}) and trees ({}) must both be a power of 2".format(len(enrolled), trees_constructed))
self.enrolled = enrolled
self.trees_constructed = trees_constructed
self.trees = self._build()
def search(self, probe: BloomTemplate, trees_traversed: int) -> Tuple[float, BloomTemplate]:
'''Perform a search for a template matching the probe in the database.'''
def find_promising_trees(probe: BloomTemplate, trees_traversed: int) -> List[BF_TREE]:
'''Preselection step - most promising trees are found based on the scores between the tree roots and the probe'''
if self.trees_constructed == trees_traversed:
return self.trees
else:
root_scores = [(tree[0].compare(probe), index) for index, tree in enumerate(self.trees)]
root_scores.sort(key=operator.itemgetter(0))
promising_tree_indexes = map(operator.itemgetter(1), root_scores[:trees_traversed])
return [self.trees[index] for index in promising_tree_indexes]
def traverse(trees: List[BF_TREE], probe: BloomTemplate) -> Tuple[float, BloomTemplate]:
'''Traverse the selected trees to find the node corresponding to a best score'''
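            # Greedy descent: at each level move to the child node whose (merged)
            # template is more similar to the probe; the leaf reached and its score
            # become this tree's candidate match.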
best_score, best_match_node = 1.0, None
for _, tree in enumerate(trees):
step = 0
score = 1.0
for _ in range(int(math.log(len(self.enrolled), 2)) - int(math.log(self.trees_constructed, 2))):
left_child_index, right_child_index = BloomTreeDb.get_node_children_indices(step)
ds_left = tree[left_child_index].compare(probe)
ds_right = tree[right_child_index].compare(probe)
step, score = (left_child_index, ds_left) if ds_left < ds_right else (right_child_index, ds_right)
score, match_node = score, tree[step]
if score <= best_score:
best_score = score
best_match_node = match_node
return best_score, best_match_node
if trees_traversed < 1 or trees_traversed > self.trees_constructed:
raise ValueError("Invalid number of trees to traverse:", trees_traversed)
promising_trees = find_promising_trees(probe, trees_traversed)
return traverse(promising_trees, probe)
def _build(self) -> List[BF_TREE]:
'''Constructs the BloomTemplate trees using the parameters the db has been initiated with'''
def construct_bf_tree(enrolled_part: List[BloomTemplate]) -> BF_TREE:
'''Constructs a single BloomTemplate tree'''
bf_tree = []
for index in range(len(enrolled_part)-1):
node_level = BloomTreeDb.get_node_level(index)
start_index = int(len(enrolled_part) / (1 << node_level) * ((index + 1) % (1 << node_level)))
end_index = int(len(enrolled_part) / (1 << node_level) * ((index + 1) % (1 << node_level)) + len(enrolled_part) / (1 << node_level))
node = copy.deepcopy(enrolled_part[start_index])
for i in range(start_index, end_index):
node += enrolled_part[i]
bf_tree.append(node)
bf_tree += enrolled_part
return bf_tree
trees = []
i = 0
while i != len(self.enrolled):
i_old = i
i += int(len(self.enrolled) / self.trees_constructed)
bf_tree = construct_bf_tree(self.enrolled[i_old:i])
assert len(bf_tree) == int(len(self.enrolled) / self.trees_constructed) * 2 - 1
trees.append(bf_tree)
assert len(trees) == self.trees_constructed
return trees
def __repr__(self) -> str:
return "<BloomTreeDb object containing {} subjects in {} trees>".format(len(self.enrolled), self.trees_constructed)
'''Convenience methods for tree indexing'''
@staticmethod
def get_node_children_indices(index: int) -> Tuple[int, int]:
'''Compute indices of node children based on its index.'''
return 2 * index + 1, 2 * (index + 1)
@staticmethod
def get_node_level(index: int) -> int:
'''Compute the level of a node in a tree based on its index.'''
return int(math.floor(math.log(index + 1, 2)))
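# Illustrative sketch (not part of the original): worked examples of the heap-style
# indexing used for the flattened BloomTemplate trees above.
def _demo_node_indexing() -> None:
    # The root (index 0) has its children at indices 1 and 2 ...
    assert BloomTreeDb.get_node_children_indices(0) == (1, 2)
    # ... and index 5 lies on level 2 of the tree (levels are 0-based, root = level 0).
    assert BloomTreeDb.get_node_level(5) == 2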
def load_binary_template(path: Path) -> List[List[int]]:
'''Reads a text file into an iris code matrix'''
with path.open("r") as f:
return [list(map(int, list(line.rstrip()))) for line in f.readlines()]
def extract_source_data(filename: str) -> List[Tuple[str, str, str, str]]:
'''This function parses the template filename (path.stem) and extract the subject, image number, image side and dataset and return it as list (this is necessary later on) with one tuple element (Subject, Image, Side, Dataset).
e.g. if the filename is "S1001L01.jpg" from Casia-Interval dataset, then the return value should be: [(1001, 01, L, Interval)] or similar, as long as the convention is consistent.
'''
raise NotImplementedError("Implement me!")
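# Illustrative sketch only (an assumption, not the project's parser): one possible
# implementation of extract_source_data() for CASIA-Interval style stems such as
# 'S1001L01', following the convention described in the docstring above.
def _example_extract_source_data(filename: str) -> List[Tuple[str, str, str, str]]:
    subject = filename[1:5]   # e.g. '1001'
    side = filename[5]        # 'L' or 'R'
    image = filename[6:8]     # e.g. '01'
    return [(subject, image, side, 'Interval')]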
def split_dataset(templates: List[BloomTemplate], num_enrolled: int) -> Tuple[List[BloomTemplate], List[BloomTemplate], List[BloomTemplate]]:
'''This function splits the full template list into disjoint lists of enrolled, genuine and impostor templates'''
enrolled, genuine, impostor = [], [], []
raise NotImplementedError("Implement me!")
return enrolled, genuine, impostor
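# Illustrative sketch only (assumed protocol, adapt to the experiment at hand): enrol the
# first image of each of the first num_enrolled subjects, use their remaining images as
# genuine probes, and treat all templates of the other subjects as impostor probes.
def _example_split_dataset(templates: List[BloomTemplate], num_enrolled: int) -> Tuple[List[BloomTemplate], List[BloomTemplate], List[BloomTemplate]]:
    by_subject = {}
    for template in templates:
        by_subject.setdefault(template.source[0][0], []).append(template)
    enrolled, genuine, impostor = [], [], []
    for i, (_, subject_templates) in enumerate(sorted(by_subject.items())):
        if i < num_enrolled:
            enrolled.append(subject_templates[0])
            genuine.extend(subject_templates[1:])
        else:
            impostor.extend(subject_templates)
    return enrolled, genuine, impostor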
if __name__ == "__main__":
# Data preparation
start = timer()
binary_templates = [(load_binary_template(f), extract_source_data(f.stem)) for f in args.directory.iterdir() if f.is_file() and f.match('*.txt')] # see file example_binary_template.txt for required format
bloom_templates = [BloomTemplate.from_binary_template(template, args.height, args.width, source) for template, source in binary_templates]
enrolled_templates, genuine_templates, impostor_templates = split_dataset(bloom_templates, args.enrolled)
db = BloomTreeDb(enrolled_templates, args.constructed)
end = timer()
print("Total data preparation time: %02d:%02d" % divmod(end - start, 60))
# Lookup
start = timer()
results_genuine = [db.search(genuine_template, args.traversed) for genuine_template in genuine_templates] # List[Tuple[float, BloomTemplate]]
results_impostor = [db.search(impostor_template, args.traversed) for impostor_template in impostor_templates] # List[Tuple[float, BloomTemplate]]
genuine_scores = [result[0] for result in results_genuine] # List[float]
impostor_scores = [result[0] for result in results_impostor] # List[float]
genuine_matches = [result[1] for result in results_genuine] # List[BloomTemplate]
end = timer()
print("Total lookup time: %02d:%02d" % divmod(end - start, 60))
# Results
print("Experiment configuration: {} enrolled, {} trees, {} traversed trees, {} block height, {} block width".format(len(enrolled_templates), args.constructed, args.traversed, args.height, args.width))
print("Genuine distribution: {} scores, min/max {:.4f}/{:.4f}, mean {:.4f} +/- {:.4f}".format(len(genuine_scores), min(genuine_scores), max(genuine_scores), np.mean(genuine_scores), | np.std(genuine_scores) | numpy.std |
"""Tests with the ATTAS aircraft short-period mode estimation."""
import importlib
import os
import numpy as np
import scipy.io
import scipy.linalg
import sympy
import sym2num.model
import fem
import symfem
# Reload modules for testing
for m in (fem, symfem):
importlib.reload(m)
def load_data():
# Retrieve data
# Load experiment data
dirname = os.path.dirname(__file__)
data = np.loadtxt(os.path.join(dirname, 'data', 'hfb320_1_10.asc'))
Ts = 0.1
n = len(data)
t = np.arange(n) * Ts
y = data[:, 4:11]
u = data[:, [1,3]]
# Shift and rescale
yscale = np.r_[0.15, 70, 15, 30, 10, 5, 0.8]
y = (y - [106, 0.11, 0.1, 0, 0, 0.95, -9.5]) * yscale
u = (u - [-0.007, 11600]) * [100, 0.01]
return t, u, y[:, :]
def save_generated_model(symmodel):
clsname = type(symmodel).__name__
nx = symmodel.nx
nu = symmodel.nu
ny = symmodel.ny
with open(f'{clsname}_nx{nx}_nu{nu}_ny{ny}.py', mode='w') as f:
code = symmodel.print_code()
print(code, file=f)
def get_model(nx, nu, ny):
clsname = 'NaturalSqrtZOHModel'
modname = f'{clsname}_nx{nx}_nu{nu}_ny{ny}'
mod = importlib.import_module(modname)
genclsname = f'Generated{clsname}'
cls = getattr(mod, genclsname)
return cls()
def dt_eem(x, u, y):
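    # Equation-error method: estimate the discrete-time state-space matrices by row-wise
    # least squares, fitting x[k+1] ~ A x[k] + B u[k] and y[k] ~ C x[k] + D u[k]
    # with the measured (or assumed) state trajectory x.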
N, nx = x.shape
_, nu = u.shape
_, ny = y.shape
A = np.zeros((nx, nx))
B = np.zeros((nx, nu))
C = np.zeros((ny, nx))
D = np.zeros((ny, nu))
for i in range(nx):
psi = np.zeros((N-1, nx+nu))
psi[:, :nx] = x[:-1]
psi[:, nx:] = u[:-1]
est = np.linalg.lstsq(psi, x[1:, i], rcond=None)
A[i, :] = est[0][:nx]
B[i, :] = est[0][nx:]
for i in range(ny):
psi = np.zeros((N, nx+nu))
psi[:, :nx] = x
psi[:, nx:] = u
est = np.linalg.lstsq(psi, y[:, i], rcond=None)
C[i, :] = est[0][:nx]
D[i, :] = est[0][nx:]
return A, B, C, D
if __name__ == '__main__':
nx = 4
nu = 2
ny = 7
# Load experiment data
t, u, y = load_data()
#symmodel = symfem.NaturalSqrtZOHModel(nx=nx, nu=nu, ny=ny)
#model = symmodel.compile_class()()
model = get_model(nx, nu, ny)
model.dt = t[1] - t[0]
problem = fem.NaturalSqrtZOHProblem(model, y, u)
# Equation error method initial guess
A0, B0, C0, D0 = dt_eem(y[:, :nx], u, y)
# Define initial guess for decision variables
dec0 = np.zeros(problem.ndec)
var0 = problem.variables(dec0)
var0['A'][:] = A0
var0['B'][:] = B0
var0['C'][:] = C0
var0['D'][:] = D0
var0['L'][:] = np.eye(nx, ny)
var0['Kn'][:] = np.eye(nx, ny) * 1e-2
var0['x'][:] = y[:, :nx]
var0['isRp_tril'][symfem.tril_diag(ny)] = 1e2
var0['sRp_tril'][symfem.tril_diag(ny)] = 1e-2
var0['sQ_tril'][symfem.tril_diag(nx)] = 1e-2
var0['sR_tril'][symfem.tril_diag(ny)] = 1e-2
var0['sPp_tril'][symfem.tril_diag(nx)] = 1e-2
var0['sPc_tril'][symfem.tril_diag(nx)] = 1e-2
var0['pred_orth'][:] = np.eye(2*nx, nx)
var0['corr_orth'][:] = np.eye(nx + ny, nx + ny)
    var0['Qc'][:] = np.eye(nx)
import numpy as np
import scipy
import dadapy.utils_.utils as ut
# --------------------------------------------------------------------------------------
# bounds for numerical estimation, change if needed
D_MAX = 50.0
D_MIN = np.finfo(np.float32).eps
# TODO: find a proper way to load the data with a relative path
# load, just once and for all, the coefficients for the polynomials in d at fixed L
import os
volumes_path = os.path.join(os.path.split(__file__)[0], "discrete_volumes")
coeff = np.loadtxt(volumes_path + "/L_coefficients_float.dat", dtype=np.float64)
# V_exact_int = np.loadtxt(volume_path + '/V_exact.dat',dtype=np.uint64)
# --------------------------------------------------------------------------------------
def compute_discrete_volume(L, d, O1=False):
"""Enumerate the points contained in a region of radius L according to Manhattan metric
Args:
L (nd.array( integer or float )): radii of the volumes of which points will be enumerated
d (float): dimension of the metric space
        O1 (bool, default=False): first order approximation in the large L limit. Set to False in order to have the o(1/L) approx
Returns:
V (nd.array( integer or float )): points within the given volumes
"""
# if L is one dimensional make it an array
    if isinstance(L, (int, np.integer, float, np.floating)):
L = [L]
# explicit conversion to array of integers
    l = np.array(L, dtype=int)
# exact formula for integer d, cannot be used for floating values
if isinstance(d, (int, np.integer)):
V = 0
for k in range(0, d + 1):
V += scipy.special.binom(d, k) * scipy.special.binom(l - k + d, d)
return V
else:
# exact enumerating formula for non integer d. Use the loaded coefficients to compute
# the polynomials in d at fixed (small) L.
# Exact within numerical precision, as far as the coefficients are available
def V_polynomials(ll):
D = d ** np.arange(coeff.shape[1], dtype=np.double)
V_poly = np.dot(coeff, D)
return V_poly[ll]
# Large L approximation obtained using Stirling formula
def V_Stirling(ll):
if O1:
correction = 2 ** d
else:
correction = (
np.exp(0.5 * (d + d ** 2) / ll) * (1 + np.exp(-d / ll)) ** d
)
return ll ** d / scipy.special.factorial(d) * correction
ind_small_l = l < coeff.shape[0]
V = np.zeros(l.shape[0])
V[ind_small_l] = V_polynomials(l[ind_small_l])
V[~ind_small_l] = V_Stirling(l[~ind_small_l])
return V
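# Quick sanity check of the exact integer-d branch: compute_discrete_volume(2, 2)
# returns [13], the number of lattice points with |x| + |y| <= 2 (1 + 4 + 8).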
# --------------------------------------------------------------------------------------
def compute_derivative_discrete_vol(l, d):
"""compute derivative of discrete volumes with respect to dimension
Args:
L (int): radii at which the derivative is calculated
d (float): embedding dimension
Returns:
dV_dd (ndarray(float) or float): derivative at different values of radius
"""
# exact formula with polynomials, for small L
# assert isinstance(l, (int, np.int))
if l < coeff.shape[0]:
l = int(l)
        D = d ** np.arange(-1, coeff.shape[1] - 1, dtype=np.double)
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable,Function
import utils.frame_utils as frame_utils
import datasets
from datasets import StaticRandomCrop,StaticCenterCrop
try:
from networks.resample2d_package.resample2d import Resample2d
from networks.channelnorm_package.channelnorm import ChannelNorm
from networks.correlation_package.correlation import Correlation
except:
from .networks.resample2d_package.resample2d import Resample2d
from .networks.channelnorm_package.channelnorm import ChannelNorm
from networks.correlation_package.correlation import Correlation
def run_test(rgb_max = 255):
device = torch.device('cuda')
input_re_1 = Variable(torch.from_numpy(np.array(np.arange(0,1*2*3*4),np.float32)).resize(1,2,3,4).cuda(),requires_grad=True)
    input_re_2 = Variable(torch.from_numpy(np.array(np.arange(0,1*2*3*4),np.float32)).resize(1,2,3,4).cuda(),requires_grad=True)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 29 22:06:08 2014
@author: <NAME>
"""
import numpy as np
class quaternion():
"""A simple quaternion class in order to represent a rotation.
To build a quaternion object, one needs to input either the angle and
the unit vector about which the rotation happens or directly the scalar
and vector parts of the quaternion.
Examples
--------
>>> import quaternion as quat
>>> Q1 = quat.quaternion([1.,0.,0.], angl = 90.)
>>> Q2 = quat.quaternion([(0.5)**0.5,0.,0.], W = (0.5)**0.5)
Notes
-----
See <NAME>: "Application of Quaternions to Computation with
Rotations", Working Paper, Stanford AI Lab, 1979.
"""
def __init__(self, vect, **kwargs):
"""Initializes a quaternion object
Parameters
----------
        vect: list of float, depending on kwargs it is either the
            coordinates of the unit vector about which the rotation happens or
            directly the vector part of the quaternion
\**kwargs:
* angl: float, the angle of rotation represented by the quaternion.
            * W: float, the scalar part of the quaternion object.
"""
for name, value in kwargs.items():
if name=='angl':
self.w = np.cos(value/2.*np.pi/180.)
self.x = vect[0]*np.sin(value/2.*np.pi/180.)
self.y = vect[1]*np.sin(value/2.*np.pi/180.)
                self.z = vect[2]*np.sin(value/2.*np.pi/180.)
import os
import json
import shutil
from concurrent import futures
from functools import partial
from glob import glob
import imageio
import h5py
import numpy as np
from PIL import Image, ImageDraw
from skimage import draw as skimage_draw
from skimage import morphology
from tqdm import tqdm
import torch_em
from .util import download_source, unzip, update_kwargs
URLS = {
"segmentation": "https://zenodo.org/record/4665863/files/hpa_dataset_v2.zip"
}
CHECKSUMS = {
"segmentation": "dcd6072293d88d49c71376d3d99f3f4f102e4ee83efb0187faa89c95ec49faa9"
}
def _download_hpa_data(path, name, download):
os.makedirs(path, exist_ok=True)
url = URLS[name]
checksum = CHECKSUMS[name]
zip_path = os.path.join(path, "data.zip")
download_source(zip_path, url, download=download, checksum=checksum)
unzip(zip_path, path, remove=True)
def _load_features(features):
# Loop over list and create simple dictionary & get size of annotations
annot_dict = {}
skipped = []
for feat_idx, feat in enumerate(features):
if feat["geometry"]["type"] not in ["Polygon", "LineString"]:
skipped.append(feat["geometry"]["type"])
continue
# skip empty roi
if len(feat["geometry"]["coordinates"][0]) <= 0:
continue
key_annot = "annot_" + str(feat_idx)
annot_dict[key_annot] = {}
annot_dict[key_annot]["type"] = feat["geometry"]["type"]
annot_dict[key_annot]["pos"] = np.squeeze(
np.asarray(feat["geometry"]["coordinates"])
)
annot_dict[key_annot]["properties"] = feat["properties"]
# print("Skipped geometry type(s):", skipped)
return annot_dict
def _generate_binary_masks(annot_dict, shape, erose_size=5, obj_size_rem=500, save_indiv=False):
# Get dimensions of image and created masks of same size
# This we need to save somewhere (e.g. as part of the geojson file?)
# Filled masks and edge mask for polygons
mask_fill = np.zeros(shape, dtype=np.uint8)
mask_edge = np.zeros(shape, dtype=np.uint8)
    mask_labels = np.zeros(shape, dtype=np.uint16)
import librosa
import numpy as np
from utils import feature_extractor as utils
class EMG:
def __init__(self, audio, config):
self.audio = audio
self.dependencies = config["emg"]["dependencies"]
self.frame_size = int(config["frame_size"])
self.sampling_rate = int(config["sampling_rate"])
self.number_of_bins = int(config["emg"]["number_of_bins"])
self.is_raw_data = config["is_raw_data"]
self.time_lag = int(config["emg"]["time_lag"])
self.embedded_dimension = int(config["emg"]["embedded_dimension"])
self.boundary_frequencies = list(config["emg"]["boundary_frequencies"])
self.hfd_parameter = int(config["emg"]["hfd_parameter"])
self.r = int(config["emg"]["r"])
self.frames = int(np.ceil(len(self.audio.data) / self.frame_size))
def __enter__(self):
print ("Initializing emg calculation...")
def __exit__(self, exc_type, exc_val, exc_tb):
print ("Done with calculations...")
def get_current_frame(self, index):
return utils._get_frame_array(self.audio, index, self.frame_size)
def compute_hurst(self):
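        # Rescaled-range (R/S) analysis per frame: the Hurst exponent is taken as the
        # least-squares slope of log(R/S) versus log(n).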
self.hurst = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = current_frame.size
T = np.arange(1, N + 1)
Y = np.cumsum(current_frame)
Ave_T = Y / T
S_T = np.zeros(N)
R_T = np.zeros(N)
for i in range(N):
S_T[i] = np.std(current_frame[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = np.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = np.log(R_S)[1:]
n = np.log(T)[1:]
A = np.column_stack((n, np.ones(n.size)))
[m, c] = np.linalg.lstsq(A, R_S)[0]
self.hurst.append(m)
self.hurst = np.asarray(self.hurst)
def get_hurst(self):
return self.hurst
def compute_embed_seq(self):
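        # Time-delay embedding via stride tricks: row i of each frame's matrix is
        # [x[i], x[i+tau], ..., x[i+(m-1)*tau]] with tau = time_lag and m = embedded_dimension.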
self.embed_seq = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
shape = (current_frame.size - self.time_lag * (self.embedded_dimension - 1), self.embedded_dimension)
strides = (current_frame.itemsize, self.time_lag * current_frame.itemsize)
m = np.lib.stride_tricks.as_strided(current_frame, shape=shape, strides=strides)
self.embed_seq.append(m)
self.embed_seq = np.asarray(self.embed_seq)
def get_embed_seq(self):
return self.embed_seq
def compute_bin_power(self):
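        # Band power from the FFT magnitude: sum |FFT(frame)| over the bins whose frequencies
        # fall between consecutive boundary_frequencies; Power_Ratio is each band's share of
        # the summed band power.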
self.Power_Ratio = []
self.Power = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
C = np.fft.fft(current_frame)
C = abs(C)
Power = np.zeros(len(self.boundary_frequencies) - 1)
for Freq_Index in range(0, len(self.boundary_frequencies) - 1):
Freq = float(self.boundary_frequencies[Freq_Index])
Next_Freq = float(self.boundary_frequencies[Freq_Index + 1])
Power[Freq_Index] = sum(
C[int(np.floor(Freq / self.sampling_rate * len(current_frame))):
int(np.floor(Next_Freq / self.sampling_rate * len(current_frame)))])
self.Power.append(Power)
self.Power_Ratio.append(Power / sum(Power))
self.Power = np.asarray(self.Power)
        self.Power_Ratio = np.asarray(self.Power_Ratio)
import numpy as np
import numpy.ma as ma
def rot_matrix(theta):
r = np.array(( (np.cos(theta), -np.sin(theta)),
                   (np.sin(theta),  np.cos(theta)) ))
    return r
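# Example: rot_matrix(np.pi / 2) @ np.array([1.0, 0.0]) is approximately [0.0, 1.0].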
import unittest
import numpy as np
from pandas import Index
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as common
import pandas._tseries as lib
class TestTseriesUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isnull(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_merge_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.merge_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_get_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.get_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype('i4')
b = np.random.randint(0, 1000, 100).astype('i4')
result = lib.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='mergesort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated(keys)
expected = [False, False, False, True, False, True]
assert(np.array_equal(result, expected))
result = lib.duplicated(keys, take_last=True)
expected = [True, False, True, False, False, False]
assert(np.array_equal(result, expected))
keys = [(0, 0), (0, np.nan), (np.nan, 0), (np.nan, np.nan)] * 2
result = lib.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = falses + trues
assert(np.array_equal(result, expected))
result = lib.duplicated(keys, take_last=True)
expected = trues + falses
assert(np.array_equal(result, expected))
def test_convert_objects():
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
assert(result.dtype == np.object_)
def test_convert_objects_ints():
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
assert(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
assert(issubclass(result.dtype.type, np.integer))
def test_rank():
from scipy.stats import rankdata
from numpy import nan
def _check(arr):
        mask = ~np.isfinite(arr)
arr = arr.copy()
result = lib.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = np.nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_get_reverse_indexer():
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype='i4')
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype='i4')
    assert(np.array_equal(result, expected))
r"""
srundplug: Undulator spectra calculations. An easy (or not too difficult)
interface to make these calculations using Srw, Urgent, and Us.
functions (summary):
calc1d<code> returns (e,f)
f=flux (phot/s/0.1%bw) versus e=photon energy in eV
calc2d<code> returns (h,v,p)
p=power density (W/mm^2) versus h and v slit
directions in mm
calc3d<code> returns (e,h,v,f)
f = flux (phot/s/0.1%bw/mm^2) versus e=energy in eV,
h and v slit directions in mm
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__copyright__ = "ESRF, 2014-2019"
#
#---------------------------- IMPORT ------------------------------------------
#
import os
import sys
import time
import array
import platform
import numpy
import shutil # to copy files
#SRW
USE_URGENT= True
USE_US = True
USE_SRWLIB = True
USE_PYSRU = False
if USE_SRWLIB:
try:
import oasys_srw.srwlib as srwlib
except:
USE_SRWLIB = False
print("SRW is not available")
# catch standard output
try:
from io import StringIO # Python3
except ImportError:
from StringIO import StringIO # Python2
try:
import matplotlib.pylab as plt
except ImportError:
print("failed to import matplotlib. Do not try to do on-line plots.")
from srxraylib.plot.gol import plot, plot_contour, plot_surface, plot_image, plot_show
########################################################################################################################
#
# GLOBAL NAMES
#
########################################################################################################################
# #Physical constants (global, by now)
import scipy.constants as codata
codata_mee = numpy.array(codata.physical_constants["electron mass energy equivalent in MeV"][0])
m2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)
# counter for output files
scanCounter = 0
# try:
# from xoppylib.xoppy_util import locations
# except:
# raise Exception("IMPORT")
# directory where to find urgent and us binaries
try:
from xoppylib.xoppy_util import locations
home_bin = locations.home_bin()
except:
import platform
if platform.system() == 'Linux':
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Darwin':
home_bin = "/scisoft/xop2.4/bin.darwin/"
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Windows':
home_bin = ""
print("srundplug: undefined home_bin. It has been set to ", home_bin)
else:
raise FileNotFoundError("srundplug: undefined home_bin")
#check
#if os.path.isfile(home_bin + 'us') == False:
# raise FileNotFoundError("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin + 'urgent') == False:
# raise FileNotFoundError("srundplug: File not found: " + home_bin + 'urgent')
# directory where to find urgent and us binaries
try:
home_bin
except NameError:
#home_bin='/users/srio/Oasys/Orange-XOPPY/orangecontrib/xoppy/bin.linux/'
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ",home_bin)
#check
#if os.path.isfile(home_bin+'us') == False:
# print("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin+'urgent') == False:
# sys.exit("srundplug: File not found: "+home_bin+'urgent')
########################################################################################################################
#
# 1D: calc1d<code> Flux calculations
#
########################################################################################################################
def calc1d_pysru(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=5,
npoints_grid=51,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run pySRU for calculating flux
input: a dictionary with beamline
    output: (e, f) arrays of photon energy [eV] and flux [phot/s/0.1%bw]; results are also written to fileName if given
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_pysru")
from pySRU.Simulation import create_simulation
from pySRU.ElectronBeam import ElectronBeam
from pySRU.MagneticStructureUndulatorPlane import MagneticStructureUndulatorPlane
from pySRU.TrajectoryFactory import TrajectoryFactory, TRAJECTORY_METHOD_ANALYTIC,TRAJECTORY_METHOD_ODE
from pySRU.RadiationFactory import RadiationFactory,RADIATION_METHOD_NEAR_FIELD, \
RADIATION_METHOD_APPROX_FARFIELD
myBeam = ElectronBeam(Electron_energy=bl['ElectronEnergy'], I_current=bl['ElectronCurrent'])
myUndulator = MagneticStructureUndulatorPlane(K=bl['Kv'], period_length=bl['PeriodID'], length=bl['PeriodID']*bl['NPeriods'])
is_quadrant = 1
if is_quadrant:
X = numpy.linspace(0,0.5*bl['gapH'],npoints_grid)
Y = numpy.linspace(0,0.5*bl['gapV'],npoints_grid)
else:
X = numpy.linspace(-0.5*bl['gapH'],0.5*bl['gapH'],npoints_grid)
Y = numpy.linspace(-0.5*bl['gapH'],0.5*bl['gapH'],npoints_grid)
#
    # Warning: The automatic calculation of Nb_pts_trajectory depends on the energy at this setup and it
    # will be kept constant over the full spectrum. Therefore, the setup here is done for the most
# "difficult" case, i.e., the highest energy.
# Setting photon_energy=None will do it at the first harmonic, and it was found that the flux
# diverges at high energies in some cases (energy_radiated_approximation_and_farfield)
#
simulation_test = create_simulation(magnetic_structure=myUndulator,electron_beam=myBeam,
magnetic_field=None, photon_energy=photonEnergyMax,
traj_method=TRAJECTORY_METHOD_ODE,Nb_pts_trajectory=None,
rad_method=RADIATION_METHOD_NEAR_FIELD, Nb_pts_radiation=None,
initial_condition=None, distance=bl['distance'],XY_are_list=False,X=X,Y=Y)
# simulation_test.trajectory.plot()
simulation_test.print_parameters()
# simulation_test.radiation.plot(title=("radiation in a screen for first harmonic"))
print("Integrated flux at resonance: %g photons/s/0.1bw"%(simulation_test.radiation.integration(is_quadrant=is_quadrant)))
energies = numpy.linspace(photonEnergyMin,photonEnergyMax,photonEnergyPoints)
eArray,intensArray = simulation_test.calculate_spectrum_on_slit(abscissas_array=energies,use_eV=1,is_quadrant=is_quadrant,do_plot=0)
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using pySRU\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
for i in range(eArray.size):
f.write(' ' + repr(eArray[i]) + ' ' + repr(m2ev/eArray[i]*1e10) + ' ' +
repr(intensArray[i]) + ' ' +
repr(intensArray[i]*codata.e*1e3) + '\n')
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,zero_emittance=False,
srw_max_harmonic_number=None,fileName=None,fileAppend=False):
r"""
run SRW for calculating flux
input: a dictionary with beamline
    output: (e, f) arrays of photon energy [eV] and flux [phot/s/0.1%bw]; results are also written to fileName if given
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_srw")
#derived
#TODO calculate the numerical factor using codata
#B0 = bl['Kv']/0.934/(bl['PeriodID']*1e2)
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh']/bl['PeriodID']/cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
if srw_max_harmonic_number == None:
gamma = bl['ElectronEnergy'] / (codata_mee * 1e-3)
try:
Kh = bl['Kh']
except:
Kh = 0.0
resonance_wavelength = (1 + (bl['Kv']**2 + Kh**2) / 2.0) / 2 / gamma**2 * bl["PeriodID"]
resonance_energy = m2ev / resonance_wavelength
srw_max_harmonic_number = int(photonEnergyMax / resonance_energy * 2.5)
print ("Max harmonic considered:%d ; Resonance energy: %g eV\n"%(srw_max_harmonic_number,resonance_energy))
Nmax = srw_max_harmonic_number # 21,61
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
harmB.n = 1 #harmonic number ??? Mostly asymmetry
        harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] #period length [m]
und.nPer = bl['NPeriods'] #number of periods (will be rounded to integer)
#Container of all magnetic field elements
magFldCnt = srwlib.SRWLMagFldC([und], srwlib.array('d', [0]), srwlib.array('d', [0]), srwlib.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.array('d', [0.0]),
_arYc=srwlib.array('d', [0.0]),
_arZc=srwlib.array('d', [0.0]))
#***********Electron Beam
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0 #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0 #initial relative transverse velocities
eBeam.partStatMom1.yp = 0
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
sigEperE = 1e-25
else:
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
sigEperE = bl['ElectronEnergySpread']
print("calc1dSrw: starting calculation using ElectronEnergySpead=%e \n"%((sigEperE)))
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecF = [0]*5 #for spectral flux vs photon energy
arPrecF[0] = 1 #initial UR harmonic to take into account
arPrecF[1] = Nmax #final UR harmonic to take into account
arPrecF[2] = 1.5 #longitudinal integration precision parameter
arPrecF[3] = 1.5 #azimuthal integration precision parameter
arPrecF[4] = 1 #calculate flux (1) or flux per unit surface (2)
#***********UR Stokes Parameters (mesh) for Spectral Flux
stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
#srio stkF.allocate(10000, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.allocate(photonEnergyPoints, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
stkF.mesh.xStart = bl['gapHcenter'] - bl['gapH']/2 #initial horizontal position [m]
stkF.mesh.xFin = bl['gapHcenter'] + bl['gapH']/2 #final horizontal position [m]
stkF.mesh.yStart = bl['gapVcenter'] - bl['gapV']/2 #initial vertical position [m]
stkF.mesh.yFin = bl['gapVcenter'] + bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux (Stokes parameters) calculation ... ') # , end='')
srwlib.srwl.CalcStokesUR(stkF, eBeam, und, arPrecF)
print('Done calc1dSrw calculation in %10.3f s'%(time.time()-t0))
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#UD B0 = %f\n"%(B0))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
eArray = numpy.zeros(photonEnergyPoints)
intensArray = numpy.zeros(photonEnergyPoints)
for i in range(stkF.mesh.ne):
ener = stkF.mesh.eStart+i*(stkF.mesh.eFin-stkF.mesh.eStart)/numpy.array((stkF.mesh.ne-1)).clip(min=1)
if fileName is not None: f.write(' ' + repr(ener) + ' ' + repr(m2ev/ener*1e10) + ' ' +
repr(stkF.arS[i]) + ' ' +
repr(stkF.arS[i]*codata.e*1e3) + '\n')
eArray[i] = ener
intensArray[i] = stkF.arS[i]
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_urgent(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run Urgent for calculating flux
input: a dictionary with beamline
    output: (e, f) arrays of photon energy [eV] and flux [phot/s/0.1%bw]; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc1d_urgent")
t0 = time.time()
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/numpy.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("%f\n"%(photonEnergyMin)) #EMIN
f.write("%f\n"%(photonEnergyMax)) #EMAX
f.write("%d\n"%(photonEnergyPoints)) #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(bl['gapHcenter']*1e3)) #XPC
f.write("%f\n"%(bl['gapVcenter']*1e3)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(50)) #NXP
f.write("%d\n"%(50)) #NYP
f.write("%d\n"%(4)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.inp")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUrgent calculation in %10.3f s'%(time.time()-t0))
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 10\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) Spectral Power(W/eV) imin imax p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
nArray += 1
tmp = tmp.replace('D','e')
if fileName is not None: f.write(tmp)
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in numpy arrays for return
eArray = numpy.zeros(nArray)
intensArray = numpy.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[2]
return (eArray,intensArray)
def calc1d_us(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run US for calculating flux
input: a dictionary with beamline
    output: (e, f) arrays of photon energy [eV] and flux [phot/s/0.1%bw]; results are also written to fileName if given
"""
global scanCounter
global home_bin
t0 = time.time()
for file in ["us.inp","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
print("Inside calc1d_us")
with open("us.inp","wt") as f:
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" %f %f %d Emin Emax Ne\n"%
(photonEnergyMin,photonEnergyMax,photonEnergyPoints) )
f.write(" %f %f %f %f %f 50 50 D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapHcenter']*1e3,bl['gapVcenter']*1e3,bl['gapH']*1e3,bl['gapV']*1e3) )
# f.write(" 4 4 0 Mode Method Iharm\n")
if zero_emittance:
f.write(" 4 3 0 Mode Method Iharm\n")
else:
f.write(" 4 4 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.inp')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUs calculation in %10.3f s'%(time.time()-t0))
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 8\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) SpectralPower(W/ev) p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
tmp = tmp.replace('D','e')
tmp = numpy.fromstring(tmp,dtype=float,sep=' ')
if fileName is not None:
f.write(("%g "*8+"\n")%(tmp[0],1e10*m2ev/tmp[0],tmp[1],tmp[1]*1e3*codata.e,tmp[2],tmp[3],tmp[4],tmp[5]))
nArray += 1
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in numpy arrays for return
eArray = numpy.zeros(nArray)
intensArray = numpy.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[1]
return (eArray,intensArray)
########################################################################################################################
#
# 2D: calc2d<code> Power density calculations
#
########################################################################################################################
def calc2d_pysru(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
photonEnergyMin=50.0,photonEnergyMax=2500.0,photonEnergyPoints=2451,
fileName=None,fileAppend=False):
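    # Power density from the 3D intensity map of calc3d_pysru: the flux density
    # [phot/s/0.1%bw/mm^2] is converted to spectral power density via codata.e*1e3 [W/eV/mm^2]
    # and summed over photon energy with step e_step to give W/mm^2.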
e,h,v,i = calc3d_pysru(bl,zero_emittance=zero_emittance,
photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
hSlitPoints=hSlitPoints,vSlitPoints=vSlitPoints,
fileName=fileName,fileAppend=fileAppend)
e_step = (photonEnergyMax - photonEnergyMin) / photonEnergyPoints
plot(e,(i.sum(axis=2)).sum(axis=1)*(v[1]-v[0])*(h[1]-h[0]),show=0,title="Spectrum for %s"%bl)
return (h,v,i.sum(axis=0)*e_step*codata.e*1e3)
def calc2d_srw(bl,zero_emittance=False,hSlitPoints=101,vSlitPoints=51,
srw_max_harmonic_number=51, # Not needed, kept for eventual compatibility
fileName=None,fileAppend=False,):
r"""
run SRW for calculating power density
input: a dictionary with beamline
    output: (h, v, p) arrays of slit coordinates [mm] and power density [W/mm^2]; results are also written to fileName if given
"""
global scanCounter
print("Inside calc2d_srw")
#Maximum number of harmonics considered. This is critical for speed.
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
harmB.n = 1 #harmonic number ??? Mostly asymmetry
        harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] # period length [m]
und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
magFldCnt = None
magFldCnt = srwlib.SRWLMagFldC([und], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.array('d', [0.0]),
_arYc=srwlib.array('d', [0.0]),
_arZc=srwlib.array('d', [0.0]))
#***********Electron Beam
eBeam = None
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0. #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0. #initial relative transverse velocities
eBeam.partStatMom1.yp = 0.
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigEperE = 1e-25
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
else:
sigEperE = bl['ElectronEnergySpread'] #relative RMS energy spread
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0.0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0.0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecP = [0]*5 #for power density
arPrecP[0] = 1.5 #precision factor
arPrecP[1] = 1 #power density computation method (1- "near field", 2- "far field")
arPrecP[2] = 0.0 #initial longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[3] = 0.0 #final longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[4] = 20000 #number of points for (intermediate) trajectory calculation
    #***********UR Stokes Parameters (mesh) for power density
stkP = None
stkP = srwlib.SRWLStokes() #for power density
stkP.allocate(1, hSlitPoints, vSlitPoints) #numbers of points vs horizontal and vertical positions (photon energy is not taken into account)
stkP.mesh.zStart = bl['distance'] #longitudinal position [m] at which power density has to be calculated
stkP.mesh.xStart = -bl['gapH']/2.0 #initial horizontal position [m]
stkP.mesh.xFin = bl['gapH']/2.0 #final horizontal position [m]
stkP.mesh.yStart = -bl['gapV']/2.0 #initial vertical position [m]
stkP.mesh.yFin = bl['gapV']/2.0 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Power Density calculation (from field) ... ')
t0 = time.time()
try:
srwlib.srwl.CalcPowDenSR(stkP, eBeam, 0, magFldCnt, arPrecP)
print('Done Performing Power Density calculation (from field).')
except:
print("Error running SRW")
raise ("Error running SRW")
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
#
# write power density to file as mesh scan
#
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write('\n#U B0 = ' + repr(B0 ) + '\n' )
f.write('\n#U hSlitPoints = ' + repr(hSlitPoints) + '\n' )
f.write('\n#U vSlitPoints = ' + repr(vSlitPoints) + '\n' )
f.write("#N 3 \n#L H[mm] V[mm] PowerDensity[W/mm^2] \n" )
hArray = numpy.zeros(stkP.mesh.nx)
vArray = numpy.zeros(stkP.mesh.ny)
totPower = numpy.array(0.0)
hProfile = numpy.zeros(stkP.mesh.nx)
vProfile = numpy.zeros(stkP.mesh.ny)
powerArray = numpy.zeros((stkP.mesh.nx,stkP.mesh.ny))
# fill arrays
ij = -1
for j in range(stkP.mesh.ny):
for i in range(stkP.mesh.nx):
ij += 1
xx = stkP.mesh.xStart + i*(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
yy = stkP.mesh.yStart + j*(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
#ij = i*stkP.mesh.nx + j
totPower += stkP.arS[ij]
powerArray[i,j] = stkP.arS[ij]
hArray[i] = xx*1e3 # mm
vArray[j] = yy*1e3 # mm
# dump
if fileName is not None:
for i in range(stkP.mesh.nx):
for j in range(stkP.mesh.ny):
f.write(repr(hArray[i]) + ' ' + repr(vArray[j]) + ' ' + repr(powerArray[i,j]) + '\n')
totPower = totPower * \
(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)*1e3 * \
(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)*1e3
hStep = (stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
# dump profiles
if fileName is not None:
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(hProfile,hStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L H[mm] PowerDensityCentralProfile[W/mm2] \n" )
for i in range(stkP.mesh.nx):
#xx = stkP.mesh.xStart + i*hStep
#f.write(repr(xx*1e3) + ' ' + repr(hProfile[i]) + '\n')
f.write(repr(hArray[i]) + ' ' + \
repr(powerArray[i,int(len(vArray)/2)]) + '\n')
scanCounter +=1
vStep = (stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
f.write("\n#S %d Undulator power density calculation using SRW: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(vProfile,vStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L V[mm] PowerDensityCentralProfile[W/mm2] \n" )
for j in range(stkP.mesh.ny):
f.write(repr(vArray[j]) + ' ' + \
repr(powerArray[int(len(hArray)/2),j]) + '\n')
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak SRW: [W/mm2]: "+repr(powerArray.max()))
print( "Total power SRW [W]: "+repr(totPower))
return (hArray, vArray, powerArray)
def calc2d_us(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,fileName=None,fileAppend=False):
r"""
run US for calculating power density
input: a dictionary with beamline
    output: (h, v, p) arrays of slit coordinates [mm] and power density [W/mm^2]; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_us")
for file in ["us.inp","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("us.inp","wt") as f:
#f.write("%d\n"%(1)) # ITYPE
#f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" 9972.1 55000.0 500 Emin Emax Ne\n")
f.write(" %f 0.000 0.000 %f %f %d %d D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapH']*1e3,bl['gapV']*1e3,hSlitPoints-1,vSlitPoints-1) )
if zero_emittance:
f.write(" 6 3 0 Mode Method Iharm\n")
else:
f.write(" 6 1 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.inp')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("Done.")
print("\n--------------------------------------------------------\n")
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator power density calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 7\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] p1 p2 p3 p4\n")
mesh = numpy.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = numpy.concatenate((-hh[::-1],hh[1:]))
vvv = numpy.concatenate((-vv[::-1],vv[1:]))
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
totPower = int_mesh2.sum() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak US: [W/mm2]: "+repr(int_mesh2.max()))
print( "Total power US [W]: "+repr(totPower))
return (hhh, vvv, int_mesh2)
def calc2d_urgent(bl,zero_emittance=False,fileName=None,fileAppend=False,hSlitPoints=21,vSlitPoints=51):
r"""
run Urgent for calculating power density
input: a dictionary with beamline
    output: (h, v, p) arrays of slit coordinates [mm] and power density [W/mm^2]; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_urgent")
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/numpy.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("1000.0\n") #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(6)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(2))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-200)) #IHARM TODO: check max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.inp")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (a slit quadrant)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 4\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] Flux[Phot/s/0.1%bw]\n")
mesh = numpy.zeros((4,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if len(tmp) > 0: # remove the last block
if tmp.split(" ")[0] == 'HARMONIC':
break
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = numpy.concatenate((-hh[::-1],hh[1:]))
vvv = numpy.concatenate((-vv[::-1],vv[1:]))
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
totPower = int_mesh2.sum() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak URGENT: [W/mm2]: "+repr(int_mesh2.max()))
print( "Total power URGENT [W]: "+repr(totPower))
print("\n--------------------------------------------------------\n\n")
return (hhh, vvv, int_mesh2)
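# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): build a minimal
# beamline dictionary and call calc2d_urgent() to obtain the power density mesh.
# All numerical values below are illustrative assumptions, and running the
# sketch requires the URGENT binary to be available under home_bin.
def _example_calc2d_urgent():
    bl = {'PeriodID': 0.018, 'NPeriods': 111, 'Kv': 1.68,
          'ElectronEnergy': 6.0, 'ElectronCurrent': 0.2,
          'ElectronBeamSizeH': 395e-6, 'ElectronBeamSizeV': 9.9e-6,
          'ElectronBeamDivergenceH': 10.5e-6, 'ElectronBeamDivergenceV': 3.9e-6,
          'distance': 30.0, 'gapH': 0.002, 'gapV': 0.002}
    h, v, power_density = calc2d_urgent(bl, fileName="undulator_power.spec",
                                        hSlitPoints=21, vSlitPoints=51)
    print("peak power density [W/mm2]:", power_density.max())
    return h, v, power_density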
########################################################################################################################
#
# 3D: calc3d<code> Emission calculations
#
########################################################################################################################
def calc3d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False):
r"""
run SRW for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
print("Inside calc3d_srw")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
#***********Precision Parameters
mesh = srwlib.SRWLRadMesh(photonEnergyMin,photonEnergyMax,photonEnergyPoints,
-bl['gapH']/2,bl['gapH']/2,hSlitPoints,
-bl['gapV']/2,bl['gapV']/2,vSlitPoints,bl['distance'])
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
# harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
# harmB.n = 1 #harmonic number ??? Mostly asymmetry
        # harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
# harmB.B = B0 #magnetic field amplitude [T]
# und = srwlib.SRWLMagFldU([harmB])
# und.per = bl['PeriodID'] # period length [m]
# und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
#
# magFldCnt = None
# magFldCnt = srwlib.SRWLMagFldC([und], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.allocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
if zero_emittance:
#
# single electron
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramSE = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramSE)
print('Extracting stokes ... ')
stk = srwlib.SRWLStokes()
stk.mesh = mesh
stk.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, -eBeam.moved)
wfr.calc_stokes(stk)
# Stokes0ToSpec(stk,fname=fileName)
#
# intensArray,eArray,hArray,vArray = Stokes0ToArrays(stk)
Shape = (4,stk.mesh.ny,stk.mesh.nx,stk.mesh.ne)
data = numpy.ndarray(buffer=stk.arS, shape=Shape,dtype=stk.arS.typecode)
data0 = data #[0]
hArray = numpy.linspace(stk.mesh.xStart,stk.mesh.xFin,stk.mesh.nx)
vArray = numpy.linspace(stk.mesh.yStart,stk.mesh.yFin,stk.mesh.ny)
eArray = numpy.linspace(stk.mesh.eStart,stk.mesh.eFin,stk.mesh.ne)
# intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size))
print('Filling output array... ')
intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size))
for ie in range(eArray.size):
for ix in range(hArray.size):
for iy in range(vArray.size):
# intensArray[ie,ix,iy] = data0[iy,ix,ie]
intensArray[ie,ix,iy,] = data[0,iy,ix,ie]
else:
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output array... ')
mesh0 = wfr.mesh
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" array to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
hArray=numpy.linspace(wfr.mesh.xStart,wfr.mesh.xFin, wfr.mesh.nx)
vArray=numpy.linspace(wfr.mesh.yStart,wfr.mesh.yFin, wfr.mesh.ny)
eArray=numpy.linspace(wfr.mesh.eStart,wfr.mesh.eFin, wfr.mesh.ne)
intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size,))
for ie in range(eArray.size):
arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = numpy.ndarray(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
print(' done\n')
print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
        print(' saving SE Stokes to file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
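# ----------------------------------------------------------------------------
# Hypothetical post-processing sketch (not part of the original module): given
# the (energy, H, V) block returned by calc3d_srw(), recover the flux spectrum
# through the whole slit and the total power. The codata.e*1e3 factor converts
# phot/s/0.1%bw into W/eV, as done elsewhere in this module; all names are
# illustrative.
def _example_integrate_calc3d_output(e, h, v, intens):
    dh = h[1] - h[0]                                 # mm
    dv = v[1] - v[0]                                 # mm
    flux = intens.sum(axis=(1, 2)) * dh * dv         # phot/s/0.1%bw vs energy
    power_per_ev = flux * codata.e * 1e3             # W/eV
    total_power = numpy.trapz(power_per_ev, e)       # W
    return flux, total_power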
def calc3d_srw_step_by_step(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
photonEnergyIntelligentGrid=False,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False,):
r"""
run SRW for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
print("Inside calc3d_srw_step_by_step")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyIntelligentGrid and photonEnergyPoints > 1:
e, f = calc1d_srw(bl,photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
zero_emittance=zero_emittance,srw_max_harmonic_number=None,fileName=None,fileAppend=False)
# cs = numpy.cumsum(f)
from scipy.integrate import cumtrapz
cs = cumtrapz(f,e,initial=0)
cs /= cs[-1]
# plot(cs,e)
# plot(e, numpy.gradient(f,e))
        quantiles = numpy.linspace(0,1.0,photonEnergyPoints)  # renamed to avoid shadowing the built-in abs
        e1 = numpy.interp(quantiles,cs,e)
e1[0] = photonEnergyMin
e1[-1] = photonEnergyMax
# print(">>>>>>>e ",e)
# print(">>>>>>>e1: ",e1)
eArray = e1
else:
eArray = numpy.linspace(photonEnergyMin, photonEnergyMax, photonEnergyPoints, )
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.allocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
hArray = numpy.linspace(-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints, )
vArray = numpy.linspace(-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, )
intensArray = numpy.zeros((eArray.size, hArray.size, vArray.size,))
timeArray = numpy.zeros_like(eArray)
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
t00 = 0
for ie in range(eArray.size):
        print("Calculating photon energy: %f (point %d of %d) time:%g"%(eArray[ie],ie+1,eArray.size,time.time()-t00))
t00 = time.time()
try:
mesh = srwlib.SRWLRadMesh(eArray[ie], eArray[ie], 1,
-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints,
-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, bl['distance'])
wfr = srwlib.SRWLWfr()
wfr.allocate(1, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
wfr.mesh = mesh
wfr.partBeam = eBeam
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output array... ')
mesh0 = wfr.mesh
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" array to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
if zero_emittance:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_SINGLE_ELECTRON, 3, eArray[ie], 0, 0)
else:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = numpy.ndarray(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
except:
print("Error running SRW")
timeArray[ie] = time.time() - t00
print(' done\n')
print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
        print(' saving SE Stokes to file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
# tmp = intensArray.sum(axis=2).sum(axis=1)
# f = open("tmp.dat",'w')
# for i in range(eArray.size):
# f.write("%f %f %f\n"%(eArray[i],timeArray[i],tmp[i]))
# f.close()
# print("File written to disk: tmp.dat")
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
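# ----------------------------------------------------------------------------
# Hypothetical sketch of the "intelligent" photon-energy grid used above (not
# part of the original module): normalise the cumulative integral of a 1d flux
# curve to [0, 1] and invert it at equally spaced quantiles, so that energy
# points concentrate where the spectrum carries most of its flux. e, f and
# npoints are illustrative names.
def _example_intelligent_energy_grid(e, f, npoints):
    from scipy.integrate import cumtrapz
    cs = cumtrapz(f, e, initial=0)
    cs /= cs[-1]                                  # cumulative flux in [0, 1]
    quantiles = numpy.linspace(0.0, 1.0, npoints)
    e_new = numpy.interp(quantiles, cs, e)        # denser where flux is high
    e_new[0], e_new[-1] = e[0], e[-1]             # pin the end points
    return e_new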
def calc3d_urgent(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=False,copyUrgentFiles=False):
r"""
run Urgent for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
global home_bin
print("Inside calc3d_urgent")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = numpy.zeros( photonEnergyPoints )
intensArray = numpy.zeros( photonEnergyPoints )
hArray = numpy.zeros( (hSlitPoints*2-1) )
vArray = numpy.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = numpy.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = numpy.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) )
for iEner in range(photonEnergyPoints):
ener = photonEnergyMin + iEner*eStep
eArray[iEner] = ener
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) # KX
f.write("%f\n"%(bl['Kv'])) # KY
f.write("%f\n"%(Kphase)) # PHASE
f.write("%d\n"%(bl['NPeriods'])) # N
f.write("%f\n"%(ener)) #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(1)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM TODO: check max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin, 'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin, "urgent' < urgent.inp")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
if copyUrgentFiles:
shutil.copy2("urgent.inp","urgent_energy_index%d.inp"%iEner)
shutil.copy2("urgent.out","urgent_energy_index%d.out"%iEner)
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%0.3f keV (a slit quadrant)\n"%(scanCounter,ener*1e-3))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 7\n")
fout.write("#L H[mm] V[mm] Flux[Phot/s/mm^2/0.1%bw] l1 l2 l3 l4\n")
if zero_emittance:
mesh = numpy.zeros((8,(hSlitPoints)*(vSlitPoints)))
else:
mesh = numpy.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None:
fout.write(tmp)
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None:
fout.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hArray = numpy.concatenate((-hh[::-1],hh[1:]))
vArray = numpy.concatenate((-vv[::-1],vv[1:]))
#hArray = hhh*0.0
#vArray = vvv*0.0
totIntens = 0.0
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%6.3f eV (whole slit )\n"%(scanCounter,ener))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
if fileName is not None: fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2[i,j]) )
int_mesh3[iEner,i,j] = int_mesh2[i,j]
int_mesh2integrated[i,j] += int_mesh2[i,j]
totIntens += int_mesh2[i,j]
totIntens = totIntens * (hh[1]-hh[0]) * (vv[1]-vv[0])
intensArray[iEner] = totIntens
# now dump the integrated power
    # convert from phot/s/0.1%bw/mm2 to W/mm^2
int_mesh2integrated = int_mesh2integrated *codata.e*1e3 * eStep
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs H,E (integrated in energy) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#UD IntegratedPower[W] = %f\n"%( int_mesh2integrated.sum()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0])))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2integrated[i,j]) )
#print(">>>>>>>>>>>>>>>power1",int_mesh2integrated.sum()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
#print(">>>>>>>>>>>>>>>power2",intensArray.sum()*codata.e*1e3*(eArray[1]-eArray[0]))
#print(">>>>>>>>>>>>>>>power3",int_mesh3.sum()*codata.e*1e3*(eArray[1]-eArray[0])*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
# now dump the spectrum as the sum
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs energy (integrated in H,V) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
if photonEnergyPoints > 1:
fout.write("#UD IntegratedPower[W] = %f\n"%(intensArray.sum()*codata.e*1e3*(eArray[1]-eArray[0])))
fout.write("#N 3\n")
fout.write("#L photonEnergy[eV] Flux[phot/s/0.1%bw] PowerDensity[W/eV]\n")
for i in range(photonEnergyPoints):
fout.write("%f %f %f\n"%(eArray[i],intensArray[i],intensArray[i]*codata.e*1e3) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print("\n--------------------------------------------------------\n\n")
# append direct calculation for comparison
# tmp = calc1d_urgent(bl,photonEnergyMin=photonEnergyMin,
# photonEnergyMax=photonEnergyMax,
# photonEnergyPoints=photonEnergyPoints,
# fileName=fileName,fileAppend=True)
# return abscissas in mm
return (eArray, hArray, vArray, int_mesh3)
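# ----------------------------------------------------------------------------
# Hypothetical post-processing sketch (not part of the original module):
# calc3d_urgent() returns flux density per 0.1% bandwidth on an (energy, H, V)
# grid; integrating over energy with the codata.e*1e3 factor used above gives a
# power-density map in W/mm^2. Names are illustrative.
def _example_power_density_from_calc3d(e, h, v, int_mesh3):
    power_density = numpy.trapz(int_mesh3 * codata.e * 1e3, e, axis=0)  # W/mm^2
    total_power = power_density.sum() * (h[1] - h[0]) * (v[1] - v[0])   # W
    return power_density, total_power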
def calc3d_us(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=True,copyUsFiles=False):
r"""
run Us for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
global home_bin
print("Inside calc3d_us")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = numpy.zeros( photonEnergyPoints )
intensArray = numpy.zeros( photonEnergyPoints )
hArray = numpy.zeros( (hSlitPoints*2-1) )
vArray = numpy.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = numpy.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = | numpy.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) ) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Manipulating functions for grid.
References:
* https://bitbucket.org/tmiyachi/pymet
"""
import numpy as np
from numba import jit
from scipy import interpolate, ndimage
from pyproj import Geod
from dk_met_base import constants, arr
NA = np.newaxis
a0 = constants.Re
g = constants.g0
PI = constants.pi
d2r = PI/180.
def calc_dx_dy(lon, lat, shape='WGS84', radius=6370997.):
"""
This definition calculates the distance between grid points
that are in a latitude/longitude format.
Using pyproj GEOD; different Earth Shapes
https://jswhit.github.io/pyproj/pyproj.Geod-class.html
Common shapes: 'sphere', 'WGS84', 'GRS80'
:param lon: 1D or 2D longitude array.
:param lat: 1D or 2D latitude array.
:param shape: earth shape.
:param radius: earth radius.
:return: dx, dy; 2D arrays of distances between grid points
in the x and y direction in meters
:Example:
>>> lat = np.arange(90,-0.1,-0.5)
>>> lon = np.arange(0,360.1,0.5)
>>> dx, dy = calc_dx_dy(lon, lat)
"""
# check longitude and latitude
if lon.ndim == 1:
longitude, latitude = np.meshgrid(lon, lat)
else:
longitude = lon
latitude = lat
if radius != 6370997.:
gg = Geod(a=radius, b=radius)
else:
gg = Geod(ellps=shape)
dx = np.empty(latitude.shape)
dy = np.zeros(longitude.shape)
for i in range(latitude.shape[1]):
for j in range(latitude.shape[0] - 1):
_, _, dx[j, i] = gg.inv(
longitude[j, i], latitude[j, i], longitude[j + 1, i],
latitude[j + 1, i])
dx[j + 1, :] = dx[j, :]
for i in range(latitude.shape[1] - 1):
for j in range(latitude.shape[0]):
_, _, dy[j, i] = gg.inv(
longitude[j, i], latitude[j, i], longitude[j, i + 1],
latitude[j, i + 1])
dy[:, i + 1] = dy[:, i]
return dx, dy
def dvardx(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
calculate center finite difference along x or longitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
:Examples:
>>> var.shape
(24, 73, 72)
>>> lon = np.arange(0, 180, 2.5)
>>> lat = np.arange(-90, 90.1, 2.5)
>>> result = dvardx(var, lon, lat, 2, 1, cyclic=False)
>>> result.shape
(24, 73, 72)
"""
var = np.array(var)
ndim = var.ndim
var = np.rollaxis(var, xdim, ndim)
if cyclic and sphere:
dvar = np.concatenate(((var[..., 1] - var[..., -1])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., 0] - var[..., -2])[..., NA]), axis=-1)
dx = np.r_[(lon[1] + 360 - lon[-1]), (lon[2:] - lon[:-2]),
(lon[0] + 360 - lon[-2])]
else:
dvar = np.concatenate(((var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]),
axis=-1)
dx = np.r_[(lon[1] - lon[0]), (lon[2:] - lon[:-2]),
(lon[-1] - lon[-2])]
dvar = np.rollaxis(dvar, ndim - 1, xdim)
if sphere:
dx = a0 * PI / 180. * arr.expand(dx, ndim, xdim) * \
arr.expand(np.cos(lat * d2r), ndim, ydim)
else:
dx = arr.expand(dx, ndim, xdim)
out = dvar / dx
return out
def dvardy(var, lat, ydim, sphere=True):
"""
calculate center finite difference along y or latitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lat: array_like, latitude
:param ydim: the latitude dimension index
:param sphere: sphere coordinate
:return: ndarray
:Examples:
>>> var.shape
(24, 73, 144)
>>> lat = np.arange(-90, 90.1, 2.5)
>>> result = dvardy(var, lat, 1)
>>> result.shape
(24, 73, 144)
"""
var = np.array(var)
ndim = var.ndim
var = np.rollaxis(var, ydim, ndim)
dvar = np.concatenate([(var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:]-var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]],
axis=-1)
dy = np.r_[(lat[1]-lat[0]), (lat[2:]-lat[:-2]), (lat[-1]-lat[-2])]
if sphere:
dy = a0*PI/180.*dy
out = dvar/dy
out = np.rollaxis(out, ndim-1, ydim)
return out
def dvardp(var, lev, zdim, punit=100.):
"""
calculate center finite difference along vertical coordinate.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lev: 1d-array, isobaric levels.
:param zdim: the vertical dimension index.
:param punit: pressure level units.
:return: ndarray.
"""
var = np.array(var)
ndim = var.ndim
lev = lev * punit
# roll lat dim axis to last
var = np.rollaxis(var, zdim, ndim)
dvar = np.concatenate([(var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]],
axis=-1)
dp = np.r_[np.log(lev[1] / lev[0]) * lev[0],
np.log(lev[2:] / lev[:-2]) * lev[1:-1],
np.log(lev[-1] / lev[-2]) * lev[-1]]
out = dvar / dp
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim - 1, zdim)
return out
def d2vardx2(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
calculate second center finite difference along x or longitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.array(var)
ndim = var.ndim
# roll lon dim axis to last
var = np.rollaxis(var, xdim, ndim)
if cyclic and sphere:
dvar = np.concatenate(((var[..., 1]-2*var[..., 0] +
var[..., -1])[..., NA],
(var[..., 2:]-2*var[..., 1:-1] + var[..., :-2]),
(var[..., 0]-2*var[..., -1] +
var[..., -2])[..., NA]), axis=-1)
dx = np.r_[(lon[1]+360-lon[-1]), (lon[2:]-lon[:-2]),
(lon[0]+360-lon[-2])]
else: # edge is zero
dvar = np.concatenate(((var[..., 0]-var[..., 0])[..., NA],
(var[..., 2:]-2*var[..., 1:-1]+var[..., :-2]),
(var[..., 0]-var[..., 0])[..., NA]), axis=-1)
dx = np.r_[(lon[1]-lon[0]), (lon[2:]-lon[:-2]), (lon[-1]-lon[-2])]
dvar = np.rollaxis(dvar, ndim-1, xdim)
if sphere:
dx2 = a0 ** 2 * (PI/180.) ** 2 * arr.expand(dx ** 2, ndim, xdim) * \
arr.expand(np.cos(lat * d2r) ** 2, ndim, ydim)
else:
dx2 = arr.expand(dx ** 2, ndim, xdim)
out = 4.*dvar/dx2
# reroll lon dim axis to original dim
out = np.rollaxis(out, ndim-1, xdim)
return out
def d2vardy2(var, lat, ydim, sphere=True):
"""
calculate second center finite difference along y or latitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lat: array_like, latitude
:param ydim: the latitude dimension index
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.array(var)
ndim = var.ndim
# roll lat dim axis to last
var = np.rollaxis(var, ydim, ndim)
# edge is zero
dvar = np.concatenate([(var[..., 0] - var[..., 0])[..., NA],
(var[..., 2:] - 2*var[..., 1:-1] + var[..., :-2]),
(var[..., 0] - var[..., 0])[..., NA]], axis=-1)
dy = np.r_[(lat[1]-lat[0]), (lat[2:]-lat[:-2]), (lat[-1]-lat[-2])]
if sphere:
dy2 = a0**2 * dy**2
else:
dy2 = dy**2
out = 4.*dvar/dy2
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim-1, ydim)
return out
def dvardvar(var1, var2, dim):
"""
Calculate d(var1)/d(var2) along axis=dim.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var1: numpy nd array, denominator of derivative
:param var2: numpy nd array, numerator of derivative
:param dim: along dimension.
:return:
"""
var1, var2 = np.array(var1), np.array(var2)
ndim = var1.ndim
# roll dim axis to last
var1 = np.rollaxis(var1, dim, ndim)
var2 = np.rollaxis(var2, dim, ndim)
dvar1 = np.concatenate([(var1[..., 1] - var1[..., 0])[..., NA],
(var1[..., 2:] - var1[..., :-2]),
(var1[..., -1] - var1[..., -2])[..., NA]], axis=-1)
dvar2 = np.concatenate([(var2[..., 1] - var2[..., 0])[..., NA],
(var2[..., 2:] - var2[..., :-2]),
(var2[..., -1] - var2[..., -2])[..., NA]], axis=-1)
out = dvar1 / dvar2
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim - 1, dim)
return out
def div(u, v, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate horizontal divergence.
:param u: ndarray, u-component wind.
:param v: ndarray, v-component wind.
:param lon: array_like, longitude.
:param lat: array_like, latitude.
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
u, v = np.array(u), np.array(v)
ndim = u.ndim
out = dvardx(u, lon, lat, xdim, ydim, cyclic=cyclic,
sphere=sphere) + dvardy(v, lat, ydim, sphere=sphere)
if sphere:
out = out - v * arr.expand(np.tan(lat * d2r), ndim, ydim) / a0
out = np.rollaxis(out, ydim, 0)
out[0, ...] = 0.
out[-1, ...] = 0.
out = np.rollaxis(out, 0, ydim + 1)
return out
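# Hypothetical usage sketch (not part of the original module): call div() on a
# synthetic (time, lat, lon) wind field; xdim/ydim follow the docstring
# convention and the random values only illustrate the expected shapes.
def _example_div():
    lon = np.arange(0, 360, 2.5)
    lat = np.arange(-90, 90.1, 2.5)
    u = np.random.randn(4, lat.size, lon.size)
    v = np.random.randn(4, lat.size, lon.size)
    return div(u, v, lon, lat, xdim=2, ydim=1, cyclic=True, sphere=True)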
def rot(u, v, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate vertical vorticity.
:param u: ndarray, u-component wind.
:param v: ndarray, v-component wind.
:param lon: array_like, longitude.
:param lat: array_like, latitude.
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
u, v = np.array(u), np.array(v)
ndim = u.ndim
out = dvardx(v, lon, lat, xdim, ydim, cyclic=cyclic,
sphere=sphere) - dvardy(u, lat, ydim, sphere=sphere)
if sphere:
out = out + u * arr.expand(np.tan(lat * d2r), ndim, ydim) / a0
out = np.rollaxis(out, ydim, 0)
out[0, ...] = 0.
out[-1, ...] = 0.
out = np.rollaxis(out, 0, ydim + 1)
return out
def laplacian(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate laplacian operation on sphere.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
ndim = var.ndim
if sphere:
out = d2vardx2(var, lon, lat, xdim, ydim,
cyclic=cyclic, sphere=sphere) + \
d2vardy2(var, lat, ydim, sphere=sphere) - \
arr.expand(np.tan(lat * d2r), ndim, ydim) * \
dvardy(var, lat, ydim)/a0
else:
out = d2vardx2(var, lon, lat, xdim, ydim,
cyclic=cyclic, sphere=sphere) + \
d2vardy2(var, lat, ydim, sphere=sphere)
return out
def grad(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate gradient operator.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
outu = dvardx(var, lon, lat, xdim, ydim, cyclic=cyclic, sphere=sphere)
outv = dvardy(var, lat, ydim, sphere=sphere)
return outu, outv
def skgrad(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate skew gradient.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
outu = -dvardy(var, lat, ydim, sphere=sphere)
outv = dvardx(var, lon, lat, xdim, ydim, cyclic=cyclic, sphere=sphere)
return outu, outv
def gradient_sphere(f, *varargs):
"""
Return the gradient of a 2-dimensional array on a sphere given a latitude
and longitude vector.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
https://github.com/scavallo/python_scripts/blob/master/utils/weather_modules.py
:param f: A 2-dimensional array containing samples of a scalar function.
:param varargs: latitude, longitude and so on.
:return: dfdx and dfdy arrays of the same shape as `f`
giving the derivative of `f` with
respect to each dimension.
:Example:
temperature = temperature(pressure,latitude,longitude)
levs = pressure vector
lats = latitude vector
lons = longitude vector
>>> tempin = temperature[5,:,:]
>>> dfdlat, dfdlon = gradient_sphere(tempin, lats, lons)
>>> dfdp, dfdlat, dfdlon = gradient_sphere(temperature, levs, lats, lons)
"""
r_earth = 6371200.
N = f.ndim # number of dimensions
n = len(varargs) # number of arguments
argsin = list(varargs)
if N != n:
raise SyntaxError(
"dimensions of input must match the remaining arguments")
df = np.gradient(f)
if n == 2:
lats = argsin[0]
lons = argsin[1]
dfdy = df[0]
dfdx = df[1]
elif n == 3:
levs = argsin[0]
lats = argsin[1]
lons = argsin[2]
dfdz = df[0]
dfdy = df[1]
dfdx = df[2]
else:
raise SyntaxError("invalid number of arguments")
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D']:
otype = 'd'
latarr = np.zeros_like(f).astype(otype)
lonarr = np.zeros_like(f).astype(otype)
if N == 2:
nlat, nlon = np.shape(f)
for jj in range(0, nlat):
latarr[jj, :] = lats[jj]
for ii in range(0, nlon):
lonarr[:, ii] = lons[ii]
else:
nz, nlat, nlon = np.shape(f)
for jj in range(0, nlat):
latarr[:, jj, :] = lats[jj]
for ii in range(0, nlon):
lonarr[:, :, ii] = lons[ii]
# use central differences on interior and first differences on endpoints
dlats = np.zeros_like(lats).astype(otype)
dlats[1:-1] = (lats[2:] - lats[:-2])
dlats[0] = (lats[1] - lats[0])
    dlats[-1] = (lats[-1] - lats[-2])
dlons = np.zeros_like(lons).astype(otype)
dlons[1:-1] = (lons[2:] - lons[:-2])
dlons[0] = (lons[1] - lons[0])
    dlons[-1] = (lons[-1] - lons[-2])
dlatarr = np.zeros_like(f).astype(otype)
dlonarr = np.zeros_like(f).astype(otype)
if N == 2:
for jj in range(0, nlat):
dlatarr[jj, :] = dlats[jj]
for ii in range(0, nlon):
dlonarr[:, ii] = dlons[ii]
elif N == 3:
for jj in range(0, nlat):
dlatarr[:, jj, :] = dlats[jj]
for ii in range(0, nlon):
dlonarr[:, :, ii] = dlons[ii]
dlatsrad = dlatarr * (PI / 180.)
dlonsrad = dlonarr * (PI / 180.)
latrad = latarr * (PI / 180.)
if n == 2:
dx1 = r_earth * dlatsrad
dx2 = r_earth * np.cos(latrad) * dlonsrad
dfdy = dfdy / dx1
dfdx = dfdx / dx2
return dfdy, dfdx
elif n == 3:
dx1 = r_earth * dlatsrad
dx2 = r_earth * np.cos(latrad) * dlonsrad
dfdy = dfdy / dx1
dfdx = dfdx / dx2
zin = levs
dz = np.zeros_like(zin).astype(otype)
dz[1:-1] = (zin[2:] - zin[:-2]) / 2.0
dz[0] = (zin[1] - zin[0])
dz[-1] = (zin[-1] - zin[-2])
dx3 = np.ones_like(f).astype(otype)
for kk in range(0, nz):
dx3[kk, :, :] = dz[kk]
dfdz = dfdz / dx3
return dfdz, dfdy, dfdx
def vint(var, bottom, top, lev, zdim, punit=100.):
"""
Calculate vertical integration.
:param var: array_like.
:param bottom: bottom boundary of integration.
:param top: top boundary of integration.
:param lev: isobaric levels.
:param zdim: vertical dimension.
:param punit: levels units.
:return: array_like.
"""
var = np.ma.asarray(var)
lev = np.asarray(lev)
ndim = var.ndim
lev = lev[(lev <= bottom) & (lev >= top)]
lev_m = np.r_[bottom, (lev[1:] + lev[:-1])/2., top]
dp = lev_m[:-1] - lev_m[1:]
# roll lat dim axis to last
var = arr.mrollaxis(var, zdim, ndim)
out = var[..., (lev <= bottom) & (lev >= top)] * dp / g * punit
if bottom > top:
out = out.sum(axis=-1)
else:
out = -out.sum(axis=-1)
return out
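# Hypothetical usage sketch (not part of the original module): column-integrate
# a synthetic humidity-like field between 1000 and 300 hPa. The levels are in
# hPa, so punit=100. converts them to Pa inside vint(); every input level lies
# inside the integration range so the boolean level mask matches the data axis.
def _example_vint():
    lev = np.array([1000., 925., 850., 700., 500., 300.])   # hPa
    q = np.random.rand(lev.size, 73, 144) * 1e-3             # (lev, lat, lon)
    return vint(q, 1000., 300., lev, zdim=0, punit=100.)      # shape (73, 144)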
def total_col(infld, pres, temp, hght):
"""
Compute column integrated value of infld.
https://github.com/scavallo/classcode/blob/master/utils/weather_modules.py
:param infld: Input 3D field to column integrate
:param pres: Input 3D air pressure (Pa)
:param temp: Input 3D temperature field (K)
    :param hght: Input 3D geopotential height field (m)
:return: Output total column integrated value
"""
[iz, iy, ix] = np.shape(infld)
density = pres / (287 * temp)
tmp = pres[0, :, :].squeeze()
coltot = np.zeros_like(tmp).astype('f')
for jj in range(0, iy):
for ii in range(0, ix):
colnow = infld[:, jj, ii] * density[:, jj, ii]
hghtnow = hght[:, jj, ii].squeeze()
coltot[jj, ii] = np.trapz(colnow[::-1], hghtnow[::-1])
return coltot
def vmean(var, bottom, top, lev, zdim):
"""
Calculate vertical mean.
:param var: array_like.
:param bottom: bottom boundary of integration.
:param top: top boundary of integration.
:param lev: isobaric levels.
:param zdim: vertical dimension.
:return: array_like.
"""
var = np.ma.asarray(var)
lev = np.asarray(lev)
ndim = var.ndim
lev = lev[(lev <= bottom) & (lev >= top)]
lev_m = np.r_[bottom, (lev[1:] + lev[:-1])/2., top]
dp = lev_m[:-1] - lev_m[1:]
# roll lat dim axis to last
var = arr.mrollaxis(var, zdim, ndim)
out = var[..., (lev <= bottom) & (lev >= top)] * dp
out = out.sum(axis=-1)/(dp.sum())
return out
def vinterp(var, oldz, newz, zdim, logintrp=True, bounds_error=True):
"""
perform vertical linear interpolation.
:param var: array_like variable.
:param oldz: original vertical level.
:param newz: new vertical level.
:param zdim: the dimension of vertical.
:param logintrp: log linear interpolation.
:param bounds_error: options for scipy.interpolate.interp1d.
:return:
"""
var = np.array(var)
ndim = var.ndim
new_z = np.array(newz)
old_z = np.array(oldz)
if logintrp:
old_z = np.log(old_z)
new_z = np.log(new_z)
old_zn = var.shape[zdim]
new_zn = len(new_z)
# roll z dim axis to last
var = np.rollaxis(var, zdim, ndim)
old_shape = var.shape
new_shape = list(old_shape)
new_shape[-1] = new_zn
var = var.reshape(-1, old_zn)
if old_z.ndim == ndim:
old_z = np.rollaxis(old_z, zdim, ndim).reshape(-1, old_zn)
f = interpolate.interp1d(old_z, var, axis=-1, kind='linear',
bounds_error=bounds_error)
out = f(new_z)
elif old_z.ndim == 1:
f = interpolate.interp1d(old_z, var, kind='linear',
bounds_error=bounds_error)
out = f(new_z)
# reroll lon dim axis to original dim
out = out.reshape(new_shape)
out = np.rollaxis(out, ndim - 1, zdim)
return out
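# Hypothetical usage sketch (not part of the original module): log-linear
# interpolation of a synthetic (lev, lat, lon) temperature block onto two new
# isobaric levels; bounds_error=False returns NaN outside the original range.
def _example_vinterp():
    old_lev = np.array([1000., 850., 700., 500., 300., 200.])
    temp = 300. - np.random.rand(old_lev.size, 73, 144) * 60.
    new_lev = np.array([925., 600.])
    return vinterp(temp, old_lev, new_lev, zdim=0, logintrp=True,
                   bounds_error=False)                       # shape (2, 73, 144)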
def _grid_smooth_bes(x):
"""
Bessel function. (copied from RIP)
:param x: float number
:return: bessel function value.
"""
rint = 0.0
for i in range(1000):
u = i * 0.001 - 0.0005
rint = rint + np.sqrt(1 - u*u) * np.cos(x*u)*0.001
return 2.0 * x * rint / (4.0 * np.arctan(1.0))
def grid_smooth(field, radius=6, method='CRES', **kwargs):
"""
Perform grid field smooth filter.
refer to
https://github.com/Unidata/IDV/blob/master/src/ucar/unidata/data/grid/GridUtil.java
    * Apply a weighted smoothing function to the grid.
The smoothing types are:
SMOOTH_CRESSMAN: the smoothed value is given by a weighted average of
values at surrounding grid points. The weighting function is the
Cressman weighting function:
w = ( D**2 - d**2 ) / ( D**2 + d**2 )
In the above, d is the distance (in grid increments) of the neighboring
point to the smoothing point, and D is the radius of influence
[in grid increments]
    SMOOTH_CIRCULAR: the weighting function is the circular aperture
diffraction function (following a suggestion of Barnes et al. 1996):
w = bessel(3.8317*d/D)/(3.8317*d/D)
SMOOTH_RECTANGULAR: the weighting function is the product of the
rectangular aperture diffraction function in the x and y
directions (the function used in Barnes et al. 1996):
w = [sin(pi*x/D)/(pi*x/D)]*[sin(pi*y/D)/(pi*y/D)]
Adapted from smooth.f written by <NAME> in his RIP package
:param field: 2D array variable.
:param radius: if type is CRES, CIRC or RECT, radius of window
in grid units (in grid increments)
if type is GWFS, radius is the standard deviation
of gaussian function, larger for smoother
:param method: string value, smooth type:
SM9S, 9-point smoother
GWFS, Gaussian smoother
CRES, Cressman smoother, default
        CIRC, Barnes circular aperture diffraction function
        RECT, Barnes rectangular aperture diffraction function
:param kwargs: parameters for scipy.ndimage.filters.convolve function.
:return: 2D array like smoothed field.
"""
# construct kernel
if method == 'SM9S':
kernel = [[0.3, 0.5, 0.3], [0.5, 1, 0.5], [0.3, 0.5, 0.3]]
elif method == 'GWFS':
return ndimage.filters.gaussian_filter(field, radius, **kwargs)
elif method == 'CRES':
        width = int(np.ceil(radius)*2+1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x*x + y*y)
if d > radius:
continue
kernel[jj, ii] = (radius*radius - d*d)/(radius*radius + d*d)
elif method == 'CIRC':
        width = int(np.ceil(radius) * 2 + 1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x * x + y * y)
if d > radius:
continue
if d == 0.:
kernel[jj, ii] = 0.5
else:
kernel[jj, ii] = _grid_smooth_bes(
3.8317*d/radius)/(3.8317*d/radius)
elif method == 'RECT':
        width = int(np.ceil(radius) * 2 + 1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x * x + y * y)
if d > radius:
continue
kernel[jj, ii] = (np.sin(PI*x/radius)/(PI*x/radius)) * \
(np.sin(PI*y/radius)/(PI*y/radius))
else:
return field
# return smoothed field
return ndimage.filters.convolve(field, kernel, **kwargs)
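# Hypothetical usage sketch (not part of the original module): smooth a noisy
# 2D field with the default Cressman kernel. For a point one grid increment
# from the centre and radius=6, the weight is (6**2 - 1**2)/(6**2 + 1**2),
# about 0.95, so nearby points dominate; mode='nearest' is simply passed
# through to the underlying convolution call.
def _example_grid_smooth():
    field = np.random.randn(181, 360)
    return grid_smooth(field, radius=6, method='CRES', mode='nearest')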
@jit
def grid_smooth_area_average(in_field, lon, lat, radius=400.e3):
"""
Smoothing grid field with circle area average.
:param in_field: 2D or multiple dimension array grid field,
the rightest dimension [..., lat, lon].
:param lon: 1D array longitude.
:param lat: 1D array latitude.
:param radius: smooth radius, [m]
:return: smoothed grid field.
"""
# set constants
deg_to_rad = np.arctan(1.0)/45.0
earth_radius = 6371000.0
# reshape field to 3d array
old_shape = in_field.shape
if np.ndim(in_field) == 2:
ndim = 1
else:
        ndim = np.prod(old_shape[0:-2])
field = in_field.reshape(ndim, *old_shape[-2:])
# grid coordinates
x, y = np.meshgrid(lon, lat)
# define output field
out_field = np.full_like(field, np.nan)
# loop every grid point
lat1 = np.cos(lat * deg_to_rad)
lat2 = np.cos(y * deg_to_rad)
for j in range(lat.size):
dlat = (y - lat[j]) * deg_to_rad
a1 = (np.sin(dlat/2.0))**2
b1 = lat1[j] * lat2
for i in range(lon.size):
# great circle distance
dlon = (x - lon[i]) * deg_to_rad
a = np.sqrt(a1+b1*(np.sin(dlon/2.0))**2)
dist = earth_radius * 2.0 * np.arcsin(a)
dist = dist <= radius
# compute average
if np.any(dist):
for k in range(ndim):
temp = field[k, :, :]
out_field[k, j, i] = np.mean(temp[dist])
# return smoothed field
return out_field.reshape(old_shape)
def grid_subset(lon, lat, bound):
"""
Get the upper and lower bound of a grid subset.
:param lon: 1D array, longitude.
:param lat: 1D array, latitude.
:param bound: subset boundary, [lonmin, lonmax, latmin, latmax]
:return: subset boundary index.
"""
# latitude lower and upper index
latli = np.argmin(np.abs(lat - bound[2]))
latui = np.argmin(np.abs(lat - bound[3]))
# longitude lower and upper index
lonli = np.argmin(np.abs(lon - bound[0]))
lonui = np.argmin(np.abs(lon - bound[1]))
# return subset boundary index
return lonli, lonui+1, latli, latui+1
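# Hypothetical usage sketch (not part of the original module): cut a lon/lat
# box out of a global grid with the indices returned by grid_subset(); it
# assumes lon and lat are increasing 1D arrays and the bound is illustrative.
def _example_grid_subset(field2d, lon, lat):
    lonli, lonui, latli, latui = grid_subset(lon, lat, [100., 130., 20., 50.])
    return field2d[latli:latui, lonli:lonui]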
def vertical_cross(in_field, lon, lat, line_points, npts=100):
"""
Interpolate 2D or multiple dimensional grid data to vertical cross section.
:param in_field: 2D or multiple dimensional grid data,
the rightest dimension [..., lat, lon].
:param lon: grid data longitude.
:param lat: grid data latitude.
:param line_points: cross section line points,
should be [n_points, 2] array.
:param npts: the point number of great circle line.
:return: cross section [..., n_points], points
"""
if np.ndim(in_field) < 2:
raise ValueError("in_field must be at least 2 dimension")
# reshape field to 3d array
old_shape = in_field.shape
if np.ndim(in_field) == 2:
field = in_field.reshape(1, *old_shape)
else:
        field = in_field.reshape(np.prod(old_shape[0:-2]), *old_shape[-2:])
# get great circle points
points = None
n_line_points = line_points.shape[0]
geod = Geod("+ellps=WGS84")
for i in range(n_line_points-1):
seg_points = geod.npts(
lon1=line_points[i, 0], lat1=line_points[i, 1],
lon2=line_points[i+1, 0], lat2=line_points[i+1, 1], npts=npts)
if points is None:
points = np.array(seg_points)
else:
points = np.vstack((points, np.array(seg_points)))
# convert to pixel coordinates
x = np.interp(points[:, 0], lon, np.arange(len(lon)))
y = np.interp(points[:, 1], lat, np.arange(len(lat)))
# loop every level
zdata = []
for i in range(field.shape[0]):
zdata.append(
ndimage.map_coordinates(np.transpose(field[i, :, :]),
np.vstack((x, y))))
# reshape zdata
zdata = np.array(zdata)
if np.ndim(in_field) > 2:
zdata = zdata.reshape(np.append(old_shape[0:-2], points.shape[0]))
# return vertical cross section
return zdata, points
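# Hypothetical usage sketch (not part of the original module): interpolate a
# (lev, lat, lon) field onto a great-circle cross section between two
# illustrative end points; npts controls the sampling density per segment.
def _example_vertical_cross(field3d, lon, lat):
    line = np.array([[110.0, 20.0],    # lon, lat of the start point
                     [120.0, 45.0]])   # lon, lat of the end point
    zdata, points = vertical_cross(field3d, lon, lat, line, npts=80)
    return zdata, points               # zdata shape: (nlev, 80)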
def interpolate1d(x, z, points, mode='linear', bounds_error=False):
"""
1D interpolation routine.
:param x: 1D array of x-coordinates on which to interpolate
:param z: 1D array of values for each x
:param points: 1D array of coordinates where interpolated values are sought
:param mode: Determines the interpolation order. Options are
'constant' - piecewise constant nearest neighbour interpolation
'linear' - bilinear interpolation using the two
nearest neighbours (default)
:param bounds_error: Boolean flag. If True (default) an exception will
be raised when interpolated values are requested
outside the domain of the input data. If False, nan
is returned for those values
:return: 1D array with same length as points with interpolated values
:Notes:
Input coordinates x are assumed to be monotonically increasing,
but need not be equidistantly spaced.
z is assumed to have dimension M where M = len(x).
"""
# Check inputs
#
# make sure input vectors are numpy array
x = np.array(x)
    # Input vectors should be monotonically increasing.
    if (not np.min(x) == x[0]) or (not np.max(x) == x[-1]):
        raise Exception('Input vector x must be monotonically increasing.')
# Input array Z's dimensions
z = np.array(z)
if not len(x) == len(z):
raise Exception('Input array z must have same length as x')
# Get interpolation points
in_points = np.array(points)
xi = in_points[:]
# Check boundary
if bounds_error:
if np.min(xi) < x[0] or np.max(xi) > x[-1]:
raise Exception('Interpolation points was out of the domain.')
# Identify elements that are outside interpolation domain or NaN
outside = (xi < x[0]) + (xi > x[-1])
outside += np.isnan(xi)
    inside = ~outside
xi = xi[inside]
# Find upper neighbours for each interpolation point
idx = np.searchsorted(x, xi, side='left')
# Internal check (index == 0 is OK)
msg = 'Interpolation point outside domain. This should never happen.'
if len(idx) > 0:
if not max(idx) < len(x):
raise RuntimeError(msg)
# Get the two neighbours for each interpolation point
x0 = x[idx - 1]
x1 = x[idx]
z0 = z[idx - 1]
z1 = z[idx]
# Coefficient for weighting between lower and upper bounds
alpha = (xi - x0) / (x1 - x0)
if mode == 'linear':
# Bilinear interpolation formula
dx = z1 - z0
zeta = z0 + alpha * dx
else:
# Piecewise constant (as verified in input_check)
# Set up masks for the quadrants
left = alpha < 0.5
# Initialise result array with all elements set to right neighbour
zeta = z1
# Then set the left neighbours
zeta[left] = z0[left]
# Self test
if len(zeta) > 0:
mzeta = np.nanmax(zeta)
mz = np.nanmax(z)
msg = ('Internal check failed. Max interpolated value %.15f '
'exceeds max grid value %.15f ' % (mzeta, mz))
if not (np.isnan(mzeta) or np.isnan(mz)):
if not mzeta <= mz:
raise RuntimeError(msg)
# Populate result with interpolated values for points inside domain
# and NaN for values outside
r = np.zeros(len(points))
r[inside] = zeta
r[outside] = np.nan
return r
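# Hypothetical usage sketch (not part of the original module): linear
# interpolation with NaN returned for points outside the data domain
# (bounds_error=False); all numbers are illustrative.
def _example_interpolate1d():
    x = np.array([0.0, 1.0, 2.0, 4.0])
    z = np.array([10.0, 11.0, 14.0, 20.0])
    pts = np.array([0.5, 3.0, 5.0])
    return interpolate1d(x, z, pts, mode='linear', bounds_error=False)
    # -> array([10.5, 17., nan])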
def interpolate2d(x, y, Z, points, mode='linear', bounds_error=False):
"""
Interpolating from 2D field to points.
Refer to
https://github.com/inasafe/python-safe/blob/master/safe/engine/interpolation2d.py.
    * provides piecewise constant (nearest neighbour) and bilinear
      interpolation
    * is fast (based on numpy vector operations)
    * depends only on numpy
    * guarantees that interpolated values never exceed the four nearest
      neighbours
    * handles missing values in domain sensibly using NaN
    * is unit tested with a range of common and corner cases
:param x: 1D array of x-coordinates of the mesh on which to interpolate
:param y: 1D array of y-coordinates of the mesh on which to interpolate
:param Z: 2D array of values for each x, y pair
:param points: Nx2 array of coordinates where interpolated values
are sought
:param mode: Determines the interpolation order. Options are
'constant' - piecewise constant nearest neighbour interpolation
'linear' - bilinear interpolation using the four nearest
neighbours (default)
:param bounds_error: Boolean flag. If True (default) an exception will
be raised when interpolated values are requested
outside the domain of the input data. If False, nan
is returned for those values.
:return: 1D array with same length as points with interpolated values
:Notes:
Input coordinates x and y are assumed to be monotonically increasing,
but need not be equidistantly spaced.
Z is assumed to have dimension M x N, where M = len(x) and N = len(y).
In other words it is assumed that the x values follow the first
(vertical) axis downwards and y values the second (horizontal) axis
from left to right.
2D bilinear interpolation aims at obtaining an interpolated value z at
a point (x,y) which lies inside a square formed by points (x0, y0),
(x1, y0), (x0, y1) and (x1, y1) for which values z00, z10, z01 and
z11 are known.
    This is obtained by first applying equation (1) twice in the
    x-direction to obtain interpolated points q0 and q1 for (x, y0)
    and (x, y1), respectively.
q0 = alpha*z10 + (1-alpha)*z00 (2)
and
q1 = alpha*z11 + (1-alpha)*z01 (3)
Then using equation (1) in the y-direction on the results from
(2) and (3)
z = beta*q1 + (1-beta)*q0 (4)
where beta = (y-y0)/(y1-y0) (4a)
Substituting (2) and (3) into (4) yields
z = alpha*beta*z11 + beta*z01 - alpha*beta*z01 +
alpha*z10 + z00 - alpha*z00 - alpha*beta*z10 - beta*z00 +
alpha*beta*z00
= alpha*beta*(z11 - z01 - z10 + z00) +
alpha*(z10 - z00) + beta*(z01 - z00) + z00
which can be further simplified to
z = alpha*beta*(z11 - dx - dy - z00) + alpha*dx + beta*dy + z00 (5)
where
dx = z10 - z00
dy = z01 - z00
Equation (5) is what is implemented in the function
interpolate2d above.
"""
# Check inputs
#
# make sure input vectors are numpy array
x = np.array(x)
y = np.array(y)
    # Input vectors should be monotonically increasing.
    if (not np.min(x) == x[0]) or (not np.max(x) == x[-1]):
        raise Exception('Input vector x must be monotonically increasing.')
    if (not np.min(y) == y[0]) or (not np.max(y) == y[-1]):
        raise Exception('Input vector y must be monotonically increasing.')
# Input array Z's dimensions
Z = np.array(Z)
m, n = Z.shape
if not(len(x) == m and len(y) == n):
raise Exception(
'Input array Z must have dimensions corresponding to the '
'lengths of the input coordinates x and y')
# Get interpolation points
in_points = np.array(points)
xi = in_points[:, 0]
eta = in_points[:, 1]
# Check boundary
if bounds_error:
if np.min(xi) < x[0] or np.max(xi) > x[-1] or \
np.min(eta) < y[0] or np.max(eta) > y[-1]:
            raise RuntimeError('Interpolation points were outside the domain.')
# Identify elements that are outside interpolation domain or NaN
outside = (xi < x[0]) + (eta < y[0]) + (xi > x[-1]) + (eta > y[-1])
outside += np.isnan(xi) + np.isnan(eta)
    inside = ~outside
xi = xi[inside]
eta = eta[inside]
# Find upper neighbours for each interpolation point
idx = np.searchsorted(x, xi, side='left')
idy = np.searchsorted(y, eta, side='left')
# Internal check (index == 0 is OK)
msg = 'Interpolation point outside domain. This should never happen.'
if len(idx) > 0:
if not max(idx) < len(x):
raise RuntimeError(msg)
if len(idy) > 0:
if not max(idy) < len(y):
raise RuntimeError(msg)
# Get the four neighbours for each interpolation point
x0 = x[idx - 1]
x1 = x[idx]
y0 = y[idy - 1]
y1 = y[idy]
z00 = Z[idx - 1, idy - 1]
z01 = Z[idx - 1, idy]
z10 = Z[idx, idy - 1]
z11 = Z[idx, idy]
# Coefficients for weighting between lower and upper bounds
oldset = np.seterr(invalid='ignore') # Suppress warnings
alpha = (xi - x0) / (x1 - x0)
beta = (eta - y0) / (y1 - y0)
np.seterr(**oldset) # Restore
if mode == 'linear':
# Bilinear interpolation formula
dx = z10 - z00
dy = z01 - z00
z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)
else:
        # Piecewise constant (nearest neighbour) interpolation
        # Set up masks for the quadrants
        left = alpha < 0.5
        right = ~left
        lower = beta < 0.5
        upper = ~lower
lower_left = lower * left
lower_right = lower * right
upper_left = upper * left
# Initialise result array with all elements set to upper right
z = z11
# Then set the other quadrants
z[lower_left] = z00[lower_left]
z[lower_right] = z10[lower_right]
z[upper_left] = z01[upper_left]
# Self test
if len(z) > 0:
            mz = np.nanmax(z)
            mZ = np.nanmax(Z)
            msg = ('Internal check failed. Max interpolated value %.15f '
                   'exceeds max grid value %.15f ' % (mz, mZ))
            if not (np.isnan(mz) or np.isnan(mZ)):
                if not mz <= mZ:
                    raise RuntimeError(msg)
    # Populate result with interpolated values for points inside domain
    # and NaN for values outside
    r = np.zeros(len(points))
    r[inside] = z
    r[outside] = np.nan
    return r
# -*- coding: utf-8 -*-
from ungol.index import index as uii
from ungol.similarity import stats
from ungol.similarity import rhwmd as _rhwmd
import numpy as np
Strategy = _rhwmd.Strategy
def _get_docs(db: uii.Index, s_doc1: str, s_doc2: str):
assert s_doc1 in db.mapping, f'"{s_doc1}" not in database'
assert s_doc2 in db.mapping, f'"{s_doc2}" not in database'
return db.mapping[s_doc1], db.mapping[s_doc2]
# --- DISTANCE SCHEMES
#
#
# HR-WMD |----------------------------------------
#
#
#
# TODO: speech about what I learned about ripping apart the calculation.
# Notes:
# - prefixes: s_* for str, a_* for np.array, n_ for scalars
#
def _rhwmd_similarity(
index: uii.Index,
doc1: uii.Doc, doc2: uii.Doc,
verbose: bool) -> float:
# ----------------------------------------
# this is the important part
#
a_sims, a_idxs = _rhwmd.retrieve_nn(doc1, doc2)
# phony
# a_sims1 = np.ones(doc1_idxs.shape[0])
# a_sims2 = np.ones(doc2_idxs.shape[0])
# --- COMMON OOV
common_unknown = doc1.unknown.keys() & doc2.unknown.keys()
# U = len(common_unknown)
U = 0
a_unknown = np.ones(U)
# --- IDF
def idf(doc) -> np.array:
a_df = np.hstack((a_unknown, np.array(doc.docfreqs)))
N = len(index.mapping) # FIXME add unknown tokens
a_idf = np.log(N / a_df)
return a_idf
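    # Illustrative magnitude check (numbers chosen for demonstration only):
    # with N = 1000 documents in the index and a document frequency of 10,
    # idf = log(1000 / 10) = log(100) ≈ 4.6, so rarer terms get larger weights.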
# --- idf combinations
a_idf_doc1 = idf(doc1)
a_idf_doc2 = idf(doc2)
a_idf_nn1 = a_idf_doc2[a_idxs[0]]
a_idf_nn2 = a_idf_doc1[a_idxs[1]]
# a_idf1 = a_idf_doc1 + a_idf_nn1
# a_idf2 = a_idf_doc2 + a_idf_nn2
# --- query idf
a_idf1 = a_idf_doc1
a_idf2 = a_idf_doc2
# --- phony
# a_idf1 = np.ones(len(doc1)) / len(doc1)
# a_idf2 = np.ones(len(doc2)) / len(doc2)
# --- WEIGHTING
boost = 1
def weighted(a_sims, a_idf):
a = np.hstack((np.ones(U), a_sims)) * a_idf
s1, s2 = a[:U].sum(), a[U:].sum()
return a, boost * s1 + s2
a_idf1_norm = a_idf1 / a_idf1.sum()
a_idf2_norm = a_idf2 / a_idf2.sum()
a_weighted_doc1, n_score1 = weighted(a_sims[0], a_idf1_norm)
a_weighted_doc2, n_score2 = weighted(a_sims[1], a_idf2_norm)
# assert 0 <= n_sim_weighted_doc1 and n_sim_weighted_doc1 <= 1
# assert 0 <= n_sim_weighted_doc2 and n_sim_weighted_doc2 <= 1
#
# the important part ends here
# ----------------------------------------
if not verbose:
return n_score1, n_score2, None
# ---
# create data object for the scorer to explain itself.
# the final score is set by the caller.
scoredata = stats.ScoreData(
name='rhwmd', score=None, docs=(doc1, doc2),
common_unknown=common_unknown)
# ---
scoredata.add_local_row('score', n_score1, n_score2)
# ---
scoredata.add_local_column(
'token',
np.array(doc1.tokens),
np.array(doc2.tokens), )
scoredata.add_local_column(
'nn',
np.array(doc2.tokens)[a_idxs[0]],
np.array(doc1.tokens)[a_idxs[1]], )
scoredata.add_local_column('sim', *a_sims)
scoredata.add_local_column('tf(token)', doc1.freq, doc2.freq)
scoredata.add_local_column('idf(token)', a_idf_doc1, a_idf_doc2)
scoredata.add_local_column('idf(nn)', a_idf_nn1, a_idf_nn2)
scoredata.add_local_column('idf', a_idf1, a_idf2)
scoredata.add_local_column('weight', a_weighted_doc1, a_weighted_doc2)
return n_score1, n_score2, scoredata
def rhwmd(index: uii.Index, s_doc1: str, s_doc2: str,
strategy: Strategy = Strategy.ADAPTIVE_SMALL,
verbose: bool = False):
doc1, doc2 = _get_docs(index, s_doc1, s_doc2)
score1, score2, scoredata = _rhwmd_similarity(index, doc1, doc2, verbose)
# select score based on a strategy
if strategy is Strategy.MIN:
score = min(score1, score2)
elif strategy is Strategy.MAX:
score = max(score1, score2)
elif strategy is Strategy.ADAPTIVE_SMALL:
score = score1 if len(doc1) < len(doc2) else score2
elif strategy is Strategy.ADAPTIVE_BIG:
score = score2 if len(doc1) < len(doc2) else score1
elif strategy is Strategy.SUM:
score = score1 + score2
else:
assert False, f'unknown strategy: "{strategy}"'
if scoredata is not None:
scoredata.score = score
scoredata.add_global_row('strategy', strategy.name)
return scoredata if verbose else score
#
#
# OKAPI BM25 |----------------------------------------
#
#
def _bm25_normalization(a_tf, n_len: int, k1: float, b: float):
# calculate numerator
a_num = (k1 + 1) * a_tf
# calculate denominator
a_den = k1 * ((1-b) + b * n_len) + a_tf
return a_num / a_den
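# Worked example of the normalization above (numbers chosen for illustration):
# with k1=1.56 and b=0.45, a term frequency of 2 in a document of average
# length (n_len = 1) yields (1.56 + 1)*2 / (1.56*((1 - 0.45) + 0.45*1) + 2)
# = 5.12 / 3.56 ≈ 1.44, which bm25() below then weights by the term's idf.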
def bm25(index: uii.Index, s_doc1: str, s_doc2: str,
k1=1.56, b=0.45, verbose: bool = False):
doc1, doc2 = _get_docs(index, s_doc1, s_doc2)
ref = index.ref
# gather common tokens
common = set(doc1.tokens) & set(doc2.tokens)
if not len(common):
return 0 if not verbose else stats.ScoreData(
name='bm25', score=0, docs=(doc1, doc2))
# get code indexes
a_common_idx = np.array([ref.vocabulary[t] for t in common])
# find corresponding document frequencies
a_df = np.array([ref.docfreqs[idx] for idx in a_common_idx])
# calculate idf value
a_idf = np.log(len(index.mapping) / a_df)
# find corresponding token counts
# note: a[:, None] == np.array([a]).T
a_tf = doc2.cnt[np.nonzero(a_common_idx[:, None] == doc2.idx)[1]]
assert len(a_tf) == len(common)
n_len = len(doc2) / index.avg_doclen
a_norm = _bm25_normalization(a_tf, n_len, k1, b)
# weight each idf value
a_res = a_idf * a_norm
score = a_res.sum()
if not verbose:
return score
# ---
scoredata = stats.ScoreData(name='bm25', score=score, docs=(doc1, doc2), )
# to preserve order
    a_common_words = np.array([ref.lookup[idx] for idx in a_common_idx])
"""
Kernel topic model with Gibbs Sampler
=====================================
Reference: Hennig et al., 2012 & Murphy's MLPP book Ch. 27
"""
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from sklearn.gaussian_process.kernels import RBF
from tqdm import tqdm, trange
np.random.seed(1)
# Words
W = np.array([0, 1, 2, 3, 4])
# D := document words
X = np.array([
[0, 0, 1, 2, 2],
[0, 0, 1, 1, 1],
[0, 1, 2, 2, 2],
[4, 4, 4, 4, 4],
[3, 3, 4, 4, 4],
[3, 4, 4, 4, 4]
])
N_D = X.shape[0] # num of docs
N_W = W.shape[0] # num of words
N_K = 2 # num of topics
N_F = 3 # num of features
# Document features
Phi = np.random.randn(N_D, N_F)
# Dirichlet priors
alpha = 1
beta = 1
# k independent GP priors
ls = 1 # length-scale for RBF kernel
tau = 1 # Observation noise variance
kernel = RBF([ls]*N_F)
GPRs = []
for k in range(N_K):
GPR_k = GPR(kernel=kernel, alpha=tau)
GPR_k = GPR_k.fit(Phi, np.zeros(N_D))
GPRs.append(GPR_k)
# -------------------------------------------------------------------------------------
# Laplace bridge
# -------------------------------------------------------------------------------------
def gauss2dir(mu, Sigma):
K = len(mu)
Sigma_diag = np.diag(Sigma)
alpha = 1/Sigma_diag * (1 - 2/K + np.exp(mu)/K**2 * np.sum(np.exp(-mu)))
return alpha
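# Quick illustration of the bridge above (values chosen for demonstration):
# a standard normal over K=3 logits (mu = 0, Sigma = I) maps to a symmetric
# Dirichlet with concentration 1 - 2/3 + exp(0)/3**2 * 3 = 2/3 per component,
# e.g. gauss2dir(np.zeros(3), np.eye(3)) -> array([0.667, 0.667, 0.667]).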
def dir2gauss(alpha):
K = len(alpha)
    mu = np.log(alpha) - 1/K*np.sum(np.log(alpha))
import torch
import torch.utils.data as data
from PIL import Image
from spatial_transforms import *
from temporal_transforms import *
import os
import math
import functools
import json
import copy
from numpy.random import randint
import numpy as np
import random
from utils import load_value_file
import pdb
def pil_loader(path, modality):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
#print(path)
with Image.open(f) as img:
if modality == 'RGB':
return img.convert('RGB')
elif modality == 'Depth':
return img.convert('L') # 8-bit pixels, black and white check from https://pillow.readthedocs.io/en/3.0.x/handbook/concepts.html
def accimage_loader(path, modality):
try:
import accimage
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path, modality)
def get_default_image_loader():
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader
else:
return pil_loader
def video_loader(video_dir_path, frame_indices, modality, sample_duration, image_loader):
video = []
if modality == 'RGB':
for i in frame_indices:
image_path = os.path.join(video_dir_path, '{:05d}.jpg'.format(i))
if os.path.exists(image_path):
video.append(image_loader(image_path, modality))
else:
print(image_path, "------- Does not exist")
return video
elif modality == 'Depth':
for i in frame_indices:
image_path = os.path.join(video_dir_path.replace('color','depth'), '{:05d}.jpg'.format(i) )
if os.path.exists(image_path):
video.append(image_loader(image_path, modality))
else:
print(image_path, "------- Does not exist")
return video
elif modality == 'RGB-D':
for i in frame_indices:
image_path = os.path.join(video_dir_path, '{:05d}.jpg'.format(i))
image_path_depth = os.path.join(video_dir_path.replace('color','depth'), '{:05d}.jpg'.format(i) )
image = image_loader(image_path, 'RGB')
image_depth = image_loader(image_path_depth, 'Depth')
if os.path.exists(image_path):
video.append(image)
video.append(image_depth)
else:
print(image_path, "------- Does not exist")
return video
return video
def get_default_video_loader():
image_loader = get_default_image_loader()
return functools.partial(video_loader, image_loader=image_loader)
def load_annotation_data(data_file_path):
with open(data_file_path, 'r') as data_file:
return json.load(data_file)
def get_class_labels(data):
class_labels_map = {}
index = 0
for class_label in data['labels']:
class_labels_map[class_label] = index
index += 1
return class_labels_map
def get_annotation(data, whole_path):
annotation = []
for key, value in data['database'].items():
if key.split('^')[0] == whole_path:
annotation.append(value['annotations'])
return annotation
def make_dataset( annotation_path, video_path , whole_path,sample_duration, n_samples_for_each_video, stride_len):
data = load_annotation_data(annotation_path)
whole_video_path = os.path.join(video_path,whole_path)
annotation = get_annotation(data, whole_path)
class_to_idx = get_class_labels(data)
idx_to_class = {}
for name, label in class_to_idx.items():
idx_to_class[label] = name
dataset = []
    print("[INFO]: Videos are loading...")
import glob
n_frames = len(glob.glob(whole_video_path + '/*.jpg'))
if not os.path.exists(whole_video_path):
print(whole_video_path , " does not exist")
label_list = []
for i in range(len(annotation)):
begin_t = int(annotation[i]['start_frame'])
end_t = int(annotation[i]['end_frame'])
for j in range(begin_t,end_t+1):
label_list.append(class_to_idx[annotation[i]['label']])
    label_list = np.array(label_list)
# -*- coding: utf-8 -*-
"""
TODO: Please check readme.txt file first!
--
This Python2.7 program is to reproduce Table 2, 3, 4, and 5.
"""
import os
import sys
import pickle
import numpy as np
import multiprocessing
from itertools import product
from numpy.random import randint
try:
import sparse_module
try:
from sparse_module import wrap_head_tail_bisearch
except ImportError:
print('cannot find wrap_head_tail_bisearch method in sparse_module')
sparse_module = None
exit(0)
except ImportError:
print('\n'.join([
'cannot find the module: sparse_module',
'try run: \'python setup.py build_ext --inplace\' first! ']))
def expit(x):
"""
expit function. 1 /(1+exp(-x)). quote from Scipy:
The expit function, also known as the logistic function,
is defined as expit(x) = 1/(1+exp(-x)).
It is the inverse of the logit function.
    expit is also known as the logistic sigmoid function.
:param x: np.ndarray
:return: 1/(1+exp(-x)).
"""
out = np.zeros_like(x)
posi = np.where(x > 0.0)
nega = np.where(x <= 0.0)
out[posi] = 1. / (1. + np.exp(-x[posi]))
exp_x = np.exp(x[nega])
out[nega] = exp_x / (1. + exp_x)
return out
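# Example of why the two-branch form above matters (illustrative values):
# expit(np.array([-710., 0., 710.])) evaluates to approximately [0., 0.5, 1.]
# without overflow warnings, whereas the naive 1. / (1. + np.exp(-x)) overflows
# for large negative x.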
def logistic_predict(x, wt):
"""
To predict the probability for sample xi. {+1,-1}
:param x: (n,p) dimension, where p is the number of features.
:param wt: (p+1,) dimension, where wt[p] is the intercept.
    :return: two (n,) arrays: the predicted probability of the positive class
        and the corresponding predicted labels.
"""
n, p = x.shape
pred_prob = expit(np.dot(x, wt[:p]) + wt[p])
pred_y = np.ones(n)
pred_y[pred_prob < 0.5] = -1.
return pred_prob, pred_y
def log_logistic(x):
""" return log( 1/(1+exp(-x)) )"""
out = np.zeros_like(x)
posi = np.where(x > 0.0)
nega = np.where(x <= 0.0)
out[posi] = -np.log(1. + np.exp(-x[posi]))
out[nega] = x[nega] - np.log(1. + np.exp(x[nega]))
return out
def logit_loss_grad_bl(x_tr, y_tr, wt, l2_reg, cp, cn):
"""
Calculate the balanced loss and gradient of the logistic function.
:param x_tr: (n,p), where p is the number of features.
:param y_tr: (n,), where n is the number of labels.
:param wt: current model. wt[-1] is the intercept.
:param l2_reg: regularization to avoid overfitting.
    :param cp: cost (weight) applied to positive samples.
    :param cn: cost (weight) applied to negative samples.
:return: {+1,-1} Logistic (val,grad) on training samples.
"""
assert len(wt) == (x_tr.shape[1] + 1)
c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]
posi_idx = np.where(y_tr > 0) # corresponding to positive labels.
nega_idx = np.where(y_tr < 0) # corresponding to negative labels.
grad = np.zeros_like(wt)
wt = wt[:p]
yz = y_tr * (np.dot(x_tr, wt) + c)
z = expit(yz)
loss = -cp * np.sum(log_logistic(yz[posi_idx]))
loss += -cn * np.sum(log_logistic(yz[nega_idx]))
loss = loss / n + .5 * l2_reg * np.dot(wt, wt)
bl_y_tr = np.zeros_like(y_tr)
bl_y_tr[posi_idx] = cp * np.asarray(y_tr[posi_idx], dtype=float)
bl_y_tr[nega_idx] = cn * np.asarray(y_tr[nega_idx], dtype=float)
z0 = (z - 1) * bl_y_tr # z0 = (z - 1) * y_tr
grad[:p] = np.dot(x_tr.T, z0) / n + l2_reg * wt
grad[-1] = z0.sum() # do not need to regularize the intercept.
return loss, grad
def logit_loss_bl(x_tr, y_tr, wt, l2_reg, cp, cn):
"""
    Calculate the balanced loss of the logistic function.
:param x_tr: (n,p), where p is the number of features.
:param y_tr: (n,), where n is the number of labels.
:param wt: current model. wt[-1] is the intercept.
:param l2_reg: regularization to avoid overfitting.
    :param cp: cost (weight) applied to positive samples.
    :param cn: cost (weight) applied to negative samples.
    :return: the {+1,-1} logistic loss value on the training samples.
"""
assert len(wt) == (x_tr.shape[1] + 1)
c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]
posi_idx = np.where(y_tr > 0) # corresponding to positive labels.
nega_idx = np.where(y_tr < 0) # corresponding to negative labels.
wt = wt[:p]
yz = y_tr * (np.dot(x_tr, wt) + c)
loss = -cp * np.sum(log_logistic(yz[posi_idx]))
loss += -cn * np.sum(log_logistic(yz[nega_idx]))
loss = loss / n + .5 * l2_reg * np.dot(wt, wt)
return loss
def algo_head_tail_bisearch(
edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose):
""" This is the wrapper of head/tail-projection proposed in [2].
:param edges: edges in the graph.
:param x: projection vector x.
:param costs: edge costs in the graph.
:param g: the number of connected components.
:param root: root of subgraph. Usually, set to -1: no root.
:param s_low: the lower bound of the sparsity.
:param s_high: the upper bound of the sparsity.
:param max_num_iter: the maximum number of iterations used in
binary search procedure.
:param verbose: print out some information.
:return: 1. the support of the projected vector
2. the projected vector
"""
prizes = x * x
# to avoid too large upper bound problem.
if s_high >= len(prizes) - 1:
s_high = len(prizes) - 1
re_nodes = wrap_head_tail_bisearch(
edges, prizes, costs, g, root, s_low, s_high, max_num_iter, verbose)
proj_w = np.zeros_like(x)
proj_w[re_nodes[0]] = x[re_nodes[0]]
return re_nodes[0], proj_w
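# Usage sketch (assumes the compiled sparse_module extension is importable and
# that `edges`, `costs` describe the graph while `s` is the target sparsity):
#   nodes, w_proj = algo_head_tail_bisearch(edges, w, costs, g=1, root=-1,
#                                           s_low=s, s_high=int(s * 1.1),
#                                           max_num_iter=50, verbose=0)
# w_proj keeps the entries of w on a connected support of roughly s nodes.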
def algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, edges, costs, num_blocks, lambda_,
g=1, root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):
np.random.seed() # do not forget it.
w_hat = np.copy(w0)
(m, p) = x_tr.shape
# if the block size is too large. just use single block
b = int(m) / int(num_blocks)
np_ = np.sum(y_tr == 1)
nn_ = np.sum(y_tr == -1)
cp = float(nn_) / float(len(y_tr))
cn = float(np_) / float(len(y_tr))
# graph projection para
h_low = int((len(w_hat) - 1) / 2)
h_high = int(h_low * (1. + gamma))
t_low = int(s)
t_high = int(s * (1. + gamma))
for epoch_i in range(max_epochs):
for ind, _ in enumerate(range(num_blocks)):
ii = randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
x_tr_b, y_tr_b = x_tr[block, :], y_tr[block]
loss_sto, grad_sto = logit_loss_grad_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=w_hat,
l2_reg=lambda_, cp=cp, cn=cn)
# edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose
h_nodes, p_grad = algo_head_tail_bisearch(
edges, grad_sto[:p], costs, g, root, h_low, h_high,
proj_max_num_iter, verbose)
p_grad = np.append(p_grad, grad_sto[-1])
fun_val_right = loss_sto
tmp_num_iter, ad_step, beta = 0, 1.0, 0.8
reg_term = np.linalg.norm(p_grad) ** 2.
while tmp_num_iter < 20:
x_tmp = w_hat - ad_step * p_grad
fun_val_left = logit_loss_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=x_tmp,
l2_reg=lambda_, cp=cp, cn=cn)
if fun_val_left > fun_val_right - ad_step / 2. * reg_term:
ad_step *= beta
else:
break
tmp_num_iter += 1
bt_sto = np.zeros_like(w_hat)
bt_sto[:p] = w_hat[:p] - ad_step * p_grad[:p]
t_nodes, proj_bt = algo_head_tail_bisearch(
edges, bt_sto[:p], costs, g, root, t_low, t_high,
proj_max_num_iter, verbose)
w_hat[:p] = proj_bt[:p]
w_hat[p] = w_hat[p] - ad_step * grad_sto[p] # intercept.
return w_hat
def algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, num_blocks, lambda_):
np.random.seed() # do not forget it.
w_hat = w0
(m, p) = x_tr.shape
b = int(m) / int(num_blocks)
np_ = np.sum(y_tr == 1)
nn_ = np.sum(y_tr == -1)
cp = float(nn_) / float(len(y_tr))
cn = float(np_) / float(len(y_tr))
for epoch_i in range(max_epochs):
for ind, _ in enumerate(range(num_blocks)):
ii = randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
x_tr_b, y_tr_b = x_tr[block, :], y_tr[block]
loss_sto, grad_sto = logit_loss_grad_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=w_hat,
l2_reg=lambda_, cp=cp, cn=cn)
fun_val_right = loss_sto
tmp_num_iter, ad_step, beta = 0, 1.0, 0.8
reg_term = np.linalg.norm(grad_sto) ** 2.
while tmp_num_iter < 20:
x_tmp = w_hat - ad_step * grad_sto
fun_val_left = logit_loss_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=x_tmp,
l2_reg=lambda_, cp=cp, cn=cn)
if fun_val_left > fun_val_right - ad_step / 2. * reg_term:
ad_step *= beta
else:
break
tmp_num_iter += 1
bt_sto = w_hat - ad_step * grad_sto
bt_sto[np.argsort(np.abs(bt_sto))[:p - s]] = 0.
w_hat = bt_sto
return w_hat
def run_single_test(para):
data, method_list, tr_idx, te_idx, s, num_blocks, lambda_, \
max_epochs, fold_i, subfold_i = para
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
res = {_: dict() for _ in method_list}
tr_data = dict()
tr_data['x'] = data['x'][tr_idx, :]
tr_data['y'] = data['y'][tr_idx]
te_data = dict()
te_data['x'] = data['x'][te_idx, :]
te_data['y'] = data['y'][te_idx]
x_tr, y_tr = tr_data['x'], tr_data['y']
w0 = np.zeros(np.shape(x_tr)[1] + 1)
# --------------------------------
    # with a single block (b=1) this corresponds to IHT
w_hat = algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, 1, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
print('-' * 80)
print('number of positive: %02d, missed: %02d '
'number of negative: %02d, missed: %02d ' %
(len(posi_idx), float(np.sum(pred_y[posi_idx] != 1)),
len(nega_idx), float(np.sum(pred_y[nega_idx] != -1))))
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['iht']['bacc'] = (v1 + v2) / 2.
res['iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['iht']['perf'] = res['iht']['bacc']
res['iht']['w_hat'] = w_hat
print('iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' %
(s, w_hat[-1], res['iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
# --------------------------------
w_hat = algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, num_blocks, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['sto-iht']['bacc'] = (v1 + v2) / 2.
res['sto-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['sto-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['sto-iht']['perf'] = res['sto-iht']['bacc']
res['sto-iht']['w_hat'] = w_hat
print('sto-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['sto-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
tr_data = dict()
tr_data['x'] = data['x'][tr_idx, :]
tr_data['y'] = data['y'][tr_idx]
te_data = dict()
te_data['x'] = data['x'][te_idx, :]
te_data['y'] = data['y'][te_idx]
x_tr, y_tr = tr_data['x'], tr_data['y']
w0 = np.zeros(np.shape(x_tr)[1] + 1)
# --------------------------------
    # with a single block (b=1) this corresponds to GraphIHT
w_hat = algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s,
data['edges'], data['costs'], 1, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['graph-iht']['bacc'] = (v1 + v2) / 2.
res['graph-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['graph-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['graph-iht']['perf'] = res['graph-iht']['bacc']
res['graph-iht']['w_hat'] = w_hat
print('graph-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['graph-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
# --------------------------------
w_hat = algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s,
data['edges'], data['costs'], num_blocks, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['graph-sto-iht']['bacc'] = (v1 + v2) / 2.
res['graph-sto-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['graph-sto-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['graph-sto-iht']['perf'] = res['graph-sto-iht']['bacc']
res['graph-sto-iht']['w_hat'] = w_hat
print('graph-sto-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['graph-sto-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
return s, num_blocks, lambda_, res, fold_i, subfold_i
def run_parallel_tr(
data, method_list, s_list, b_list, lambda_list, max_epochs, num_cpus,
fold_i):
# 5-fold cross validation
s_auc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
s_acc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
s_bacc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
input_paras = []
for sf_ii in range(len(data['data_subsplits'][fold_i])):
s_tr = data['data_subsplits'][fold_i][sf_ii]['train']
s_te = data['data_subsplits'][fold_i][sf_ii]['test']
for s, num_block, lambda_ in product(s_list, b_list, lambda_list):
input_paras.append(
(data, method_list, s_tr, s_te, s, num_block, lambda_,
max_epochs, fold_i, sf_ii))
pool = multiprocessing.Pool(processes=num_cpus)
results_pool = pool.map(run_single_test, input_paras)
pool.close()
pool.join()
sub_res = dict()
for item in results_pool:
s, num_blocks, lambda_, re, fold_i, subfold_i = item
if subfold_i not in sub_res:
sub_res[subfold_i] = []
sub_res[subfold_i].append((s, num_blocks, lambda_, re))
for sf_ii in sub_res:
res = {_: dict() for _ in method_list}
for _ in method_list:
res[_]['s_list'] = s_list
res[_]['b_list'] = b_list
res[_]['lambda_list'] = lambda_list
res[_]['auc'] = dict()
res[_]['acc'] = dict()
res[_]['bacc'] = dict()
res[_]['perf'] = dict()
res[_]['w_hat'] = {(s, num_blocks, lambda_): None
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)}
for s, num_blocks, lambda_, re in sub_res[sf_ii]:
for _ in method_list:
res[_]['auc'][(s, num_blocks, lambda_)] = re[_]['auc']
res[_]['acc'][(s, num_blocks, lambda_)] = re[_]['acc']
res[_]['bacc'][(s, num_blocks, lambda_)] = re[_]['bacc']
res[_]['perf'][(s, num_blocks, lambda_)] = re[_]['perf']
res[_]['w_hat'][(s, num_blocks, lambda_)] = re[_]['w_hat']
for _ in method_list:
for (s, num_blocks, lambda_) in \
product(s_list, b_list, lambda_list):
key_para = (s, num_blocks, lambda_)
s_auc[_][key_para] += res[_]['auc'][key_para]
s_acc[_][key_para] += res[_]['acc'][key_para]
s_bacc[_][key_para] += res[_]['bacc'][key_para]
# tune by balanced accuracy
s_star = dict()
for _ in method_list:
s_star[_] = min(s_bacc[_], key=s_bacc[_].get)
best_para = s_star[_]
print('tr %15s fold_%2d s: %02d b: %03d lambda: %.4f bacc: %.4f' %
(_, fold_i, best_para[0], best_para[1], best_para[2],
s_bacc[_][best_para] / 5.0))
return s_star, s_bacc
def run_parallel_te(
data, method_list, tr_idx, te_idx, s_list, b_list,
lambda_list, max_epochs, num_cpus):
res = {_: dict() for _ in method_list}
for _ in method_list:
res[_]['s_list'] = s_list
res[_]['b_list'] = b_list
res[_]['lambda_list'] = lambda_list
res[_]['auc'] = dict()
res[_]['acc'] = dict()
res[_]['bacc'] = dict()
res[_]['perf'] = dict()
res[_]['w_hat'] = {(s, num_blocks, lambda_): None
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)}
input_paras = [(data, method_list, tr_idx, te_idx, s, num_block,
lambda_, max_epochs, '', '') for s, num_block, lambda_ in
product(s_list, b_list, lambda_list)]
pool = multiprocessing.Pool(processes=num_cpus)
results_pool = pool.map(run_single_test, input_paras)
pool.close()
pool.join()
for s, num_blocks, lambda_, re, fold_i, subfold_i in results_pool:
for _ in method_list:
res[_]['auc'][(s, num_blocks, lambda_)] = re[_]['auc']
res[_]['acc'][(s, num_blocks, lambda_)] = re[_]['acc']
res[_]['bacc'][(s, num_blocks, lambda_)] = re[_]['bacc']
res[_]['perf'][(s, num_blocks, lambda_)] = re[_]['perf']
res[_]['w_hat'][(s, num_blocks, lambda_)] = re[_]['w_hat']
return res
def get_single_data(trial_i, root_input):
import scipy.io as sio
cancer_related_genes = {
4288: 'MKI67', 1026: 'CDKN1A', 472: 'ATM', 7033: 'TFF3', 2203: 'FBP1',
7494: 'XBP1', 1824: 'DSC2', 1001: 'CDH3', 11200: 'CHEK2',
7153: 'TOP2A', 672: 'BRCA1', 675: 'BRCA2', 580: 'BARD1', 9: 'NAT1',
771: 'CA12', 367: 'AR', 7084: 'TK2', 5892: 'RAD51D', 2625: 'GATA3',
7155: 'TOP2B', 896: 'CCND3', 894: 'CCND2', 10551: 'AGR2',
3169: 'FOXA1', 2296: 'FOXC1'}
data = dict()
f_name = 'overlap_data_%02d.mat' % trial_i
re = sio.loadmat(root_input + f_name)['save_data'][0][0]
data['data_X'] = np.asarray(re['data_X'], dtype=np.float64)
data_y = [_[0] for _ in re['data_Y']]
data['data_Y'] = np.asarray(data_y, dtype=np.float64)
data_edges = [[_[0] - 1, _[1] - 1] for _ in re['data_edges']]
data['data_edges'] = np.asarray(data_edges, dtype=int)
data_pathways = [[_[0], _[1]] for _ in re['data_pathways']]
data['data_pathways'] = np.asarray(data_pathways, dtype=int)
data_entrez = [_[0] for _ in re['data_entrez']]
data['data_entrez'] = np.asarray(data_entrez, dtype=int)
data['data_splits'] = {i: dict() for i in range(5)}
data['data_subsplits'] = {i: {j: dict() for j in range(5)}
for i in range(5)}
for i in range(5):
xx = re['data_splits'][0][i][0][0]['train']
data['data_splits'][i]['train'] = [_ - 1 for _ in xx[0]]
xx = re['data_splits'][0][i][0][0]['test']
data['data_splits'][i]['test'] = [_ - 1 for _ in xx[0]]
for j in range(5):
xx = re['data_subsplits'][0][i][0][j]['train'][0][0]
data['data_subsplits'][i][j]['train'] = [_ - 1 for _ in xx[0]]
xx = re['data_subsplits'][0][i][0][j]['test'][0][0]
data['data_subsplits'][i][j]['test'] = [_ - 1 for _ in xx[0]]
re_path = [_[0] for _ in re['re_path_varInPath']]
data['re_path_varInPath'] = np.asarray(re_path)
re_path_entrez = [_[0] for _ in re['re_path_entrez']]
data['re_path_entrez'] = np.asarray(re_path_entrez)
re_path_ids = [_[0] for _ in re['re_path_ids']]
data['re_path_ids'] = np.asarray(re_path_ids)
re_path_lambdas = [_ for _ in re['re_path_lambdas'][0]]
data['re_path_lambdas'] = np.asarray(re_path_lambdas)
re_path_groups = [_[0][0] for _ in re['re_path_groups_lasso'][0]]
data['re_path_groups_lasso'] = np.asarray(re_path_groups)
re_path_groups_overlap = [_[0][0] for _ in re['re_path_groups_overlap'][0]]
data['re_path_groups_overlap'] = np.asarray(re_path_groups_overlap)
re_edge = [_[0] for _ in re['re_edge_varInGraph']]
data['re_edge_varInGraph'] = np.asarray(re_edge)
re_edge_entrez = [_[0] for _ in re['re_edge_entrez']]
data['re_edge_entrez'] = np.asarray(re_edge_entrez)
data['re_edge_groups_lasso'] = np.asarray(re['re_edge_groups_lasso'])
data['re_edge_groups_overlap'] = np.asarray(re['re_edge_groups_overlap'])
for method in ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap']:
res = {fold_i: dict() for fold_i in range(5)}
for fold_ind, fold_i in enumerate(range(5)):
res[fold_i]['lambdas'] = re[method][0][fold_i]['lambdas'][0][0][0]
res[fold_i]['kidx'] = re[method][0][fold_i]['kidx'][0][0][0]
res[fold_i]['kgroups'] = re[method][0][fold_i]['kgroups'][0][0][0]
res[fold_i]['kgroupidx'] = re[method][0][fold_i]['kgroupidx'][0][0]
res[fold_i]['groups'] = re[method][0][fold_i]['groups'][0]
res[fold_i]['sbacc'] = re[method][0][fold_i]['sbacc'][0]
res[fold_i]['AS'] = re[method][0][fold_i]['AS'][0]
res[fold_i]['completeAS'] = re[method][0][fold_i]['completeAS'][0]
res[fold_i]['lstar'] = re[method][0][fold_i]['lstar'][0][0][0][0]
res[fold_i]['auc'] = re[method][0][fold_i]['auc'][0]
res[fold_i]['acc'] = re[method][0][fold_i]['acc'][0]
res[fold_i]['bacc'] = re[method][0][fold_i]['bacc'][0]
res[fold_i]['perf'] = re[method][0][fold_i]['perf'][0][0]
res[fold_i]['pred'] = re[method][0][fold_i]['pred']
res[fold_i]['Ws'] = re[method][0][fold_i]['Ws'][0][0]
res[fold_i]['oWs'] = re[method][0][fold_i]['oWs'][0][0]
res[fold_i]['nextGrad'] = re[method][0][fold_i]['nextGrad'][0]
data[method] = res
import networkx as nx
g = nx.Graph()
ind_pathways = {_: i for i, _ in enumerate(data['data_entrez'])}
all_nodes = {ind_pathways[_]: '' for _ in data['re_path_entrez']}
maximum_nodes, maximum_list_edges = set(), []
for edge in data['data_edges']:
if edge[0] in all_nodes and edge[1] in all_nodes:
g.add_edge(edge[0], edge[1])
isolated_genes = set()
maximum_genes = set()
for cc in nx.connected_component_subgraphs(g):
if len(cc) <= 5:
for item in list(cc):
isolated_genes.add(data['data_entrez'][item])
else:
for item in list(cc):
maximum_nodes = set(list(cc))
maximum_genes.add(data['data_entrez'][item])
maximum_nodes = np.asarray(list(maximum_nodes))
subgraph = nx.Graph()
for edge in data['data_edges']:
if edge[0] in maximum_nodes and edge[1] in maximum_nodes:
if edge[0] != edge[1]: # remove some self-loops
maximum_list_edges.append(edge)
subgraph.add_edge(edge[0], edge[1])
data['map_entrez'] = np.asarray([data['data_entrez'][_]
for _ in maximum_nodes])
data['edges'] = np.asarray(maximum_list_edges, dtype=int)
data['costs'] = np.asarray([1.] * len(maximum_list_edges),
dtype=np.float64)
data['x'] = data['data_X'][:, maximum_nodes]
data['y'] = data['data_Y']
data['nodes'] = np.asarray(range(len(maximum_nodes)), dtype=int)
data['cancer_related_genes'] = cancer_related_genes
for edge_ind, edge in enumerate(data['edges']):
uu = list(maximum_nodes).index(edge[0])
vv = list(maximum_nodes).index(edge[1])
data['edges'][edge_ind][0] = uu
data['edges'][edge_ind][1] = vv
method_list = ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap']
found_set = {method: set() for method in method_list}
for method in method_list:
for fold_i in range(5):
best_lambda = data[method][fold_i]['lstar']
kidx = data[method][fold_i]['kidx']
re = list(data[method][fold_i]['lambdas']).index(best_lambda)
ws = data[method][fold_i]['oWs'][:, re]
for item in [kidx[_] for _ in np.nonzero(ws[1:])[0]]:
if item in cancer_related_genes:
found_set[method].add(cancer_related_genes[item])
data['found_related_genes'] = found_set
return data
def run_test(method_list, n_folds, max_epochs, s_list, b_list, lambda_list,
folding_i, num_cpus, root_input, root_output):
cv_res = {_: dict() for _ in range(n_folds)}
for fold_i in range(n_folds):
data = get_single_data(folding_i, root_input)
tr_idx = data['data_splits'][fold_i]['train']
te_idx = data['data_splits'][fold_i]['test']
f_data = data.copy()
tr_data = dict()
tr_data['x'] = f_data['x'][tr_idx, :]
tr_data['y'] = f_data['y'][tr_idx]
tr_data['data_entrez'] = f_data['data_entrez']
f_data['x'] = data['x']
# data normalization
x_mean = np.tile(np.mean(f_data['x'], axis=0), (len(f_data['x']), 1))
x_std = np.tile(np.std(f_data['x'], axis=0), (len(f_data['x']), 1))
        f_data['x'] = np.nan_to_num(np.divide(f_data['x'] - x_mean, x_std))
from coffea import hist, processor
from copy import deepcopy
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
import pickle
import lz4.frame
import numpy
import pandas
import awkward
from functools import partial
from coffea.processor.executor import _futures_handler, _decompress, _reduce
from coffea.processor.accumulator import accumulate
from coffea.nanoevents import NanoEventsFactory, schemas
from coffea.nanoevents.mapping import SimplePreloadedColumnSource
import pyspark
import pyspark.sql.functions as fn
from pyspark.sql.types import BinaryType, StringType, StructType, StructField
from jinja2 import Environment, PackageLoader, select_autoescape
from coffea.util import awkward
lz4_clevel = 1
# this is a UDF that takes care of summing histograms across
# various spark results where the outputs are histogram blobs
def agg_histos_raw(series, lz4_clevel):
goodlines = series[series.str.len() > 0]
if goodlines.size == 1: # short-circuit trivial aggregations
return goodlines[0]
return _reduce(lz4_clevel)(goodlines)
@fn.pandas_udf(BinaryType(), fn.PandasUDFType.GROUPED_AGG)
def agg_histos(series):
global lz4_clevel
return agg_histos_raw(series, lz4_clevel)
def reduce_histos_raw(df, lz4_clevel):
histos = df['histos']
outhist = _reduce(lz4_clevel)(histos[histos.str.len() > 0])
    return pandas.DataFrame(data={'histos': numpy.array([outhist], dtype='O')})
'''
Modified from https://github.com/wengong-jin/nips17-rexgen/blob/master/USPTO/core-wln-global/mol_graph.py
'''
import chainer
import numpy as np
from rdkit import Chem
from rdkit import RDLogger
from tqdm import tqdm
from chainer_chemistry.dataset.preprocessors.gwm_preprocessor import GGNNGWMPreprocessor
rdl = RDLogger.logger()
rdl.setLevel(RDLogger.CRITICAL)
elem_list = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K',
'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In',
'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'W', 'Ru', 'Nb', 'Re', 'Te', 'Rh', 'Tc', 'Ba', 'Bi', 'Hf', 'Mo', 'U',
'Sm', 'Os', 'Ir', 'Ce', 'Gd', 'Ga', 'Cs', 'unknown']
def read_data(path):
data = []
with open(path, 'r') as f:
for line in f:
r, action = line.strip('\r\n ').split()
if len(r.split('>')) != 3 or r.split('>')[1] != '': raise ValueError('invalid line:', r)
react = r.split('>')[0]
product = r.split('>')[-1]
data.append([react, product, action])
return data
def onek_encoding_unk(x, allowable_set):
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
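# Example (hypothetical allowable set): onek_encoding_unk('N', ['C', 'N', 'unknown'])
# returns [False, True, False], while an unlisted symbol such as 'Xx' falls back
# to the final 'unknown' slot and returns [False, False, True].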
def atom_features(atom):
return np.array(onek_encoding_unk(atom.GetSymbol(), elem_list)
+ onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
+ onek_encoding_unk(atom.GetExplicitValence(), [1, 2, 3, 4, 5, 6])
+ onek_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5])
+ [atom.GetIsAromatic()], dtype=np.float32)
def bond_features(bond):
bt = bond.GetBondType()
return np.array([bt == Chem.rdchem.BondType.SINGLE,
bt == Chem.rdchem.BondType.DOUBLE,
bt == Chem.rdchem.BondType.TRIPLE,
bt == Chem.rdchem.BondType.AROMATIC,
                     0  # placeholder for the extra "bond changed" feature dimension
], dtype=np.float32)
def T0_data(reaction, sample_index, idxfunc=lambda x: x.GetIntProp('molAtomMapNumber') - 1):
'''
data preprocessing
:param reaction: [0]: reactants and reagents; [1]: products; [2]: actions(pair with two reacted atom number and changed bond type)
:param sample_index: the index of the reaction in raw txt
:param idxfunc: get the real index in matrix
:return: f_atoms: atom feature matrix with stop node
f_bonds: atom adj feature matrix with stop node
super_node_x: gwm create a super node to share updated feature between several molecules
label: one-hot vector of atoms participated in reaction
mask_reagents: mask -1 in the position of reagents
mask_reactants_reagents: mask -1 in the position of reagents and give high values of reacted atoms
pair_label: sort the reacted atoms' indies, then create pair matrix label. size=|steps|*|reacted atoms|*|reacted atoms|
mask_pair_select: for |atoms|-|reagents| < 10, give 0 mask for pair matrics
action_final: size=(|steps|+1)*4; for each step: [idx1, idx2, (bond type), (pair index in pair matrix)]; the added one step if for stop signal
step_num: |action_final| - 1
stop_idx: index of stop node
sample_index: the index of the reaction in raw txt
'''
mol = Chem.MolFromSmiles(reaction[0])
n_atoms = mol.GetNumAtoms()
atom_fdim = len(elem_list) + 6 + 6 + 6 + 1
f_atoms = np.zeros((n_atoms + 1, atom_fdim))
for atom in mol.GetAtoms():
f_atoms[idxfunc(atom)] = atom_features(atom)
f_bonds = np.zeros(
(4 + 1, n_atoms + 1, n_atoms + 1))
for bond in mol.GetBonds():
a1 = idxfunc(bond.GetBeginAtom())
a2 = idxfunc(bond.GetEndAtom())
bond_f = bond_features(bond)
f_bonds[:, a1, a2] = bond_f
f_bonds[:, a2, a1] = bond_f
super_node_x = GGNNGWMPreprocessor().get_input_features(mol)[2]
# 13-19-1.0;13-7-0.0 --> b=[12,18,6] ---> b=[6,12,18]
b = []
for a in reaction[2].split(';'):
b.append(int(a.split('-')[0]) - 1)
b.append(int(a.split('-')[1]) - 1)
b = list(set(b))
b.sort()
# one-hot vector of reacted atoms, add stop node; will -=1 after padding
label = np.ones(n_atoms + 1).astype(np.int32)
label[b] = 2
# action array: note that it stacked [-1, -1, -1] for stop step; will -=1 after padding
action = np.array(reaction[2].replace(';', '-').split('-')).astype('float32').astype('int32').reshape(-1, 3)
step_num = np.array(action.shape[0])
assert step_num == len(reaction[2].split(';'))
# actions should be shuffled
np.random.shuffle(action)
action = np.vstack([action, np.zeros(3).astype('int32') - 1])
# stop node idx
stop_idx = np.array([n_atoms])
'''
9.19 discussion: reagents should not be masked
'''
# reagents mask when select atoms; note that this mask will not used when calculating loss; will -=2 after padding
mask_reagents = np.ones(n_atoms + 1).astype('int32')
mask_reagents += 1
mask_reagents[-1] = 0
c = []
for molecular in reaction[0].split('.'):
reactant_bool = False
for atomIdx in b:
if ':' + str(atomIdx + 1) + ']' in molecular:
reactant_bool = True
break
if reactant_bool is False:
m_tmp = Chem.MolFromSmiles(molecular)
for atom_tmp in m_tmp.GetAtoms():
c.append(idxfunc(atom_tmp))
mask_reagents[c] = 1
# reagents mask is same as mask_reagents, reactants mask give large values according to sorted b list; will -=2 after padding
    mask_reactants_reagents = np.ones(n_atoms + 1)
import numpy as np
import os
def get_Wqb_value(file_duck_dat):
f = open(file_duck_dat,'r')
data = []
for line in f:
a = line.split()
data.append([float(a[1]), float(a[3]), float(a[5]), float(a[8])])
f.close()
data = np.array(data[1:])
Work = data[:,3]
#split it into segments of 200 points
    num_segments = int(len(data)/200)
    # analyze each segment to see if the minimum in the segment is a local minimum
    # a local minimum is the point with the lowest value among 200 neighbouring points
    # the first local minimum is the minimum used later for the duck analysis
    for segment in range(num_segments):
        # detect the minimum in the segment
sub_data = data[segment * 200 : (segment + 1) * 200]
sub_Work = sub_data[:,3]
index_local = np.argmin(sub_Work)
#segment of 200 points arround detected minimum
index_global = index_local + segment * 200
if index_global > 100:
sub2_data = data[index_global - 100 : index_global + 101]
else:
sub2_data = data[0 : index_global + 101]
sub2_Work = sub2_data[:,3]
        index_local2 = np.argmin(sub2_Work)
#!/usr/bin/env python
# <NAME>
# Last Change : 2007-08-24 10:19
from __future__ import absolute_import
import unittest
import numpy
import numpy.random
import os.path
from numpy.testing import assert_almost_equal
from PyDSTool.Toolbox.optimizers.criterion import *
from PyDSTool.Toolbox.optimizers.helpers import ForwardFiniteDifferences, CenteredFiniteDifferences
from PyDSTool.Toolbox.optimizers.line_search import *
from PyDSTool.Toolbox.optimizers.optimizer import *
from PyDSTool.Toolbox.optimizers.step import *
class Function(ForwardFiniteDifferences):
def __call__(self, x):
return (x[0] - 2) ** 2 + (2 * x[1] + 4) ** 2
class test_ForwardFiniteDifferences(unittest.TestCase):
def test_gradient_optimization(self):
startPoint = numpy.zeros(2, numpy.float)
optimi = StandardOptimizer(function = Function(),
step = FRConjugateGradientStep(),
criterion = criterion(iterations_max = 100, ftol = 0.0000001, gtol=0.0001),
x0 = startPoint,
line_search = StrongWolfePowellRule())
assert_almost_equal(optimi.optimize(), numpy.array((2., -2)))
def test_hessian_optimization(self):
        startPoint = numpy.zeros(2, numpy.float)
from pathlib import Path
import argparse
import functools
import os
import numpy as np
from tqdm import tqdm
from glob import glob
import random
import json
import multiprocessing
from osgeo import gdal
import cv2
from misc_utils import load_image, save_image, load_vflow
def rect_flow(rgb, mag, angle, agl):
# filter magnitude of input flow vectors
mag = cv2.medianBlur(mag, 5)
# initialize output images with zeros
output_rgb = np.zeros(rgb.shape, dtype=np.uint8)
output_mag = np.zeros(mag.shape, dtype=np.float32)
output_agl = np.zeros(agl.shape, dtype=np.float32)
output_mask = np.ones(mag.shape, dtype=np.uint8)
# get the flow vectors to map original features to new images
y2 = mag * np.sin(angle)
x2 = mag * np.cos(angle)
x2 = (x2 + 0.5).astype(np.int32)
y2 = (y2 + 0.5).astype(np.int32)
rows, cols = np.mgrid[0 : mag.shape[0], 0 : mag.shape[1]]
rows2 = np.clip(rows + x2, 0, mag.shape[0] - 1)
cols2 = np.clip(cols + y2, 0, mag.shape[1] - 1)
# map input pixel values to output images
for i in range(0, mag.shape[0]):
        for j in range(0, mag.shape[1]):
# favor taller things in output; this is a hard requirement
if mag[rows[i, j], cols[i, j]] < output_mag[rows2[i, j], cols2[i, j]]:
continue
output_rgb[rows2[i, j], cols2[i, j], :] = rgb[rows[i, j], cols[i, j], :]
output_agl[rows2[i, j], cols2[i, j]] = agl[rows[i, j], cols[i, j]]
output_mag[rows2[i, j], cols2[i, j]] = mag[rows[i, j], cols[i, j]]
output_mask[rows2[i, j], cols2[i, j]] = 0
# filter AGL
filtered_agl = cv2.medianBlur(output_agl, 5)
filtered_agl = cv2.medianBlur(filtered_agl, 5)
filtered_agl = cv2.medianBlur(filtered_agl, 5)
filtered_agl = cv2.medianBlur(filtered_agl, 5)
# filter occlusion mask to fill in pixels missed due to sampling error
filtered_mask = cv2.medianBlur(output_mask, 5)
filtered_mask = cv2.medianBlur(filtered_mask, 5)
filtered_mask = cv2.medianBlur(filtered_mask, 5)
filtered_mask = cv2.medianBlur(filtered_mask, 5)
# replace non-occluded but also non-mapped RGB pixels with median of neighbors
interp_mask = output_mask > filtered_mask
filtered_rgb = cv2.medianBlur(output_rgb, 5)
output_rgb[interp_mask, 0] = filtered_rgb[interp_mask, 0]
output_rgb[interp_mask, 1] = filtered_rgb[interp_mask, 1]
output_rgb[interp_mask, 2] = filtered_rgb[interp_mask, 2]
return output_rgb, filtered_agl, filtered_mask
def write_rectified_images(rgb, mag, angle_rads, agl, output_rgb_path):
# rectify all images
rgb_rct, agl_rct, mask_rct = rect_flow(rgb, mag, angle_rads, agl)
# format AGL image
max_agl = np.nanpercentile(agl, 99)
agl_rct[mask_rct > 0] = 0.0
agl_rct[agl_rct > max_agl] = max_agl
agl_rct *= 255.0 / max_agl
agl_rct = 255.0 - agl_rct
agl_rct = agl_rct.astype(np.uint8)
# format RGB image
rgb_rct[mask_rct > 0, 0] = 135
rgb_rct[mask_rct > 0, 1] = 206
rgb_rct[mask_rct > 0, 2] = 250
save_image(output_rgb_path, rgb)
save_image(output_rgb_path.replace(".tif", "_RECT.tif"), rgb_rct)
# AGL with rectification
rgb_rct[:, :, 0] = agl_rct
rgb_rct[:, :, 1] = agl_rct
rgb_rct[:, :, 2] = agl_rct
rgb_rct[mask_rct > 0, 0] = 135
rgb_rct[mask_rct > 0, 1] = 206
rgb_rct[mask_rct > 0, 2] = 250
save_image(
output_rgb_path.replace("RGB", "AGL").replace(".tif", "_RECT.tif"), rgb_rct
)
# AGL without rectification
agl_norect = np.copy(agl)
agl_norect[agl_norect > max_agl] = max_agl
agl_norect *= 255.0 / max_agl
agl_norect = 255.0 - agl_norect
agl_norect = agl_norect.astype(np.uint8)
save_image(output_rgb_path.replace("RGB", "AGL"), agl_norect)
def get_current_metrics(item, args):
# get arguments
vflow_gt_path, agl_gt_path, vflow_pred_path, aglpred_path, rgb_path, args = item
# load AGL, SCALE, and ANGLE predicted values
agl_pred = load_image(aglpred_path, args)
if agl_pred is None:
return None
vflow_items = load_vflow(
vflow_pred_path, agl=agl_pred, args=args, return_vflow_pred_mat=True
)
if vflow_items is None:
return None
vflow_pred, mag_pred, xdir_pred, ydir_pred, vflow_data = vflow_items
scale_pred, angle_pred = vflow_data["scale"], vflow_data["angle"]
# load AGL, SCALE, and ANGLE ground truth values
agl_gt = load_image(agl_gt_path, args)
if agl_gt is None:
return None
vflow_gt_items = load_vflow(vflow_gt_path, agl_gt, args, return_vflow_pred_mat=True)
if vflow_gt_items is None:
return None
vflow_gt, mag_gt, xdir_gt, ydir_gt, vflow_gt_data = vflow_gt_items
scale_gt, angle_gt = vflow_gt_data["scale"], vflow_gt_data["angle"]
# produce rectified images
if args.rectify and args.output_dir is not None:
rgb = load_image(rgb_path, args)
output_rgb_path = os.path.join(args.output_dir, os.path.basename(rgb_path))
write_rectified_images(rgb, mag_pred, angle_pred, agl_pred, output_rgb_path)
# compute differences
dir_pred = np.array([xdir_pred, ydir_pred])
dir_pred /= np.linalg.norm(dir_pred)
dir_gt = np.array([xdir_gt, ydir_gt])
dir_gt /= np.linalg.norm(dir_gt)
cos_ang = np.dot(dir_pred, dir_gt)
sin_ang = np.linalg.norm(np.cross(dir_pred, dir_gt))
rad_diff = np.arctan2(sin_ang, cos_ang)
# get mean error values
angle_error = np.degrees(rad_diff)
scale_error = np.abs(scale_pred - scale_gt)
mag_error = np.nanmean(np.abs(mag_pred - mag_gt))
epe = np.nanmean(np.sqrt(np.sum(np.square(vflow_gt - vflow_pred), axis=2)))
agl_error = np.nanmean(np.abs(agl_pred - agl_gt))
# get RMS error values
mag_rms = np.sqrt(np.nanmean(np.square(mag_pred - mag_gt)))
epe_rms = np.sqrt(np.nanmean(np.sum(np.square(vflow_gt - vflow_pred), axis=2)))
agl_rms = np.sqrt(np.nanmean(np.square(agl_pred - agl_gt)))
# gather data for computing R-square for AGL
agl_count = np.sum(np.isfinite(agl_gt))
agl_sse = np.nansum(np.square(agl_pred - agl_gt))
agl_gt_sum = np.nansum(agl_gt)
# gather data for computing R-square for VFLOW
vflow_count = np.sum(np.isfinite(vflow_gt))
vflow_gt_sum = np.nansum(vflow_gt)
vflow_sse = np.nansum(np.square(vflow_pred - vflow_gt))
items = (
angle_error,
scale_error,
mag_error,
epe,
agl_error,
mag_rms,
epe_rms,
agl_rms,
agl_count,
agl_sse,
agl_gt_sum,
vflow_count,
vflow_sse,
vflow_gt_sum,
)
return items
def get_r2_denoms(item, agl_gt_mean, vflow_gt_mean):
(
vflow_gt_path,
agl_gt_path,
vflow_pred_path,
aglpred_path,
rgb_path,
args,
) = item
agl_gt = load_image(agl_gt_path, args)
vflow_gt_items = load_vflow(vflow_gt_path, agl_gt, args, return_vflow_pred_mat=True)
vflow_gt, mag_gt, xdir_gt, ydir_gt, vflow_gt_data = vflow_gt_items
scale_gt, angle_gt = vflow_gt_data["scale"], vflow_gt_data["angle"]
agl_denom = np.nansum(np.square(agl_gt - agl_gt_mean))
vflow_denom = np.nansum(np.square(vflow_gt - vflow_gt_mean))
items = (agl_denom, vflow_denom)
return items
def get_city_scores(args, site):
# build lists of images to process
vflow_gt_paths = glob(os.path.join(args.truth_dir, site + "*_VFLOW*.json"))
if vflow_gt_paths == []:
return np.nan
angle_error, scale_error, mag_error, epe, agl_error, mag_rms, epe_rms, agl_rms = (
[],
[],
[],
[],
[],
[],
[],
[],
)
items = []
for vflow_gt_path in vflow_gt_paths:
vflow_name = os.path.basename(vflow_gt_path)
agl_name = vflow_name.replace("_VFLOW", "_AGL").replace(".json", ".tif")
agl_gt_path = os.path.join(args.truth_dir, agl_name)
rgb_path = agl_gt_path.replace("AGL", "RGB").replace(
".tif", f".{args.rgb_suffix}"
)
vflow_pred_path = os.path.join(args.predictions_dir, vflow_name)
agl_pred_path = os.path.join(args.predictions_dir, agl_name)
items.append(
(vflow_gt_path, agl_gt_path, vflow_pred_path, agl_pred_path, rgb_path, args)
)
# compute metrics for each image
pool = multiprocessing.Pool(args.num_processes)
results = []
for result in tqdm(
pool.imap_unordered(functools.partial(get_current_metrics, args=args), items),
total=len(items),
):
results.append(result)
pool.close()
pool.join()
# initialize AGL and VFLOW R-square data
agl_count, agl_sse, agl_gt_sum, vflow_count, vflow_sse, vflow_gt_sum = (
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
)
# gather results from list
angle_error, scale_error, mag_error, epe, agl_error, mag_rms, epe_rms, agl_rms = (
[],
[],
[],
[],
[],
[],
[],
[],
)
for result in results:
# get metrics for next image
if result is None:
return 0.0
(
curr_angle_error,
curr_scale_error,
curr_mag_error,
curr_epe,
curr_agl_error,
curr_mag_rms,
curr_epe_rms,
curr_agl_rms,
curr_agl_count,
curr_agl_sse,
curr_agl_gt_sum,
curr_vflow_count,
curr_vflow_sse,
curr_vflow_gt_sum,
) = result
# add metrics to lists
angle_error.append(curr_angle_error)
scale_error.append(curr_scale_error)
mag_error.append(curr_mag_error)
epe.append(curr_epe)
mag_rms.append(curr_mag_rms)
epe_rms.append(curr_epe_rms)
agl_error.append(curr_agl_error)
agl_rms.append(curr_agl_rms)
# update data for AGL R-square
agl_count = agl_count + curr_agl_count
agl_sse = agl_sse + curr_agl_sse
agl_gt_sum = agl_gt_sum + curr_agl_gt_sum
# update data for VFLOW R-square
vflow_count = vflow_count + curr_vflow_count
vflow_sse = vflow_sse + curr_vflow_sse
vflow_gt_sum = vflow_gt_sum + curr_vflow_gt_sum
# compute statistics over all images
mean_angle_error = np.nanmean(angle_error)
    rms_angle_error = np.sqrt(np.nanmean(np.square(angle_error)))
"""
Tests for ConvMolFeaturizer.
"""
import unittest
import numpy as np
from deepchem.feat.graph_features import ConvMolFeaturizer
class TestConvMolFeaturizer(unittest.TestCase):
"""
Test ConvMolFeaturizer featurizes properly.
"""
def test_carbon_nitrogen(self):
"""Test on carbon nitrogen molecule"""
# Note there is a central nitrogen of degree 4, with 4 carbons
# of degree 1 (connected only to central nitrogen).
raw_smiles = ['C[N+](C)(C)C']
import rdkit.Chem
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mols = featurizer.featurize(mols)
mol = mols[0]
# 5 atoms in compound
assert mol.get_num_atoms() == 5
# Get the adjacency lists grouped by degree
deg_adj_lists = mol.get_deg_adjacency_lists()
assert np.array_equal(deg_adj_lists[0], np.zeros([0, 0], dtype=np.int32))
# The 4 outer atoms connected to central nitrogen
assert np.array_equal(deg_adj_lists[1],
                          np.array([[4], [4], [4], [4]], dtype=np.int32))
import math
import pickle
import numpy as np
from skimage import morphology, measure
def yes_or_no(question: str)->bool:
reply = str(input(question+' (y/n): ')).lower().strip()
if reply == '':
return True
if reply[0] == 'y':
return True
if reply[0] == 'n':
return False
else:
return yes_or_no("Uhhhh... please enter ")
# obj0, obj1, obj2 are created here...
def save(filename, objects):
file = filename
# Saving the objects:
with open(file, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump(objects, f)
def load(filename):
file = filename
# Getting back the objects:
with open(file, 'rb') as f: # Python 3: open(..., 'rb')
object = pickle.load(f)
return object
def convert_rectangle(bbox: tuple)->dict:
rect = {'y': bbox[0],
'x': bbox[1],
'width': bbox[3] - bbox[1],
'height': bbox[2] - bbox[0]}
return rect
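# Example (bbox given in the (min_row, min_col, max_row, max_col) order that
# skimage.measure.regionprops produces; values are illustrative):
#   convert_rectangle((10, 20, 30, 50))
#   -> {'y': 10, 'x': 20, 'width': 30, 'height': 20}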
def crop_image(image: np.array, bbox: tuple)->np.array:
yi = bbox[0]
xi = bbox[1]
yf = bbox[2]
xf = bbox[3]
crop = image[yi:yf, xi:xf]
return crop
def mean2(x):
    y = np.sum(x) / np.size(x)
    return y
import unittest
from ancb import NumpyCircularBuffer
from ancb import ( # type: ignore
star_can_broadcast, can_broadcast
)
from numpy import array_equal, allclose, shares_memory
from numpy import array, zeros, arange, ndarray, ones, empty
from numpy.random import rand, randint
from numpy import fill_diagonal, roll
from itertools import zip_longest
from operator import (
matmul, add, sub, mul, truediv, mod, floordiv, pow,
rshift, lshift, and_, or_, xor, neg, pos, abs, inv, invert,
iadd, iand, ifloordiv, ilshift, imod, imul,
ior, ipow, irshift, isub, itruediv, ixor
)
class TestBroadcastability(unittest.TestCase):
def test_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
self.assertTrue(can_broadcast(x.shape, y.shape))
self.assertFalse(can_broadcast(x.shape, z.shape))
self.assertFalse(can_broadcast(y.shape, z.shape))
self.assertTrue(can_broadcast(x.shape, x.shape))
self.assertTrue(can_broadcast(y.shape, y.shape))
self.assertTrue(can_broadcast(z.shape, z.shape))
self.assertTrue(can_broadcast(w.shape, w.shape))
self.assertTrue(can_broadcast(x.shape, w.shape))
self.assertTrue(can_broadcast(y.shape, w.shape))
self.assertTrue(can_broadcast(z.shape, w.shape))
def test_star_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
starexpr = zip_longest(x.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, x.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, z.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(w.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
class OperatorTestFactory(type):
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
bin_operators = [
matmul, add, sub, mul, truediv, mod, floordiv, pow
]
un_operators = [neg, pos, abs, invert, inv]
bitbin_operators = [rshift, lshift, and_, or_, xor]
i_operators = [
iadd, ifloordiv, imul, ipow, isub, itruediv
]
bit_ioperators = [
ilshift, irshift, ior, iand, ixor, imod
]
def unop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = -arange(3, dtype=int)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(-1)
buffer.append(-2)
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # unfrag
buffer.append(-3)
test -= 1
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # frag
return f
def bitbinop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = arange(1, 4, dtype=int)
x = randint(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
return f
def binop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
test = arange(1, 4, dtype=float)
x = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
return f
def iop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
data2 = zeros(3, dtype=float)
test1 = arange(1, 4, dtype=float)
test2 = arange(2, 5, dtype=float)
x = rand(3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer2 + 0, test2))
return f
def bitiop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
data2 = zeros(3, dtype=int)
test1 = arange(1, 4, dtype=int)
test2 = arange(2, 5, dtype=int)
x = randint(low=1, high=100, size=3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(allclose(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(allclose(buffer2 + 0, test2))
return f
for op in bin_operators:
setattr(obj, 'test_{}'.format(op.__name__), binop_testcase(op))
for op in bitbin_operators:
setattr(obj, 'test_{}'.format(op.__name__), bitbinop_testcase(op))
for op in un_operators:
setattr(obj, 'test_{}'.format(op.__name__), unop_testcase(op))
for op in i_operators:
setattr(obj, 'test_{}'.format(op.__name__), iop_testcase(op))
for op in bit_ioperators:
setattr(obj, 'test_{}'.format(op.__name__), bitiop_testcase(op))
return(obj)
class TestNumpyCircularBuffer(
unittest.TestCase, metaclass=OperatorTestFactory
):
"""
NumpyCircularBuffer tests
"""
def test_init(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertTrue(array_equal(data, buffer))
def test_fragmentation(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertFalse(buffer.fragmented)
buffer.append(0)
self.assertFalse(buffer.fragmented)
buffer.append(1)
self.assertFalse(buffer.fragmented)
buffer.append(2)
self.assertFalse(buffer.fragmented)
buffer.append(3)
self.assertTrue(buffer.fragmented)
buffer.append(4)
self.assertTrue(buffer.fragmented)
buffer.append(5)
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
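    # Behaviour sketch of the ancb API exercised above (not a test): the buffer
    # wraps the backing array in place, `append` overwrites the oldest slot once
    # the array is full (which is when `fragmented` can become True), and `pop`
    # discards the oldest element.
    #   data = zeros(3)
    #   buf = NumpyCircularBuffer(data)
    #   buf.append(0); buf.append(1); buf.append(2)   # full, not fragmented
    #   buf.append(3)                                 # wraps around -> fragmented
    #   buf.pop()                                     # drops the oldest value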
def test_matmul_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(buffer @ C[:1], arange(1) @ C[:1]))
buffer.append(1)
self.assertTrue(allclose(buffer @ C[:2], arange(2) @ C[:2]))
buffer.append(2)
self.assertTrue(allclose(buffer @ C, arange(3) @ C))
buffer.append(3)
self.assertTrue(allclose(buffer @ C, (arange(1, 4)) @ C))
buffer.append(4)
self.assertTrue(allclose(buffer @ C, (arange(2, 5)) @ C))
buffer.append(5)
self.assertTrue(allclose(buffer @ C, (arange(3, 6)) @ C))
buffer.append(6)
self.assertTrue(allclose(buffer @ C, (arange(4, 7)) @ C))
buffer.pop()
self.assertTrue(allclose(buffer @ C[1:], (arange(5, 7)) @ C[1:]))
buffer.pop()
self.assertTrue(allclose(buffer @ C[2:], (arange(6, 7)) @ C[2:]))
def test_matmul_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer @ A
res_b = buffer @ B
res_c = buffer @ C
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = C[:1] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = C[:2] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = C[1:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = C[2:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = A @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(A @ buffer, A @ array([0, 1, 2])))
buffer.append(3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
        C = rand(12)
# ### Stochlite Pybullet Environment
# Last Update by <NAME> (May, 2021)
import numpy as np
import gym
from gym import spaces
import envs.environments.stoch_env.trajectory_generator as trajectory_generator
import math
import random
from collections import deque
import pybullet
import envs.environments.stoch_env.bullet_client as bullet_client
import pybullet_data
import envs.environments.stoch_env.planeEstimation.get_terrain_normal as normal_estimator
import matplotlib.pyplot as plt
from envs.environments.stoch_env.utils.logger import DataLog
import os
# LEG_POSITION = ["fl_", "bl_", "fr_", "br_"]
# KNEE_CONSTRAINT_POINT_RIGHT = [0.014, 0, 0.076] #hip
# KNEE_CONSTRAINT_POINT_LEFT = [0.0,0.0,-0.077] #knee
RENDER_HEIGHT = 720
RENDER_WIDTH = 960
PI = np.pi
class StochliteEnv(gym.Env):
def __init__(self,
render = False,
on_rack = False,
gait = 'trot',
phase = [0, PI, PI,0],#[FR, FL, BR, BL]
action_dim = 20,
end_steps = 1000,
stairs = False,
downhill =False,
seed_value = 100,
wedge = False,
IMU_Noise = False,
deg = 11): # deg = 5
self._is_stairs = stairs
self._is_wedge = wedge
self._is_render = render
self._on_rack = on_rack
self.rh_along_normal = 0.24
self.seed_value = seed_value
random.seed(self.seed_value)
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._pybullet_client = bullet_client.BulletClient()
# self._theta = 0
self._frequency = 2.5 # originally 2.5, changing for stability
self.termination_steps = end_steps
self.downhill = downhill
#PD gains
self._kp = 400
self._kd = 10
self.dt = 0.01
self._frame_skip = 50
self._n_steps = 0
self._action_dim = action_dim
self._obs_dim = 8
self.action = np.zeros(self._action_dim)
self._last_base_position = [0, 0, 0]
self.last_rpy = [0, 0, 0]
self._distance_limit = float("inf")
self.current_com_height = 0.25 # 0.243
#wedge_parameters
self.wedge_start = 0.5
self.wedge_halflength = 2
        if gait == 'trot':
            phase = [0, PI, PI, 0]
        elif gait == 'walk':
            phase = [0, PI, 3*PI/2, PI/2]
self._trajgen = trajectory_generator.TrajectoryGenerator(gait_type=gait, phase=phase)
self.inverse = False
self._cam_dist = 1.0
self._cam_yaw = 0.0
self._cam_pitch = 0.0
self.avg_vel_per_step = 0
self.avg_omega_per_step = 0
self.linearV = 0
self.angV = 0
self.prev_vel=[0,0,0]
self.prev_ang_vels = [0, 0, 0] # roll_vel, pitch_vel, yaw_vel of prev step
self.total_power = 0
self.x_f = 0
self.y_f = 0
self.clips=7
self.friction = 0.6
# self.ori_history_length = 3
# self.ori_history_queue = deque([0]*3*self.ori_history_length,
# maxlen=3*self.ori_history_length)#observation queue
self.step_disp = deque([0]*100, maxlen=100)
self.stride = 5
self.incline_deg = deg
self.incline_ori = 0
self.prev_incline_vec = (0,0,1)
self.terrain_pitch = []
self.add_IMU_noise = IMU_Noise
self.INIT_POSITION =[0,0,0.3] # [0,0,0.3], Spawning stochlite higher to remove initial drift
self.INIT_ORIENTATION = [0, 0, 0, 1]
self.support_plane_estimated_pitch = 0
self.support_plane_estimated_roll = 0
self.pertub_steps = 0
self.x_f = 0
self.y_f = 0
## Gym env related mandatory variables
self._obs_dim = 8 #[roll, pitch, roll_vel, pitch_vel, yaw_vel, SP roll, SP pitch, cmd_xvel, cmd_yvel, cmd_avel]
observation_high = np.array([np.pi/2] * self._obs_dim)
observation_low = -observation_high
self.observation_space = spaces.Box(observation_low, observation_high)
action_high = np.array([1] * self._action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.commands = np.array([0, 0, 0]) #Joystick commands consisting of cmd_x_velocity, cmd_y_velocity, cmd_ang_velocity
self.max_linear_xvel = 0.5 #0.4, made zero for only ang vel # calculation is < 0.2 m steplength times the frequency 2.5 Hz
self.max_linear_yvel = 0.25 #0.25, made zero for only ang vel # calculation is < 0.14 m times the frequency 2.5 Hz
self.max_ang_vel = 2 #considering less than pi/2 steer angle # less than one complete rotation in one second
self.max_steplength = 0.2 # by the kinematic limits of the robot
self.max_steer_angle = PI/2 #plus minus PI/2 rads
self.max_x_shift = 0.1 #plus minus 0.1 m
self.max_y_shift = 0.14 # max 30 degree abduction
self.max_z_shift = 0.1 # plus minus 0.1 m
self.max_incline = 15 # in deg
self.robot_length = 0.334 # measured from stochlite
self.robot_width = 0.192 # measured from stochlite
self.hard_reset()
self.Set_Randomization(default=True, idx1=2, idx2=2)
self.logger = DataLog()
if(self._is_stairs):
boxHalfLength = 0.1
boxHalfWidth = 1
boxHalfHeight = 0.015
sh_colBox = self._pybullet_client.createCollisionShape(self._pybullet_client.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight])
boxOrigin = 0.3
n_steps = 15
self.stairs = []
for i in range(n_steps):
step =self._pybullet_client.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,basePosition = [boxOrigin + i*2*boxHalfLength,0,boxHalfHeight + i*2*boxHalfHeight],baseOrientation=[0.0,0.0,0.0,1])
self.stairs.append(step)
self._pybullet_client.changeDynamics(step, -1, lateralFriction=0.8)
def hard_reset(self):
'''
Function to
1) Set simulation parameters which remains constant throughout the experiments
2) load urdf of plane, wedge and robot in initial conditions
'''
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(numSolverIterations=int(300))
self._pybullet_client.setTimeStep(self.dt/self._frame_skip)
self.plane = self._pybullet_client.loadURDF("%s/plane.urdf" % pybullet_data.getDataPath())
self._pybullet_client.changeVisualShape(self.plane,-1,rgbaColor=[1,1,1,0.9])
self._pybullet_client.setGravity(0, 0, -9.8)
if self._is_wedge:
wedge_halfheight_offset = 0.01
self.wedge_halfheight = wedge_halfheight_offset + 1.5 * math.tan(math.radians(self.incline_deg)) / 2.0
self.wedgePos = [0, 0, self.wedge_halfheight]
self.wedgeOrientation = self._pybullet_client.getQuaternionFromEuler([0, 0, self.incline_ori])
if not (self.downhill):
wedge_model_path = "envs/environments/stoch_env/Wedges/uphill/urdf/wedge_" + str(
self.incline_deg) + ".urdf"
self.INIT_ORIENTATION = self._pybullet_client.getQuaternionFromEuler(
[math.radians(self.incline_deg) * math.sin(self.incline_ori),
-math.radians(self.incline_deg) * math.cos(self.incline_ori), 0])
self.robot_landing_height = wedge_halfheight_offset + 0.28 + math.tan(
math.radians(self.incline_deg)) * abs(self.wedge_start)
# self.INIT_POSITION = [self.INIT_POSITION[0], self.INIT_POSITION[1], self.robot_landing_height]
self.INIT_POSITION = [-0.8, 0.0, 0.38] #[-0.8, 0, self.robot_landing_height]
else:
wedge_model_path = "envs/environments/stoch_env/Wedges/downhill/urdf/wedge_" + str(
self.incline_deg) + ".urdf"
self.robot_landing_height = wedge_halfheight_offset + 0.28 + math.tan(
math.radians(self.incline_deg)) * 1.5
self.INIT_POSITION = [0, 0, self.robot_landing_height] # [0.5, 0.7, 0.3] #[-0.5,-0.5,0.3]
self.INIT_ORIENTATION = [0, 0, 0, 1] #[ 0, -0.0998334, 0, 0.9950042 ]
self.wedge = self._pybullet_client.loadURDF(wedge_model_path, self.wedgePos, self.wedgeOrientation)
self.SetWedgeFriction(0.7)
model_path = os.path.join(os.getcwd(), 'envs/environments/stoch_env/robots/stochlite/stochlite_description/urdf/stochlite_urdf.urdf')
self.stochlite = self._pybullet_client.loadURDF(model_path, self.INIT_POSITION,self.INIT_ORIENTATION)
self._joint_name_to_id, self._motor_id_list = self.BuildMotorIdList()
num_legs = 4
for i in range(num_legs):
self.ResetLeg(i, add_constraint=True)
self.ResetPoseForAbd()
if self._on_rack:
self._pybullet_client.createConstraint(
self.stochlite, -1, -1, -1, self._pybullet_client.JOINT_FIXED,
[0, 0, 0], [0, 0, 0], [0, 0, 0.4])
self._pybullet_client.resetBasePositionAndOrientation(self.stochlite, self.INIT_POSITION, self.INIT_ORIENTATION)
self._pybullet_client.resetBaseVelocity(self.stochlite, [0, 0, 0], [0, 0, 0])
self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
self.SetFootFriction(self.friction)
# self.SetLinkMass(0,0)
# self.SetLinkMass(11,0)
def reset_standing_position(self):
num_legs = 4
for i in range(num_legs):
self.ResetLeg(i, add_constraint=False, standstilltorque=10)
self.ResetPoseForAbd()
# Conditions for standstill
for i in range(300):
self._pybullet_client.stepSimulation()
for i in range(num_legs):
self.ResetLeg(i, add_constraint=False, standstilltorque=0)
def reset(self):
'''
This function resets the environment
Note : Set_Randomization() is called before reset() to either randomize or set environment in default conditions.
'''
# self._theta = 0
self._last_base_position = [0, 0, 0]
self.commands = [0, 0, 0]
self.last_rpy = [0, 0, 0]
self.inverse = False
if self._is_wedge:
self._pybullet_client.removeBody(self.wedge)
wedge_halfheight_offset = 0.01
self.wedge_halfheight = wedge_halfheight_offset + 1.5 * math.tan(math.radians(self.incline_deg)) / 2.0
self.wedgePos = [0, 0, self.wedge_halfheight]
self.wedgeOrientation = self._pybullet_client.getQuaternionFromEuler([0, 0, self.incline_ori])
if not (self.downhill):
wedge_model_path = "envs/environments/stoch_env/Wedges/uphill/urdf/wedge_" + str(self.incline_deg) + ".urdf"
self.INIT_ORIENTATION = self._pybullet_client.getQuaternionFromEuler(
[math.radians(self.incline_deg) * math.sin(self.incline_ori),
-math.radians(self.incline_deg) * math.cos(self.incline_ori), 0])
self.robot_landing_height = wedge_halfheight_offset + 0.28 + math.tan(math.radians(self.incline_deg)) * abs(self.wedge_start)
# self.INIT_POSITION = [self.INIT_POSITION[0], self.INIT_POSITION[1], self.robot_landing_height]
self.INIT_POSITION = [-0.8, 0.0, 0.38] #[-0.8, 0, self.robot_landing_height]
else:
wedge_model_path = "envs/environments/stoch_env/Wedges/downhill/urdf/wedge_" + str(self.incline_deg) + ".urdf"
self.robot_landing_height = wedge_halfheight_offset + 0.28 + math.tan(math.radians(self.incline_deg)) * 1.5
self.INIT_POSITION = [0, 0, self.robot_landing_height] # [0.5, 0.7, 0.3] #[-0.5,-0.5,0.3]
self.INIT_ORIENTATION = [0, 0, 0, 1]
self.wedge = self._pybullet_client.loadURDF(wedge_model_path, self.wedgePos, self.wedgeOrientation)
self.SetWedgeFriction(0.7)
self._pybullet_client.resetBasePositionAndOrientation(self.stochlite, self.INIT_POSITION, self.INIT_ORIENTATION)
self._pybullet_client.resetBaseVelocity(self.stochlite, [0, 0, 0], [0, 0, 0])
self.reset_standing_position()
self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
self._n_steps = 0
return self.GetObservation()
'''
Old Joy-stick Emulation Function
def updateCommands(self, num_plays, episode_length):
ratio = num_plays/episode_length
if num_plays < 0.2 * episode_length:
self.commands = [0, 0, 0]
elif num_plays < 0.8 * episode_length:
self.commands = np.array([self.max_linear_xvel, self.max_linear_yvel, self.max_ang_vel])*ratio
else:
self.commands = [self.max_linear_xvel, self.max_linear_yvel, self.max_ang_vel]
# self.commands = np.array([self.max_linear_xvel, self.max_linear_yvel, self.max_ang_vel])*ratio
'''
def apply_Ext_Force(self, x_f, y_f,link_index= 1,visulaize = False,life_time=0.01):
'''
function to apply external force on the robot
Args:
x_f : external force in x direction
y_f : external force in y direction
link_index : link index of the robot where the force need to be applied
visulaize : bool, whether to visulaize external force by arrow symbols
life_time : life time of the visualization
'''
force_applied = [x_f,y_f,0]
self._pybullet_client.applyExternalForce(self.stochlite, link_index, forceObj=[x_f,y_f,0],posObj=[0,0,0],flags=self._pybullet_client.LINK_FRAME)
f_mag = np.linalg.norm(np.array(force_applied))
if(visulaize and f_mag != 0.0):
point_of_force = self._pybullet_client.getLinkState(self.stochlite, link_index)[0]
lam = 1/(2*f_mag)
dummy_pt = [point_of_force[0]-lam*force_applied[0],
point_of_force[1]-lam*force_applied[1],
point_of_force[2]-lam*force_applied[2]]
self._pybullet_client.addUserDebugText(str(round(f_mag,2))+" N",dummy_pt,[0.13,0.54,0.13],textSize=2,lifeTime=life_time)
self._pybullet_client.addUserDebugLine(point_of_force,dummy_pt,[0,0,1],3,lifeTime=life_time)
def SetLinkMass(self,link_idx,mass=0):
'''
Function to add extra mass to front and back link of the robot
Args:
link_idx : link index of the robot whose weight to need be modified
mass : value of extra mass to be added
Ret:
new_mass : mass of the link after addition
Note : Presently, this function supports addition of masses in the front and back link only (0, 11)
'''
link_mass = self._pybullet_client.getDynamicsInfo(self.stochlite,link_idx)[0]
if(link_idx==0):
link_mass = mass # mass + 1.1
self._pybullet_client.changeDynamics(self.stochlite, 0, mass=link_mass)
elif(link_idx==11):
link_mass = mass # mass + 1.1
self._pybullet_client.changeDynamics(self.stochlite, 11, mass=link_mass)
return link_mass
def getlinkmass(self,link_idx):
'''
function to retrieve mass of any link
Args:
link_idx : link index of the robot
Ret:
m[0] : mass of the link
'''
m = self._pybullet_client.getDynamicsInfo(self.stochlite,link_idx)
return m[0]
def Set_Randomization(self, default = True, idx1 = 0, idx2=0, idx3=2, idx0=0, idx11=0, idxc=2, idxp=0, deg = 5, ori = 0): # deg = 5, changed for stochlite
'''
This function helps in randomizing the physical and dynamics parameters of the environment to robustify the policy.
These parameters include wedge incline, wedge orientation, friction, mass of links, motor strength and external perturbation force.
        Note : If the default argument is True, this function sets the above-mentioned parameters in a user-defined manner
'''
if default:
frc=[0.5,0.6,0.8]
# extra_link_mass=[0,0.05,0.1,0.15]
cli=[5.2,6,7,8]
# pertub_range = [0, -30, 30, -60, 60]
self.pertub_steps = 150
self.x_f = 0
# self.y_f = pertub_range[idxp]
self.incline_deg = deg + 2*idx1
# self.incline_ori = ori + PI/12*idx2
self.new_fric_val =frc[idx3]
self.friction = self.SetFootFriction(self.new_fric_val)
# self.FrontMass = self.SetLinkMass(0,extra_link_mass[idx0])
# self.BackMass = self.SetLinkMass(11,extra_link_mass[idx11])
self.clips = cli[idxc]
else:
avail_deg = [5, 7, 9, 11, 13]
# avail_ori = [-PI/2, PI/2]
# extra_link_mass=[0,.05,0.1,0.15]
# pertub_range = [0, -30, 30, -60, 60]
cli=[5,6,7,8]
self.pertub_steps = 150 #random.randint(90,200) #Keeping fixed for now
self.x_f = 0
# self.y_f = pertub_range[random.randint(0,2)]
self.incline_deg = avail_deg[random.randint(0, 4)]
# self.incline_ori = avail_ori[random.randint(0, 1)] #(PI/12)*random.randint(0, 4) #resolution of 15 degree, changed for stochlite
self.new_fric_val = np.round(np.clip(np.random.normal(0.6,0.08),0.55,0.8),2)
self.friction = self.SetFootFriction(self.new_fric_val)
# i=random.randint(0,3)
# self.FrontMass = self.SetLinkMass(0,extra_link_mass[i])
# i=random.randint(0,3)
# self.BackMass = self.SetLinkMass(11,extra_link_mass[i])
self.clips = np.round(np.clip(np.random.normal(6.5,0.4),5,8),2)
def randomize_only_inclines(self, default=True, idx1=0, idx2=0, deg = 5, ori = 0): # deg = 5, changed for stochlite
'''
This function only randomizes the wedge incline and orientation and is called during training without Domain Randomization
'''
if default:
self.incline_deg = deg + 2 * idx1
# self.incline_ori = ori + PI / 12 * idx2
else:
avail_deg = [5, 7, 9, 11, 13]
# avail_ori = [-PI/2, PI/2]
self.incline_deg = avail_deg[random.randint(0, 4)]
# self.incline_ori = avail_ori[random.randint(0, 1)] #(PI / 12) * random.randint(0, 4) # resolution of 15 degree
def boundYshift(self, x, y):
'''
This function bounds Y shift with respect to current X shift
Args:
x : absolute X-shift
y : Y-Shift
Ret :
y : bounded Y-shift
'''
if x > 0.5619:
if y > 1/(0.5619-1)*(x-1):
y = 1/(0.5619-1)*(x-1)
return y
def getYXshift(self, yx):
'''
This function bounds X and Y shifts in a trapezoidal workspace
'''
y = yx[:4]
x = yx[4:]
for i in range(0,4):
y[i] = self.boundYshift(abs(x[i]), y[i])
y[i] = y[i] * 0.038
x[i] = x[i] * 0.0418
yx = np.concatenate([y,x])
return yx
def transform_action(self, action):
'''
Transform normalized actions to scaled offsets
Args:
action : 15 dimensional 1D array of predicted action values from policy in following order :
[(X-shifts of FL, FR, BL, BR), (Y-shifts of FL, FR, BL, BR),
                 (Z-shifts of FL, FR, BL, BR), (Augmented cmd_vel Vx, Vy, Wz)]
Ret :
action : scaled action parameters
Note : The convention of Cartesian axes for leg frame in the codebase follows this order, Z points up, X forward and Y right.
'''
action = np.clip(action,-1,1)
# X-Shifts scaled down by 0.1
action[:4] = action[:4] * 0.1
        # Y-Shifts scaled down by 0.1, with a 0.05 m outward abduction offset added to each leg so a zero state or zero policy still abducts the legs outward.
action[4] = action[4] * 0.1 + 0.05
action[5] = action[5] * 0.1 - 0.05
action[6] = action[6] * 0.1 + 0.05
action[7] = action[7] * 0.1 - 0.05
        # Z-Shifts scaled down by 0.1
action[8:12] = action[8:12] * 0.1
action[12:] = action[12:]
# print('Scaled Action in env', action)
return action
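    # Example (sketch): a zero policy output keeps the X- and Z-shifts at zero and
    # leaves only the default +/-0.05 m abduction offsets on the Y-shifts:
    #   env.transform_action(np.zeros(15))[:8] -> [0, 0, 0, 0, 0.05, -0.05, 0.05, -0.05]
    # (`env` is a hypothetical StochliteEnv instance.)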
def get_foot_contacts(self):
'''
Retrieve foot contact information with the supporting ground and any special structure (wedge/stairs).
Ret:
foot_contact_info : 8 dimensional binary array, first four values denote contact information of feet [FR, FL, BR, BL] with the ground
while next four with the special structure.
'''
foot_ids = [5,2,11,8]
foot_contact_info = np.zeros(8)
for leg in range(4):
contact_points_with_ground = self._pybullet_client.getContactPoints(self.plane, self.stochlite, -1, foot_ids[leg])
if len(contact_points_with_ground) > 0:
foot_contact_info[leg] = 1
if self._is_wedge:
contact_points_with_wedge = self._pybullet_client.getContactPoints(self.wedge, self.stochlite, -1, foot_ids[leg])
if len(contact_points_with_wedge) > 0:
foot_contact_info[leg+4] = 1
if self._is_stairs:
for steps in self.stairs:
contact_points_with_stairs = self._pybullet_client.getContactPoints(steps, self.stochlite, -1,
foot_ids[leg])
if len(contact_points_with_stairs) > 0:
foot_contact_info[leg + 4] = 1
return foot_contact_info
def step(self, action):
'''
function to perform one step in the environment
Args:
action : array of action values
Ret:
ob : observation after taking step
reward : reward received after taking step
done : whether the step terminates the env
{} : any information of the env (will be added later)
'''
action = self.transform_action(action)
self.do_simulation(action, n_frames = self._frame_skip)
ob = self.GetObservation()
reward, done = self._get_reward()
return ob, reward[12], done,{'rewards': reward}
def CurrentVelocities(self):
'''
Returns robot's linear and angular velocities
Ret:
radial_v : linear velocity
current_w : angular velocity
'''
current_w = self.GetBaseAngularVelocity()[2]
current_v = self.GetBaseLinearVelocity()
radial_v = math.sqrt(current_v[0]**2 + current_v[1]**2)
return radial_v, current_w
def do_simulation(self, action, n_frames):
'''
        Converts action parameters to corresponding motor commands with the help of an elliptical trajectory controller
'''
self.action = action
prev_motor_angles = self.GetMotorAngles()
ii = 0
leg_m_angle_cmd = self._trajgen.generate_trajectory(action, prev_motor_angles, self.dt)
m_angle_cmd_ext = np.array(leg_m_angle_cmd)
m_vel_cmd_ext = np.zeros(12)
force_visualizing_counter = 0
for _ in range(n_frames):
ii = ii + 1
applied_motor_torque = self._apply_pd_control(m_angle_cmd_ext, m_vel_cmd_ext)
self._pybullet_client.stepSimulation()
if self._n_steps >=self.pertub_steps and self._n_steps <= self.pertub_steps + self.stride:
force_visualizing_counter += 1
if(force_visualizing_counter%7==0):
self.apply_Ext_Force(self.x_f,self.y_f,visulaize=True,life_time=0.1)
else:
self.apply_Ext_Force(self.x_f,self.y_f,visulaize=False)
contact_info = self.get_foot_contacts()
pos, ori = self.GetBasePosAndOrientation()
# Camera follows robot in the debug visualizer
self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, 10, -10, pos)
Rot_Mat = self._pybullet_client.getMatrixFromQuaternion(ori)
Rot_Mat = np.array(Rot_Mat)
Rot_Mat = np.reshape(Rot_Mat,(3,3))
plane_normal, self.support_plane_estimated_roll, self.support_plane_estimated_pitch = normal_estimator.vector_method_Stochlite(self.prev_incline_vec, contact_info, self.GetMotorAngles(), Rot_Mat)
self.prev_incline_vec = plane_normal
motor_torque = self._apply_pd_control(m_angle_cmd_ext, m_vel_cmd_ext)
motor_vel = self.GetMotorVelocities()
self.total_power = self.power_consumed(motor_torque, motor_vel)
# print('power per step', self.total_power)
# Data Logging
# log_dir = os.getcwd()
# self.logger.log_kv("Robot_roll", pos[0])
# self.logger.log_kv("Robot_pitch", pos[1])
# self.logger.log_kv("SP_roll", self.support_plane_estimated_roll)
# self.logger.log_kv("SP_pitch", self.support_plane_estimated_pitch)
# self.logger.save_log(log_dir + '/experiments/logs_sensors')
# print("estimate", self.support_plane_estimated_roll, self.support_plane_estimated_pitch)
# print("incline", self.incline_deg)
self._n_steps += 1
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos, _ = self.GetBasePosAndOrientation()
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
fov=60, aspect=float(RENDER_WIDTH)/RENDER_HEIGHT,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]  # drop the alpha channel, keep RGB
        return rgb_array
from _pyquat import *
import math
import numpy as np
from scipy import linalg
import warnings
QUAT_SMALL = 1e-8
def fromstring(*args, **kwargs):
"""
Shortcut for pyquat.Quat.from_vector(numpy.fromstring()). If you
don't provide a 'sep' argument, this method will supply the
argument count=4 to numpy.fromstring() regardless of what you
provided for it.
"""
if 'sep' in kwargs and kwargs['sep'] == '':
kwargs['count'] = 4
return Quat(*(np.fromstring(*args, **kwargs)))
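# Usage sketch for fromstring (illustrative values): parse a whitespace-separated
# quaternion string into a Quat.
#   q = fromstring("1 0 0 0", sep=" ")   # -> Quat(1.0, 0.0, 0.0, 0.0)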
def qdot(q, w, big_w = None):
"""
Compute dq/dt given some angular velocity w and initial quaternion q.
"""
if big_w is None:
big_w = big_omega(w)
if isinstance(q, Quat):
return np.dot(big_w * 0.5, q.to_vector())
else:
return np.dot(big_w * 0.5, q)
def wdot(w, J, J_inv = None):
"""
Compute dw/dt given some angular velocity w and moment of inertia J.
"""
if J_inv is None:
J_inv = linalg.inv(J)
return np.dot(J_inv, np.dot(skew(np.dot(J, w)), w))
def state_transition_matrix(w, big_w = None):
"""
Generate a state transition matrix for a quaternion based on some
angular velocity w.
"""
if big_w is None:
big_w = big_omega(w)
return big_w * 0.5
def change(*args, **kwargs):
warnings.warn("deprecated", DeprecationWarning)
return propagate(*args, **kwargs)
def propagate(q, w, dt):
"""
Change a quaternion q by some angular velocity w over some small
timestep dt.
"""
# Find magnitude of angular velocity (in r/s)
w_norm = linalg.norm(w)
if w_norm < QUAT_SMALL:
return q.copy()
return Quat(*(np.dot(expm(w, dt), q.to_vector())))
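# Usage sketch (hypothetical values; shapes and component ordering depend on the
# _pyquat API): integrate a constant body rate over one small time step.
#   q0 = Quat(1.0, 0.0, 0.0, 0.0)       # identity quaternion
#   w = np.array([0.01, 0.0, 0.0])      # body rate in rad/s
#   q1 = propagate(q0, w, 0.1)          # attitude 0.1 s later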
def matrix_propagate(T, w, dt, r = 1):
"""Propagate an attitude matrix T forward by some angular velocity w
over time step dt. This method uses a Taylor expansion of degree r
where r is between 1 and 4 inclusive.
Note that there's minimal computational difference between orders
2 and 3, as each involves a second 3x3 matrix multiplication. The
step to 4th order is also quite small, involving only an
additional vector dot product. In most cases, you will probably
want to use r = 1 or r = 4.
Args:
T: transformation matrix (3x3)
w: angular velocity vector (length 3)
dt: time step size
r: Taylor expansion degree (between 1 and 4 inclusive)
Returns:
A 3x3 matrix giving the updated transformation.
"""
wt = w*dt
wtx = skew(wt)
exp = np.identity(3) + wtx
    if r < 1 or r > 4:
        raise NotImplementedError("degree must be between 1 and 4 inclusive")
    if r >= 2:
        wtx2 = wtx.dot(wtx)
        if r == 2:
            exp += wtx2 * 0.5
        elif r == 3:
            exp += wtx2 * (0.5 - wt / 6.0)
        elif r == 4:
            wt2 = wt.T.dot(wt)
            exp += wtx2 * (0.5 - wt / 6.0 - wt2 / 24.0)
return exp.T.dot(T)
def propagate_additively(q, w, dt):
"""Change a quaternion q by some angular velocity w over some small
timestep dt, using additive propagation (q1 = q0 + dq/dt * dt)"""
q_vector = q.to_vector()
q_vector += qdot(q_vector, w) * dt
return Quat(*q_vector)
def cov(ary):
"""Compute the covariance of an array of quaternions, where each
column represents a quaternion.
"""
# If the user supplies an array of N quaternions, convert it to a 4xN array,
# since we need it in this form to get its covariance.
if ary.dtype == np.dtype(Quat):
a = np.empty((4, max(ary.shape)), dtype=np.double)
q_ary = ary.T
for i, q in enumerate(q_ary.flatten()):
a[:,i] = q.to_vector()[:,0]
ary = a
# Compute the covariance of the supplied quaternions.
return np.cov(ary)
def mean(ary, covariance = None):
"""
Compute the average quaternion using Markey, Cheng, Craissidis, and Oshman (2007)
This method takes a 4xN array and computes the average using eigenvalue decomposition.
"""
    if covariance is None:
covariance = cov(ary)
# Compute their eigenvalues and eigenvectors
eigenvalues, eigenvectors = linalg.eig(covariance)
max_index = np.argmax(eigenvalues)
    q = eigenvectors[:, max_index]  # eigenvectors are stored column-wise
mean = Quat(q[0], q[1], q[2], q[3])
mean.normalize()
return mean
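# Usage sketch: `ary` is a 4xN array whose columns hold quaternion components;
# the result is the eigenvector-based average described above.
#   q_avg = mean(quaternion_components)   # quaternion_components: shape (4, N)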
def mean_and_cov(ary):
c = cov(ary)
m = mean(ary, covariance=c)
return (m,c)
def angle_vector_cov(ary):
"""
Compute the covariance of an array of quaternions, like cov(), except use the attitude vector
representation of each.
"""
    if ary.dtype == np.dtype(Quat):
# !/usr/bin/python
# -*- coding: latin-1 -*-
# WAVELET Torrence and Compo, translated from Matlab to Python
# author: <NAME>
# INPE
# 23/01/2013
# https://github.com/mabelcalim/waipy/blob/master/Waipy%20Examples%20/waipy_pr%C3%AAt-%C3%A0-porter.ipynb
"Baseado : Torrence e Combo"
# data from http://paos.colorado.edu/research/wavelets/software.html
import numpy as np
import pylab
from pylab import *
import matplotlib.pyplot as plt
from pylab import detrend_mean
import math
""" Translating mfiles of the Torrence and Combo to python functions
1 - wavetest.m
2 - wave_bases.m
3 - wave_signif.m
4 - chisquare_inv.m
5 - chisquare_solve.m
"""
def nextpow2(i):
n = 2
while n < i:
n = n * 2
return n
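# Example: nextpow2(5) -> 8, nextpow2(8) -> 8 (powers of two are returned unchanged).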
def wave_bases(mother, k, scale, param):
"""Computes the wavelet function as a function of Fourier frequency
used for the CWT in Fourier space (Torrence and Compo, 1998)
-- This def is called automatically by def wavelet --
_____________________________________________________________________
Inputs:
mother - a string equal to 'Morlet'
    k - a vector, the Fourier frequencies
scale - a number, the wavelet scale
param - the nondimensional parameter for the wavelet function
Outputs:
daughter - a vector, the wavelet function
    fourier_factor - the ratio of Fourier period to scale
coi - a number, the cone-of-influence size at the scale
dofmin - a number, degrees of freedom for each point in the
wavelet power (Morlet = 2)
Call function:
daughter,fourier_factor,coi,dofmin = wave_bases(mother,k,scale,param)
_____________________________________________________________________
"""
n = len(k) # length of Fourier frequencies (came from wavelet.py)
"""CAUTION : default values"""
if (mother == 'Morlet'): # choose the wavelet function
param = 6 # For Morlet this is k0 (wavenumber) default is 6
k0 = param
# table 1 Torrence and Compo (1998)
expnt = -pow(scale * k - k0, 2) / 2 * (k > 0)
norm = math.sqrt(scale * k[1]) * \
(pow(math.pi, -0.25)) * math.sqrt(len(k))
daughter = [] # define daughter as a list
for ex in expnt: # for each value scale (equal to next pow of 2)
daughter.append(norm * math.exp(ex))
k = np.array(k) # turn k to array
daughter = np.array(daughter) # transform in array
daughter = daughter * (k > 0) # Heaviside step function
# scale --> Fourier
fourier_factor = (4 * math.pi) / (k0 + math.sqrt(2 + k0 * k0))
# cone-of- influence
coi = fourier_factor / math.sqrt(2)
dofmin = 2 # degrees of freedom
# ---------------------------------------------------------#
elif (mother == 'DOG'):
param = 2
m = param
expnt = -pow(scale * k, 2) / 2.0
pws = (pow(scale * k, m))
pws = np.array(pws)
"""CAUTION gamma(m+0.5) = 1.3293"""
norm = math.sqrt(scale * k[1] / 1.3293) * math.sqrt(n)
daughter = []
for ex in expnt:
daughter.append(-norm * pow(1j, m) * math.exp(ex))
daughter = np.array(daughter)
daughter = daughter[:] * pws
fourier_factor = (2 * math.pi) / math.sqrt(m + 0.5)
coi = fourier_factor / math.sqrt(2)
dofmin = 1
# ---------------------------------------------------------#
elif (mother == 'PAUL'): # Paul Wavelet
param = 4
m = param
k = np.array(k)
expnt = -(scale * k) * (k > 0)
norm = math.sqrt(scale * k[1]) * \
(2 ** m / math.sqrt(m * \
(math.factorial(2 * m - 1)))) * math.sqrt(n)
pws = (pow(scale * k, m))
pws = np.array(pws)
daughter = []
for ex in expnt:
daughter.append(norm * math.exp(ex))
daughter = np.array(daughter)
daughter = daughter[:] * pws
daughter = daughter * (k > 0) # Heaviside step function
fourier_factor = 4 * math.pi / (2 * m + 1)
coi = fourier_factor * math.sqrt(2)
dofmin = 2
else:
print ('Mother must be one of MORLET,PAUL,DOG')
return daughter, fourier_factor, coi, dofmin
def wavelet(Y, dt, param, dj, s0, j1, mother):
"""Computes the wavelet continuous transform of the vector Y,
by definition:
W(a,b) = sum(f(t)*psi[a,b](t) dt) a dilate/contract
psi[a,b](t) = 1/sqrt(a) psi(t-b/a) b displace
Only Morlet wavelet (k0=6) is used
The wavelet basis is normalized to have total energy = 1 at all scales
_____________________________________________________________________
Input:
Y - time series
dt - sampling rate
mother - the mother wavelet function
param - the mother wavelet parameter
Output:
ondaleta - wavelet bases at scale 10 dt
wave - wavelet transform of Y
period - the vector of "Fourier"periods ( in time units) that correspond
to the scales
    scale - the vector of scale indices, given by S0*2^(j*DJ), j = 0 ... J1
coi - cone of influence
Call function:
ondaleta, wave, period, scale, coi = wavelet(Y,dt,mother,param)
_____________________________________________________________________
"""
n1 = len(Y) # time series length
#s0 = 2 * dt # smallest scale of the wavelet
# dj = 0.25 # spacing between discrete scales
# J1 = int(np.floor((np.log10(n1*dt/s0))/np.log10(2)/dj))
J1 = int(np.floor(np.log2(n1 * dt / s0) / dj)) # J1+1 total os scales
# print 'Nr of Scales:', J1
# J1= 60
# pad if necessary
x = detrend_mean(Y) # extract the mean of time series
pad = 1
if (pad == 1):
base2 = nextpow2(n1) # call det nextpow2
n = base2
"""CAUTION"""
# construct wavenumber array used in transform
# simetric eqn 5
#k = np.arange(n / 2)
import math
k_pos, k_neg = [], []
for i in arange(0, int(n / 2) ):
k_pos.append(i * ((2 * math.pi) / (n * dt))) # frequencies as in eqn5
k_neg = k_pos[::-1] # inversion vector
k_neg = [e * (-1) for e in k_neg] # negative part
# delete the first value of k_neg = last value of k_pos
#k_neg = k_neg[1:-1]
print(len(k_neg),len(k_pos))
k = np.concatenate((k_pos, k_neg), axis=0) # vector of symmetric
# compute fft of the padded time series
f = np.fft.fft(x, n)
scale = []
for i in range(J1 + 1):
scale.append(s0 * pow(2, (i) * dj))
period = scale
# print period
wave = np.zeros((J1 + 1, n)) # define wavelet array
wave = wave + 1j * wave # make it complex
# loop through scales and compute transform
for a1 in range(J1 + 1):
daughter, fourier_factor, coi, dofmin = wave_bases(
mother, k, scale[a1], param) # call wave_bases
wave[a1, :] = np.fft.ifft(f * daughter) # wavelet transform
if a1 == 11:
ondaleta = daughter
# ondaleta = daughter
period = np.array(period)
period = period[:] * fourier_factor
# cone-of-influence, differ for uneven len of timeseries:
if (((n1) / 2.0).is_integer()) is True:
# create mirrored array)
mat = np.concatenate(
(arange(1, int(n1 / 2)), arange(1, int(n1 / 2))[::-1]), axis=0)
# insert zero at the begining of the array
mat = np.insert(mat, 0, 0)
mat = np.append(mat, 0) # insert zero at the end of the array
elif (((n1) / 2.0).is_integer()) is False:
# create mirrored array
mat = np.concatenate(
(arange(1, int(n1 / 2) + 1), arange(1, int(n1 / 2))[::-1]), axis=0)
# insert zero at the begining of the array
        mat = np.insert(mat, 0, 0)
        mat = np.append(mat, 0)  # insert zero at the end of the array
from __future__ import print_function, division, absolute_import
import warnings
import sys
import itertools
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import keypoints_equal, reseed
class Test_blur_gaussian_(unittest.TestCase):
def setUp(self):
reseed()
def test_integration(self):
backends = ["auto", "scipy", "cv2"]
nb_channels_lst = [None, 1, 3, 4, 5, 10]
gen = itertools.product(backends, nb_channels_lst)
for backend, nb_channels in gen:
with self.subTest(backend=backend, nb_channels=nb_channels):
image = np.zeros((5, 5), dtype=np.uint8)
if nb_channels is not None:
image = np.tile(image[..., np.newaxis], (1, 1, nb_channels))
image[2, 2] = 255
mask = image < 255
observed = iaa.blur_gaussian_(
np.copy(image), sigma=5.0, backend=backend)
assert observed.shape == image.shape
assert observed.dtype.name == "uint8"
assert np.all(observed[2, 2] < 255)
assert np.sum(observed[mask]) > (5*5-1)
if nb_channels is not None and nb_channels > 1:
for c in sm.xrange(1, observed.shape[2]):
assert np.array_equal(observed[..., c],
observed[..., 0])
def test_sigma_zero(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4, 1))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
def test_eps(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed_no_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=0)
observed_with_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=1e10)
assert not np.array_equal(observed_no_eps, observed_with_eps)
assert np.array_equal(observed_with_eps, image)
def test_ksize(self):
def side_effect(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
sigmas = [5.0, 5.0]
ksizes = [None, 3]
ksizes_expected = [2.6*5.0, 3]
gen = zip(sigmas, ksizes, ksizes_expected)
for (sigma, ksize, ksize_expected) in gen:
with self.subTest(sigma=sigma, ksize=ksize):
mock_GaussianBlur = mock.Mock(side_effect=side_effect)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
observed = iaa.blur_gaussian_(
np.copy(image),
sigma=sigma,
ksize=ksize,
backend="cv2")
assert np.array_equal(observed, image+1)
cargs = mock_GaussianBlur.call_args
assert mock_GaussianBlur.call_count == 1
assert np.array_equal(cargs[0][0], image)
assert isinstance(cargs[0][1], tuple)
assert np.allclose(
np.float32(cargs[0][1]),
np.float32([ksize_expected, ksize_expected]))
assert np.isclose(cargs[1]["sigmaX"], sigma)
assert np.isclose(cargs[1]["sigmaY"], sigma)
assert cargs[1]["borderType"] == cv2.BORDER_REFLECT_101
def test_more_than_four_channels(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_backends_called(self):
def side_effect_cv2(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
def side_effect_scipy(image, sigma, mode):
return image + 1
mock_GaussianBlur = mock.Mock(side_effect=side_effect_cv2)
mock_gaussian_filter = mock.Mock(side_effect=side_effect_scipy)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="cv2")
assert mock_GaussianBlur.call_count == 1
with mock.patch('scipy.ndimage.gaussian_filter', mock_gaussian_filter):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="scipy")
assert mock_gaussian_filter.call_count == 1
def test_backends_similar(self):
with self.subTest(nb_channels=None):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
with self.subTest(nb_channels=3):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image = np.tile(image[..., np.newaxis], (1, 1, 3))
image[1] += 1
image[2] += 2
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
for c in sm.xrange(3):
diff = np.abs(image_cv2[..., c].astype(np.int32)
- image_scipy[..., c].astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
def test_warnings(self):
# note that self.assertWarningRegex does not exist in python 2.7
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = iaa.blur_gaussian_(
np.zeros((1, 1), dtype=np.uint32),
sigma=3.0,
ksize=11,
backend="scipy")
assert len(caught_warnings) == 1
assert (
"but also provided 'ksize' argument"
in str(caught_warnings[-1].message))
def test_other_dtypes_sigma_0(self):
dtypes_to_test_list = [
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"],
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == image)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.all(image_aug == image)
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
for dtype in float_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.allclose(image_aug, image)
def test_other_dtypes_sigma_075(self):
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
dtypes_to_test_list = [
# scipy
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
# cv2
["bool",
"uint8", "uint16",
"int8", "int16", "int32",
"float16", "float32", "float64"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((5, 5), dtype=bool)
image[2, 2] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.75, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == (mask > 0.5))
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if dtype.itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
values = [5000, 1000**1, 1000**2, 1000**3]
for dtype, value in zip(float_dts, values):
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1,
# 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
max_diff = (
np.dtype(dtype).itemsize
* 0.01
* np.float128(value))
assert np.max(diff) < max_diff
def test_other_dtypes_bool_at_sigma_06(self):
# --
# blur of bool input at sigma=0.6
# --
# here we use a special mask and sigma as otherwise the only values
# ending up with >0.5 would be the ones that
# were before the blur already at >0.5
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
for backend in ["scipy", "cv2"]:
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.6, backend=backend)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
class Test_blur_mean_shift_(unittest.TestCase):
@property
def image(self):
image = [
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203]
]
image = np.array(image, dtype=np.uint8).reshape((4, 2*4, 1))
image = np.tile(image, (1, 1, 3))
return image
def test_simple_image(self):
image = self.image
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
assert 0 <= np.average(image[:, 0:4, :]) <= 5
assert 199 <= np.average(image[:, 4:, :]) <= 203
def test_hw_image(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_hw1_image(self):
image = self.image[:, :, 0:1]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.ndim == 3
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_non_contiguous_image(self):
image = self.image
image_cp = np.copy(np.fliplr(image))
image = np.fliplr(image)
assert image.flags["C_CONTIGUOUS"] is False
image_blurred = iaa.blur_mean_shift_(image, 0.5, 0.5)
assert image_blurred.shape == image_cp.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image_cp)
def test_both_parameters_are_zero(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0, 0)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_mean_shift_(np.copy(image), 1.0, 1.0)
assert image_aug.shape == image.shape
class TestGaussianBlur(unittest.TestCase):
def setUp(self):
reseed()
def test_sigma_is_zero(self):
        # no blur, shouldn't change anything
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_low_sigma(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
def test_keypoints_dont_change(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 1))]
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
def test_sigma_is_tuple(self):
# varying blur sigmas
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
def test_other_dtypes_bool_at_sigma_0(self):
# bool
aug = iaa.GaussianBlur(sigma=0)
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
def test_other_dtypes_uint_int_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
def test_other_dtypes_float_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.float16, np.float32, np.float64]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
def test_other_dtypes_bool_at_sigma_060(self):
# --
# blur of bool input at sigma=0.6
# --
        # here we use a special mask and sigma, as otherwise the only values
        # ending up at >0.5 would be the ones that were already at >0.5
        # before the blur
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=0.6)
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
image_aug = aug.augment_image(image)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
def test_other_dtypes_at_sigma_1(self):
# --
# blur of various dtypes at sigma=1.0
# and using an example value of 100 for int/uint/float and True for
# bool
# --
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[2, 2] = 100
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=1.0)
mask = np.float64([
[1, 2, 3, 2, 1],
[2, 5, 9, 5, 2],
[4, 9, 15, 9, 4],
[2, 5, 9, 5, 2],
[1, 2, 3, 2, 1]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
assert np.average(diff) <= 2
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4
assert np.average(diff) < 2.0
def test_other_dtypes_at_sigma_040(self):
# --
# blur of various dtypes at sigma=0.4
# and using an example value of 100 for int/uint/float and True for
# bool
# --
aug = iaa.GaussianBlur(sigma=0.4)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.uint8)
# mask[2, 2] = 100
# kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror")
mask = np.float64([
[0, 0, 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 3, 83, 3, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4.0
def test_other_dtypes_at_sigma_075(self):
# --
# blur of various dtypes at sigma=0.75
# and values being half-way between center and maximum for each dtype
# The goal of this test is to verify that no major loss of resolution
# happens for large dtypes.
        # Such inaccuracies would appear if float64 were used as the internal
        # computation dtype.
# --
aug = iaa.GaussianBlur(sigma=0.75)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if np.dtype(dtype).itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64]
values = [5000, 1000*1000, 1000*1000*1000]
for dtype, value in zip(float_dts, values):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
# i.e. 8, 16, 32, 64 bit)
max_diff = np.dtype(dtype).itemsize * 0.01 * np.float128(value)
assert np.max(diff) < max_diff
def test_failure_on_invalid_dtypes(self):
# assert failure on invalid dtypes
aug = iaa.GaussianBlur(sigma=1.0)
for dt in [np.float128]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
class TestAverageBlur(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestAverageBlur, self).__init__(*args, **kwargs)
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
self.base_img = base_img
self.blur3x3 = blur3x3
self.blur4x4 = blur4x4
self.blur5x5 = blur5x5
def setUp(self):
reseed()
def test_kernel_size_0(self):
        # no blur, shouldn't change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.base_img)
def test_kernel_size_3(self):
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur3x3)
def test_kernel_size_5(self):
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur5x5)
def test_kernel_size_is_tuple(self):
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_with_wider_range(self):
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 200
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
def test_kernel_size_is_stochastic_parameter(self):
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_of_tuples(self):
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
                    possible[key] = np.copy(self.base_img)
import numpy as np
import pyLDAvis
#from biterm.cbtm import oBTM
from biterm.btm import oBTM
from sklearn.feature_extraction.text import CountVectorizer
from biterm.utility import vec_to_biterms, topic_summuary
if __name__ == "__main__":
texts = open('./data/reuters.titles').read().splitlines()
# vectorize texts
vec = CountVectorizer(stop_words='english')
X = vec.fit_transform(texts).toarray()
# get vocabulary
vocab = np.array(vec.get_feature_names())
# get biterms
biterms = vec_to_biterms(X)
# create btm
btm = oBTM(num_topics=20, V=vocab)
print("\n\n Train Online BTM ..")
    for i in range(0, len(biterms), 100):  # process chunks of 100 texts
biterms_chunk = biterms[i:i + 100]
btm.fit(biterms_chunk, iterations=50)
topics = btm.transform(biterms)
print("\n\n Visualize Topics ..")
    vis = pyLDAvis.prepare(btm.phi_wz.T, topics, np.count_nonzero(X, axis=1), vocab, np.sum(X, axis=0))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from functools import partial
from tqdm import tqdm
from utils import build_knns, knns2ordered_nbrs, Timer
"""
paper: https://arxiv.org/pdf/1604.00989.pdf
original code https://github.com/varun-suresh/Clustering
To run `aro`:
1. pip install pyflann
2. 2to3 -w path/site-packages/pyflann/
Refer [No module named 'index'](https://github.com/primetang/pyflann/issues/1) for more details.
For `knn_aro`, we replace the pyflann with more advanced knn searching methods.
"""
__all__ = ['aro', 'knn_aro']
def build_index(dataset, n_neighbors):
"""
Takes a dataset, returns the "n" nearest neighbors
"""
# Initialize FLANN
import pyflann
pyflann.set_distance_type(distance_type='euclidean')
flann = pyflann.FLANN()
params = flann.build_index(dataset, algorithm='kdtree', trees=4)
#print params
nbrs, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])
return nbrs, dists
def create_neighbor_lookup(nbrs):
"""
Key is the reference face, values are the neighbors.
"""
nn_lookup = {}
for i in range(nbrs.shape[0]):
nn_lookup[i] = nbrs[i, :]
return nn_lookup
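# Illustrative usage sketch (hypothetical feature matrix; assumes pyflann is set up
# as described in the module docstring above):
#   feats = np.random.rand(1000, 256).astype(np.float32)
#   nbrs, dists = build_index(feats, n_neighbors=20)
#   nn_lookup = create_neighbor_lookup(nbrs)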
def calculate_symmetric_dist_row(nbrs, nn_lookup, row_no):
"""
This function calculates the symmetric distances for one row in the
matrix.
"""
dist_row = np.zeros([1, nbrs.shape[1]])
f1 = nn_lookup[row_no]
for idx, neighbor in enumerate(f1[1:]):
Oi = idx + 1
co_neighbor = True
try:
row = nn_lookup[neighbor]
            Oj = np.where(row == row_no)
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import sys
import h5py
vibe_dir = sys.argv[1] # folder containing the json files.
reqd_joints = [38, 43, 44, 45, 46, 47, 48, 40, 37, 33,32,31, 34,35,36, 41, 39, 27,26,25, 28,29,30, 22, 19]
idx = 0
files_order = []
joint_data_final = []
labels_final = []
for num, class_name in enumerate(sorted(os.listdir(vibe_dir))):
    for file_name in os.listdir(os.path.join(vibe_dir, class_name)):
        print(file_name)
        # build the full path; the bare file name alone is not relative to the cwd
        with open(os.path.join(vibe_dir, class_name, file_name), 'r') as f:
            data = json.load(f)
            # files_order.append(file_name)
person_list = list(data.keys())
if (person_list == []):
print(file_name)
continue
frame_vids = []
for k in (person_list):
frame_vids.append(np.array(data[k]['joints3d']).shape[0])
if (len(person_list) == 1):
frames = np.array(data[person_list[0]]['frame_ids'])
joint_data_1 = np.array(data[person_list[0]]['joints3d'])[:,reqd_joints,:]
joint_data_1 = np.reshape(joint_data_1, (joint_data_1.shape[0],75))
            final_data = np.zeros((300,150))
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 11:07:35 2017
@author: gianni
"""
from pythonradex import atomic_transition
from scipy import constants
import pytest
import numpy as np
class TestLevel():
g = 2
E = 3
level = atomic_transition.Level(g=g,E=E,number=1)
def test_LTE_level_pop(self):
T = 50
Z = 3
lte_level_pop = self.level.LTE_level_pop(Z=Z,T=T)
assert lte_level_pop == self.g*np.exp(-self.E/(constants.k*T))/Z
shape = (5,5)
T_array = np.ones(shape)*T
Z_array = np.ones(shape)*Z
lte_level_pop_array = self.level.LTE_level_pop(Z=Z_array,T=T_array)
assert lte_level_pop_array.shape == shape
assert np.all(lte_level_pop==lte_level_pop_array)
class TestLineProfile():
nu0 = 400*constants.giga
width_v = 10*constants.kilo
gauss_line_profile = atomic_transition.GaussianLineProfile(nu0=nu0,width_v=width_v)
square_line_profile = atomic_transition.SquareLineProfile(nu0=nu0,width_v=width_v)
profiles = (gauss_line_profile,square_line_profile)
test_v = np.linspace(-3*width_v,3*width_v,600)
def test_abstract_line_profile(self):
with pytest.raises(NotImplementedError):
atomic_transition.LineProfile(nu0=self.nu0,width_v=self.width_v)
def test_constant_average_over_nu(self):
for profile in self.profiles:
const_array = np.ones_like(profile.nu_array)
const_average = profile.average_over_nu_array(const_array)
assert np.isclose(const_average,1,rtol=1e-2,atol=0)
def test_asymmetric_average_over_nu(self):
for profile in self.profiles:
left_value,right_value = 0,1
asymmetric_array = np.ones_like(profile.nu_array)*left_value
asymmetric_array[:asymmetric_array.size//2] = right_value
asymmetric_average = profile.average_over_nu_array(asymmetric_array)
assert np.isclose(asymmetric_average,np.mean((left_value,right_value)),
rtol=1e-2,atol=0)
def test_square_profile_average_over_nu(self):
np.random.seed(0)
nu_array = self.square_line_profile.nu_array
random_values = np.random.rand(nu_array.size)
profile_window = np.where(self.square_line_profile.phi_nu(nu_array)==0,0,1)
expected_average = np.sum(profile_window*random_values)/np.count_nonzero(profile_window)
average = self.square_line_profile.average_over_nu_array(random_values)
assert np.isclose(expected_average,average,rtol=5e-2,atol=0)
def test_normalisation(self):
for profile in self.profiles:
integrated_line_profile = np.trapz(profile.phi_nu_array,profile.nu_array)
integrated_line_profile_v = np.trapz(profile.phi_v(self.test_v),self.test_v)
for intg_prof in (integrated_line_profile,integrated_line_profile_v):
assert np.isclose(intg_prof,1,rtol=1e-2,atol=0)
def test_profile_shape(self):
square_phi_nu = self.square_line_profile.phi_nu_array
square_phi_v = self.square_line_profile.phi_v(self.test_v)
for square_phi,x_axis,width in zip((square_phi_nu,square_phi_v),
(self.square_line_profile.nu_array,self.test_v),
(self.square_line_profile.width_nu,self.width_v)):
assert square_phi[0] == square_phi[-1] == 0
assert square_phi[square_phi.size//2] > 0
square_indices = np.where(square_phi>0)[0]
square_window_size = x_axis[square_indices[-1]] - x_axis[square_indices[0]]
assert np.isclose(square_window_size,width,rtol=5e-2,atol=0)
gauss_phi_nu = self.gauss_line_profile.phi_nu_array
gauss_phi_v = self.gauss_line_profile.phi_v(self.test_v)
for gauss_phi,x_axis,width in zip((gauss_phi_nu,gauss_phi_v),
(self.square_line_profile.nu_array,self.test_v),
(self.square_line_profile.width_nu,self.width_v)):
assert np.all(np.array((gauss_phi[0],gauss_phi[-1]))
<gauss_phi[gauss_phi.size//2])
max_index = np.argmax(gauss_phi)
half_max_index = np.argmin(np.abs(gauss_phi-np.max(gauss_phi)/2))
assert np.isclose(2*np.abs(x_axis[max_index]-x_axis[half_max_index]),
width,rtol=3e-2,atol=0)
class TestTransition():
up = atomic_transition.Level(g=1,E=1,number=1)
low = atomic_transition.Level(g=1,E=0,number=0)
line_profile_cls = atomic_transition.SquareLineProfile
A21 = 1
radiative_transition = atomic_transition.RadiativeTransition(
up=up,low=low,A21=A21)
width_v = 1*constants.kilo
Tkin_data=np.array((1,2,3,4,5))
test_emission_line = atomic_transition.EmissionLine(
up=up,low=low,A21=A21,
line_profile_cls=line_profile_cls,
width_v=width_v)
def test_radiative_transition_negative_DeltaE(self):
with pytest.raises(AssertionError):
atomic_transition.RadiativeTransition(up=self.low,low=self.up,A21=self.A21)
def test_radiative_transition_wrong_nu0(self):
wrong_nu0 = (self.up.E-self.low.E)/constants.h*1.01
with pytest.raises(AssertionError):
atomic_transition.RadiativeTransition(up=self.up,low=self.low,A21=self.A21,
nu0=wrong_nu0)
atomic_transition.EmissionLine(
up=self.up,low=self.low,A21=1,
line_profile_cls=self.line_profile_cls,
width_v=self.width_v,nu0=wrong_nu0)
def test_emission_line_constructor(self):
assert self.test_emission_line.nu0 == self.test_emission_line.line_profile.nu0
def test_constructor_from_radiative_transition(self):
emission_line = atomic_transition.EmissionLine.from_radiative_transition(
radiative_transition=self.radiative_transition,
line_profile_cls=self.line_profile_cls,
width_v=self.width_v)
assert emission_line.nu0 == self.radiative_transition.nu0
assert emission_line.B12 == self.radiative_transition.B12
with pytest.raises(AssertionError):
wrong_nu0_rad_trans = atomic_transition.RadiativeTransition(
up=self.up,low=self.low,
A21=self.A21)
wrong_nu0_rad_trans.nu0 = wrong_nu0_rad_trans.nu0*1.01
atomic_transition.EmissionLine.from_radiative_transition(
radiative_transition=wrong_nu0_rad_trans,
line_profile_cls=self.line_profile_cls,
width_v=self.width_v)
def test_coll_coeffs(self):
K21_data_sets = [np.array((2,1,4,6,3)),np.array((1,0,0,6,3))]
for K21_data in K21_data_sets:
coll_transition = atomic_transition.CollisionalTransition(
up=self.up,low=self.low,K21_data=K21_data,
Tkin_data=self.Tkin_data)
Tkin_interp = np.array((self.Tkin_data[0],self.Tkin_data[-1]))
coeff = coll_transition.coeffs(Tkin_interp)['K21']
            expected_coeff = np.array((K21_data[0],K21_data[-1]))
"""
Base NN implementation evaluating train and test performance on a homogeneous dataset
created on May 17, 2019 by <NAME>
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from low_dim.generate_environment import create_simple_classification_dataset
from low_dim.utils.accuracy_measures import compute_specificity, compute_sensitivity
from low_dim.utils.helper_utils import save_performance_results
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(50) # ensures repeatability
np.random.seed(50)
num_schedules = 50
it_1 = [True,False, False]
it_2 = [False, True, False]
it_3 = [False,False,True]
it = [it_3,it_1,it_2]
x_data, y = create_simple_classification_dataset(num_schedules, train=it[0][0], cv=it[0][1])
x = []
for each_ele in x_data:
x.append(each_ele[2:])
x = torch.Tensor(x).reshape(-1, 2)
y = torch.Tensor(y).reshape((-1, 1))
print('Toy problem generated, and data cleaned')
x_data_test, y_test = create_simple_classification_dataset(10, train=it[1][0], cv=it[1][1])
x_test = []
for each_ele in x_data_test:
x_test.append(each_ele[2:])
x_test = torch.Tensor(x_test).reshape(-1, 2)
y_test = torch.Tensor(y_test).reshape((-1, 1))
print('test set generated')
class Classifier_MLP(nn.Module):
def __init__(self, in_dim, hidden_dim, out_dim):
super(Classifier_MLP, self).__init__()
self.h1 = nn.Linear(in_dim, hidden_dim)
self.h2 = nn.Linear(hidden_dim, hidden_dim)
self.out = nn.Linear(hidden_dim, out_dim)
self.out_dim = out_dim
def forward(self, x):
x = F.relu(self.h1(x))
x = F.relu(self.h2(x))
x = F.log_softmax(self.out(x))
return x
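# Illustrative usage sketch (hypothetical tensor; the toy data below uses 2 features):
#   mlp_example = Classifier_MLP(in_dim=2, hidden_dim=10, out_dim=2)
#   log_probs = mlp_example(torch.rand(2))  # shape (2,) log-probabilities over classes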
input_size = 2 # Just the x dimension
hidden_size = 10 # The number of nodes at the hidden layer
num_classes = 2 # The number of output classes. In this case, from 0 to 1
learning_rate = 1e-3 # The speed of convergence
MLP = Classifier_MLP(in_dim=input_size, hidden_dim=hidden_size, out_dim=num_classes)
optimizer = torch.optim.Adam(MLP.parameters(), lr=learning_rate)
epochs = 100
schedule_starts = np.linspace(0, 20 * (num_schedules - 1), num=num_schedules)
for epoch in range(epochs): # loop over the dataset multiple times
# for batch, (x_train, y_train) in enumerate(train_loader):
for i in range(num_schedules):
chosen_schedule_start = int(np.random.choice(schedule_starts))
for each_t in range(chosen_schedule_start, chosen_schedule_start + 20):
optimizer.zero_grad()
pred = MLP(x[each_t])
loss = F.cross_entropy(pred.reshape(1, 2), y[each_t].long())
loss.backward()
optimizer.step()
learning_rate /= 1.1
test_losses, test_accs = [], []
# for i, (x_test, y_test) in enumerate(test_loader):
for i in range(10):
chosen_schedule_start = int(schedule_starts[i])
for each_t in range(chosen_schedule_start, chosen_schedule_start + 20):
optimizer.zero_grad()
pred = MLP(x_test[each_t])
loss = F.cross_entropy(pred.reshape(1, 2), y_test[each_t].long())
acc = (pred.argmax(dim=-1) == y_test[each_t].item()).to(torch.float32).mean()
test_losses.append(loss.item())
test_accs.append(acc.mean().item())
print('Loss: {}, Accuracy: {}'.format(np.mean(test_losses), np.mean(test_accs)))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist.py
# Author: <NAME> <<EMAIL>>
import os
import gzip
import struct
from datetime import datetime
import numpy as np
# from tensorcv.dataflow.base import RNGDataFlow
_RNG_SEED = None
def get_rng(obj=None):
"""
This function is copied from `tensorpack
<https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/utils.py>`__.
Get a good RNG seeded with time, pid and the object.
Args:
obj: some object to use to generate random seed.
Returns:
np.random.RandomState: the RNG.
"""
seed = (id(obj) + os.getpid() +
int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
if _RNG_SEED is not None:
seed = _RNG_SEED
    return np.random.RandomState(seed)
""" vector functions
"""
import numbers
import numpy
import transformations as tf
def unit_norm(xyz):
""" vector normalized to 1
"""
norm = numpy.linalg.norm(xyz)
uxyz = numpy.divide(xyz, norm)
assert numpy.allclose(numpy.linalg.norm(uxyz), 1.)
return uxyz
def unit_direction(xyz1, xyz2):
""" calculate a unit direction vector from `xyz1` to `xyz2`
"""
dxyz12 = numpy.subtract(xyz2, xyz1)
uxyz12 = unit_norm(dxyz12)
return uxyz12
def unit_perpendicular(xyz1, xyz2, orig_xyz=(0., 0., 0.), allow_parallel=True):
""" calculate a unit perpendicular on `xyz1` and `xyz2`
"""
xyz1 = numpy.subtract(xyz1, orig_xyz)
xyz2 = numpy.subtract(xyz2, orig_xyz)
    xyz3 = numpy.cross(xyz1, xyz2)
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.functions.losses` module contains the loss functions used
throughout the package. These represent mathematical functions and should thus
have properties used by the corresponding algorithms. These properties are
defined in :mod:`parsimony.functions.properties`.
Loss functions should be stateless. Loss functions may be shared and copied
and should therefore not hold anything that cannot be recomputed the next time
it is called.
Created on Mon Apr 22 10:54:29 2013
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>, <NAME> and
<NAME>
@email: <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
import numpy as np
try:
from . import properties # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.functions.properties as properties # Run as a script.
import parsimony.utils as utils
import parsimony.utils.consts as consts
__all__ = ["LinearRegression", "RidgeRegression",
"LogisticRegression", "RidgeLogisticRegression",
"LatentVariableVariance", "LinearFunction",
"LinearSVM", "NonlinearSVM"]
class LinearRegression(properties.CompositeFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.StepSize):
"""The Linear regression loss function.
Corresponds to the function
f(beta) = (1 / 2n) * ||y - X.beta||²_2.
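
    Examples
    --------
    Illustrative sketch (added for clarity; not part of the original module):
    with ``mean=True`` the value is the squared residual sum divided by 2n.

    >>> import numpy as np
    >>> from parsimony.functions.losses import LinearRegression
    >>> np.random.seed(42)
    >>> X = np.random.rand(4, 3)
    >>> y = np.random.rand(4, 1)
    >>> beta = np.random.rand(3, 1)
    >>> lr = LinearRegression(X=X, y=y, mean=True)
    >>> np.isclose(lr.f(beta),
    ...            np.sum((np.dot(X, beta) - y) ** 2) / (2.0 * 4))
    True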
"""
def __init__(self, X, y, mean=True):
"""
Parameters
----------
X : numpy array (n-by-p)
The regressor matrix.
y : numpy array (n-by-1)
The regressand vector.
mean : bool
Whether to compute the squared loss or the mean squared loss.
Default is True, the mean squared loss.
"""
self.X = X
self.y = y
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._L = None
def f(self, beta):
"""Function value.
From the interface "Function".
Parameters
----------
beta : numpy array
Regression coefficient vector. The point at which to evaluate the
function.
"""
if self.mean:
d = 2.0 * float(self.X.shape[0])
else:
d = 2.0
f = (1.0 / d) * np.sum((np.dot(self.X, beta) - self.y) ** 2)
return f
def grad(self, beta):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
beta : numpy array
The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import LinearRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> lr = LinearRegression(X=X, y=y)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(lr.grad(beta)
... - lr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
grad = np.dot(self.X.T, np.dot(self.X, beta) - self.y)
if self.mean:
grad *= 1.0 / float(self.X.shape[0])
return grad
def L(self, beta=None):
"""Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import LinearRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(10, 15)
>>> y = np.random.rand(10, 1)
>>> lr = LinearRegression(X=X, y=y)
>>> L = lr.L()
>>> L_ = lr.approx_L((15, 1), 10000)
>>> L >= L_
True
>>> (L - L_) / L # doctest: +ELLIPSIS
0.14039091...
"""
if self._L is None:
from parsimony.algorithms.nipals import RankOneSVD
# Rough limits for when RankOneSVD is faster than np.linalg.svd.
n, p = self.X.shape
if (max(n, p) > 500 and max(n, p) <= 1000
and float(max(n, p)) / min(n, p) <= 1.3) \
or (max(n, p) > 1000 and max(n, p) <= 5000
and float(max(n, p)) / min(n, p) <= 5.0) \
or (max(n, p) > 5000 and max(n, p) <= 10000
and float(max(n, p)) / min(n, p) <= 15.0) \
or (max(n, p) > 10000 and max(n, p) <= 20000
and float(max(n, p)) / min(n, p) <= 200.0) \
or max(n, p) > 10000:
v = RankOneSVD(max_iter=1000).run(self.X)
us = np.dot(self.X, v)
self._L = np.sum(us ** 2)
else:
s = np.linalg.svd(self.X,
full_matrices=False, compute_uv=False)
self._L = np.max(s) ** 2
if self.mean:
self._L /= float(n)
return self._L
def step(self, beta, index=0, **kwargs):
"""The step size to use in descent methods.
Parameters
----------
beta : numpy array
The point at which to determine the step size.
"""
return 1.0 / self.L(beta)
class RidgeRegression(properties.CompositeFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.StronglyConvex,
properties.StepSize):
"""The Ridge Regression function, i.e. a representation of
f(x) = (0.5 / n) * ||Xb - y||²_2 + lambda * 0.5 * ||b||²_2,
where ||.||²_2 is the L2 norm.
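
    Examples
    --------
    Illustrative sketch (added for clarity; not part of the original module):
    the ridge loss equals the linear regression loss plus the squared L2 penalty.

    >>> import numpy as np
    >>> from parsimony.functions.losses import RidgeRegression
    >>> np.random.seed(42)
    >>> X = np.random.rand(4, 3)
    >>> y = np.random.rand(4, 1)
    >>> beta = np.random.rand(3, 1)
    >>> rr = RidgeRegression(X=X, y=y, k=2.0, mean=True)
    >>> expected = (np.sum((np.dot(X, beta) - y) ** 2) / (2.0 * 4)
    ...             + (2.0 / 2.0) * np.sum(beta ** 2))
    >>> np.isclose(rr.f(beta), expected)
    True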
"""
# TODO: Inherit from LinearRegression and add an L2 constraint instead!
def __init__(self, X, y, k, penalty_start=0, mean=True):
"""
Parameters
----------
X : Numpy array (n-by-p). The regressor matrix.
y : Numpy array (n-by-1). The regressand vector.
k : Non-negative float. The ridge parameter.
penalty_start : Non-negative integer. The number of columns, variables
etc., to except from penalisation. Equivalently, the first
index to be penalised. Default is 0, all columns are included.
mean : Boolean. Whether to compute the squared loss or the mean
squared loss. Default is True, the mean squared loss.
"""
self.X = X
self.y = y
self.k = max(0.0, float(k))
self.penalty_start = max(0, int(penalty_start))
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._lambda_max = None
self._lambda_min = None
def f(self, beta):
"""Function value.
From the interface "Function".
Parameters
----------
beta : Numpy array. Regression coefficient vector. The point at which
to evaluate the function.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.mean:
d = 2.0 * float(self.X.shape[0])
else:
d = 2.0
f = (1.0 / d) * np.sum((np.dot(self.X, beta) - self.y) ** 2) \
+ (self.k / 2.0) * np.sum(beta_ ** 2)
return f
def grad(self, beta):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
beta : Numpy array. The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import RidgeRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> rr = RidgeRegression(X=X, y=y, k=3.14159265)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(rr.grad(beta)
... - rr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
gradOLS = np.dot((np.dot(self.X, beta) - self.y).T, self.X).T
if self.mean:
gradOLS *= 1.0 / float(self.X.shape[0])
if self.penalty_start > 0:
gradL2 = np.vstack((np.zeros((self.penalty_start, 1)),
self.k * beta[self.penalty_start:, :]))
else:
gradL2 = self.k * beta
grad = gradOLS + gradL2
return grad
def L(self, beta=None):
"""Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
"""
if self._lambda_max is None:
s = np.linalg.svd(self.X, full_matrices=False, compute_uv=False)
self._lambda_max = np.max(s) ** 2
if len(s) < self.X.shape[1]:
self._lambda_min = 0.0
else:
self._lambda_min = np.min(s) ** 2
if self.mean:
self._lambda_max /= float(self.X.shape[0])
self._lambda_min /= float(self.X.shape[0])
return self._lambda_max + self.k
@utils.deprecated("StronglyConvex.parameter")
def lambda_min(self):
"""Smallest eigenvalue of the corresponding covariance matrix.
From the interface "Eigenvalues".
"""
return self.parameter()
def parameter(self):
"""Returns the strongly convex parameter for the function.
From the interface "StronglyConvex".
"""
if self._lambda_min is None:
self._lambda_max = None
self.L() # Precompute
return self._lambda_min + self.k
def step(self, beta, index=0, **kwargs):
"""The step size to use in descent methods.
Parameters
----------
beta : Numpy array. The point at which to determine the step size.
"""
return 1.0 / self.L()
class LogisticRegression(properties.AtomicFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.StepSize):
"""The Logistic Regression loss function.
(Re-weighted) Log-likelihood (cross-entropy):
* f(beta) = -Sum wi (yi log(pi) + (1 − yi) log(1 − pi))
= -Sum wi (yi xi' beta − log(1 + e(x_i'beta))),
* grad f(beta) = -Sum wi[ xi (yi - pi)] + k beta,
where pi = p(y=1 | xi, beta) = 1 / (1 + exp(-x_i'beta)) and wi is the
weight for sample i.
See [Hastie 2009, p.: 102, 119 and 161, Bishop 2006 p.: 206] for details.
Parameters
----------
X : Numpy array (n-by-p). The regressor matrix.
y : Numpy array (n-by-1). The regressand vector.
weights: Numpy array (n-by-1). The sample's weights.
    mean : Boolean. Whether to compute the total loss or the mean (average)
        loss. Default is True, the mean loss.
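
    Examples
    --------
    Illustrative sketch (added for clarity; not part of the original module):
    with unit weights and ``mean=True`` the value is the mean negative
    log-likelihood.

    >>> import numpy as np
    >>> from parsimony.functions.losses import LogisticRegression
    >>> np.random.seed(42)
    >>> X = np.random.rand(4, 3)
    >>> y = np.random.randint(0, 2, (4, 1)).astype(float)
    >>> beta = np.random.rand(3, 1)
    >>> lr = LogisticRegression(X=X, y=y, mean=True)
    >>> Xbeta = np.dot(X, beta)
    >>> np.isclose(lr.f(beta),
    ...            -np.mean(y * Xbeta - np.log(1 + np.exp(Xbeta))))
    True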
"""
def __init__(self, X, y, weights=None, mean=True):
self.X = X
self.y = y
if weights is None:
# TODO: Make the weights sparse.
# weights = np.eye(self.X.shape[0])
weights = np.ones(y.shape).reshape(y.shape)
# TODO: Allow the weight vector to be a list.
self.weights = weights
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._L = None
def f(self, beta):
"""Function value at the point beta.
From the interface "Function".
Parameters
----------
beta : Numpy array. Regression coefficient vector. The point at which
to evaluate the function.
"""
Xbeta = np.dot(self.X, beta)
negloglike = -np.sum(self.weights *
((self.y * Xbeta) - np.log(1 + np.exp(Xbeta))))
if self.mean:
negloglike /= float(self.X.shape[0])
return negloglike
def grad(self, beta):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
beta : Numpy array. The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import LogisticRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.randint(0, 2, (100, 1))
>>> lr = LogisticRegression(X=X, y=y, mean=True)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(lr.grad(beta)
... - lr.approx_grad(beta, eps=1e-4)) < 5e-10
True
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.randint(0, 2, (100, 1))
>>> lr = LogisticRegression(X=X, y=y, mean=False)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(lr.grad(beta)
... - lr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
Xbeta = np.dot(self.X, beta)
# pi = 1.0 / (1.0 + np.exp(-Xbeta))
pi = np.reciprocal(1.0 + np.exp(-Xbeta))
grad = -np.dot(self.X.T, self.weights * (self.y - pi))
if self.mean:
grad *= 1.0 / float(self.X.shape[0])
return grad
def L(self, beta=None):
"""Lipschitz constant of the gradient.
Returns the maximum eigenvalue of (1 / 4) * X'WX.
From the interface "LipschitzContinuousGradient".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import LogisticRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(10, 15)
>>> y = np.random.randint(0, 2, (10, 1))
>>> lr = LogisticRegression(X=X, y=y, mean=True)
>>> L = lr.L()
>>> L_ = lr.approx_L((15, 1), 10000)
>>> L >= L_
True
>>> (L - L_) / L # doctest: +ELLIPSIS
0.45110910...
>>> lr = LogisticRegression(X=X, y=y, mean=False)
>>> L = lr.L()
>>> L_ = lr.approx_L((15, 1), 10000)
>>> L >= L_
True
>>> (L - L_) / L # doctest: +ELLIPSIS
0.43030668...
"""
if self._L is None:
# pi(x) * (1 - pi(x)) <= 0.25 = 0.5 * 0.5
PWX = 0.5 * np.sqrt(self.weights) * self.X
# TODO: Use RankOneSVD for speedup!
s = np.linalg.svd(PWX, full_matrices=False, compute_uv=False)
self._L = np.max(s) ** 2 # TODO: CHECK
if self.mean:
self._L /= float(self.X.shape[0])
return self._L
def step(self, beta, index=0, **kwargs):
"""The step size to use in descent methods.
Parameters
----------
beta : Numpy array. The point at which to determine the step size.
"""
return 1.0 / self.L()
class RidgeLogisticRegression(properties.CompositeFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.StepSize):
"""The Logistic Regression loss function with a squared L2 penalty.
Ridge (re-weighted) log-likelihood (cross-entropy):
* f(beta) = -loglik + k/2 * ||beta||^2_2
= -Sum wi (yi log(pi) + (1 − yi) log(1 − pi)) + k/2*||beta||^2_2
= -Sum wi (yi xi' beta − log(1 + e(xi' beta))) + k/2*||beta||^2_2
* grad f(beta) = -Sum wi[ xi (yi - pi)] + k beta
pi = p(y=1|xi, beta) = 1 / (1 + exp(-xi' beta))
wi: sample i weight
[Hastie 2009, p.: 102, 119 and 161, Bishop 2006 p.: 206]
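
    Examples
    --------
    Illustrative sketch (added for clarity; not part of the original module):
    the value is the logistic loss plus ``k/2 * ||beta||^2_2``.

    >>> import numpy as np
    >>> from parsimony.functions.losses import (LogisticRegression,
    ...                                         RidgeLogisticRegression)
    >>> np.random.seed(42)
    >>> X = np.random.rand(4, 3)
    >>> y = np.random.randint(0, 2, (4, 1)).astype(float)
    >>> beta = np.random.rand(3, 1)
    >>> rlr = RidgeLogisticRegression(X=X, y=y, k=2.0, mean=True)
    >>> lr = LogisticRegression(X=X, y=y, mean=True)
    >>> np.isclose(rlr.f(beta), lr.f(beta) + (2.0 / 2.0) * np.sum(beta ** 2))
    True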
"""
def __init__(self, X, y, k=0.0, weights=None, penalty_start=0, mean=True):
"""
Parameters
----------
X : Numpy array (n-by-p). The regressor matrix. Training vectors, where
n is the number of samples and p is the number of features.
y : Numpy array (n-by-1). The regressand vector. Target values (class
labels in classification).
k : Non-negative float. The ridge parameter.
weights: Numpy array (n-by-1). The sample's weights.
penalty_start : Non-negative integer. The number of columns, variables
etc., to except from penalisation. Equivalently, the first
index to be penalised. Default is 0, all columns are included.
mean : Boolean. Whether to compute the mean loss or not. Default is
True, the mean loss is computed.
"""
self.X = X
self.y = y
self.k = max(0.0, float(k))
if weights is None:
weights = np.ones(y.shape) # .reshape(y.shape)
self.weights = weights
self.penalty_start = max(0, int(penalty_start))
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._L = None
def f(self, beta):
"""Function value of Logistic regression at beta.
Parameters
----------
beta : Numpy array. Regression coefficient vector. The point at which
to evaluate the function.
"""
# TODO check the correctness of the re-weighted loglike
Xbeta = np.dot(self.X, beta)
negloglike = -np.sum(self.weights *
((self.y * Xbeta) - np.log(1 + np.exp(Xbeta))))
if self.mean:
negloglike *= 1.0 / float(self.X.shape[0])
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return negloglike + (self.k / 2.0) * np.sum(beta_ ** 2)
def grad(self, beta):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
beta : Numpy array. The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import RidgeLogisticRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> y[y < 0.5] = 0.0
>>> y[y >= 0.5] = 1.0
>>> rr = RidgeLogisticRegression(X=X, y=y, k=2.71828182, mean=True)
>>> beta = np.random.rand(150, 1)
>>> round(np.linalg.norm(rr.grad(beta)
... - rr.approx_grad(beta, eps=1e-4)), 11) < 1e-9
True
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> y[y < 0.5] = 0.0
>>> y[y >= 0.5] = 1.0
>>> rr = RidgeLogisticRegression(X=X, y=y, k=2.71828182, mean=False)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(rr.grad(beta)
... - rr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
Xbeta = np.dot(self.X, beta)
# pi = 1.0 / (1.0 + np.exp(-Xbeta))
pi = np.reciprocal(1.0 + np.exp(-Xbeta))
grad = -np.dot(self.X.T, self.weights * (self.y - pi))
if self.mean:
grad *= 1.0 / float(self.X.shape[0])
if self.penalty_start > 0:
gradL2 = np.vstack((np.zeros((self.penalty_start, 1)),
self.k * beta[self.penalty_start:, :]))
else:
gradL2 = self.k * beta
grad = grad + gradL2
return grad
# return -np.dot(self.X.T,
# np.dot(self.W, (self.y - pi))) \
# + self.k * beta
def L(self, beta=None):
"""Lipschitz constant of the gradient.
Returns the maximum eigenvalue of (1 / 4) * X'WX.
From the interface "LipschitzContinuousGradient".
"""
if self._L is None:
# pi(x) * (1 - pi(x)) <= 0.25 = 0.5 * 0.5
PWX = 0.5 * np.sqrt(self.weights) * self.X # TODO: CHECK WITH FOUAD
# PW = 0.5 * np.eye(self.X.shape[0]) ## miss np.sqrt(self.W)
# PW = 0.5 * np.sqrt(self.W)
# PWX = np.dot(PW, self.X)
# TODO: Use RankOneSVD for speedup!
s = np.linalg.svd(PWX, full_matrices=False, compute_uv=False)
self._L = np.max(s) ** 2 # TODO: CHECK
if self.mean:
self._L /= float(self.X.shape[0])
self._L += self.k # TODO: CHECK
return self._L
def step(self, beta, index=0, **kwargs):
"""The step size to use in descent methods.
Parameters
----------
beta : Numpy array. The point at which to determine the step size.
"""
return 1.0 / self.L()
class LatentVariableVariance(properties.Function,
properties.Gradient,
properties.StepSize,
properties.LipschitzContinuousGradient):
# TODO: Handle mean here?
def __init__(self, X, unbiased=True):
self.X = X
if unbiased:
self._n = float(X.shape[0] - 1.0)
else:
self._n = float(X.shape[0])
self.reset()
def reset(self):
self._lambda_max = None
def f(self, w):
"""Function value.
From the interface "Function".
Examples
--------
>>> import numpy as np
>>> from parsimony.algorithms.nipals import RankOneSVD
>>> from parsimony.functions.losses import LatentVariableVariance
>>>
>>> np.random.seed(1337)
>>> X = np.random.rand(50, 150)
>>> w = np.random.rand(150, 1)
>>> var = LatentVariableVariance(X)
>>> round(var.f(w), 12)
-1295.854475188615
>>> round(-np.dot(w.T, np.dot(X.T, np.dot(X, w)))[0, 0] / 49.0, 12)
-1295.854475188615
"""
Xw = np.dot(self.X, w)
        wXXw = np.dot(Xw.T, Xw)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
from urllib.parse import urlencode
from io import BytesIO
from astropy.utils.data import download_file
from astropy import units as u
from astropy.io import fits
from astropy.coordinates import ICRS, Galactic, BaseCoordinateFrame
from astropy.coordinates import SkyCoord, Angle, Longitude, Latitude
from astropy import wcs
import cdshealpix
try:
from astropy_healpix import HEALPix
except ImportError:
pass
from ..abstract_moc import AbstractMOC
from ..interval_set import IntervalSet
from .. import mocpy
from .boundaries import Boundaries
from .plot import fill, border
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "CDS, Centre de Données astronomiques de Strasbourg"
__license__ = "BSD 3-Clause License"
__email__ = "<EMAIL>, <EMAIL>, <EMAIL>"
class MOC(AbstractMOC):
"""
Multi-order spatial coverage class.
A MOC describes the coverage of an arbitrary region on the unit sphere.
MOCs are usually used for describing the global coverage of catalog/image surveys such as GALEX or SDSS.
A MOC corresponds to a list of `HEALPix <https://healpix.sourceforge.io/>`__ cells at different depths.
This class gives you the possibility to:
1. Define `~mocpy.moc.MOC` objects:
- From a FITS file that stores HEALPix cells (see `load(path, 'fits')`).
    - Directly from a list of HEALPix cells expressed either as a numpy structured array (see `from_healpix_cells`) or a simple
    python dictionary (see `from_json`).
- From a list of sky coordinates (see `from_skycoords`, `from_lonlat`).
- From a convex/concave polygon (see `from_polygon`).
    - From a cone (will be implemented in a future version).
2. Perform fast logical operations between `~mocpy.moc.MOC` objects:
- The `intersection`
- The `union`
- The `difference`
- The `complement`
3. Plot the `~mocpy.moc.MOC` objects:
- Draw the MOC with its HEALPix cells (see `fill`)
- Draw the perimeter of a MOC (see `border`)
4. Get the sky coordinates defining the border(s) of `~mocpy.moc.MOC` objects (see `get_boundaries`).
5. Serialize `~mocpy.moc.MOC` objects to `astropy.io.fits.HDUList` or JSON dictionary and save it to a file.
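
    A minimal illustrative sketch (assumes the `from_json` and `union` methods
    listed above; the cell values are arbitrary):

    >>> from mocpy import MOC
    >>> moc_a = MOC.from_json({"8": [45, 78, 79]})
    >>> moc_b = MOC.from_json({"8": [79, 80]})
    >>> moc_union = moc_a.union(moc_b)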
"""
    # I introduced, but do not like, the double `make_consistent` (MOC + IntervalSet),
    # but `coverage_merge_time_intervals` is no longer generic
    # and I can't remove `make_consistent` from `IntervalSet` without changing tests.
def __init__(self, interval_set=None, make_consistent=True, min_depth=None):
"""
Moc constructor.
The merging step of the overlapping intervals is done here.
Parameters
----------
intervals : `~numpy.ndarray`
a N x 2 numpy array representing the set of intervals.
make_consistent : bool, optional
True by default. Remove the overlapping intervals that makes
a valid MOC (i.e. can be plot, serialized, manipulated).
"""
super(MOC, self).__init__(interval_set)
if make_consistent:
if min_depth is None:
min_depth = -1
min_depth = np.int8(min_depth)
self._merge_intervals(min_depth)
def _merge_intervals(self, min_depth):
if not self.empty():
self._interval_set._intervals = mocpy.coverage_merge_hpx_intervals(self._interval_set._intervals, min_depth)
@property
def max_order(self):
"""
Depth of the smallest HEALPix cells found in the MOC instance.
"""
depth = mocpy.hpx_coverage_depth(self._interval_set._intervals)
depth = np.uint8(depth)
return depth
def refine_to_order(self, min_depth):
intervals = mocpy.coverage_merge_hpx_intervals(self._interval_set._intervals, min_depth)
interval_set = IntervalSet(intervals, make_consistent=False)
return MOC(interval_set, make_consistent=False)
def complement(self):
"""
Returns the complement of the MOC instance.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
intervals = mocpy.hpx_coverage_complement(self._interval_set._intervals)
interval_set = IntervalSet(intervals, make_consistent=False)
return MOC(interval_set, make_consistent=False)
def extended(self):
"""
Returns the MOC extended by the external border made of cells at the MOC maximum depth.
The only difference with respect to `add_neighbours` is that `extended` returns a new MOC
instead of modifying the existing one.
Returns
-------
moc : `~mocpy.moc.MOC`
The extended MOC
"""
intervals = mocpy.hpx_coverage_expand(self.max_order, self._interval_set._intervals)
interval_set = IntervalSet(intervals, make_consistent=False)
return MOC(interval_set, make_consistent=False)
def contracted(self):
"""
Returns the MOC contracted by removing the internal border made of cells at the MOC maximum depth.
The only difference with respect to `remove_neighbours` is that `contracted` returns a new MOC
instead of modifying the existing one.
Returns
-------
moc : `~mocpy.moc.MOC`
The extended MOC
"""
intervals = mocpy.hpx_coverage_contract(self.max_order, self._interval_set._intervals)
interval_set = IntervalSet(intervals, make_consistent=False)
return MOC(interval_set, make_consistent=False)
def split_count(self):
"""
Returns the number of disjoint MOCs the given MOC contains.
"""
return mocpy.hpx_coverage_split_count(self.max_order, self._interval_set._intervals)
def split(self):
"""
        Returns the disjoint MOCs this MOC contains.
WARNING
-------
Please use `~mocpy.moc.MOC.split_count` first to ensure the number is not too high
"""
list_of_intervals = mocpy.hpx_coverage_split(self.max_order, self._interval_set._intervals)
mocs = map(lambda intervals: MOC(IntervalSet(intervals, make_consistent=False), make_consistent=False), list_of_intervals)
return mocs
def degrade_to_order(self, new_order):
"""
Degrades the MOC instance to a new, less precise, MOC.
The maximum depth (i.e. the depth of the smallest HEALPix cells that can be found in the MOC) of the
degraded MOC is set to ``new_order``.
Parameters
----------
new_order : int
Maximum depth of the output degraded MOC.
Returns
-------
moc : `~mocpy.moc.MOC`
The degraded MOC.
"""
intervals = mocpy.hpx_coverage_degrade(self._interval_set._intervals, new_order)
return MOC(IntervalSet(intervals, make_consistent=False), make_consistent=False)
def contains(self, ra, dec, keep_inside=True):
"""
Returns a boolean mask array of the positions lying inside (or outside) the MOC instance.
Parameters
----------
ra : `astropy.coordinates.Longitude` or its supertype `astropy.units.Quantity`
Right ascension array
dec : `astropy.coordinates.Latitude` or its supertype `astropy.units.Quantity`
Declination array
keep_inside : bool, optional
True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside``
is false, contains will return the mask of the coordinates lying outside the MOC.
Returns
-------
array : `~np.ndarray`
A mask boolean array
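
        Example (illustrative sketch; ``moc`` stands for an existing MOC instance):

        >>> import astropy.units as u
        >>> mask = moc.contains(ra=[10.0, 25.5] * u.deg, dec=[29.1, -4.5] * u.deg)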
"""
max_depth = self.max_order
m = np.zeros(3 << (2*(max_depth + 1)), dtype=bool)
pix_id = mocpy.flatten_pixels(self._interval_set._intervals, max_depth)
m[pix_id] = True
if not keep_inside:
m = np.logical_not(m)
ra = ra if isinstance(ra, Longitude) else Longitude(ra)
dec = dec if isinstance(dec, Latitude) else Latitude(dec)
pix = cdshealpix.lonlat_to_healpix(ra, dec, max_depth)
return m[pix]
## TODO: implement: def contains_including_surrounding(self, ra, dec, distance)
def add_neighbours(self):
"""
Extends the MOC instance so that it includes the HEALPix cells touching its border.
The depth of the HEALPix cells added at the border is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self extended by one degree of neighbours.
"""
intervals = mocpy.hpx_coverage_expand(self.max_order, self._interval_set._intervals)
self._interval_set = IntervalSet(intervals, make_consistent=False)
return self
def remove_neighbours(self):
"""
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
"""
        intervals = mocpy.hpx_coverage_contract(self.max_order, self._interval_set._intervals)
self._interval_set = IntervalSet(intervals, make_consistent=False)
return self
def fill(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC on a matplotlib axis.
This performs the projection of the cells from the world coordinate system to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`.
Examples
--------
>>> from mocpy import MOC, World2ScreenMPL
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.load(filename, 'fits')
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with World2ScreenMPL(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call fill giving the matplotlib axe and the `~astropy.wcs.WCS` object.
... # We will set the matplotlib keyword linewidth to 0 so that it does not plot
... # the border of each HEALPix cell.
... # The color can also be specified along with an alpha value.
... moc.fill(ax=ax, wcs=wcs, linewidth=0, alpha=0.5, fill=True, color="green")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
fill.fill(self, ax, wcs, **kw_mpl_pathpatch)
def border(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC border(s) on a matplotlib axis.
This performs the projection of the sky coordinates defining the perimeter of the MOC to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`
Examples
--------
>>> from mocpy import MOC, World2ScreenMPL
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.load(filename, 'fits')
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with World2ScreenMPL(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call border giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... moc.border(ax=ax, wcs=wcs, alpha=0.5, color="red")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
border.border(self, ax, wcs, **kw_mpl_pathpatch)
def get_boundaries(self, order=None):
"""
Returns the sky coordinates defining the border(s) of the MOC.
The border(s) are expressed as a list of SkyCoord.
Each SkyCoord refers to the coordinates of one border of the MOC (i.e.
either a border of a connected MOC part or a border of a hole
located in a connected MOC part).
This function is currently not stable: encoding a vertex of a
HEALPix cell (N, E, S, W) should not depend on the position of the
vertex but rather on the uniq value (+ 2 bits to encode the direction
of the vertex).
Parameters
----------
order : int
The depth of the MOC before computing its boundaries.
A shallow depth leads to a faster computation.
By default the maximum depth of the MOC is taken.
Raises
------
DeprecationWarning
This method is not stable and not tested! A future more stable algorithm will be implemented!
Returns
-------
coords: [`~astropy.coordinates.SkyCoord`]
A list of `~astropy.coordinates.SkyCoord` each describing one border.
"""
import warnings
warnings.warn('This method is not stable. A future more stable algorithm will be implemented!', DeprecationWarning)
return Boundaries.get(self, order)
@classmethod
def from_fits_image(cls, hdu, max_norder, mask=None):
"""
Creates a `~mocpy.moc.MOC` from an image stored as a FITS file.
Parameters
----------
hdu : HDU object
HDU containing the data of the image
max_norder : int
The moc resolution.
mask : `numpy.ndarray`, optional
A boolean array of the same size as the image, where pixels set to True are part of
the final MOC and pixels set to False are not.
Returns
-------
moc : `~mocpy.moc.MOC`
The resulting MOC.
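Examples
--------
A minimal sketch (illustrative; the file name is hypothetical):
>>> from astropy.io import fits
>>> with fits.open('image.fits') as hdul:
...     moc = MOC.from_fits_image(hdu=hdul[0], max_norder=10)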
"""
# Only take the first HDU
header = hdu.header
height = header['NAXIS2']
width = header['NAXIS1']
# Compute a WCS from the header of the image
w = wcs.WCS(header)
if mask is None:
data = hdu.data
# A mask is computed discarding nan floating values
mask = np.isfinite(data)
# If the BLANK keyword is set to a value then we mask those
# pixels too
if header.get('BLANK') is not None:
discard_val = header['BLANK']
# We keep the finite values that are not equal to the BLANK value
mask = mask & (data != discard_val)
y, x = np.where(mask)
pix = np.dstack((x, y))[0]
world = w.wcs_pix2world(pix, 0)
# Remove coord containing inf/nan values
good = np.isfinite(world)
# A coordinate is kept only if both of its components are finite
good = good[:, 0] & good[:, 1]
world = world[good]
# Get the frame from the wcs
frame = wcs.utils.wcs_to_celestial_frame(w)
skycrd = SkyCoord(
world,
unit="deg",
frame=frame
)
# Compute the order based on the CDELT
c1 = header['CDELT1']
c2 = header['CDELT2']
max_res_px = np.sqrt(c1*c1 + c2*c2) * np.pi / 180.0
max_depth_px = int(np.floor(np.log2(np.pi / (3 * max_res_px * max_res_px)) / 2))
max_norder = min(max_norder, max_depth_px)
moc = MOC.from_lonlat(
lon=skycrd.icrs.ra,
lat=skycrd.icrs.dec,
max_norder=max_norder
)
return moc
@classmethod
def from_fits_images(cls, path_l, max_norder):
"""
Loads a MOC from a set of FITS file images.
Assumes the image data is stored in the first HDU of each FITS file.
Call `~mocpy.moc.MOC.from_fits_image` directly to pass an HDU other than the first one.
Parameters
----------
path_l : [str]
A list of paths to the FITS image files.
max_norder : int
The MOC resolution.
Returns
-------
moc : `~mocpy.moc.MOC`
The union of all the MOCs created from the paths found in ``path_l``.
"""
moc = MOC()
for filename in path_l:
with fits.open(filename) as hdul:
current_moc = MOC.from_fits_image(hdu=hdul[0], max_norder=max_norder)
moc = moc.union(current_moc)
return moc
@classmethod
def from_vizier_table(cls, table_id, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a VizieR catalog ID.
Parameters
----------
table_id : str
The VizieR table identifier.
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
nside_possible_values = (8, 16, 32, 64, 128, 256, 512)
if nside not in nside_possible_values:
raise ValueError('Bad value for nside. Must be in {0}'.format(nside_possible_values))
result = cls.from_ivorn('ivo://CDS/' + table_id, nside)
return result
MOC_SERVER_ROOT_URL = 'http://alasky.unistra.fr/MocServer/query'
@classmethod
def from_ivorn(cls, ivorn, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a given ivorn.
Parameters
----------
ivorn : str
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
return cls.from_url('%s?%s' % (MOC.MOC_SERVER_ROOT_URL,
urlencode({
'ivorn': ivorn,
'get': 'moc',
'order': int(np.log2(nside))
})))
@classmethod
def from_url(cls, url):
"""
Creates a `~mocpy.moc.MOC` object from a given url.
Parameters
----------
url : str
The url of a FITS file storing a MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
path = download_file(url, show_progress=False, timeout=60)
return cls.load(path, 'fits')
@classmethod
def from_skycoords(cls, skycoords, max_norder):
"""
Creates a MOC from an `astropy.coordinates.SkyCoord`.
Parameters
----------
skycoords : `astropy.coordinates.SkyCoord`
The sky coordinates that will belong to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
return cls.from_lonlat(lon=skycoords.icrs.ra, lat=skycoords.icrs.dec, max_norder=max_norder)
@classmethod
def from_lonlat(cls, lon, lat, max_norder):
"""
Creates a MOC from astropy lon, lat `astropy.units.Quantity`.
Parameters
----------
lon : `astropy.coordinates.Longitude` or its supertype `astropy.units.Quantity`
The longitudes of the sky coordinates belonging to the MOC.
lat : `astropy.coordinates.Latitude` or its supertype `astropy.units.Quantity`
The latitudes of the sky coordinates belonging to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
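Examples
--------
A minimal sketch (illustrative coordinate values):
>>> import astropy.units as u
>>> from astropy.coordinates import Longitude, Latitude
>>> lon = Longitude([1., 2., 3.] * u.deg)
>>> lat = Latitude([10., 11., 12.] * u.deg)
>>> moc = MOC.from_lonlat(lon=lon, lat=lat, max_norder=10)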
"""
intervals = mocpy.from_lonlat(max_norder, lon.to_value(u.rad).astype(np.float64), lat.to_value(u.rad).astype(np.float64))
return cls(IntervalSet(intervals, make_consistent=False), make_consistent=False)
@classmethod
def from_multiordermap_fits_file(cls, path,
cumul_from=0.0, cumul_to=1.0,
asc=False, strict=True, no_split=True, reverse_decent=False):
"""
Creates a MOC from a multi-order map FITS file.
HEALPix cells are first sorted by their values.
The MOC contains the cells from which the cumulative value is between
``cumul_from`` and ``cumul_to``.
Cells lying on the boundary are recursively split and added
until the depth of the cells is equal to ``max_norder``.
For compatibility with Aladin, use ``no_split=False`` and ``reverse_decent=True``.
Remark: using ``no_split=False``, the way the cells overlapping with the low and high thresholds are split
is somewhat arbitrary.
Parameters
----------
path : str
The path to the multi-order map FITS file to read.
cumul_from : float
Cumulative value from which cells will be added to the MOC
cumul_to : float
Cumulative value to which cells will be added to the MOC
asc : boolean
If True, the cumulative value is computed from the lowest to the highest densities instead of from the highest to the lowest.
strict : boolean
If True, (sub-)cells overlapping the ``cumul_from`` or ``cumul_to`` values are not added.
no_split : boolean
If True, cells overlapping the ``cumul_from`` or ``cumul_to`` values are not recursively split.
reverse_decent : boolean
If True, perform the recursive descent from the highest cell number to the lowest (to be compatible with Aladin).
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
intervals = mocpy.spatial_moc_from_multiordermap_fits_file(
path,
np.float64(cumul_from),
np.float64(cumul_to),
asc,
strict,
no_split,
reverse_decent
)
return cls(IntervalSet(intervals, make_consistent=False), make_consistent=False)
@classmethod
def from_valued_healpix_cells(cls,
uniq, values, max_depth=None,
cumul_from=0.0, cumul_to=1.0,
asc=False, strict=True, no_split=True, reverse_decent=False):
"""
Creates a MOC from a list of uniq associated with values.
HEALPix cells are first sorted by their values.
The MOC contains the cells from which the cumulative value is between
``cumul_from`` and ``cumul_to``.
Cells lying on the boundary are recursively split and added
until the depth of the cells is equal to ``max_norder``.
For compatibility with Aladin, use ``no_split=False`` and ``reverse_decent=True``.
Remark: using ``no_split=False``, the way the cells overlapping with the low and high thresholds are split
is somewhat arbitrary.
Parameters
----------
uniq : `numpy.ndarray`
HEALPix cell indices written in the uniq scheme. dtype must be np.uint64
values : `numpy.ndarray`
Probabilities associated with each ``uniq`` cell. dtype must be np.float64
max_depth : int, optional
The max depth of the MOC. If a depth is given, degrade the MOC to this depth before returning it to the user.
Otherwise choose as ``max_depth`` the depth corresponding to the smallest HEALPix cell found in ``uniq``.
cumul_from : float
Cumulative value from which cells will be added to the MOC
cumul_to : float
Cumulative value to which cells will be added to the MOC
asc : boolean
If True, the cumulative value is computed from the lowest to the highest densities instead of from the highest to the lowest.
strict : boolean
If True, (sub-)cells overlapping the ``cumul_from`` or ``cumul_to`` values are not added.
no_split : boolean
If True, cells overlapping the ``cumul_from`` or ``cumul_to`` values are not recursively split.
reverse_decent : boolean
If True, perform the recursive descent from the highest cell number to the lowest (to be compatible with Aladin).
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
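Examples
--------
A minimal sketch (illustrative uniq values and probabilities):
>>> import numpy as np
>>> uniq = np.array([4, 5, 6, 7], dtype=np.uint64)  # four depth-0 cells
>>> values = np.array([0.5, 0.3, 0.15, 0.05], dtype=np.float64)
>>> moc = MOC.from_valued_healpix_cells(uniq, values, cumul_from=0.0, cumul_to=0.9)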
"""
max_depth_tile = 0
if uniq.size > 0:
# Get the depth of the deepest cell present in the list.
# Larger uniq values correspond to deeper HEALPix cells.
max_depth_tile = int(np.log2(uniq.max() >> 2)) >> 1
assert max_depth_tile >= 0 and max_depth_tile <= 29, "Invalid uniq numbers. Too large or negative uniq numbers might be the cause."
# Create the MOC at the max_depth equals to the smallest cell
# found in the uniq array
intervals = mocpy.from_valued_hpx_cells(
np.uint8(max_depth_tile),
uniq.astype(np.uint64),
values.astype(np.float64),
np.float64(cumul_from),
np.float64(cumul_to),
asc,
strict,
no_split,
reverse_decent
)
moc = cls(IntervalSet(intervals, make_consistent=False), make_consistent=False)
# Degrade the MOC to the depth requested by the user
if max_depth is not None:
assert max_depth >= 0 and max_depth <= 29, "Max depth must be in [0, 29]"
moc = moc.degrade_to_order(max_depth)
return moc
@classmethod
def from_elliptical_cone(cls, lon, lat, a, b, pa, max_depth, delta_depth=2):
"""
Creates a MOC from an elliptical cone
The ellipse is centered around the (`lon`, `lat`) position. `a` (resp. `b`) corresponds
to the semi-major axis magnitude (resp. semi-minor axis magnitude). `pa` is expressed as a
`~astropy.coordinates.Angle` and defines the position angle of the elliptical cone.
Parameters
----------
lon : `astropy.coordinates.Longitude` or its supertype `astropy.units.Quantity`
The longitude of the center of the elliptical cone.
lat : `astropy.coordinates.Latitude` or its supertype `astropy.units.Quantity`
The latitude of the center of the elliptical cone.
a : `astropy.coordinates.Angle`
The semi-major axis angle of the elliptical cone.
b : `astropy.coordinates.Angle`
The semi-minor axis angle of the elliptical cone.
pa : `astropy.coordinates.Angle`
The position angle (i.e. the angle between the north and the semi-major axis, east-of-north).
max_depth : int
Maximum HEALPix cell resolution.
delta_depth : int, optional
To control the approximation, you can choose to perform the computations at a deeper
depth using the ``delta_depth`` parameter.
The depth at which the computations will be made will therefore be equal to
``max_depth`` + ``delta_depth``.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
Examples
--------
>>> from mocpy import MOC
>>> import astropy.units as u
>>> from astropy.coordinates import Angle, Longitude, Latitude
>>> moc = MOC.from_elliptical_cone(
... lon=Longitude(0 * u.deg),
... lat=Latitude(0 * u.deg),
... a=Angle(10, u.deg),
... b=Angle(5, u.deg),
... pa=Angle(0, u.deg),
... max_depth=10
... )
"""
lon = lon if isinstance(lon, Longitude) else Longitude(lon)
lat = lat if isinstance(lat, Latitude) else Latitude(lat)
pix, depth, fully_covered_flags = cdshealpix.elliptical_cone_search(lon, lat, a, b, pa, max_depth, delta_depth, flat=False)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_cone(cls, lon, lat, radius, max_depth, delta_depth=2):
"""
Creates a MOC from a cone.
The cone is centered around the (`lon`, `lat`) position with a radius expressed by
`radius`.
Parameters
----------
lon : `astropy.coordinates.Longitude` or its supertype `astropy.units.Quantity`
The longitude of the center of the cone.
lat : `astropy.coordinates.Latitude` or its supertype `astropy.units.Quantity`
The latitude of the center of the cone.
radius : `astropy.coordinates.Angle`
The radius angle of the cone.
max_depth : int
Maximum HEALPix cell resolution.
delta_depth : int, optional
To control the approximation, you can choose to perform the computations at a deeper
depth using the ``delta_depth`` parameter.
The depth at which the computations will be made will therefore be equal to
``max_depth`` + ``delta_depth``.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
Examples
--------
>>> from mocpy import MOC
>>> import astropy.units as u
>>> from astropy.coordinates import Angle, Longitude, Latitude
>>> moc = MOC.from_cone(
... lon=Longitude(0 * u.deg),
... lat=Latitude(0 * u.deg),
... radius=Angle(10, u.deg),
... max_depth=10
... )
"""
lon = lon if isinstance(lon, Longitude) else Longitude(lon)
lat = lat if isinstance(lat, Latitude) else Latitude(lat)
pix, depth, fully_covered_flags = cdshealpix.cone_search(lon, lat, radius, max_depth, delta_depth, flat=False)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_polygon_skycoord(cls, skycoord, max_depth=10):
"""
Creates a MOC from a polygon.
The polygon is given as an `astropy.coordinates.SkyCoord` that contains the
vertices of the polygon. Concave, convex and self-intersecting polygons are accepted.
Parameters
----------
skycoord : `astropy.coordinates.SkyCoord`
The sky coordinates defining the vertices of a polygon. It can describe a convex,
concave, or self-intersecting polygon.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
return MOC.from_polygon(lon=skycoord.icrs.ra, lat=skycoord.icrs.dec, max_depth=max_depth)
@classmethod
def from_polygon(cls, lon, lat, max_depth=10):
"""
Creates a MOC from a polygon
The polygon is given as lon and lat `astropy.units.Quantity` that define the
vertices of the polygon. Concave, convex and self-intersecting polygons are accepted.
Parameters
----------
lon : `astropy.coordinates.Longitude` or its supertype `astropy.units.Quantity`
The longitudes defining the polygon. Can describe convex, concave,
and self-intersecting polygons.
lat : `astropy.coordinates.Latitude` or its supertype `astropy.units.Quantity`
The latitudes defining the polygon. Can describe convex, concave,
and self-intersecting polygons.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
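Examples
--------
A minimal sketch (an illustrative triangle on the sky):
>>> import astropy.units as u
>>> from astropy.coordinates import Longitude, Latitude
>>> lon = Longitude([0., 10., 10.] * u.deg)
>>> lat = Latitude([0., 0., 10.] * u.deg)
>>> moc = MOC.from_polygon(lon=lon, lat=lat, max_depth=9)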
"""
lon = lon if isinstance(lon, Longitude) else Longitude(lon)
lat = lat if isinstance(lat, Latitude) else Latitude(lat)
pix, depth, fully_covered_flags = cdshealpix.polygon_search(lon, lat, max_depth)
return MOC.from_healpix_cells(pix, depth, fully_covered_flags)
@classmethod
def from_healpix_cells(cls, ipix, depth, fully_covered=None):
"""
Creates a MOC from a set of HEALPix cells at a given depth.
Parameters
----------
ipix : `numpy.ndarray`
HEALPix cell indices in the NESTED notation. dtype must be np.uint64
depth : `numpy.ndarray`
Depth of the HEALPix cells. Must be of the same size as `ipix`.
dtype must be np.uint8. Corresponds to the `level` of a HEALPix cell in astropy.healpix.
fully_covered : `numpy.ndarray`, optional
HEALPix cells coverage flags. This flag informs whether a cell is
fully covered by a cone (resp. polygon, elliptical cone) or not.
Must be of the same size as `ipix`.
Raises
------
IndexError
When `ipix`, `depth` and `fully_covered` do not have the same shape
Returns
-------
moc : `~mocpy.moc.MOC`
The MOC
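Examples
--------
A minimal sketch (illustrative cell indices):
>>> import numpy as np
>>> ipix = np.array([0, 1, 2], dtype=np.uint64)
>>> depth = np.array([3, 3, 4], dtype=np.uint8)
>>> moc = MOC.from_healpix_cells(ipix, depth)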
"""
if ipix.shape != depth.shape:
raise IndexError("pixels and depth arrays must have the same shape")
if fully_covered is not None and fully_covered.shape != ipix.shape:
raise IndexError("fully covered and pixel arrays must have the same shape")
intervals = mocpy.from_healpix_cells(ipix.astype(np.uint64), depth.astype(np.uint8))
return cls(IntervalSet(intervals, make_consistent=False), make_consistent=False)
@staticmethod
def order_to_spatial_resolution(order):
"""
Convert a depth to its equivalent spatial resolution.
Parameters
----------
order : int
Spatial depth.
Returns
-------
spatial_resolution : `~astropy.coordinates.Angle`
Spatial resolution.
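Examples
--------
A minimal sketch:
>>> res = MOC.order_to_spatial_resolution(10)
>>> res.to('arcmin')  # a few arcminutes at order 10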
"""
spatial_resolution = Angle( | np.sqrt(np.pi/(3 * 4**(order))) | numpy.sqrt |
import numpy as np
import math
import matplotlib.pyplot as plt
from kernel_generalization.utils import gegenbauer
import scipy as sp
import scipy.special
import scipy.optimize
from scipy.special import comb, gamma  # used by degeneracy() and norm() below
from kernel_generalization.utils import neural_tangent_kernel as ntk
###############################################################
################# Use Only These Functions ####################
###############################################################
def f(phi, L):
if L == 1:
return np.arccos(1 / np.pi * np.sin(phi) + (1 - 1 / np.pi * np.arccos(np.cos(phi))) * np.cos(phi))
elif L == 0:
return np.arccos(np.cos(phi))
else:
return f(phi, L - 1)
def NTK(phi, L):
if L == 1:
ntk = np.cos(f(phi, 1)) + (1 - phi / np.pi) * np.cos(phi)
return ntk
else:
a = phi
for i in range(L - 1):
a = f(a, 1)
ntk = np.cos(f(a, 1)) + NTK(phi, L - 1) * (1 - a / np.pi)
return ntk
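# Quick illustrative check of the arc-cosine NTK recursion above (a sketch, not part of
# the original module): evaluate the depth-3 NTK at a few angles in [0, pi].
if __name__ == "__main__":
    _phi = np.linspace(0.0, np.pi, 5)
    print("NTK(phi, L=3) =", NTK(_phi, 3))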
def get_gaussian_spectrum(ker_var, dist_var, kmax, dim):
## Sigma is sample variance
## Gamma is kernel variance
sigma = dist_var
gamma = ker_var
a = 1/(4*sigma)
b = 1/(2*gamma)
c = np.sqrt(a**2 + 2*a*b)
A = a+b+c
B = b/A
spectrum = np.array([np.sqrt(2*a/A)**(dim) * B**(k) for k in range(kmax)])
lambda_bar = np.array([B**(k) for k in range(kmax)])
degens = np.array([scipy.special.comb(k+dim-1,dim-1) for k in range(kmax)])
return spectrum, degens, lambda_bar
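# Illustrative sketch (not part of the original module): eigenvalues of a Gaussian (RBF)
# kernel with unit kernel/data variances in 3 dimensions, keeping the first 5 levels.
if __name__ == "__main__":
    spec, degens, lam_bar = get_gaussian_spectrum(ker_var=1.0, dist_var=1.0, kmax=5, dim=3)
    print("eigenvalues:", spec)
    print("degeneracies:", degens)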
def get_kernel_spectrum(layers, sig_w, sig_b, kmax, dim, num_pts=10000, IfNTK = True):
alpha = dim / 2.0 - 1
z, w = sp.special.roots_gegenbauer(num_pts, alpha)
Q = gegenbauer.gegenbauer(z, kmax, dim)
degens = np.array([gegenbauer.degeneracy_kernel(dim, k) for k in range(kmax)])
kernel = np.zeros((len(layers), num_pts))
L = max(layers)+1
theta = np.arccos(z)
KernelNTK, KernelNormalizedNTK, ThetaNTK = ntk.NTK(theta, sig_w, sig_b, L, IfNTK);
for i, layer in enumerate(layers):
kernel[i] = KernelNTK[layer]
scaled_kernel = kernel * np.outer(np.ones(len(layers)), w)
normalization = gegenbauer.eigenvalue_normalization(kmax, alpha, degens)
spectrum_scaled = scaled_kernel @ Q.T / normalization
spectrum_scaled = spectrum_scaled * np.heaviside(spectrum_scaled - 1e-20, 0)
spectrum_true = spectrum_scaled / np.outer(np.ones(len(layers)), degens)
for i in range(len(layers)):
for j in range(kmax - 1):
if spectrum_true[i, j + 1] < spectrum_true[i, j] * 1e-5:
spectrum_true[i, j + 1] = 0
return z, spectrum_true, spectrum_scaled, degens, kernel
def exp_spectrum(s, kmax, degens):
## Here s denotes the s^(-l)
spectrum_scaled = np.array([s**(-l) for l in range(1,kmax)])
spectrum_scaled = np.append([1],spectrum_scaled) ## We add the zero-mode
spectrum_true = spectrum_scaled / degens
return spectrum_true, spectrum_scaled
def power_spectrum(s, kmax, degens):
## Here s denotes the l^(-s)
spectrum_scaled = np.array([l**(-s) for l in range(1,kmax)])
spectrum_scaled = np.append([1],spectrum_scaled) ## We add the zero-mode
spectrum_true = spectrum_scaled / degens
return spectrum_true, spectrum_scaled
def white_spectrum(N):
return np.ones(N)/N
###############################################################
################# For Kernel Spectrum From Mathematica ####################
###############################################################
def ntk_spectrum(file, kmax = -1, layer = None, dim = None, return_NTK = False):
## Obtain the spectrum
data = np.load(file, allow_pickle=True)
eig, eig_real, eig_raw = [data['arr_'+str(i)] for i in range(len(data.files))]
if(kmax != -1):
eig = eig[:,:kmax,:]
eig_real = eig_real[:,:kmax,:]
eig_raw = eig_raw[:,:kmax,:]
## Reconstruct the NTK
num_pts = 10000
Dim = np.array([5*(i+1) for i in range(40)])
alpha = Dim[dim] / 2.0 - 1
z, w = sp.special.roots_gegenbauer(num_pts, alpha)
Q = gegenbauer.gegenbauer(z, kmax, Dim[dim])
k = np.array([i for i in range(kmax)]);
norm = (alpha+k)/alpha
NTK = eig_real[dim,:,layer]*norm @ Q
if(layer != None and dim != None):
if return_NTK:
return eig[dim,:,layer], eig_real[dim,:,layer], NTK
return eig[dim,:,layer], eig_real[dim,:,layer]
if(layer != None and dim == None):
return eig[:,:,layer], eig_real[:,:,layer]
if(layer == None and dim != None):
return eig[dim,:,:], eig_real[dim,:,:]
if(layer == None and dim == None):
return eig[:,:,:], eig_real[:,:,:]
def degeneracy(d,l):
alpha = (d-2)/2
degens = np.zeros((len(l),1))
degens[0] = 1
for i in range(len(l)-1):
k = l[i+1,0]
degens[i+1,:] = comb(k+d-3,k)*((alpha+k)/(alpha))
return degens
def norm(dim,l):
alpha = (dim-2)/2;
area = np.sqrt(np.pi)*gamma((dim-1)/2)/gamma(dim/2);
degen = degeneracy(dim,l)
Norm = area*degen*((alpha)/(alpha+l))**2
## Also another factor of lambda/(n+lambda) comes from spherical harmonics -> gegenbauer
Norm1 = area*((alpha)/(alpha+l))*degen
#Norm2 = area*((alpha)/(alpha+l))
return [Norm1, degen]
def save_spectrum(directory, dim, deg, layer):
# dim = np.array([5*(i+1) for i in range(20)])
# deg = np.array([i+1 for i in range(100)])
# layer = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
layer_str = [str(num) for num in layer]
data = | np.zeros((dim.size,deg.size,layer.size)) | numpy.zeros |
import numpy as np
from numba import vectorize
# Size is a list describing the network structure, e.g. [3,5,5,4,2]: 3 input neurons, hidden layers of 5, 5 and 4 neurons, and 2 output neurons
class nn_Creat():
def __init__(self,Size,active_fun='sigmoid',learning_rate=1.5,batch_normalization=1,objective_fun='MSE',
output_function='sigmoid',optimization_method='normal',weight_decay=0):
self.Size=Size # Store the network hyper-parameters and print them
print('the structure of the NN is \n', self.Size)
self.active_fun=active_fun
print('active function is %s '% active_fun)
self.learning_rate=learning_rate
print('learning_rate is %s '% learning_rate)
self.batch_normalization=batch_normalization
print('batch_normalization is %d '% batch_normalization)
self.objective_fun=objective_fun
print('objective_function is %s '% objective_fun)
self.optimization_method=optimization_method
print('optimization_method is %s '% optimization_method)
self.weight_decay = weight_decay
print('weight_decay is %f '% weight_decay)
# Initialize the network weights and gradients
self.vecNum=0
self.depth=len(Size)
self.W=[]
self.b=[]
self.W_grad=[]
self.b_grad=[]
self.cost=[]
if self.batch_normalization: # With batch normalization, keep running mean E and std S plus the scale/shift factors Gamma and Beta
self.E = []
self.S = []
self.Gamma = []
self.Beta = []
if objective_fun=='Cross Entropy': # A cross-entropy objective implies a softmax output layer
self.output_function='softmax'
else:
self.output_function='sigmoid'
print('output_function is %s \n'% self.output_function)
print('Start training NN \n')
for item in range(self.depth-1):
width=self.Size[item]
height=self.Size[item+1]
q=2*np.random.rand(height,width)/np.sqrt(width)-1/np.sqrt(width) # Initialize the weight matrix W
self.W.append(q)
if self.active_fun=='relu': # The bias initialization depends on whether the activation is relu
self.b.append(np.random.rand(height,1)+0.01)
else:
self.b.append(2*np.random.rand(height,1)/np.sqrt(width)-1/np.sqrt(width))
if self.optimization_method=='Momentum': # Momentum: the update direction accumulates previous gradients
if item!=0:
self.vW.append(np.zeros([height,width]))
self.vb.append(np.zeros([height, 1]))
else:
self.vW=[]
self.vb=[]
self.vW.append(np.zeros([height, width]))
self.vb.append(np.zeros([height, 1]))
if self.optimization_method=='AdaGrad' or optimization_method=='RMSProp' or optimization_method=='Adam': # These methods keep accumulators r of squared gradients
if item!=0:
self.rW.append(np.zeros([height,width]))
self.rb.append(np.zeros([height, 1]))
else:
self.rW=[]
self.rb=[]
self.rW.append(np.zeros([height, width]))
self.rb.append(np.zeros([height, 1]))
if self.optimization_method == 'Adam': # Adam additionally keeps first-moment accumulators s
if item!=0:
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
else:
self.sW = []
self.sb = []
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
if self.batch_normalization: # Per-layer batch-normalization parameters
self.Gamma.append(np.array([1]))
self.Beta.append(np.array([0]))
self.E.append(np.zeros([height,1]))
self.S.append(np.zeros([height,1]))
if self.optimization_method=='Momentum': # Momentum accumulators for Gamma/Beta when batch normalization is on
if item!=0:
self.vGamma.append(np.array([1]))
self.vBeta.append(np.array([0]))
else:
self.vGamma = []
self.vBeta = []
self.vGamma.append(np.array([1]))
self.vBeta.append(np.array([0]))
if self.optimization_method == 'AdaGrad' or optimization_method == 'RMSProp' or optimization_method == 'Adam': # Squared-gradient accumulators for Gamma/Beta
if item!=0:
self.rGamma.append(np.array([0]))
self.rBeta.append(np.array([0]))
else:
self.rGamma = []
self.rBeta = []
self.rGamma.append(np.array([0]))
self.rBeta.append(np.array([0]))
if self.optimization_method == 'Adam': # Adam first-moment accumulators for Gamma/Beta
if item!=0:
self.sGamma.append(np.array([1]))
self.sBeta.append(np.array([0]))
else:
self.sGamma = []
self.sBeta = []
self.sGamma.append(np.array([1]))
self.sBeta.append(np.array([0]))
self.W_grad.append(np.array([]))
self.b_grad.append(np.array([]))
def nn_train(self,train_x,train_y,iterations=10,batch_size=100): # Train the network with mini-batch gradient descent
# Randomly split the data into num_batches mini-batches of Batch_Size samples each
Batch_Size=batch_size
m=np.size(train_x,0)
num_batches=np.round(m/Batch_Size)
num_batches=int(num_batches)
for k in range(iterations):
kk=np.random.randint(0,m,m)
for l in range(num_batches):
batch_x=train_x[kk[l*batch_size:(l+1)*batch_size ],:]
batch_y=train_y[kk[l*batch_size:(l+1)*batch_size ],:]
self.nn_forward(batch_x,batch_y) # Forward pass of the network
self.nn_backward(batch_y) # Backward pass of the network
self.gradient_obtain() # Update all parameters with their gradients
return None
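# Illustrative usage sketch (comments only, so the class body stays untouched; the data
# shapes are assumptions): for 3-dimensional inputs and 2 output classes,
#   net = nn_Creat([3, 8, 2], active_fun='relu', objective_fun='Cross Entropy')
#   net.nn_train(train_x, train_y, iterations=10, batch_size=100)
# where train_x has shape (n_samples, 3) and train_y holds one-hot rows of shape (n_samples, 2).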
def Sigmoid(self,z): # Sigmoid activation
yyy=1/(1+np.exp(-z))
return yyy
def SoftMax(self,x): # Numerically stable softmax
e_x = np.exp(x - np.max(x,0))
return e_x / np.sum(e_x,0)
def Relu(self,xxx): # ReLU activation
# xxx[xxx<0]=0
s=np.maximum(xxx, 0)
return s
def nn_forward(self,batch_x,batch_y): # Forward pass: compute each layer's output a, cache pre-activations, and accumulate the cost
batch_x=batch_x.T
batch_y=batch_y.T
m=np.size(batch_x,1)
self.a=[] # Per-layer activation outputs
self.a.append(batch_x) # The first "activation" is the input batch
cost2=0 # Accumulator for the L2 regularization term
self.yy=[]
for k in range(1,self.depth): # Loop over the layers, computing each layer's output
y=(self.W[k-1].dot(self.a[k-1]))+(np.repeat(self.b[k-1],m,1))
if self.batch_normalization:
self.E[k-1]=self.E[k-1]*self.vecNum+np.sum(y,1)[:,None]
self.S[k-1]=self.S[k-1]**2*(self.vecNum-1)+((m-1)*np.std(y,1)**2)[:,None]
self.vecNum=self.vecNum+m
self.E[k-1]=self.E[k-1]/self.vecNum # running mean
self.S[k-1]=np.sqrt(self.S[k-1]/(self.vecNum-1)) # running standard deviation
y = (y - self.E[k-1]) / (self.S[k-1] + 0.0001)
self.yy.append(y) # cache the normalized pre-activation for the backward pass
y=self.Gamma[k-1]*y+self.Beta[k-1] # scale and shift
# Dictionaries mapping names to the output and hidden-layer activation functions
if k==self.depth-1: # output layer: append its output to the activation list
target_output_function = {'sigmoid': self.Sigmoid, 'tanh': np.tanh,
'relu': self.Relu, 'softmax': self.SoftMax}
self.a.append(target_output_function[self.output_function](y))
else:
target_active_function = {'sigmoid': self.Sigmoid, 'tanh': np.tanh,
'relu': self.Relu}
self.a.append(target_active_function[self.active_fun](y))
cost2=cost2+np.sum(self.W[k-1]**2) # accumulate the L2 regularization term
# Total loss
if self.objective_fun == 'MSE':
self.cost.append(0.5 / m * np.sum((self.a[-1] - batch_y) ** 2) / m + 0.5 * self.weight_decay * cost2)
elif self.objective_fun == 'Cross Entropy':
self.cost.append(-0.5 * np.sum(batch_y * np.log(self.a[-1])) / m + 0.5 * self.weight_decay * cost2)
return None
# Backward pass: compute the gradients of all parameters
def nn_backward(self,batch_y):
batch_y=batch_y.T
m=np.size(self.a[0],1)
# Gradient of the loss w.r.t. the output pre-activation, for each output function
self.theta=[np.array([]) for i in range(self.depth)] # Initialize theta (the per-layer deltas)
if self.output_function=='sigmoid':
self.theta[-1]=-(batch_y-self.a[-1] )*self.a[-1]*(1-self.a[-1])
elif self.output_function=='tanh':
self.theta[-1] = -(batch_y-self.a[-1] )*(1-self.a[-1]**2)
elif self.output_function=='softmax':
self.theta[-1] =self.a[-1]-batch_y
if self.batch_normalization:
self.gamma_grad = [np.array([]) for ii in range(self.depth - 1)] # With batch normalization, also initialize the gamma and beta gradients
self.beta_grad = [np.array([]) for iii in range(self.depth - 1)]
temp=self.theta[-1]*self.yy[-1]
self.gamma_grad[-1]=np.sum(np.mean(temp,1))
self.beta_grad[-1]=np.sum(np.mean(self.theta[-1],1))
self.theta[-1]=self.Gamma[-1]*(self.theta[-1])/(self.S[-1]+0.0001) # delta of the last layer
self.W_grad[-1]=self.theta[-1].dot(self.a[-2].T)/m+self.weight_decay*self.W[-1] # gradients of the last-layer W and b
self.b_grad[-1]=(np.sum(self.theta[-1],1)/m)[:,None]
# Starting from the last layer, propagate the gradients backwards layer by layer
for k in range(2,self.depth):
if self.active_fun=='sigmoid':
self.theta[-k] = (self.W[-k + 1].T.dot(self.theta[-k + 1])) * (self.a[-k] * (1 - self.a[-k]))
elif self.active_fun=='tanh':
self.theta[-k] = (self.W[-k + 1].T.dot(self.theta[-k + 1])) * (1 - self.a[-k] ** 2)
elif self.active_fun=='relu':
self.theta[-k] = (self.W[-k + 1].T.dot(self.theta[-k + 1])) * (self.a[-k]>=0)
if self.batch_normalization:
temp=self.theta[-k]*self.yy[-k]
self.gamma_grad[-k]=np.sum(np.mean(temp,1))
self.beta_grad[-k]=np.sum(np.mean(self.theta[-k],1))
self.theta[-k]=self.Gamma[-k]*(self.theta[-k])/((self.S[-k]+0.0001).reshape(np.size(self.S[-k]),1))
self.W_grad[-k]=self.theta[-k].dot(self.a[-k-1].T)/m+self.weight_decay*self.W[-k]
self.b_grad[-k]=(np.sum(self.theta[-k],1)/m)[:,None]
def gradient_obtain(self):
# Apply the gradient updates to every parameter
for k in range(self.depth-1):
if self.batch_normalization==0:
if self.optimization_method=='normal':
self.W[k]= self.W[k]-self.learning_rate*self.W_grad[k]
self.b[k]=self.b[k]-self.learning_rate*self.b_grad[k]
elif self.optimization_method=='AdaGrad':
self.rW[k]=self.rW[k]+self.W_grad[k]**2
self.rb[k]=self.rb[k]+self.b_grad[k]**2
self.W[k]=self.W[k]-self.learning_rate*self.W_grad[k]/(np.sqrt(self.rW[k])+0.001)
self.b[k]=self.b[k]-self.learning_rate*self.b_grad[k]/(np.sqrt(self.rb[k])+0.001)
elif self.optimization_method=='Momentum':
rho=0.1
self.vW[k]=rho*self.vW[k]-self.learning_rate*self.W_grad[k]
self.vb[k] = rho * self.vb[k] - self.learning_rate * self.b_grad[k]
self.W[k]=self.W[k]+self.vW[k]
self.b[k]=self.b[k]+self.vb[k]
elif self.optimization_method=='RMSProp':
rho=0.9
self.rW[k] = rho * self.rW[k] + 0.1* self.W_grad[k]**2
self.rb[k] = rho * self.rb[k] +0.1 * self.b_grad[k]**2
self.W[k] = self.W[k] - self.learning_rate*self.W_grad[k]/(np.sqrt(self.rW[k])+0.001)
self.b[k] = self.b[k] - self.learning_rate*self.b_grad[k]/(np.sqrt(self.rb[k])+0.001)
elif self.optimization_method=='Adam':
rho1=0.9
rho2=0.999
self.sW[k]=0.9*self.sW[k]+0.1*self.W_grad[k]
self.sb[k]=0.9*self.sb[k]+0.1*self.b_grad[k]
self.rW[k]=0.999*self.rW[k]+0.001*self.W_grad[k]**2
self.rb[k]=0.999*self.rb[k]+0.001*self.b_grad[k]**2
newS=self.sW[k]/(1-rho1)
newR=self.rW[k]/(1-rho2)
self.W[k]=self.W[k]-self.learning_rate*newS/np.sqrt(newR+0.00001)
newS = self.sb[k] / (1 - rho1)
newR = self.rb[k] / (1 - rho2)
self.b[k]=self.b[k]-self.learning_rate*newS/np.sqrt(newR+0.00001)
else:
if self.optimization_method=='normal':
self.W[k]=self.W[k]-self.learning_rate*self.W_grad[k]
self.b[k]=self.b[k]-self.learning_rate*self.b_grad[k]
self.Gamma[k]=self.Gamma[k]-self.learning_rate*self.gamma_grad[k]
self.Beta[k]=self.Beta[k]-self.learning_rate*self.beta_grad[k]
elif self.optimization_method=='AdaGrad':
self.rW[k]=self.rW[k]+self.W_grad[k]**2
self.rb[k]=self.rb[k]+self.b_grad[k]**2
self.rGamma[k]=self.rGamma[k]+self.gamma_grad[k]**2
self.rBeta[k]=self.rBeta[k]+self.beta_grad[k]**2
self.W[k]=self.W[k]-self.learning_rate*self.W_grad[k]/(np.sqrt(self.rW[k])+0.001)
self.b[k]=self.b[k]-self.learning_rate*self.b_grad[k]/(np.sqrt(self.rb[k])+0.001)
self.Gamma[k]=self.Gamma[k] - self.learning_rate * self.gamma_grad[k]/(
np.sqrt(self.rGamma[k])+0.001)
self.Beta[k] = self.Beta[k] - self.learning_rate * self.beta_grad[k] /(
np.sqrt(self.rBeta[k]) + 0.001)
elif self.optimization_method=='Momentum':
rho=0.1
self.vW[k]=rho*self.vW[k]-self.learning_rate*self.W_grad[k]
self.vb[k] = rho * self.vb[k] - self.learning_rate * self.b_grad[k]
self.vGamma[k]=rho*self.vGamma[k]-self.learning_rate*self.gamma_grad[k]
self.vBeta[k]=rho*self.vBeta[k]-self.learning_rate*self.beta_grad[k]
self.W[k]=self.W[k]+self.vW[k]
self.b[k]=self.b[k]+self.vb[k]
self.Gamma[k]=self.Gamma[k]+self.vGamma[k]
self.Beta[k]=self.Beta[k]+self.vBeta[k]
elif self.optimization_method=='RMSProp':
self.rW[k] = 0.9 * self.rW[k] + 0.1* self.W_grad[k]**2
self.rb[k] = 0.9 * self.rb[k] + 0.1 * self.b_grad[k]**2
self.rGamma[k]=0.9*self.rGamma[k]+0.1*self.gamma_grad[k]**2
self.rBeta[k]=0.9*self.rBeta[k]+0.1*self.beta_grad[k]**2
self.W[k] = self.W[k] - self.learning_rate*self.W_grad[k]/(np.sqrt(self.rW[k])+0.001)
self.b[k] = self.b[k] - self.learning_rate*self.b_grad[k]/( | np.sqrt(self.rb[k]) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
This module is a work in progress, as such concepts are subject to change.
MAIN IDEA:
`MultiTaskSamples` serves as a structure to contain and manipulate a set of
samples with potentially many different types of labels and features.
"""
import logging
import utool as ut
import ubelt as ub
import numpy as np
from wbia import dtool as dt
import pandas as pd
import sklearn
import sklearn.metrics
import sklearn.ensemble
import sklearn.impute
import sklearn.pipeline
import sklearn.neural_network
from wbia.algo.verif import sklearn_utils
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class XValConfig(dt.Config):
_param_info_list = [
# ut.ParamInfo('type', 'StratifiedKFold'),
ut.ParamInfo('type', 'StratifiedGroupKFold'),
ut.ParamInfo('n_splits', 3),
ut.ParamInfo(
'shuffle', True, hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold'
),
ut.ParamInfo(
'random_state',
3953056901,
hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold',
),
]
@ut.reloadable_class
class ClfProblem(ut.NiceRepr):
def __init__(pblm):
pblm.deploy_task_clfs = None
pblm.eval_task_clfs = None
pblm.xval_kw = XValConfig()
pblm.eval_task_clfs = None
pblm.task_combo_res = None
pblm.verbose = True
def set_pandas_options(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_low(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 5
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_normal(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def learn_evaluation_classifiers(pblm, task_keys=None, clf_keys=None, data_keys=None):
"""
Evaluates by learning classifiers using cross validation.
Do not use this to learn production classifiers.
CommandLine:
python -m wbia.algo.verif.vsone evaluate_classifiers --db PZ_PB_RF_TRAIN --show
python -m clf_helpers learn_evaluation_classifiers
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> pblm = IrisProblem()
>>> pblm.setup()
>>> pblm.verbose = True
>>> pblm.eval_clf_keys = ['Logit', 'RF']
>>> pblm.eval_task_keys = ['iris']
>>> pblm.eval_data_keys = ['learn(all)']
>>> result = pblm.learn_evaluation_classifiers()
>>> res = pblm.task_combo_res['iris']['Logit']['learn(all)']
>>> res.print_report()
>>> res = pblm.task_combo_res['iris']['RF']['learn(all)']
>>> res.print_report()
>>> print(result)
"""
pblm.eval_task_clfs = ut.AutoVivification()
pblm.task_combo_res = ut.AutoVivification()
if task_keys is None:
task_keys = pblm.eval_task_keys
if data_keys is None:
data_keys = pblm.eval_data_keys
if clf_keys is None:
clf_keys = pblm.eval_clf_keys
if task_keys is None:
task_keys = [pblm.primary_task_key]
if data_keys is None:
data_keys = [pblm.default_data_key]
if clf_keys is None:
clf_keys = [pblm.default_clf_key]
if pblm.verbose:
ut.cprint('[pblm] learn_evaluation_classifiers', color='blue')
ut.cprint('[pblm] task_keys = {}'.format(task_keys))
ut.cprint('[pblm] data_keys = {}'.format(data_keys))
ut.cprint('[pblm] clf_keys = {}'.format(clf_keys))
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
for task_key in task_prog:
dataset_prog = Prog(data_keys, label='Data')
for data_key in dataset_prog:
clf_prog = Prog(clf_keys, label='CLF')
for clf_key in clf_prog:
pblm._ensure_evaluation_clf(task_key, data_key, clf_key)
def _ensure_evaluation_clf(pblm, task_key, data_key, clf_key, use_cache=True):
"""
Learns and caches an evaluation (cross-validated) classifier and tests
and caches the results.
data_key = 'learn(sum,glob)'
clf_key = 'RF'
"""
# TODO: add in params used to construct features into the cfgstr
if hasattr(pblm.samples, 'sample_hashid'):
ibs = pblm.infr.ibs
sample_hashid = pblm.samples.sample_hashid()
feat_dims = pblm.samples.X_dict[data_key].columns.values.tolist()
# cfg_prefix = sample_hashid + pblm.qreq_.get_cfgstr() + feat_cfgstr
est_kw1, est_kw2 = pblm._estimator_params(clf_key)
param_id = ut.get_dict_hashid(est_kw1)
xval_id = pblm.xval_kw.get_cfgstr()
cfgstr = '_'.join(
[
sample_hashid,
param_id,
xval_id,
task_key,
data_key,
clf_key,
ut.hashid_arr(feat_dims, 'feats'),
]
)
fname = 'eval_clfres_' + ibs.dbname
else:
fname = 'foo'
feat_dims = None
cfgstr = 'bar'
use_cache = False
# TODO: ABI class should not be caching
cacher_kw = dict(appname='vsone_rf_train', enabled=use_cache, verbose=1)
cacher_clf = ub.Cacher(fname, cfgstr=cfgstr, meta=[feat_dims], **cacher_kw)
data = cacher_clf.tryload()
if not data:
data = pblm._train_evaluation_clf(task_key, data_key, clf_key)
cacher_clf.save(data)
clf_list, res_list = data
labels = pblm.samples.subtasks[task_key]
combo_res = ClfResult.combine_results(res_list, labels)
pblm.eval_task_clfs[task_key][clf_key][data_key] = clf_list
pblm.task_combo_res[task_key][clf_key][data_key] = combo_res
def _train_evaluation_clf(pblm, task_key, data_key, clf_key, feat_dims=None):
"""
Learns a cross-validated classifier on the dataset
Ignore:
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem()
>>> pblm.load_features()
>>> pblm.load_samples()
>>> data_key = 'learn(all)'
>>> task_key = 'photobomb_state'
>>> clf_key = 'RF-OVR'
>>> task_key = 'match_state'
>>> data_key = pblm.default_data_key
>>> clf_key = pblm.default_clf_key
"""
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
xval_kw = pblm.xval_kw.asdict()
clf_list = []
res_list = []
skf_list = pblm.samples.stratified_kfold_indices(**xval_kw)
skf_prog = ut.ProgIter(skf_list, label='skf-train-eval')
for train_idx, test_idx in skf_prog:
X_df_train = X_df.iloc[train_idx]
assert X_df_train.index.tolist() == ut.take(pblm.samples.index, train_idx)
# train_uv = X_df.iloc[train_idx].index
# X_train = X_df.loc[train_uv]
# y_train = labels.encoded_df.loc[train_uv]
if feat_dims is not None:
X_df_train = X_df_train[feat_dims]
X_train = X_df_train.values
y_train = labels.encoded_df.iloc[train_idx].values.ravel()
clf = clf_partial()
clf.fit(X_train, y_train)
# Note: There is a corner case where one fold doesn't get any
# labels of a certain class. Because y_train is an encoded integer,
# the clf.classes_ attribute will cause predictions to agree with
# other classifiers trained on the same labels.
# Evaluate results
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
clf_list.append(clf)
res_list.append(res)
return clf_list, res_list
def _external_classifier_result(
pblm, clf, task_key, data_key, feat_dims=None, test_idx=None
):
"""
Given an external classifier (ensure its trained on disjoint data)
evaluate all data on it.
Args:
test_idx (list): subset of this classifier to test on
(defaults to all if None)
"""
X_df = pblm.samples.X_dict[data_key]
if test_idx is None:
test_idx = np.arange(len(X_df))
labels = pblm.samples.subtasks[task_key]
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
return res
def learn_deploy_classifiers(pblm, task_keys=None, clf_key=None, data_key=None):
"""
Learns on data without any train/validation split
"""
if pblm.verbose > 0:
ut.cprint('[pblm] learn_deploy_classifiers', color='blue')
if clf_key is None:
clf_key = pblm.default_clf_key
if data_key is None:
data_key = pblm.default_data_key
if task_keys is None:
task_keys = list(pblm.samples.supported_tasks())
if pblm.deploy_task_clfs is None:
pblm.deploy_task_clfs = ut.AutoVivification()
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
task_clfs = {}
for task_key in task_prog:
clf = pblm._train_deploy_clf(task_key, data_key, clf_key)
task_clfs[task_key] = clf
pblm.deploy_task_clfs[task_key][clf_key][data_key] = clf
return task_clfs
def _estimator_params(pblm, clf_key):
est_type = clf_key.split('-')[0]
if est_type in {'RF', 'RandomForest'}:
est_kw1 = {
# 'max_depth': 4,
'bootstrap': True,
'class_weight': None,
'criterion': 'entropy',
'max_features': 'sqrt',
# 'max_features': None,
'min_samples_leaf': 5,
'min_samples_split': 2,
# 'n_estimators': 64,
'n_estimators': 256,
}
# Hack to only use missing values if we have the right sklearn
if 'missing_values' in ut.get_func_kwargs(
sklearn.ensemble.RandomForestClassifier.__init__
):
est_kw1['missing_values'] = np.nan
est_kw2 = {
'random_state': 3915904814,
'verbose': 0,
'n_jobs': -1,
}
elif est_type in {'SVC', 'SVM'}:
est_kw1 = dict(kernel='linear')
est_kw2 = {}
elif est_type in {'Logit', 'LogisticRegression'}:
est_kw1 = {}
est_kw2 = {}
elif est_type in {'MLP'}:
est_kw1 = dict(
activation='relu',
alpha=1e-05,
batch_size='auto',
beta_1=0.9,
beta_2=0.999,
early_stopping=False,
epsilon=1e-08,
hidden_layer_sizes=(10, 10),
learning_rate='constant',
learning_rate_init=0.001,
max_iter=200,
momentum=0.9,
nesterovs_momentum=True,
power_t=0.5,
random_state=3915904814,
shuffle=True,
solver='lbfgs',
tol=0.0001,
validation_fraction=0.1,
warm_start=False,
)
est_kw2 = dict(verbose=False)
else:
raise KeyError('Unknown Estimator')
return est_kw1, est_kw2
def _get_estimator(pblm, clf_key):
"""
Returns sklearn classifier
"""
tup = clf_key.split('-')
wrap_type = None if len(tup) == 1 else tup[1]
est_type = tup[0]
multiclass_wrapper = {
None: ut.identity,
'OVR': sklearn.multiclass.OneVsRestClassifier,
'OVO': sklearn.multiclass.OneVsOneClassifier,
}[wrap_type]
est_class = {
'RF': sklearn.ensemble.RandomForestClassifier,
'SVC': sklearn.svm.SVC,
'Logit': sklearn.linear_model.LogisticRegression,
'MLP': sklearn.neural_network.MLPClassifier,
}[est_type]
est_kw1, est_kw2 = pblm._estimator_params(est_type)
est_params = ut.merge_dicts(est_kw1, est_kw2)
# steps = []
# steps.append((est_type, est_class(**est_params)))
# if wrap_type is not None:
# steps.append((wrap_type, multiclass_wrapper))
if est_type == 'MLP':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
# ('scale', sklearn.preprocessing.StandardScaler),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
elif est_type == 'Logit':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
else:
def clf_partial():
return multiclass_wrapper(est_class(**est_params))
return clf_partial
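# Illustrative sketch (comments only; `X_train`/`y_train` are assumed to exist):
#   clf_partial = pblm._get_estimator('RF-OVR')
#   clf = clf_partial()          # OneVsRestClassifier wrapping a RandomForestClassifier
#   clf.fit(X_train, y_train)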
def _train_deploy_clf(pblm, task_key, data_key, clf_key):
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
logger.info(
'Training deployment {} classifier on {} for {}'.format(
clf_key, data_key, task_key
)
)
clf = clf_partial()
index = X_df.index
X = X_df.loc[index].values
y = labels.encoded_df.loc[index].values.ravel()
clf.fit(X, y)
return clf
def _optimize_rf_hyperparams(pblm, data_key=None, task_key=None):
"""
helper script I've only run interactively
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem.from_empty('PZ_PB_RF_TRAIN')
#>>> pblm = OneVsOneProblem.from_empty('GZ_Master1')
>>> pblm.load_samples()
>>> pblm.load_features()
>>> pblm.build_feature_subsets()
>>> data_key=None
>>> task_key=None
"""
from sklearn.model_selection import RandomizedSearchCV # NOQA
from sklearn.model_selection import GridSearchCV # NOQA
from sklearn.ensemble import RandomForestClassifier
from wbia.algo.verif import sklearn_utils
if data_key is None:
data_key = pblm.default_data_key
if task_key is None:
task_key = pblm.primary_task_key
# Load data
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Define estimator and parameter search space
grid = {
'bootstrap': [True, False],
'class_weight': [None, 'balanced'],
'criterion': ['entropy', 'gini'],
# 'max_features': ['sqrt', 'log2'],
'max_features': ['sqrt'],
'min_samples_leaf': list(range(2, 11)),
'min_samples_split': list(range(2, 11)),
'n_estimators': [8, 64, 128, 256, 512, 1024],
}
est = RandomForestClassifier(missing_values=np.nan)
if False:
# debug
params = ut.util_dict.all_dict_combinations(grid)[0]
est.set_params(verbose=10, n_jobs=1, **params)
est.fit(X=X, y=y)
cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
if True:
n_iter = 25
SearchCV = ut.partial(RandomizedSearchCV, n_iter=n_iter)
else:
n_iter = ut.prod(map(len, grid.values()))
SearchCV = GridSearchCV
search = SearchCV(est, grid, cv=cv, verbose=10)
n_cpus = ut.num_cpus()
thresh = n_cpus * 1.5
n_jobs_est = 1
n_jobs_ser = min(n_cpus, n_iter)
if n_iter < thresh:
n_jobs_est = int(max(1, thresh / n_iter))
est.set_params(n_jobs=n_jobs_est)
search.set_params(n_jobs=n_jobs_ser)
search.fit(X=X, y=y, groups=groups)
res = search.cv_results_.copy()
alias = ut.odict(
[
('rank_test_score', 'rank'),
('mean_test_score', 'μ-test'),
('std_test_score', 'σ-test'),
('mean_train_score', 'μ-train'),
('std_train_score', 'σ-train'),
('mean_fit_time', 'fit_time'),
('params', 'params'),
]
)
res = ut.dict_subset(res, alias.keys())
cvresult_df = pd.DataFrame(res).rename(columns=alias)
cvresult_df = cvresult_df.sort_values('rank').reset_index(drop=True)
params = pd.DataFrame.from_dict(cvresult_df['params'].values.tolist())
logger.info('Varied params:')
logger.info(ut.repr4(ut.map_vals(set, params.to_dict('list'))))
logger.info('Ranked Params')
logger.info(params)
logger.info('Ranked scores on development set:')
logger.info(cvresult_df)
logger.info('Best parameters set found on hyperparam set:')
logger.info('best_params_ = %s' % (ut.repr4(search.best_params_),))
logger.info('Fastest params')
cvresult_df.loc[cvresult_df['fit_time'].idxmin()]['params']
def _dev_calib(pblm):
"""
interactive script only
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import calibration_curve
from sklearn.metrics import log_loss, brier_score_loss
# Load data
data_key = pblm.default_data_key
task_key = pblm.primary_task_key
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Split into test/train/valid
cv = sklearn_utils.StratifiedGroupKFold(n_splits=2)
test_idx, train_idx = next(cv.split(X, y, groups))
# valid_idx = train_idx[0::2]
# train_idx = train_idx[1::2]
# train_valid_idx = np.hstack([train_idx, valid_idx])
# Train Uncalibrated RF
est_kw = pblm._estimator_params('RF')[0]
uncal_clf = RandomForestClassifier(**est_kw)
uncal_clf.fit(X[train_idx], y[train_idx])
uncal_probs = uncal_clf.predict_proba(X[test_idx]).T[1]
uncal_score = log_loss(y[test_idx] == 1, uncal_probs)
uncal_brier = brier_score_loss(y[test_idx] == 1, uncal_probs)
# Train Calibrated RF
method = 'isotonic' if len(test_idx) > 2000 else 'sigmoid'
precal_clf = RandomForestClassifier(**est_kw)
# cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
cal_clf = CalibratedClassifierCV(precal_clf, cv=2, method=method)
cal_clf.fit(X[train_idx], y[train_idx])
cal_probs = cal_clf.predict_proba(X[test_idx]).T[1]
cal_score = log_loss(y[test_idx] == 1, cal_probs)
cal_brier = brier_score_loss(y[test_idx] == 1, cal_probs)
logger.info('cal_brier = %r' % (cal_brier,))
logger.info('uncal_brier = %r' % (uncal_brier,))
logger.info('uncal_score = %r' % (uncal_score,))
logger.info('cal_score = %r' % (cal_score,))
import wbia.plottool as pt
ut.qtensure()
pt.figure()
ax = pt.gca()
y_test = y[test_idx] == 1
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, uncal_probs, n_bins=10
)
ax.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('uncal-RF', uncal_brier),
)
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, cal_probs, n_bins=10
)
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('cal-RF', cal_brier),
)
pt.legend()
@ut.reloadable_class
class ClfResult(ut.NiceRepr):
r"""
Handles evaluation statistics for a multiclass classifier trained on a
specific dataset with specific labels.
"""
# Attributes that identify the task and data the classifier is evaluated on
_key_attrs = ['task_key', 'data_key', 'class_names']
# Attributes about results and labels of individual samples
_datafame_attrs = ['probs_df', 'probhats_df', 'target_bin_df', 'target_enc_df']
def __init__(res):
pass
def __nice__(res):
return '{}, {}, {}'.format(res.task_key, res.data_key, len(res.index))
@property
def index(res):
return res.probs_df.index
@classmethod
def make_single(ClfResult, clf, X_df, test_idx, labels, data_key, feat_dims=None):
"""
Make a result for a single cross validation subset
"""
X_df_test = X_df.iloc[test_idx]
if feat_dims is not None:
X_df_test = X_df_test[feat_dims]
index = X_df_test.index
# clf_probs = clf.predict_proba(X_df_test)
# index = pd.Series(test_idx, name='test_idx')
# Ensure shape corresponds with all classes
def align_cols(arr, arr_cols, target_cols):
import utool as ut
alignx = ut.list_alignment(arr_cols, target_cols, missing=True)
aligned_arrT = ut.none_take(arr.T, alignx)
aligned_arrT = ut.replace_nones(aligned_arrT, np.zeros(len(arr)))
aligned_arr = np.vstack(aligned_arrT).T
return aligned_arr
res = ClfResult()
res.task_key = labels.task_name
res.data_key = data_key
res.class_names = ut.lmap(str, labels.class_names)
res.feat_dims = feat_dims
res.probs_df = sklearn_utils.predict_proba_df(clf, X_df_test, res.class_names)
res.target_bin_df = labels.indicator_df.iloc[test_idx]
res.target_enc_df = labels.encoded_df.iloc[test_idx]
if hasattr(clf, 'estimators_') and labels.n_classes > 2:
# The n-th estimator in the OVR classifier predicts the prob of the
# n-th class (as label 1).
probs_hat = np.hstack(
[est.predict_proba(X_df_test)[:, 1:2] for est in clf.estimators_]
)
res.probhats_df = pd.DataFrame(
align_cols(probs_hat, clf.classes_, labels.classes_),
index=index,
columns=res.class_names,
)
# In the OVR-case, ideally things will sum to 1, but when they
# don't, normalization happens. A Z-value of more than 1 means
# overconfidence, and under 1 means underconfidence.
res.confidence_ratio = res.probhats_df.sum(axis=1)
else:
res.probhats_df = None
return res
def compress(res, flags):
res2 = ClfResult()
res2.task_key = res.task_key
res2.data_key = res.data_key
res2.class_names = res.class_names
res2.probs_df = res.probs_df[flags]
res2.target_bin_df = res.target_bin_df[flags]
res2.target_enc_df = res.target_enc_df[flags]
if res.probhats_df is None:
res2.probhats_df = None
else:
res2.probhats_df = res.probhats_df[flags]
# res2.confidence_ratio = res.confidence_ratio[flags]
return res2
@classmethod
def combine_results(ClfResult, res_list, labels=None):
"""
Combine results from cross validation runs into a single result
representing the performance of the entire dataset
"""
# Ensure that res_lists are not overlapping
for r1, r2 in ut.combinations(res_list, 2):
assert (
len(r1.index.intersection(r2.index)) == 0
), 'ClfResult dataframes must be disjoint'
# sanity check
for r in res_list:
assert | np.all(r.index == r.probs_df.index) | numpy.all |
"""Module to provide functionality to import structures."""
import os
import tempfile
import datetime
from collections import OrderedDict
from traitlets import Bool
import ipywidgets as ipw
from aiida.orm import CalcFunctionNode, CalcJobNode, Node, QueryBuilder, WorkChainNode, StructureData
from .utils import get_ase_from_file
class StructureManagerWidget(ipw.VBox): # pylint: disable=too-many-instance-attributes
'''Upload a structure and store it in AiiDA database.
Useful class members:
:ivar has_structure: whether the widget contains a structure
:vartype has_structure: bool
:ivar frozen: whether the widget is frozen (can't be modified) or not
:vartype frozen: bool
:ivar structure_node: link to AiiDA structure object
:vartype structure_node: StructureData or CifData'''
has_structure = Bool(False)
frozen = Bool(False)
DATA_FORMATS = ('StructureData', 'CifData')
def __init__(self, importers, storable=True, node_class=None, **kwargs):
"""
:param storable: Whether to provide Store button (together with Store format)
:type storable: bool
:param node_class: AiiDA node class for storing the structure.
Possible values: 'StructureData', 'CifData' or None (let the user decide).
Note: If your workflows require a specific node class, better fix it here.
:param examples: list of tuples each containing a name and a path to an example structure
:type examples: list
:param importers: list of tuples each containing a name and an object for data importing. Each object
should contain an empty `on_structure_selection()` method that has two parameters: structure_ase, name
:type importers: list"""
from .viewers import StructureDataViewer
if not importers: # we make sure the list is not empty
raise ValueError("The parameter importers should contain a list (or tuple) of tuples "
"(\"importer name\", importer), got a falsy object.")
self.structure_ase = None
self._structure_node = None
self.viewer = StructureDataViewer(downloadable=False)
self.btn_store = ipw.Button(description='Store in AiiDA', disabled=True)
self.btn_store.on_click(self._on_click_store)
# Description that will be stored along with the new structure.
self.structure_description = ipw.Text(placeholder="Description (optional)")
# Select format to store in the AiiDA database.
self.data_format = ipw.RadioButtons(options=self.DATA_FORMATS, description='Data type:')
self.data_format.observe(self.reset_structure, names=['value'])
if len(importers) == 1:
# If there is only one importer - no need to make tabs.
self._structure_sources_tab = importers[0][1]
# Assigning a function which will be called when importer provides a structure.
importers[0][1].on_structure_selection = self.select_structure
else:
self._structure_sources_tab = ipw.Tab() # Tabs.
self._structure_sources_tab.children = [i[1] for i in importers] # One importer per tab.
for i, (label, importer) in enumerate(importers):
# Labeling tabs.
self._structure_sources_tab.set_title(i, label)
# Assigning a function which will be called when importer provides a structure.
importer.on_structure_selection = self.select_structure
if storable:
if node_class is None:
store = [self.btn_store, self.data_format, self.structure_description]
elif node_class not in self.DATA_FORMATS:
raise ValueError("Unknown data format '{}'. Options: {}".format(node_class, self.DATA_FORMATS))
else:
self.data_format.value = node_class
store = [self.btn_store, self.structure_description]
else:
store = [self.structure_description]
store = ipw.HBox(store)
super().__init__(children=[self._structure_sources_tab, self.viewer, store], **kwargs)
def reset_structure(self, change=None): # pylint: disable=unused-argument
if self.frozen:
return
self._structure_node = None
self.viewer.structure = None
def select_structure(self, structure_ase, name):
"""Select structure
:param structure_ase: ASE object containing structure
:type structure_ase: ASE Atoms
:param name: File name with extension but without path
:type name: str"""
if self.frozen:
return
self._structure_node = None
if not structure_ase:
self.btn_store.disabled = True
self.has_structure = False
self.structure_ase = None
self.structure_description.value = ''
self.reset_structure()
return
self.btn_store.disabled = False
self.has_structure = True
self.structure_description.value = "{} ({})".format(structure_ase.get_chemical_formula(), name)
self.structure_ase = structure_ase
self.viewer.structure = structure_ase
def _on_click_store(self, change): # pylint: disable=unused-argument
self.store_structure()
def store_structure(self, label=None, description=None):
"""Stores the structure in AiiDA database."""
if self.frozen:
return
if self.structure_node is None:
return
if self.structure_node.is_stored:
print("Already stored in AiiDA: " + repr(self.structure_node) + " skipping..")
return
if label:
self.structure_node.label = label
if description:
self.structure_node.description = description
self.structure_node.store()
print("Stored in AiiDA: " + repr(self.structure_node))
def freeze(self):
"""Do not allow any further modifications"""
self._structure_sources_tab.layout.visibility = 'hidden'
self.frozen = True
self.btn_store.disabled = True
self.structure_description.disabled = True
self.data_format.disabled = True
@property
def node_class(self):
return self.data_format.value
@node_class.setter
def node_class(self, value):
if self.frozen:
return
self.data_format.value = value
@property
def structure_node(self):
"""Returns AiiDA StructureData node."""
if self._structure_node is None:
if self.structure_ase is None:
return None
# perform conversion
if self.data_format.value == 'CifData':
from aiida.orm.nodes.data.cif import CifData
self._structure_node = CifData()
self._structure_node.set_ase(self.structure_ase)
else: # Target format is StructureData
self._structure_node = StructureData(ase=self.structure_ase)
self._structure_node.description = self.structure_description.value
self._structure_node.label = self.structure_ase.get_chemical_formula()
return self._structure_node
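# Illustrative usage sketch (not part of the original module). The importer classes
# referenced below are defined further down in this file, and the example file path
# is hypothetical.
def _example_structure_manager():
    """Build a StructureManagerWidget fed by two importers."""
    importers = [
        ("Upload", StructureUploadWidget()),
        ("Examples", StructureExamplesWidget(examples=[("Benzene", "examples/benzene.xyz")])),
    ]
    return StructureManagerWidget(importers, storable=True, node_class='StructureData')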
class StructureUploadWidget(ipw.VBox):
"""Class that allows to upload structures from user's computer."""
def __init__(self, text="Upload Structure"):
from fileupload import FileUploadWidget
self.on_structure_selection = lambda structure_ase, name: None
self.file_path = None
self.file_upload = FileUploadWidget(text)
supported_formats = ipw.HTML(
"""<a href="https://wiki.fysik.dtu.dk/ase/_modules/ase/io/formats.html" target="_blank">
Supported structure formats
</a>""")
self.file_upload.observe(self._on_file_upload, names='data')
super().__init__(children=[self.file_upload, supported_formats])
def _on_file_upload(self, change): # pylint: disable=unused-argument
"""When file upload button is pressed."""
self.file_path = os.path.join(tempfile.mkdtemp(), self.file_upload.filename)
with open(self.file_path, 'w') as fobj:
fobj.write(self.file_upload.data.decode("utf-8"))
structure_ase = get_ase_from_file(self.file_path)
self.on_structure_selection(structure_ase=structure_ase, name=self.file_upload.filename)
class StructureExamplesWidget(ipw.VBox):
"""Class to provide example structures for selection."""
def __init__(self, examples, **kwargs):
self.on_structure_selection = lambda structure_ase, name: None
self._select_structure = ipw.Dropdown(options=self.get_example_structures(examples))
self._select_structure.observe(self._on_select_structure, names=['value'])
super().__init__(children=[self._select_structure], **kwargs)
@staticmethod
def get_example_structures(examples):
"""Get the list of example structures."""
if not isinstance(examples, list):
raise ValueError("parameter examples should be of type list, {} given".format(type(examples)))
return [("Select structure", False)] + examples
def _on_select_structure(self, change): # pylint: disable=unused-argument
"""When structure is selected."""
if not self._select_structure.value:
return
structure_ase = get_ase_from_file(self._select_structure.value)
self.on_structure_selection(structure_ase=structure_ase, name=self._select_structure.label)
class StructureBrowserWidget(ipw.VBox):
"""Class to query for structures stored in the AiiDA database."""
def __init__(self):
# Find all process labels
qbuilder = QueryBuilder()
qbuilder.append(WorkChainNode, project="label")
qbuilder.order_by({WorkChainNode: {'ctime': 'desc'}})
process_labels = {i[0] for i in qbuilder.all() if i[0]}
layout = ipw.Layout(width="900px")
self.mode = ipw.RadioButtons(options=['all', 'uploaded', 'edited', 'calculated'],
layout=ipw.Layout(width="25%"))
# Date range
self.dt_now = datetime.datetime.now()
self.dt_end = self.dt_now - datetime.timedelta(days=10)
self.date_start = ipw.Text(value='', description='From: ', style={'description_width': '120px'})
self.date_end = ipw.Text(value='', description='To: ')
self.date_text = ipw.HTML(value='<p>Select the date range:</p>')
self.btn_date = ipw.Button(description='Search', layout={'margin': '1em 0 0 0'})
self.age_selection = ipw.VBox(
[self.date_text, ipw.HBox([self.date_start, self.date_end]), self.btn_date],
layout={
'border': '1px solid #fafafa',
'padding': '1em'
})
# Labels
self.drop_label = ipw.Dropdown(options=({'All'}.union(process_labels)),
value='All',
description='Process Label',
style={'description_width': '120px'},
layout={'width': '50%'})
self.btn_date.on_click(self.search)
self.mode.observe(self.search, names='value')
self.drop_label.observe(self.search, names='value')
h_line = ipw.HTML('<hr>')
box = ipw.VBox([self.age_selection, h_line, ipw.HBox([self.mode, self.drop_label])])
self.results = ipw.Dropdown(layout=layout)
self.results.observe(self._on_select_structure)
self.search()
super(StructureBrowserWidget, self).__init__([box, h_line, self.results])
@staticmethod
def preprocess():
"""Search structures in AiiDA database."""
queryb = QueryBuilder()
queryb.append(StructureData, filters={'extras': {'!has_key': 'formula'}})
for itm in queryb.all(): # iterall() would interfere with set_extra()
formula = itm[0].get_formula()
itm[0].set_extra("formula", formula)
def search(self, change=None): # pylint: disable=unused-argument
"""Launch the search of structures in AiiDA database."""
self.preprocess()
qbuild = QueryBuilder()
try: # If the date range is valid, use it for the search
self.start_date = datetime.datetime.strptime(self.date_start.value, '%Y-%m-%d')
self.end_date = datetime.datetime.strptime(self.date_end.value, '%Y-%m-%d') + datetime.timedelta(hours=24)
        except ValueError:  # Otherwise revert to the default range (i.e. the last 10 days)
self.start_date = self.dt_end
self.end_date = self.dt_now + datetime.timedelta(hours=24)
self.date_start.value = self.start_date.strftime('%Y-%m-%d')
self.date_end.value = self.end_date.strftime('%Y-%m-%d')
filters = {}
filters['ctime'] = {'and': [{'<=': self.end_date}, {'>': self.start_date}]}
if self.drop_label.value != 'All':
qbuild.append(WorkChainNode, filters={'label': self.drop_label.value})
# print(qbuild.all())
# qbuild.append(CalcJobNode, with_incoming=WorkChainNode)
qbuild.append(StructureData, with_incoming=WorkChainNode, filters=filters)
else:
if self.mode.value == "uploaded":
qbuild2 = QueryBuilder()
qbuild2.append(StructureData, project=["id"])
qbuild2.append(Node, with_outgoing=StructureData)
processed_nodes = [n[0] for n in qbuild2.all()]
if processed_nodes:
filters['id'] = {"!in": processed_nodes}
qbuild.append(StructureData, filters=filters)
elif self.mode.value == "calculated":
qbuild.append(CalcJobNode)
qbuild.append(StructureData, with_incoming=CalcJobNode, filters=filters)
elif self.mode.value == "edited":
qbuild.append(CalcFunctionNode)
qbuild.append(StructureData, with_incoming=CalcFunctionNode, filters=filters)
elif self.mode.value == "all":
qbuild.append(StructureData, filters=filters)
qbuild.order_by({StructureData: {'ctime': 'desc'}})
matches = {n[0] for n in qbuild.iterall()}
matches = sorted(matches, reverse=True, key=lambda n: n.ctime)
options = OrderedDict()
options["Select a Structure ({} found)".format(len(matches))] = False
for mch in matches:
label = "PK: %d" % mch.pk
label += " | " + mch.ctime.strftime("%Y-%m-%d %H:%M")
label += " | " + mch.get_extra("formula")
label += " | " + mch.description
options[label] = mch
self.results.options = options
def _on_select_structure(self, change): # pylint: disable=unused-argument
"""When a structure was selected."""
if not self.results.value:
return
structure_ase = self.results.value.get_ase()
formula = structure_ase.get_chemical_formula()
if self.on_structure_selection is not None:
self.on_structure_selection(structure_ase=structure_ase, name=formula)
def on_structure_selection(self, structure_ase, name):
pass
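# Illustrative sketch (not part of the original module): a caller typically overrides
# `on_structure_selection` to react to the structure chosen in the dropdown.
def _example_structure_browser(callback):
    """Return a StructureBrowserWidget that forwards selections to `callback`."""
    browser = StructureBrowserWidget()
    browser.on_structure_selection = callback  # called as callback(structure_ase=..., name=...)
    return browser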
class SmilesWidget(ipw.VBox):
    """Convert SMILES into 3D structure."""
SPINNER = """<i class="fa fa-spinner fa-pulse" style="color:red;" ></i>"""
def __init__(self):
try:
import openbabel # pylint: disable=unused-import
except ImportError:
super().__init__(
[ipw.HTML("The SmilesWidget requires the OpenBabel library, "
"but the library was not found.")])
return
self.smiles = ipw.Text()
self.create_structure_btn = ipw.Button(description="Generate molecule", button_style='info')
self.create_structure_btn.on_click(self._on_button_pressed)
self.output = ipw.HTML("")
super().__init__([self.smiles, self.create_structure_btn, self.output])
@staticmethod
def pymol_2_ase(pymol):
"""Convert pymol object into ASE Atoms."""
import numpy as np
from ase import Atoms, Atom
from ase.data import chemical_symbols
asemol = Atoms()
for atm in pymol.atoms:
asemol.append(Atom(chemical_symbols[atm.atomicnum], atm.coords))
asemol.cell = np.amax(asemol.positions, axis=0) - np.amin(asemol.positions, axis=0) + [10] * 3
asemol.pbc = True
asemol.center()
return asemol
def _optimize_mol(self, mol):
"""Optimize a molecule using force field (needed for complex SMILES)."""
# Note, the pybel module imported below comes together with openbabel package. Do not confuse it with
# pybel package available on PyPi: https://pypi.org/project/pybel/
import pybel # pylint:disable=import-error
self.output.value = "Screening possible conformers {}".format(self.SPINNER) #font-size:20em;
f_f = pybel._forcefields["mmff94"] # pylint: disable=protected-access
if not f_f.Setup(mol.OBMol):
f_f = pybel._forcefields["uff"] # pylint: disable=protected-access
if not f_f.Setup(mol.OBMol):
self.output.value = "Cannot set up forcefield"
return
# initial cleanup before the weighted search
f_f.SteepestDescent(5500, 1.0e-9)
f_f.WeightedRotorSearch(15000, 500)
f_f.ConjugateGradients(6500, 1.0e-10)
f_f.GetCoordinates(mol.OBMol)
self.output.value = ""
def _on_button_pressed(self, change): # pylint: disable=unused-argument
"""Convert SMILES to ase structure when button is pressed."""
self.output.value = ""
# Note, the pybel module imported below comes together with openbabel package. Do not confuse it with
# pybel package available on PyPi: https://pypi.org/project/pybel/
import pybel # pylint:disable=import-error
if not self.smiles.value:
return
mol = pybel.readstring("smi", self.smiles.value)
self.output.value = """SMILES to 3D conversion {}""".format(self.SPINNER)
mol.make3D()
pybel._builder.Build(mol.OBMol) # pylint: disable=protected-access
mol.addh()
self._optimize_mol(mol)
structure_ase = self.pymol_2_ase(mol)
formula = structure_ase.get_chemical_formula()
if self.on_structure_selection is not None:
self.on_structure_selection(structure_ase=structure_ase, name=formula)
def on_structure_selection(self, structure_ase, name):
pass
import numpy as np
from scipy.stats import mode
from numpy.linalg import norm
from pysmiles import read_smiles,write_smiles
from rdkit.Chem.rdmolfiles import MolFromSmiles,MolToMolFile
import networkx as nx
import math
from ase import Atoms
from ase.visualize import view
from IPython.display import display, clear_output
import ipywidgets as ipw
import nglview
from ase.data import covalent_radii
from ase.neighborlist import NeighborList
import ase.neighborlist
class SmilesWidget(ipw.VBox):
    """Convert SMILES into 3D structure."""
SPINNER = """<i class="fa fa-spinner fa-pulse" style="color:red;" ></i>"""
def __init__(self):
try:
import openbabel # pylint: disable=unused-import
except ImportError:
super().__init__(
[ipw.HTML("The SmilesWidget requires the OpenBabel library, "
"but the library was not found.")])
return
self.selection = set()
self.cell_ready = False
self.smiles = ipw.Text()
self.create_structure_btn = ipw.Button(description="Convert SMILES", button_style='info')
self.create_structure_btn.on_click(self._on_button_pressed)
self.create_cell_btn = ipw.Button(description="create GNR", button_style='info')
self.create_cell_btn.on_click(self._on_button2_pressed)
self.viewer = nglview.NGLWidget()
self.viewer.observe(self._on_picked, names='picked')
self.output = ipw.HTML("")
self.picked_out = ipw.Output()
self.button2_out = ipw.Output()
        super().__init__([self.smiles, self.create_structure_btn, self.viewer, self.picked_out, self.output, self.button2_out])
########
@staticmethod
def guess_scaling_factor(atoms):
import numpy as np
# set bounding box as cell
cx = 1.5 * (np.amax(atoms.positions[:,0]) - np.amin(atoms.positions[:,0]))
cy = 1.5 * (np.amax(atoms.positions[:,1]) - np.amin(atoms.positions[:,1]))
cz = 15.0
atoms.cell = (cx, cy, cz)
atoms.pbc = (True,True,True)
# calculate all atom-atom distances
c_atoms = [a for a in atoms if a.symbol[0]=="C"]
n = len(c_atoms)
dists = np.zeros([n,n])
for i, a in enumerate(c_atoms):
for j, b in enumerate(c_atoms):
dists[i,j] = norm(a.position - b.position)
# find bond distances to closest neighbor
dists += np.diag([np.inf]*n) # don't consider diagonal
bonds = np.amin(dists, axis=1)
# average bond distance
avg_bond = float(mode(bonds)[0])
# scale box to match equilibrium carbon-carbon bond distance
cc_eq = 1.4313333333
s = cc_eq / avg_bond
return s
@staticmethod
def scale(atoms, s):
cx, cy, cz = atoms.cell
atoms.set_cell((s*cx, s*cy, cz), scale_atoms=True)
atoms.center()
return atoms
@staticmethod
def smiles2D(smiles):
mol = MolFromSmiles(smiles)
from rdkit.Chem import AllChem
# generate the 2D coordinates
AllChem.Compute2DCoords(mol)
# get the 2D coordinates
for c in mol.GetConformers():
coords=c.GetPositions()
# get the atom labels
ll=[]
for i in mol.GetAtoms():
#ll.append(i.GetSymbol())
ll.append(i.GetAtomicNum())
ll= | np.asarray(ll) | numpy.asarray |
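# Illustrative sketch (not part of the original source): build a rough hexagonal carbon
# ring with a deliberately wrong C-C distance and rescale it with the two static
# methods above; the equilibrium bond length they target is ~1.43 Angstrom.
def _example_rescale_ring(bond=1.6):
    angles = np.linspace(0.0, 2.0 * np.pi, 6, endpoint=False)
    radius = bond / (2.0 * np.sin(np.pi / 6.0))  # circumradius of a regular hexagon equals its side length
    positions = [(radius * np.cos(a), radius * np.sin(a), 0.0) for a in angles]
    ring = Atoms("C6", positions=positions)
    s = SmilesWidget.guess_scaling_factor(ring)
    return SmilesWidget.scale(ring, s)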
# -*- coding: utf-8 -*-
'''
Implementation of Dynamical Motor Primitives (DMPs) for multi-dimensional
trajectories.
'''
import numpy as np
from dmp_1 import DMP
class mDMP(object):
'''
Implementation of a Multi DMP (mDMP) as composition of several
Single DMPs (sDMP). This type of DMP is used with multi-dimensional
trajectories.
'''
def __init__(self, dim=1, nbfs=100):
'''
dim int: number of coordinates of the trajectory >= 1.
nbfs int: number of basis functions per sDMP >= 0.
'''
self.dmps = [DMP(nbfs) for _ in xrange(dim)]
self.dim = dim
self.nbfs = nbfs
self.ns = 0
def _weights(self):
W = np.zeros((self.dim, self.nbfs))
for sdx in xrange(self.dim):
sdmp = self.dmps[sdx]
W[sdx,:] = np.array(np.squeeze(sdmp.ff.weights))
return W.T
def _fs(self):
Fd = np.zeros((self.dim, self.ns))
Fp = np.zeros((self.dim, self.ns))
for sdx in xrange(self.dim):
sdmp = self.dmps[sdx]
Fd[sdx,:] = np.array(np.squeeze(sdmp.ff.Fd))
# TODO: Next line is patch as response time has 1 extra sample.
time = sdmp.responseTime
Fp[sdx,:] = np.array(np.squeeze(sdmp.ff.responseToTimeArray(time)))
return Fd.T, Fp.T
def learnFromDemo(self, trajectory, time):
'''
trajectory np.array([]): trajectory example (NxM).
time np.array([]): time of the trajectory (NxM).
'''
for sdx in xrange(self.dim):
sdmp = self.dmps[sdx]
sdmp.learn(trajectory[sdx,:], time)
self.ns = self.dmps[0].ff.ns
self.W = self._weights()
def planNewTrajectory(self, start, goal, time):
'''
        start float: start position of the new trajectory.
        goal float: end position of the new trajectory.
time float: time to execute the new trajectory.
'''
ns = int(time/self.dmps[0].stepTime)
pos = np.zeros((self.dim, ns))
vel = np.zeros((self.dim, ns))
acc = np.zeros((self.dim, ns))
for sdx in xrange(self.dim):
sdmp = self.dmps[sdx]
sdmp.setup(start[sdx], goal[sdx])
sdmp.plan(time)
# TODO: Next line is patch as response time has 1 extra sample.
pos[sdx,:] = np.array( | np.squeeze(sdmp.responsePos[1:]) | numpy.squeeze |
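# Illustrative sketch (not part of the original source): learn a two-dimensional DMP
# from a demonstrated trajectory sampled on a regular time grid, then re-plan it
# between new start and goal points. Arrays follow the (dim, samples) layout used above.
def _example_mdmp():
    t = np.linspace(0.0, 1.0, 200)
    demo = np.vstack((np.sin(2.0 * np.pi * t), np.cos(2.0 * np.pi * t)))  # shape (2, 200)
    dmp = mDMP(dim=2, nbfs=50)
    dmp.learnFromDemo(demo, t)
    return dmp.planNewTrajectory(start=[0.0, 1.0], goal=[0.5, -0.5], time=1.0)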
# <NAME> 2017
# GMM implementation I made for a computer vision course during my honours degree at Wits
import numpy as np
from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
# These are functions which can be run on GMMs
class fn():
def zero_init(data, K):
lambda_vect = np.full((K), 1.0/K)
# init randomly between (0,1]
# positive semi-def but already is
# sigma_vect = np.full((K), np.var(data)) # diagonal
sigma_list = []
mean_list = []
for k in range(K):
mean = (1.-0.)*np.random.random_sample((data.shape[1])) + 0.
mean_list.append(mean)
sig = (1.0-0.001)*np.random.random_sample((data.shape[1],data.shape[1])) + 0.001
sig = np.dot(sig, sig.T)
sig = np.diag(np.diag(sig))
sigma_list.append(sig)
sigma = np.array(sigma_list)
mean_vect = | np.array(mean_list) | numpy.array |
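# Illustrative note (not part of the original source): for data of shape (N, d) and K
# mixture components, the initializer above builds uniform mixing weights of shape
# (K,), random means of shape (K, d) and random diagonal covariance matrices of shape
# (K, d, d). A call would look roughly like
#
#     params = fn.zero_init(np.random.rand(500, 2), K=3)
#
# (the exact return value is not visible in the excerpt above).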
# -*- coding: utf-8 -*-
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms, models
def model():
# get the "features" portion of VGG19 (we will not need the "classifier" portion)
vgg = models.vgg19(pretrained=True).features
# freeze all VGG parameters since we're only optimizing the target image
for param in vgg.parameters():
param.requires_grad_(False)
return vgg
def load_image(img_path, max_size=400, shape=None):
''' Load in and transform an image, making sure the image
is <= 400 pixels in the x-y dims.'''
image = Image.open(img_path).convert('RGB')
# large images will slow down processing
if max(image.size) > max_size:
size = max_size
else:
size = max(image.size)
if shape is not None:
size = shape
in_transform = transforms.Compose([
transforms.Resize(size),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# discard the transparent, alpha channel (that's the :3) and add the batch dimension
image = in_transform(image)[:3,:,:].unsqueeze(0)
return image
# helper function for un-normalizing an image
# and converting it from a Tensor image to a NumPy image for display
def im_convert(tensor):
""" Display a tensor as an image. """
image = tensor.to("cpu").clone().detach()
image = image.numpy().squeeze()
image = image.transpose(1,2,0)
image = image * np.array((0.229, 0.224, 0.225)) + | np.array((0.485, 0.456, 0.406)) | numpy.array |
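# Illustrative sketch (not part of the original source): the image paths below are
# placeholders. Load the frozen VGG19 feature extractor, read a content and a style
# image (the style image is resized to the content image's spatial size), and show
# the content image via the tensor-to-numpy helper above.
def _example_style_transfer_setup(content_path="content.jpg", style_path="style.jpg"):
    vgg = model()
    content = load_image(content_path)
    style = load_image(style_path, shape=content.shape[-2:])
    plt.imshow(im_convert(content))
    return vgg, content, style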
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigsh, inv
from numpy.linalg import norm, eig
from scipy.linalg import eigh
from itertools import product
from typing import Union
class framework(object):
"""Base class for a framework
A framework at a minimum needs an array of coordinates for vertices/sites and
a list of edges/bonds where each element represents an bond. Additional
information such as boundary conditions, additional constriants
such pinning specific vertices, spring constants, etc. are optional.
For the complete list of the variables, see the following.
Args:
coordinates (Union[np.array, list]): Vertex/site coordinates. For
``N`` sites in ``d`` dimensions, the shape is ``(N,d)``.
bonds (Union[np.array, list]): Edge list. For ``M`` bonds, its
shape is ``(M,2)``.
        basis (Union[np.array, list], optional): List of basis/repeat/lattice
            vectors. If ``None`` or an array of zero vectors, the system is
            assumed to be finite. Defaults to ``None``.
pins (Union[np.array, list], optional): array/list of int,
List of sites to be immobilized. Defaults to None.
k (Union[np.array, float], optional): Spring constant/stiffness.
If an array is supplied, the shape should be ``(M,2)``.
Defaults to 1.0.
restLengths (Union[np.array, list, float], optional): Equilibrium
or rest length of bonds, used for systems with pre-stress.
Defaults to None.
varcell (Union[np.array, list], optional): (d*d,) array of booleans/int
A list of basis vector components allowed to change (1/True) or
fixed (0/False). Example: ``[0,1,0,0]`` or ``[False, True, False, False]``
both mean that in two dimensions, only second element of first
            basis vector is allowed to change. Defaults to None.
power (int, optional): Power of potential energy.
power=2 is Hookean, power=5/2 is Hertzian. For
non-Hookean potentials, make sure to supply restLengths non-equal to the
current length of the bonds, otherwise the calculations will be wrong.
Defaults to 2.
Raises:
ValueError: The bond list should have two columns corresponding to
two ends of bonds.
Examples:
```python
>>> import numpy as np
>>> import rigidpy as rp
>>> coordinates = np.array([[-1,0], [1,0], [0,1]])
>>> bonds = np.array([[0,1],[1,2],[0,2]])
>>> basis = [[0,0],[0,0]]
>>> pins = [0]
>>> F = rp.Framework(coordinates, bonds, basis=basis, pins=pins)
>>> print ("rigidity matrix:\n",F.RigidityMatrix())
>>> eigvals, eigvecs = F.eigenspace(eigvals=(0,3))
>>> print("vibrational eigenvalues:\n",eigvals)
"""
def __init__(
self,
coordinates: Union[np.array, list],
bonds: Union[np.array, list],
basis: Union[np.array, list] = None,
pins: Union[np.array, list] = None,
k: Union[np.array, float] = 1.0,
restLengths: Union[np.array, list, float] = None,
# mass=1,
varcell: Union[np.array, list] = None,
power: int = 2,
):
# Number of sites and spatial dimensions
self.coordinates = np.array(coordinates)
self.N, self.dim = self.coordinates.shape
# Number of bonds and bond list
self.bonds = np.array(bonds)
self.NB, self.C = self.bonds.shape
# Basis vectors and their norms
        if basis is None:
            basis = np.zeros(shape=(self.dim, self.dim))
            self.boundary = "free"
        else:
            self.boundary = "periodic"
        self.basis = basis
self.nbasis = nbasis = len(basis) # number of basis vectors
self.basisNorm = np.array([norm(base) for base in basis])
self.cutoff = 0.5 * np.amin(self.basisNorm)
if pins is None:
self.pins = []
else:
self.pins = pins
# update the boundary conditions
if self.boundary == "periodic":
self.boundary = "periodic+pinned"
else:
self.boundary = "anchored"
# index of pinned and non-pinned sites in rigidity matrix
dim_idx = np.arange(0, self.dim)
if len(self.pins) != 0:
self.pins_idx = [pin * self.dim + dim_idx for pin in self.pins]
self.keepIdx = np.setdiff1d(np.arange(0, self.dim * self.N), self.pins_idx)
# whether cell can deform
self.varcell = varcell
# volume of the box/cell
if nbasis == 1:
# in case system is 1D
self.volume = self.basisNorm
else:
volume = np.abs(np.product(eig(basis)[0]))
if volume:
self.volume = volume
else:
self.volume = 1
# froce constant matrix
if isinstance(k, (int, float)):
self.K = np.diagflat(k * np.ones(self.NB))
else:
self.K = np.diagflat(k)
self.KS = csr_matrix(self.K) # sparse spring constants
# Cartesian product for periodic box
regionIndex = np.array(list(product([-1, 0, 1], repeat=nbasis)))
transVectors = np.einsum("ij,jk->ik", regionIndex, basis)
# Identify long bonds
if self.C not in [2, 3]:
raise ValueError("Second dimension should be 2 or 3.")
elif self.C == 2:
# vector from node i to node j if bond is (i,j)
dr = -np.diff(coordinates[bonds[:, 0:2]], axis=1).reshape(-1, self.dim)
# length of dr
lengths = norm(dr, axis=1)
# which bonds are long == cross the boundary
self.indexLong = indexLong = np.nonzero(lengths > self.cutoff)[0]
# two ends of long bonds
longBonds = bonds[indexLong]
# index of neiboring boxes for long bonds only
index = [
np.argmin(norm(item[0] - item[1] - transVectors, axis=1))
for item in coordinates[longBonds]
]
dr[indexLong] -= transVectors[index]
# negihbor is in which neighboring box
self.mn = regionIndex[index]
# correct vector from particle 1 to particle 2
self.dr = dr
else:
pass
# which bonds are long == cross the boundary
# indexLong = np.nonzero(lengths > self.cutoff)
# feature for future release
# Equilibrium or rest length of springs
if restLengths is None:
self.L0 = norm(self.dr, axis=1)
else:
self.L0 = restLengths
# Tension spring stiffness
        # by convention: compression has positive tension
seperation_norm = self.L0 - norm(self.dr, axis=1)
self.tension = np.dot(self.K, seperation_norm ** (power - 1))
self.KP = np.diag(self.tension / norm(self.dr, axis=1))
### effective stiffness to use in non-Hookean cases
if power == 2:
self.Ke = self.K
else:
self.Ke = np.diag(np.dot(self.K, seperation_norm ** (power - 2)))
def edgeLengths(self) -> np.ndarray:
"""Compute the length of all bonds.
Returns:
            np.array: bond lengths
"""
return norm(self.dr, axis=1)
def rigidityMatrixGeometric(self) -> np.ndarray:
"""Calculate rigidity matrix of the graph.
Elements are normalized position difference of connected
coordinates.
Returns:
np.ndarray: rigidity matrix.
"""
N, M = self.N, self.NB
L = norm(self.dr, axis=1)
drNorm = L[:, np.newaxis]
dr = self.dr / drNorm # normalized dr
# find row and col for non zero values
row = np.repeat(np.arange(M), 2)
col = self.bonds.reshape(-1)
val = np.column_stack((dr, -dr)).reshape(-1, self.dim)
R = np.zeros([M, N, self.dim])
R[row, col] = val
R = R.reshape(M, -1)
if self.varcell is not None:
conditions = np.array(self.varcell, dtype=bool)
# cellDim = np.sum(conditions!=0)
# RCell = np.zeros([M,cellDim])
# print(RCell)
rCell = np.zeros([M, self.nbasis, self.dim])
valsCell = np.einsum("ij,ik->ijk", self.mn, dr[self.indexLong])
rCell[self.indexLong] = valsCell
rCell = rCell.reshape(M, -1)
# select only specified components
rCell = rCell[:, conditions]
R = np.append(R, rCell, axis=1)
if len(self.pins) != 0:
return R[:, self.keepIdx]
return R
def rigidityMatrixAxis(self, i: int) -> np.ndarray:
"""Calculate rigidity matrix of the graph along an axis
in d dimensions. Elements are unit vectors.
Args:
i (int): index of dimensions
Returns:
np.ndarray: rigidity matrix
"""
N, M = self.N, self.NB
row = np.repeat(np.arange(M), 2)
col = self.bonds.reshape(-1)
dr = np.repeat([np.eye(self.dim)[i]], M, axis=0)
val = np.column_stack((dr, -dr)).reshape(-1, self.dim)
R = np.zeros([M, N, self.dim])
R[row, col] = val
R = R.reshape(M, -1)
if self.varcell is not None:
conditions = np.array(self.varcell, dtype=bool)
rCell = np.zeros([M, self.nbasis, self.dim])
valsCell = np.einsum("ij,ik->ijk", self.mn, dr[self.indexLong])
rCell[self.indexLong] = valsCell
rCell = rCell.reshape(M, -1)
# select only specified components
rCell = rCell[:, conditions]
R = np.append(R, rCell, axis=1)
if len(self.pins) != 0:
return R[:, self.keepIdx]
return R
def rigidityMatrix(self) -> np.ndarray:
"""Calculate rigidity matrix of the graph. For now, it simply returns
geometric rigidity matrix.
Returns:
np.ndarray: rigidity matrix
Todo:
Update the function to include the second-order term, if required.
"""
R = self.rigidityMatrixGeometric()
return R
def hessianMatrixGeometric(self) -> np.ndarray:
"""calculate geometric Hessian."""
R = self.rigidityMatrixGeometric()
H = np.dot(R.T, | np.dot(self.Ke, R) | numpy.dot |
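# Illustrative sketch (not part of the original source): for the pinned triangle from
# the class docstring, the rigidity matrix has one row per bond and one column per
# free degree of freedom, and the geometric Hessian assembled above is R^T K_e R.
def _example_rigidity_shapes():
    coordinates = np.array([[-1, 0], [1, 0], [0, 1]])
    bonds = np.array([[0, 1], [1, 2], [0, 2]])
    F = framework(coordinates, bonds, pins=[0])
    R = F.rigidityMatrix()
    return R.shape  # (number of bonds, dim * number of unpinned sites)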
import astropy.units as u
import numpy as np
class Cartesian:
"""
Class for Cartesian Coordinates and related transformations.
"""
@u.quantity_input(x=u.km, y=u.km, z=u.km)
def __init__(self, x, y, z):
"""
Constructor.
Parameters
----------
x : ~astropy.units.quantity.Quantity
y : ~astropy.units.quantity.Quantity
z : ~astropy.units.quantity.Qauntity
"""
self.x = x
self.y = y
self.z = z
self.system = "Cartesian"
def __repr__(self):
return "Cartesian x: {}, y: {}, z: {}".format(self.x, self.y, self.z)
def __str__(self):
return self.__repr__()
def si_values(self):
"""
Function for returning values in SI units.
Returns
-------
~numpy.ndarray
Array containing values in SI units (m, m, m)
"""
element_list = [self.x.to(u.m), self.y.to(u.m), self.z.to(u.m)]
return np.array([e.value for e in element_list], dtype=float)
def norm(self):
"""
Function for finding euclidean norm of a vector.
Returns
-------
~astropy.units.quantity.Quantity
Euclidean norm with units.
"""
return np.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def dot(self, target):
"""
Dot product of two vectors.
Parameters
----------
        target: ~einsteinpy.coordinates.core.Cartesian
Returns
-------
~astropy.units.quantity.Quantity
Dot product with units
"""
x = self.x * target.x
y = self.y * target.y
z = self.z * target.z
return x + y + z
def to_spherical(self):
"""
Method for conversion to spherical coordinates.
Returns
-------
~einsteinpy.coordinates.core.Spherical
Spherical representation of the Cartesian Coordinates.
"""
r = self.norm()
theta = np.arccos(self.z / r)
phi = np.arctan2(self.y, self.x)
return Spherical(r, theta, phi)
@u.quantity_input(a=u.km)
def to_bl(self, a):
"""
Method for conversion to boyer-lindquist coordinates.
Parameters
----------
a : ~astropy.units.quantity.Quantity
a = J/Mc , the angular momentum per unit mass of the black hole per speed of light.
Returns
-------
~einsteinpy.coordinates.core.BoyerLindquist
BL representation of the Cartesian Coordinates.
"""
w = self.norm() ** 2 - a ** 2
r = np.sqrt(0.5 * (w + np.sqrt((w ** 2) + (4 * (a ** 2) * (self.z ** 2)))))
theta = np.arccos(self.z / r)
phi = np.arctan2(self.y, self.x)
return BoyerLindquist(r, theta, phi, a)
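# Illustrative sketch (not part of the original source): construct a Cartesian point
# with astropy units and round-trip it through the conversions defined above; the
# numbers, including the spin parameter `a`, are arbitrary.
def _example_cartesian_conversions():
    point = Cartesian(1.0 * u.km, 2.0 * u.km, 3.0 * u.km)
    sph = point.to_spherical()       # Spherical(r, theta, phi)
    bl = point.to_bl(a=0.5 * u.km)   # BoyerLindquist(r, theta, phi, a)
    return point.norm(), sph, bl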
class Spherical:
"""
Class for Spherical Coordinates and related transformations.
"""
@u.quantity_input(r=u.km, theta=u.rad, phi=u.rad)
def __init__(self, r, theta, phi):
"""
Constructor.
Parameters
----------
r : ~astropy.units.quantity.Quantity
theta : ~astropy.units.quantity.Quantity
phi : ~astropy.units.quantity.Quantity
"""
self.r = r
self.theta = theta
self.phi = phi
self.system = "Spherical"
def __repr__(self):
return "Spherical r: {}, theta: {}, phi: {}".format(
self.r, self.theta, self.phi
)
def __str__(self):
return self.__repr__()
def si_values(self):
"""
Function for returning values in SI units.
Returns
-------
~numpy.ndarray
Array containing values in SI units (m, rad, rad)
"""
element_list = [self.r.to(u.m), self.theta.to(u.rad), self.phi.to(u.rad)]
return np.array([e.value for e in element_list], dtype=float)
def to_cartesian(self):
"""
Method for conversion to cartesian coordinates.
Returns
-------
~einsteinpy.coordinates.core.Cartesian
Cartesian representation of the Spherical Coordinates.
"""
x = self.r * np.cos(self.phi) * np.sin(self.theta)
y = self.r * np.sin(self.phi) * np.sin(self.theta)
z = self.r * np.cos(self.theta)
return Cartesian(x, y, z)
@u.quantity_input(a=u.km)
def to_bl(self, a):
"""
Method for conversion to boyer-lindquist coordinates.
Parameters
----------
a : ~astropy.units.quantity.Quantity
a = J/Mc , the angular momentum per unit mass of the black hole per speed of light.
Returns
-------
~einsteinpy.coordinates.core.BoyerLindquist
BL representation of the Spherical Coordinates.
"""
cart = self.to_cartesian()
return cart.to_bl(a)
class BoyerLindquist:
"""
    Class for Boyer-Lindquist Coordinates and related transformations.
"""
@u.quantity_input(r=u.km, theta=u.rad, phi=u.rad, a=u.km)
def __init__(self, r, theta, phi, a):
"""
Constructor.
Parameters
----------
r : ~astropy.units.quantity.Quantity
theta : ~astropy.units.quantity.Quantity
phi : ~astropy.units.quantity.Quantity
a : ~astropy.units.quantity.Quantity
"""
self.r = r
self.theta = theta
self.phi = phi
self.a = a
self.system = "BoyerLindquist"
def __repr__(self):
return "Boyer-Lindquist r: {}, theta: {}, phi: {} | a: {}".format(
self.r, self.theta, self.phi, self.a
)
def __str__(self):
return self.__repr__()
def si_values(self):
"""
Function for returning values in SI units.
Returns
-------
~numpy.ndarray
Array containing values in SI units (m, rad, rad)
"""
element_list = [self.r.to(u.m), self.theta.to(u.rad), self.phi.to(u.rad)]
return np.array([e.value for e in element_list], dtype=float)
def to_cartesian(self):
"""
Method for conversion to cartesian coordinates.
Returns
-------
~einsteinpy.coordinates.core.Cartesian
Cartesian representation of the BL Coordinates.
"""
sin_norm = np.sqrt(self.r ** 2 + self.a ** 2) * np.sin(self.theta)
x = sin_norm * np.cos(self.phi)
y = sin_norm * np.sin(self.phi)
z = self.r * | np.cos(self.theta) | numpy.cos |