| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
"""
Parsers for several prediction tool outputs.
"""
import numpy as np
max_solvent_acc = {'A': 106.0, 'C': 135.0, 'D': 163.0,
'E': 194.0, 'F': 197.0, 'G': 84.0,
'H': 184.0, 'I': 169.0, 'K': 205.0,
'L': 164.0, 'M': 188.0, 'N': 157.0,
'P': 136.0, 'Q': 198.0, 'R': 248.0,
'S': 130.0, 'T': 142.0, 'V': 142.0,
'W': 227.0, 'Y': 222.0}
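# (Presumed use of the table above, not shown in this file: dividing an absolute
# solvent accessibility value by the residue's maximum yields relative solvent
# accessibility in [0, 1], e.g. rel_acc = acc / max_solvent_acc['A'].)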
def scampi(infile, sequence):
"""Parses the scampi output file.
Parameters
----------
infile : str
Scampi file.
sequence : SeqRecord
SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns
-------
NumPy array.
"""
aa2topo = {
'I': [1, 0, 0, 0],
'M': [0, 1, 0, 0],
'O': [0, 0, 1, 0]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if not line.startswith('>'):
for aa in line.strip():
result.append(aa2topo[aa])
return np.array([result])
def psipred(infile, sequence):
"""Parses the PSIPRED .horiz output file.
Parameters
----------
infile : str
PSIPRED .horiz file.
sequence : SeqRecord
SeqRecord object or any other object whose __len__ method
returns the length of the sequence.
Returns
-------
NumPy array.
"""
aa2sec = {
'H': [1, 0, 0],
'E': [0, 1, 0],
'C': [0, 0, 1]
}
result = []
with open(infile, 'r') as fh:
for line in fh:
if line.startswith('Pred:'):
spl = line.strip().split(' ')
if len(spl) < 2:
continue
for aa in spl[1]:
result.append(aa2sec[aa])
return np.array([result])
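# Example usage (a sketch with hypothetical file names; any object whose __len__
# gives the sequence length can stand in for the SeqRecord):
# topology = scampi('query.scampi.out', seq)    # array of shape (1, len(seq), 4)
# secstruct = psipred('query.horiz', seq)       # array of shape (1, len(seq), 3)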
import numpy as np
from gym.spaces import Box
import pyflex
from softgym.envs.fluid_env import FluidEnv
import copy
from softgym.utils.misc import rotate_rigid_object, quatFromAxisAngle
from shapely.geometry import Polygon
import random, math
class PourWaterPosControlEnv(FluidEnv):
def __init__(self, observation_mode, action_mode,
config=None, cached_states_path='pour_water_init_states.pkl', **kwargs):
'''
This class implements a pouring water task.
observation_mode: "cam_rgb", "point_cloud", or "key_point"
action_mode: "rotation_bottom" or "rotation_top"
'''
assert observation_mode in ['cam_rgb', 'point_cloud', 'key_point']
assert action_mode in ['rotation_bottom', 'rotation_top']
if action_mode == 'rotation_top':
cached_states_path = 'pour_water_init_states_top.pkl'
self.observation_mode = observation_mode
self.action_mode = action_mode
self.wall_num = 5 # number of glass walls. floor/left/right/front/back
super().__init__(**kwargs)
self.get_cached_configs_and_states(cached_states_path, self.num_variations)
if observation_mode in ['point_cloud', 'key_point']:
if observation_mode == 'key_point':
obs_dim = 0
obs_dim += 13 # Pos (x, z, theta) and shape (w, h, l) of the two cups and the water height.
else:
max_particle_num = 13 * 13 * 13 * 4
obs_dim = max_particle_num * 3
self.particle_obs_dim = obs_dim
# z and theta of the second cup (poured_glass) do not change and thus are omitted.
# add: frac of water in control cup, frac of water in target cup
self.observation_space = Box(low=np.array([-np.inf] * obs_dim), high=np.array([np.inf] * obs_dim), dtype=np.float32)
elif observation_mode == 'cam_rgb':
self.observation_space = Box(low=-np.inf, high=np.inf, shape=(self.camera_height, self.camera_width, 3),
dtype=np.float32)
default_config = self.get_default_config()
border = default_config['glass']['border']
if action_mode in ["rotation_bottom", "rotation_top"]:
self.action_direct_dim = 3
# control the (x, y) coordinate of the floor center, and theta, its rotation angle.
action_low = np.array([-0.01, -0.01, -0.015])
action_high = np.array([0.01, 0.01, 0.015])
self.action_space = Box(action_low, action_high, dtype=np.float32)
else:
raise NotImplementedError
self.prev_reward = 0
self.reward_min = 0
self.reward_max = 1
self.reward_range = self.reward_max - self.reward_min
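# A minimal construction sketch (assumed keyword arguments; extras such as
# num_variations and the camera size are expected to be consumed by the FluidEnv
# base class via **kwargs):
# env = PourWaterPosControlEnv(observation_mode='key_point', action_mode='rotation_bottom')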
def get_default_config(self):
config = {
'fluid': {
'radius': 0.033,
'rest_dis_coef': 0.55,
'cohesion': 0.1, # not actually used, instead, is computed as viscosity * 0.01
'viscosity': 2,
'surfaceTension': 0,
'adhesion': 0.0, # not actually used, instead, is computed as viscosity * 0.001
'vorticityConfinement': 40,
'solidpressure': 0.,
'dim_x': 8,
'dim_y': 18,
'dim_z': 8,
},
'glass': {
'border': 0.045,
'height': 0.6,
'glass_distance': 1.0,
'poured_border': 0.04,
'poured_height': 0.6,
},
'camera_name': 'default_camera',
}
return config
def generate_env_variation(self, num_variations=5, config=None, **kwargs):
dim_xs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
dim_zs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cached_configs = []
self.cached_init_states = []
if config is None:
config = self.get_default_config()
config_variations = [copy.deepcopy(config) for _ in range(num_variations)]
for idx in range(num_variations):
print("pour water generate env variations {}".format(idx))
dim_x = random.choice(dim_xs)
dim_z = random.choice(dim_zs)
m = min(dim_x, dim_z)
p = np.random.rand()
water_radius = config['fluid']['radius'] * config['fluid']['rest_dis_coef']
if p < 0.5: # medium water volume
print("generate env variation: medium volume water")
dim_y = int(3.5 * m)
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 2
glass_height = h + (np.random.rand() - 0.5) * 0.001 + config['glass']['border']
else:
print("generate env variation: large volume water")
dim_y = 4 * m
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 3
glass_height = h + (m + np.random.rand()) * 0.001 + config['glass']['border']
config_variations[idx]['fluid']['dim_x'] = dim_x
config_variations[idx]['fluid']['dim_y'] = dim_y
config_variations[idx]['fluid']['dim_z'] = dim_z
# if you want to change viscosity also, uncomment this
# config_variations[idx]['fluid']['viscosity'] = self.rand_float(2.0, 10.0)
config_variations[idx]['glass']['height'] = glass_height
config_variations[idx]['glass']['poured_height'] = glass_height + np.random.rand() * 0.1
config_variations[idx]['glass']['glass_distance'] = self.rand_float(0.05 * m, 0.09 * m) + (dim_x + 4) * water_radius / 2.
config_variations[idx]['glass']['poured_border'] = 0.03
self.set_scene(config_variations[idx])
init_state = copy.deepcopy(self.get_state())
self.cached_configs.append(config_variations[idx])
self.cached_init_states.append(init_state)
combined = [self.cached_configs, self.cached_init_states]
return self.cached_configs, self.cached_init_states
def get_config(self):
if self.deterministic:
config_idx = 0
else:
config_idx = np.random.randint(len(self.config_variations))
self.config = self.config_variations[config_idx]
return self.config
def _reset(self):
'''
Reset the environment to the initial state.
Return the initial observation.
'''
self.inner_step = 0
self.performance_init = None
info = self._get_info()
self.performance_init = info['performance']
pyflex.step(render=True)
return self._get_obs()
def get_state(self):
'''
get the positions and velocities of the flex particles, and the positions of the flex shapes.
'''
particle_pos = pyflex.get_positions()
particle_vel = pyflex.get_velocities()
shape_position = pyflex.get_shape_states()
return {'particle_pos': particle_pos, 'particle_vel': particle_vel, 'shape_pos': shape_position,
'glass_x': self.glass_x, 'glass_y': self.glass_y, 'glass_rotation': self.glass_rotation,
'glass_states': self.glass_states, 'poured_glass_states': self.poured_glass_states,
'glass_params': self.glass_params, 'config_id': self.current_config_id}
def set_state(self, state_dic):
'''
set the positions and velocities of the flex particles, and the positions of the flex shapes.
'''
pyflex.set_positions(state_dic["particle_pos"])
pyflex.set_velocities(state_dic["particle_vel"])
pyflex.set_shape_states(state_dic["shape_pos"])
self.glass_x = state_dic['glass_x']
self.glass_y = state_dic['glass_y']
self.glass_rotation = state_dic['glass_rotation']
self.glass_states = state_dic['glass_states']
self.poured_glass_states = state_dic['poured_glass_states']
for _ in range(5):
pyflex.step()
def initialize_camera(self):
self.camera_params = {
'default_camera': {'pos': np.array([1.4, 1.5, 0.1]),
'angle': np.array([0.45 * np.pi, -60 / 180. * np.pi, 0]),
'width': self.camera_width,
'height': self.camera_height},
'cam_2d': {'pos': np.array([0.5, .7, 4.]),
'angle': np.array([0, 0, 0.]),
'width': self.camera_width,
'height': self.camera_height}
}
def set_poured_glass_params(self, config):
params = config
self.glass_distance = params['glass_distance']
self.poured_border = params['poured_border']
self.poured_height = params['poured_height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.poured_glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.07 # glass floor length
self.poured_glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.07 # glass width
params['poured_glass_dis_x'] = self.poured_glass_dis_x
params['poured_glass_dis_z'] = self.poured_glass_dis_z
params['poured_glass_x_center'] = self.x_center + params['glass_distance']
self.glass_params.update(params)
def set_pouring_glass_params(self, config):
params = config
self.border = params['border']
self.height = params['height']
fluid_radis = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
self.glass_dis_x = self.fluid_params['dim_x'] * fluid_radis + 0.1 # glass floor length
self.glass_dis_z = self.fluid_params['dim_z'] * fluid_radis + 0.1 # glass width
params['glass_dis_x'] = self.glass_dis_x
params['glass_dis_z'] = self.glass_dis_z
params['glass_x_center'] = self.x_center
self.glass_params = params
def set_scene(self, config, states=None, create_only=False):
'''
Construct the pouring water scene.
'''
# create fluid
super().set_scene(config) # do not sample fluid parameters, as it's very likely to generate very strange fluid
# compute glass params
if states is None:
self.set_pouring_glass_params(config["glass"])
self.set_poured_glass_params(config["glass"])
else:
glass_params = states['glass_params']
self.border = glass_params['border']
self.height = glass_params['height']
self.glass_dis_x = glass_params['glass_dis_x']
self.glass_dis_z = glass_params['glass_dis_z']
self.glass_distance = glass_params['glass_distance']
self.poured_border = glass_params['poured_border']
self.poured_height = glass_params['poured_height']
self.poured_glass_dis_x = glass_params['poured_glass_dis_x']
self.poured_glass_dis_z = glass_params['poured_glass_dis_z']
self.glass_params = glass_params
# create pouring glass & poured glass
self.create_glass(self.glass_dis_x, self.glass_dis_z, self.height, self.border)
self.create_glass(self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
# move pouring glass to be at ground
self.glass_states = self.init_glass_state(self.x_center, 0, self.glass_dis_x, self.glass_dis_z, self.height, self.border)
# move poured glass to be at ground
self.poured_glass_states = self.init_glass_state(self.x_center + self.glass_distance, 0,
self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
self.set_shape_states(self.glass_states, self.poured_glass_states)
# record glass floor center x, y, and rotation
self.glass_x = self.x_center
if self.action_mode == 'rotation_bottom':
self.glass_y = 0
elif self.action_mode == 'rotation_top':
self.glass_y = 0.5 * self.border + self.height
self.glass_rotation = 0
# only create the glass and water, without setting their states
# this is only used in the pourwater amount env.
if create_only:
return
# no cached init states passed in
if states is None:
fluid_pos = np.ones((self.particle_num, self.dim_position))
# move water all inside the glass
fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 0.5, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 2. + self.glass_params['border']
lower_z = -self.glass_params['glass_dis_z'] / 2 + self.glass_params['border']
lower_y = self.glass_params['border']
if self.action_mode in ['sawyer', 'franka']:
lower_y += 0.56 # NOTE: robotics table
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
rx = int(self.fluid_params['dim_x'] * 1)
ry = int(self.fluid_params['dim_y'] * 1)
rz = int(self.fluid_params['dim_z'] / 1)
for x in range(rx):
for y in range(ry):
for z in range(rz):
fluid_pos[cnt][:3] = lower + np.array([x, y, z]) * fluid_dis # + np.random.rand() * 0.01
cnt += 1
pyflex.set_positions(fluid_pos)
print("stablize water!")
for _ in range(100):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
while not_total_num > 0:
max_height_now = np.max(water_state[:, 1])
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 1, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 4
lower_z = -self.glass_params['glass_dis_z'] / 4
lower_y = max_height_now
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
dim_x = config['fluid']['dim_x']
dim_z = config['fluid']['dim_z']
for w_idx in range(len(water_state)):
if not in_glass[w_idx]:
water_state[w_idx][:3] = lower + fluid_dis * np.array([cnt % dim_x, cnt // (dim_x * dim_z), (cnt // dim_x) % dim_z])
cnt += 1
pyflex.set_positions(water_state)
for _ in range(40):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
for _ in range(30):
pyflex.step()
else: # set to passed-in cached init states
self.set_state(states)
def _get_obs(self):
'''
return the observation based on the current flex state.
'''
if self.observation_mode == 'cam_rgb':
return self.get_image(self.camera_width, self.camera_height)
elif self.observation_mode == 'point_cloud':
particle_pos = np.array(pyflex.get_positions()).reshape([-1, 4])[:, :3].flatten()
pos = np.zeros(shape=self.particle_obs_dim, dtype=np.float)
pos[:len(particle_pos)] = particle_pos
return pos.flatten()
elif 'key_point' in self.observation_mode:
pos = np.empty(0, dtype=np.float)
water_state = pyflex.get_positions().reshape([-1, 4])
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
in_poured_glass = float(np.sum(in_poured_glass)) / len(water_state)
in_control_glass = float(np.sum(in_control_glass)) / len(water_state)
cup_state = np.array([self.glass_x, self.glass_y, self.glass_rotation, self.glass_dis_x, self.glass_dis_z, self.height,
self.glass_distance + self.glass_x, self.poured_height, self.poured_glass_dis_x, self.poured_glass_dis_z,
self._get_current_water_height(), in_poured_glass, in_control_glass])
return np.hstack([pos, cup_state]).flatten()
else:
raise NotImplementedError
def compute_reward(self, obs=None, action=None, set_prev_reward=False):
"""
The reward is computed as the fraction of water in the poured glass.
NOTE: the obs and action params are included here to be compatible with the MultiTask env wrapper.
"""
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
reward = float(good_water_num) / water_num
return reward
def _get_info(self):
# Duplicate of the compute reward function!
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
performance = float(good_water_num) / water_num
performance_init = performance if self.performance_init is None else self.performance_init # Use the original performance
return {
'normalized_performance': (performance - performance_init) / (self.reward_max - performance_init),
'performance': performance
}
def _step(self, action):
'''
action: np.ndarray of dim 1x3, (x, y, theta). (x, y) specifies the floor center coordinate, and theta
specifies the rotation.
'''
# treat the action as an increment and clip its range
move = action[:2]
rotate = action[2]
move = np.clip(move, a_min=self.action_space.low[0], a_max=self.action_space.high[0])
rotate = np.clip(rotate, a_min=self.action_space.low[2], a_max=self.action_space.high[2])
dx, dy, dtheta = move[0], move[1], rotate
x, y, theta = self.glass_x + dx, self.glass_y + dy, self.glass_rotation + dtheta
# check if the movement of the pouring glass collides with the poured glass.
# the action only takes effect if there is no collision
new_states = self.rotate_glass(self.glass_states, x, y, theta)
if not self.judge_glass_collide(new_states, theta) and self.above_floor(new_states, theta):
self.glass_states = new_states
self.glass_x, self.glass_y, self.glass_rotation = x, y, theta
else: # invalid move: keep the previous state equal to the current state so the glass does not move
self.glass_states[:, 3:6] = self.glass_states[:, :3].copy()
self.glass_states[:, 10:] = self.glass_states[:, 6:10].copy()
# pyflex takes a step to update the glass and the water fluid
self.set_shape_states(self.glass_states, self.poured_glass_states)
pyflex.step(render=True)
self.inner_step += 1
def create_glass(self, glass_dis_x, glass_dis_z, height, border):
"""
the glass is a box, with each wall of it being a very thin box in Flex.
each wall of the real box is represented by a box object in Flex with really small thickness (determined by the param border)
dis_x: the length of the glass
dis_z: the width of the glass
height: the height of the glass.
border: the thickness of the glass wall.
halfEdge gives the half extents (half the size along each dimension) of each wall box.
Note: this only sets the size of each wall, not its actual position.
That's why left and right walls have exactly the same params, and so do front and back walls.
"""
center = np.array([0., 0., 0.])
quat = quatFromAxisAngle([0, 0, -1.], 0.)
boxes = []
# floor
halfEdge = np.array([glass_dis_x / 2. + border, border / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# left wall
halfEdge = np.array([border / 2., (height) / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# right wall
boxes.append([halfEdge, center, quat])
# back wall
halfEdge = np.array([(glass_dis_x) / 2., (height) / 2., border / 2.])
boxes.append([halfEdge, center, quat])
# front wall
boxes.append([halfEdge, center, quat])
for i in range(len(boxes)):
halfEdge = boxes[i][0]
center = boxes[i][1]
quat = boxes[i][2]
pyflex.add_box(halfEdge, center, quat)
return boxes
def rotate_glass(self, prev_states, x, y, theta):
'''
given the previous states of the glass, rotate it with angle theta.
update the states of the 5 boxes that form the box: floor, left/right wall, back/front wall.
rotate the glass, where the center point is the center of the floor or the top.
state:
0-3: current (x, y, z) coordinate of the center point
3-6: previous (x, y, z) coordinate of the center point
6-10: current quat
10-14: previous quat
'''
dis_x, dis_z = self.glass_dis_x, self.glass_dis_z
quat_curr = quatFromAxisAngle([0, 0, -1.], theta)
border = self.border
# states of 5 walls
states = np.zeros((5, self.dim_shape_state))
for i in range(5):
states[i][3:6] = prev_states[i][:3]
states[i][10:] = prev_states[i][6:10]
x_center = x
# rotation center is the floor center
rotate_center = np.array([x_center, y, 0.])
if self.action_mode == 'rotation_bottom':
# floor: center position does not change
states[0, :3] = np.array([x_center, y, 0.])
# left wall: center must move right and move down.
relative_coord = np.array([-(dis_x+ border) / 2., (self.height) / 2., 0.])
states[1, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# right wall
relative_coord = np.array([(dis_x+ border) / 2., (self.height) / 2., 0.])
states[2, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# back wall
relative_coord = np.array([0, (self.height) / 2., -(dis_z+ border) / 2.])
states[3, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
# front wall
relative_coord = np.array([0, (self.height) / 2., (dis_z+ border) / 2.])
states[4, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
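# A minimal sketch (an assumption, not the softgym implementation) of what
# rotate_rigid_object is expected to compute above: rotate the offset `relative`
# about `axis` by `angle` with Rodrigues' formula, then translate by `center`.
def _rotate_rigid_object_sketch(center, axis, angle, relative):
    import numpy as np
    k = np.asarray(axis, dtype=float)
    k = k / np.linalg.norm(k)
    v = np.asarray(relative, dtype=float)
    v_rot = (v * np.cos(angle)
             + np.cross(k, v) * np.sin(angle)
             + k * np.dot(k, v) * (1.0 - np.cos(angle)))
    return np.asarray(center, dtype=float) + v_rot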
# pre/test_shift_scale.py
"""Tests for rom_operator_inference.pre._shift_scale.py."""
import os
import h5py
import pytest
import itertools
import numpy as np
import rom_operator_inference as opinf
# Data preprocessing: shifting and MinMax scaling / unscaling =================
def test_shift(set_up_basis_data):
"""Test pre._shift_scale.shift()."""
X = set_up_basis_data
# Try with bad data shape.
with pytest.raises(ValueError) as ex:
opinf.pre.shift(np.random.random((3,3,3)))
assert ex.value.args[0] == "data X must be two-dimensional"
# Try with bad shift vector.
with pytest.raises(ValueError) as ex:
opinf.pre.shift(X, X)
assert ex.value.args[0] == "shift_by must be one-dimensional"
# Correct usage.
Xshifted, xbar = opinf.pre.shift(X)
assert xbar.shape == (X.shape[0],)
assert Xshifted.shape == X.shape
assert np.allclose(np.mean(Xshifted, axis=1), np.zeros(X.shape[0]))
for j in range(X.shape[1]):
assert np.allclose(Xshifted[:,j], X[:,j] - xbar)
Y = np.random.random(X.shape)
Yshifted = opinf.pre.shift(Y, xbar)
for j in range(Y.shape[1]):
assert np.allclose(Yshifted[:,j], Y[:,j] - xbar)
# Verify inverse shifting.
assert np.allclose(X, opinf.pre.shift(Xshifted, -xbar))
def test_scale(set_up_basis_data):
"""Test pre._shift_scale.scale()."""
X = set_up_basis_data
# Try with bad scales.
with pytest.raises(ValueError) as ex:
opinf.pre.scale(X, (1,2,3), (4,5))
assert ex.value.args[0] == "scale_to must have exactly 2 elements"
with pytest.raises(ValueError) as ex:
opinf.pre.scale(X, (1,2), (3,4,5))
assert ex.value.args[0] == "scale_from must have exactly 2 elements"
# Scale X to [-1,1] and then scale Y with the same transformation.
Xscaled, scaled_to, scaled_from = opinf.pre.scale(X, (-1,1))
assert Xscaled.shape == X.shape
assert scaled_to == (-1,1)
assert isinstance(scaled_from, tuple)
assert len(scaled_from) == 2
assert round(scaled_from[0],8) == round(X.min(),8)
assert round(scaled_from[1],8) == round(X.max(),8)
assert round(Xscaled.min(),8) == -1
assert round(Xscaled.max(),8) == 1
# Verify inverse scaling.
assert np.allclose(opinf.pre.scale(Xscaled, scaled_from, scaled_to), X)
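# (For reference, a hedged reading of the assertions above: scale() behaves like
#  the affine min-max map
#      Xscaled = (X - from_min) / (from_max - from_min) * (to_max - to_min) + to_min,
#  which is also why calling it with scale_to and scale_from swapped undoes it.)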
# Transformer classes for centering and scaling ===============================
class TestSnapshotTransformer:
"""Test pre.SnapshotTransformer."""
def test_init(self):
"""Test pre.SnapshotTransformer.__init__()."""
st = opinf.pre.SnapshotTransformer()
for attr in ["scaling", "center", "verbose"]:
assert hasattr(st, attr)
# Properties --------------------------------------------------------------
def test_properties(self):
"""Test pre.SnapshotTransformer properties (attribute protection)."""
st = opinf.pre.SnapshotTransformer()
# Test center.
with pytest.raises(TypeError) as ex:
st.center = "nope"
assert ex.value.args[0] == "'center' must be True or False"
st.center = True
st.center = False
# Test scale.
with pytest.raises(ValueError) as ex:
st.scaling = "minimaxii"
assert ex.value.args[0].startswith("invalid scaling 'minimaxii'")
with pytest.raises(TypeError) as ex:
st.scaling = [2, 1]
assert ex.value.args[0] == "'scaling' must be of type 'str'"
for s in st._VALID_SCALINGS:
st.scaling = s
st.scaling = None
def test_eq(self, n=200):
"""Test pre.SnapshotTransformer.__eq__()."""
µ = np.random.randint(0, 100, (n,))
a, b = 10, -3
# Null transformers.
st1 = opinf.pre.SnapshotTransformer()
st2 = opinf.pre.SnapshotTransformer()
assert st1 == st2
assert st1 != 100
# Mismatched attributes.
st1.center = True
st2.center = False
assert not (st1 == st2)
assert st1 != st2
# Centering attributes.
st1.mean_ = µ
st2.center = True
assert st1 != st2
st2.mean_ = µ
assert st1 == st2
st2.mean_ = µ - 5
assert st1 != st2
# Scaling attributes.
st1.scaling = "standard"
st2.scaling = None
assert st1 != st2
st2.scaling = "minmax"
assert st1 != st2
st2.scaling = "standard"
assert st1 == st2
st1.scale_, st1.shift_ = a, b
assert st1 != st2
st2.scale_, st2.shift_ = a - 1, b + 1
assert st1 != st2
st2.scale_, st2.shift_ = a, b
assert st1 == st2
# Printing ----------------------------------------------------------------
def test_str(self):
"""Test pre.SnapshotTransformer.__str__()."""
st = opinf.pre.SnapshotTransformer()
st.center = False
st.scaling = None
assert str(st) == "Snapshot transformer"
st.center = True
trn = "(call fit_transform() to train)"
msc = "Snapshot transformer with mean-snapshot centering"
assert str(st) == f"{msc} {trn}"
for s in st._VALID_SCALINGS:
st.scaling = s
assert str(st) == f"{msc} and '{s}' scaling {trn}"
st.center = False
for s in st._VALID_SCALINGS:
st.scaling = s
assert str(st) == f"Snapshot transformer with '{s}' scaling {trn}"
def test_statistics_report(self):
"""Test pre.SnapshotTransformer._statistics_report()."""
X = np.arange(10) - 4
report = opinf.pre.SnapshotTransformer._statistics_report(X)
assert report == "-4.000e+00 | 5.000e-01 | 5.000e+00 | 2.872e+00"
# Persistence -------------------------------------------------------------
def test_save(self, n=200, k=50):
"""Test pre.SnapshotTransformer.save()."""
# Clean up after old tests.
target = "_savetransformertest.h5"
if os.path.isfile(target): # pragma: no cover
os.remove(target)
def _checkfile(filename, st):
assert os.path.isfile(filename)
with h5py.File(filename, 'r') as hf:
# Check transformation metadata.
assert "meta" in hf
assert len(hf["meta"]) == 0
for attr in ("center", "scaling", "verbose"):
assert attr in hf["meta"].attrs
if attr == "scaling" and st.scaling is None:
assert not hf["meta"].attrs[attr]
else:
assert hf["meta"].attrs[attr] == getattr(st, attr)
# Check transformation parameters.
if st.center:
assert "transformation/mean_" in hf
assert np.all(hf["transformation/mean_"][:] == st.mean_)
if st.scaling:
assert "transformation/scale_" in hf
assert hf["transformation/scale_"][0] == st.scale_
assert "transformation/shift_" in hf
assert hf["transformation/shift_"][0] == st.shift_
# Check file creation and overwrite protocol on null transformation.
st = opinf.pre.SnapshotTransformer()
st.save(target[:-3])
_checkfile(target, st)
with pytest.raises(FileExistsError) as ex:
st.save(target, overwrite=False)
assert ex.value.args[0] == f"{target} (use overwrite=True to ignore)"
st.save(target, overwrite=True)
_checkfile(target, st)
# Check non-null transformations.
X = np.random.randint(0, 100, (n,k)).astype(float)
for scaling, center in itertools.product({None, *st._VALID_SCALINGS},
(True, False)):
st.center = center
st.scaling = scaling
st.fit_transform(X)
st.save(target, overwrite=True)
_checkfile(target, st)
os.remove(target)
def test_load(self, n=200, k=50):
"""Test pre.SnapshotTransformer.load()."""
# Clean up after old tests.
target = "_loadtransformertest.h5"
if os.path.isfile(target): # pragma: no cover
os.remove(target)
# Try to load a bad file.
with h5py.File(target, 'w'):
pass
with pytest.raises(ValueError) as ex:
opinf.pre.SnapshotTransformer.load(target)
assert ex.value.args[0] == "invalid save format (meta/ not found)"
# Check that save() -> load() gives the same transformer.
st = opinf.pre.SnapshotTransformer()
X = np.random.randint(0, 100, (n,k)).astype(float)
for scaling, center in itertools.product({None, *st._VALID_SCALINGS},
(True, False)):
st.scaling = scaling
st.center = center
st.fit_transform(X, inplace=False)
st.save(target, overwrite=True)
st2 = opinf.pre.SnapshotTransformer.load(target)
assert st == st2
os.remove(target)
# Main routines -----------------------------------------------------------
def test_is_trained(self):
"""Test pre.SnapshotTransformer._is_trained()."""
st = opinf.pre.SnapshotTransformer()
# Null transformer is always trained.
st.center = False
st.scaling = None
assert st._is_trained() is True
# Centering.
st.center = True
assert st._is_trained() is False
st.mean_ = np.array([1,2,3])
assert st._is_trained() is True
# Scaling.
st.center = False
st.scaling = "minmax"
assert st._is_trained() is False
st.scale_ = 10
assert st._is_trained() is False
st.shift_ = 20
assert st._is_trained() is True
def test_fit_transform(self, n=200, k=50):
"""Test pre.SnapshotTransformer.fit_transform()."""
def fit_transform_copy(st, A):
"""Assert A and B are not the same object but do have the same
type and shape.
"""
B = st.fit_transform(A, inplace=False)
assert B is not A
assert type(B) is type(A)
assert B.shape == A.shape
return B
st = opinf.pre.SnapshotTransformer(verbose=True)
# Test null transformation.
st.center = False
st.scaling = None
X = np.random.randint(0, 100, (n,k)).astype(float)
Y = st.fit_transform(X, inplace=True)
assert Y is X
Y = fit_transform_copy(st, X)
assert np.all(Y == X)
# Test centering.
st.center = True
st.scaling = None
Y = fit_transform_copy(st, X)
assert hasattr(st, "mean_")
assert isinstance(st.mean_, np.ndarray)
assert st.mean_.shape == (X.shape[0],)
assert np.allclose(np.mean(Y, axis=1), 0)
# Test scaling (without and with centering).
for centering in (False, True):
st.center = centering
# Test standard scaling.
st.scaling = "standard"
Y = fit_transform_copy(st, X)
for attr in "scale_", "shift_":
assert hasattr(st, attr)
assert isinstance(getattr(st, attr), float)
assert np.isclose(np.mean(Y), 0)
assert np.isclose(np.std(Y), 1)
# Test min-max scaling.
st.scaling = "minmax"
Y = fit_transform_copy(st, X)
assert np.isclose(np.min(Y), 0)
assert np.isclose(np.max(Y), 1)
# Test symmetric min-max scaling.
st.scaling = "minmaxsym"
Y = fit_transform_copy(st, X)
assert np.isclose(np.min(Y), -1)
assert np.isclose(np.max(Y), 1)
# Test maximum absolute scaling.
st.scaling = "maxabs"
Y = fit_transform_copy(st, X)
assert np.isclose(np.max(np.abs(Y)), 1)
# Test minimum-maximum absolute scaling.
st.scaling = "maxabssym"
Y = fit_transform_copy(st, X)
assert np.isclose(np.mean(Y), 0)
assert np.isclose(np.max(np.abs(Y)), 1)
def test_transform(self, n=200, k=50):
"""Test pre.SnapshotTransformer.transform()."""
X = np.random.randint(0, 100, (n,k)).astype(float)
st = opinf.pre.SnapshotTransformer(verbose=False)
# Test null transformation.
X = np.random.randint(0, 100, (n,k)).astype(float)
st.fit_transform(X)
Y = np.random.randint(0, 100, (n,k)).astype(float)
Z = st.transform(Y, inplace=True)
assert Z is Y
Z = st.transform(Y, inplace=False)
assert Z is not Y
assert Z.shape == Y.shape
assert np.all(Z == Y)
# Test mean shift.
st.center = True
st.scaling = None
with pytest.raises(AttributeError) as ex:
st.transform(Y, inplace=False)
assert ex.value.args[0] == \
"transformer not trained (call fit_transform())"
st.fit_transform(X)
µ = st.mean_
Z = st.transform(Y, inplace=False)
assert np.allclose(Z, Y - µ.reshape(-1,1))
# Test each scaling.
st.center = False
for scl in st._VALID_SCALINGS:
X = np.random.randint(0, 100, (n,k)).astype(float)
Y = np.random.randint(0, 100, (n,k)).astype(float)
st.scaling = scl
st.fit_transform(X)
a, b = st.scale_, st.shift_
Z = st.transform(Y)
assert np.allclose(Z, a*Y + b)
def test_inverse_transform(self, n=200, k=50):
"""Test pre.SnapshotTransformer.inverse_transform()."""
X = np.random.randint(0, 100, (n,k)).astype(float)
st = opinf.pre.SnapshotTransformer(verbose=False)
st.center = True
with pytest.raises(AttributeError) as ex:
st.inverse_transform(X, inplace=False)
assert ex.value.args[0] == \
"transformer not trained (call fit_transform())"
for scaling, center in itertools.product({None, *st._VALID_SCALINGS},
(True, False)):
st.scaling = scaling
st.center = center
st.fit_transform(X, inplace=False)
Y = np.random.randint(0, 100, (n,k)).astype(float)
Z = st.transform(Y, inplace=False)
st.inverse_transform(Z, inplace=True)
assert np.allclose(Z, Y)
class TestSnapshotTransformerMulti:
"""Test pre.SnapshotTransformerMulti."""
def test_init(self):
"""Test pre.SnapshotTransformer.__init__()."""
stm = opinf.pre.SnapshotTransformerMulti(1)
for attr in ["scaling", "center", "verbose",
"num_variables", "transformers"]:
assert hasattr(stm, attr)
# Center.
stm = opinf.pre.SnapshotTransformerMulti(2, center=(True, True))
assert stm.center == (True, True)
stm.transformers[1].center = False
assert stm.center == (True, False)
stm = opinf.pre.SnapshotTransformerMulti(3, center=True)
assert stm.center == (True, True, True)
with pytest.raises(ValueError) as ex:
opinf.pre.SnapshotTransformerMulti(3, center=[True, False])
assert ex.value.args[0] == "len(center) = 2 != 3 = num_variables"
with pytest.raises(ValueError) as ex:
opinf.pre.SnapshotTransformerMulti(3, center="the center")
assert ex.value.args[0] == "len(center) = 10 != 3 = num_variables"
with pytest.raises(TypeError) as ex:
opinf.pre.SnapshotTransformerMulti(3, center=100)
assert ex.value.args[0] == "object of type 'int' has no len()"
with pytest.raises(TypeError) as ex:
opinf.pre.SnapshotTransformerMulti(2, center=(True, "yes"))
assert ex.value.args[0] == "'center' must be True or False"
# Scaling.
stm = opinf.pre.SnapshotTransformerMulti(2, scaling=("minmax", None))
assert stm.scaling == ("minmax", None)
stm = opinf.pre.SnapshotTransformerMulti(2, scaling=[None, "maxabs"])
assert isinstance(stm.scaling, tuple)
assert stm.scaling == (None, "maxabs")
stm = opinf.pre.SnapshotTransformerMulti(3, scaling="standard")
assert stm.scaling == ("standard", "standard", "standard")
with pytest.raises(TypeError) as ex:
opinf.pre.SnapshotTransformerMulti(3, scaling=100)
assert ex.value.args[0] == "object of type 'int' has no len()"
with pytest.raises(ValueError) as ex:
opinf.pre.SnapshotTransformerMulti(3, scaling=(True, False))
assert ex.value.args[0] == "len(scaling) = 2 != 3 = num_variables"
with pytest.raises(TypeError) as ex:
opinf.pre.SnapshotTransformerMulti(2, scaling=(True, "minmax"))
assert ex.value.args[0] == "'scaling' must be of type 'str'"
# Properties --------------------------------------------------------------
def test_properties(self):
"""Test pre.SnapshotTransformerMulti properties."""
# Attribute setting blocked.
stm = opinf.pre.SnapshotTransformerMulti(2)
for attr in "num_variables", "center", "scaling":
assert hasattr(stm, attr)
with pytest.raises(AttributeError) as ex:
setattr(stm, attr, 0)
assert ex.value.args[0] == "can't set attribute"
# Variable names.
stm = opinf.pre.SnapshotTransformerMulti(3, variable_names=None)
assert isinstance(stm.variable_names, list)
assert len(stm.variable_names) == 3
assert stm.variable_names == ["variable 1", "variable 2", "variable 3"]
with pytest.raises(TypeError) as ex:
stm.variable_names = (1, 2, 3)
assert ex.value.args[0] == "variable_names must be list of length 3"
with pytest.raises(TypeError) as ex:
stm.variable_names = [1, 2]
assert ex.value.args[0] == "variable_names must be list of length 3"
stm.variable_names = ["Bill", "Charlie", "Percy"]
# Verbose
stm.verbose = 1
assert stm.verbose is True
stm.verbose = 0
assert stm.verbose is False
assert all(st.verbose is False for st in stm.transformers)
stm.verbose = True
assert stm.verbose is True
def test_mean(self):
"""Test pre.SnapshotTransformerMulti.mean_."""
centers = [False, True, False, True]
scalings = [None, None, "standard", "minmax"]
stm = opinf.pre.SnapshotTransformerMulti(4, centers, scalings)
assert stm.mean_ is None
# Set the centering vectors.
stm.n_ = 7
µs = [np.random.randint(0, 100, stm.n_) for _ in range(4)]
for i,µ in enumerate(µs):
if centers[i]:
stm.transformers[i].mean_ = µ
if scalings[i]:
stm.transformers[i].scale_ = 0
stm.transformers[i].shift_ = 0
# Validate concatenated mean_.
µµ = stm.mean_
assert isinstance(µµ, np.ndarray)
assert µµ.shape == (4*stm.n_,)
for i,µ in enumerate(µs):
s = slice(i*stm.n_, (i+1)*stm.n_)
if centers[i]:
assert np.allclose(µµ[s], µ)
else:
assert np.allclose(µµ[s], 0)
def test_len(self):
"""Test pre.SnapshotTransformerMulti.__len__()."""
for i in [2, 5, 10]:
stm = opinf.pre.SnapshotTransformerMulti(i)
assert len(stm) == i
def test_getitem(self):
"""Test pre.SnapshotTransformerMulti.__getitem__()."""
stm = opinf.pre.SnapshotTransformerMulti(10)
for i in [3, 4, 7]:
assert stm[i] is stm.transformers[i]
def test_setitem(self):
"""Test pre.SnapshotTransformerMulti.__setitem__()."""
stm = opinf.pre.SnapshotTransformerMulti(10)
st = opinf.pre.SnapshotTransformer(center=True, scaling="minmax")
for i in [3, 4, 7]:
stm[i] = st
assert stm.transformers[i] is st
for i in range(stm.num_variables):
stm[i].center = False
assert stm.center == (False,)*stm.num_variables
with pytest.raises(TypeError) as ex:
stm[2] = 10
assert ex.value.args[0] == \
"assignment object must be SnapshotTransformer"
def test_eq(self):
"""Test pre.SnapshotTransformerMulti.__eq__()."""
# Null transformers.
stm1 = opinf.pre.SnapshotTransformerMulti(3)
assert stm1 != 100
stm2 = opinf.pre.SnapshotTransformerMulti(2)
assert stm1 != stm2
stm2 = opinf.pre.SnapshotTransformerMulti(3)
assert stm1 == stm2
# Mismatched attributes.
stm1 = opinf.pre.SnapshotTransformerMulti(3, center=True)
stm2 = opinf.pre.SnapshotTransformerMulti(3, center=False)
assert not (stm1 == stm2)
assert stm1 != stm2
stm1 = opinf.pre.SnapshotTransformerMulti(3, scaling="minmax")
stm2 = opinf.pre.SnapshotTransformerMulti(3, scaling="minmax")
assert stm1 == stm2
st = opinf.pre.SnapshotTransformer(scaling="standard")
stm1.transformers[1] = st
assert stm1 != stm2
stm2.transformers[1] = st
assert stm1 == stm2
def test_str(self):
"""Test pre.SnapshotTransformerMulti.__str__()."""
names = ["var1", "var2", "var3"]
stm = opinf.pre.SnapshotTransformerMulti(3, center=False,
scaling=None,
variable_names=names)
stm.transformers[0].center = True
stm.transformers[-1].scaling = "standard"
assert str(stm) == \
"Multi-variate snapshot transformer\n" \
"* var1 | Snapshot transformer with mean-snapshot centering " \
"(call fit_transform() to train)\n" \
"* var2 | Snapshot transformer\n" \
"* var3 | Snapshot transformer with 'standard' scaling " \
"(call fit_transform() to train)"
# Persistence -------------------------------------------------------------
def test_save(self):
"""Test pre.SnapshotTransformerMulti.save()."""
# Clean up after old tests.
target = "_savetransformermultitest.h5"
if os.path.isfile(target): # pragma: no cover
os.remove(target)
def _checkfile(filename, stm):
assert os.path.isfile(filename)
with h5py.File(filename, 'r') as hf:
# Check transformation metadata.
assert "meta" in hf
assert len(hf["meta"]) == 0
for attr in ("num_variables", "verbose"):
assert attr in hf["meta"].attrs
assert hf["meta"].attrs[attr] == getattr(stm, attr)
# Check individual transformers.
for i in range(stm.num_variables):
label = f"variable{i+1}"
assert label in hf
group = hf[label]
assert "meta" in group
assert "center" in group["meta"].attrs
assert "scaling" in group["meta"].attrs
st = stm.transformers[i]
assert group["meta"].attrs["center"] == st.center
if st.scaling is None:
assert not group["meta"].attrs["scaling"]
assert group["meta"].attrs["scaling"] is not None
else:
assert group["meta"].attrs["scaling"] == st.scaling
# Check transformation parameters.
if st.center:
assert "transformation/mean_" in group
assert np.all(
group["transformation/mean_"][:] == st.mean_)
if st.scaling:
assert "transformation/scale_" in group
assert group["transformation/scale_"][0] == st.scale_
assert "transformation/shift_" in group
assert group["transformation/shift_"][0] == st.shift_
# Check file creation and overwrite protocol on null transformation.
stm = opinf.pre.SnapshotTransformerMulti(15)
stm.save(target[:-3])
_checkfile(target, stm)
with pytest.raises(FileExistsError) as ex:
stm.save(target, overwrite=False)
assert ex.value.args[0] == f"{target} (use overwrite=True to ignore)"
stm.save(target, overwrite=True)
_checkfile(target, stm)
# Check non-null transformations.
i = 0
scalings = {None, *opinf.pre.SnapshotTransformer._VALID_SCALINGS}
for center, scaling in itertools.product((True, False), scalings):
stm.transformers[i].center = center
stm.transformers[i].scaling = scaling
i += 1
X = np.random.randint(0, 100, (150,17)).astype(float)
stm.fit_transform(X)
stm.save(target, overwrite=True)
_checkfile(target, stm)
os.remove(target)
def test_load(self, n=200, k=50):
"""Test pre.SnapshotTransformerMulti.load()."""
# Clean up after old tests.
target = "_loadtransformermultitest.h5"
if os.path.isfile(target): # pragma: no cover
os.remove(target)
# Try to load a bad file.
with h5py.File(target, 'w'):
pass
with pytest.raises(ValueError) as ex:
opinf.pre.SnapshotTransformerMulti.load(target)
assert ex.value.args[0] == "invalid save format (meta/ not found)"
# Check that save() -> load() gives the same transformer.
stm = opinf.pre.SnapshotTransformerMulti(15)
i = 0
scalings = {None, *opinf.pre.SnapshotTransformer._VALID_SCALINGS}
for center, scaling in itertools.product((True, False), scalings):
stm.transformers[i].center = center
stm.transformers[i].scaling = scaling
i += 1
X = np.random.randint(0, 100, (150,19)).astype(float)
stm.fit_transform(X)
stm.save(target, overwrite=True)
stm2 = opinf.pre.SnapshotTransformerMulti.load(target)
assert stm2 == stm
os.remove(target)
# Main routines -----------------------------------------------------------
def test_check_shape(self):
"""Test pre.SnapshotTransformerMulti._check_shape()."""
stm = opinf.pre.SnapshotTransformerMulti(12)
stm.n_ = 10
X = np.random.randint(0, 100, (120,23)).astype(float)
stm._check_shape(X)
with pytest.raises(ValueError) as ex:
stm._check_shape(X[:-1])
assert ex.value.args[0] == \
"snapshot set must have num_variables * n = 12 * 10 = 120 rows " \
"(got 119)"
def __testcase(self):
centers = [
False, True,
False, False, False, False, False,
True, True, True, True, True,
]
scalings = [
None, None,
"standard", "minmax", "minmaxsym", "maxabs", "maxabssym",
"standard", "minmax", "minmaxsym", "maxabs", "maxabssym"
]
return opinf.pre.SnapshotTransformerMulti(len(centers),
center=centers,
scaling=scalings,
verbose=True)
def test_fit_transform(self):
"""Test pre.SnapshotTransformerMulti.fit_transform()."""
stm = self.__testcase()
# Inplace transformation.
X = np.random.randint(0, 100, (120,29)).astype(float)
Y = stm.fit_transform(X, inplace=True)
assert stm.n_ == 10
assert stm._is_trained()
assert Y is X
# Non-inplace transformation.
X = np.random.randint(0, 100, (120,29)).astype(float)
Y = stm.fit_transform(X, inplace=False)
assert stm.n_ == 10
assert stm._is_trained()
assert Y is not X
assert type(Y) is type(X)
assert Y.shape == X.shape
# Null transformation.
i = 0
s = slice(i*stm.n_, (i+1)*stm.n_)
assert np.allclose(Y[s], X[s])
for attr in ("mean_", "scale_", "shift_"):
assert not hasattr(stm.transformers[i], attr)
# Centering only.
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
µ = np.mean(X[s], axis=1)
assert np.allclose(Y[s], X[s] - µ.reshape(-1,1))
assert hasattr(stm.transformers[i], "mean_")
assert np.allclose(stm.transformers[i].mean_, µ)
for attr in ("scale_", "shift_"):
assert not hasattr(stm.transformers[i], attr)
for ctr in [False, True]:
# Standard scaling.
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
assert stm.transformers[i].scaling == "standard"
assert np.isclose(np.mean(Y[s]), 0)
assert np.isclose(np.std(Y[s]), 1)
# Minmax scaling (to [0,1]).
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
assert stm.transformers[i].scaling == "minmax"
assert np.isclose(np.min(Y[s]), 0)
assert np.isclose(np.max(Y[s]), 1)
# Symmetric Minmax scaling (to [-1,1]).
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
assert stm.transformers[i].scaling == "minmaxsym"
assert np.isclose(np.min(Y[s]), -1)
assert np.isclose(np.max(Y[s]), 1)
# Maxabs scaling (maximum absolute value to 1).
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
assert stm.transformers[i].scaling == "maxabs"
assert np.isclose(np.max(np.abs(Y[s])), 1)
# Symmetric maxabs scaling (center, then maximum absolute value to 1).
i += 1
s = slice(i*stm.n_, (i+1)*stm.n_)
assert stm.transformers[i].scaling == "maxabssym"
assert np.isclose(np.mean(Y[s]), 0)
assert np.isclose(np.max(np.abs(Y[s])), 1)
def test_transform(self):
"""Test pre.SnapshotTransformerMulti.transform()."""
stm = self.__testcase()
X = np.random.randint(0, 100, (120,29)).astype(float)
with pytest.raises(AttributeError) as ex:
stm.transform(X, inplace=False)
assert ex.value.args[0] == \
"transformer not trained (call fit_transform())"
# Inplace transformation.
stm.fit_transform(X)
Y = np.random.randint(0, 100, (120,33)).astype(float)
Z = stm.transform(Y, inplace=True)
assert Z is Y
# Non-inplace transformation.
Y = np.random.randint(0, 100, (120,33)).astype(float)
Z = stm.transform(Y, inplace=False)
assert Z is not Y
assert type(Z) is type(Y)
assert Z.shape == Y.shape
def test_inverse_transform(self):
"""Test pre.SnapshotTransformerMulti.transform()."""
stm = self.__testcase()
X = np.random.randint(0, 100, (120,29)).astype(float)
with pytest.raises(AttributeError) as ex:
stm.inverse_transform(X, inplace=False)
assert ex.value.args[0] == \
"transformer not trained (call fit_transform())"
# Inplace transformation.
stm.fit_transform(X)
Y = np.random.randint(0, 100, (120,32)).astype(float)
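# A minimal sketch (not the rom_operator_inference implementation) of the
# scalings asserted in the tests above: each is an affine map a*X + b with
# scalars a (scale_) and b (shift_).
def _scaling_coeffs_sketch(X, scaling):
    import numpy as np
    if scaling == "standard":       # zero mean, unit standard deviation
        a = 1.0 / np.std(X); b = -np.mean(X) * a
    elif scaling == "minmax":       # map into [0, 1]
        a = 1.0 / (X.max() - X.min()); b = -X.min() * a
    elif scaling == "minmaxsym":    # map into [-1, 1]
        a = 2.0 / (X.max() - X.min()); b = -X.min() * a - 1.0
    elif scaling == "maxabs":       # largest magnitude becomes 1
        a = 1.0 / np.abs(X).max(); b = 0.0
    elif scaling == "maxabssym":    # center, then largest magnitude becomes 1
        mu = np.mean(X); a = 1.0 / np.abs(X - mu).max(); b = -mu * a
    else:
        raise ValueError(f"invalid scaling '{scaling}'")
    return a, b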
import numpy as np
import matplotlib.pyplot as plt
### Command Sequence for Main Odometry Scenario ###
main_sequence_commands = np.array([[0.5, 0, 0], [1.0, 0, 0], [1, 0, 0.785], [1, 0, 1.57], [0, 1, -0.785], [1, 0, 0], [1, 0 , -0.785], [0, -3, 1.57],
[0.5, 0, 0], [1.0, 0, 0], [1, 0, 0.785], [1, 0, 1.57], [0, 1, -0.785], [1, 0, 0], [1, 0 , -0.785], [0, 3, 0]])
### Probabilistic Odometry Pose Estimation Function ###
def probabilstic_odom_estimation(commands, translation_variance_scale=0.1, rotation_variance_scale=0.1):
#######################
### Initial Setting ###
#######################
pose = [0, 0, 0] # x [m], y [m], orientation [radian]
groundtruth_poses = []
groundtruth_poses.append(pose)
odom_est_poses = []
odom_est_poses.append(pose)
print('Initial pose : {}'.format(pose))
print('-----------------------------------------------------')
plt.plot(pose[0], pose[1], 'o')
plt.arrow(pose[0], pose[1], 0.1 * np.cos(pose[2]), 0.1 * np.sin(pose[2]), width=0.03)
##########################################
### Iterative Odometry Pose Estimation ###
##########################################
sample_odom_est = np.ndarray(shape=(2, 1),buffer=np.array([pose])) # Initial setup for odometry estimation samples
# Iterate over the sequence of commands from main odometry scenario
for iteration, (command) in enumerate(zip(commands)):
print('Iteration : {}'.format(iteration))
dx = command[0][0] # Command for X-axis translation
dy = command[0][1] # Command for Y-axis translation
dtheta = command[0][2] # Command for rotation change
print('[Control] dx : {}, dy : {}, dtheta : {}'.format(dx, dy, dtheta))
translation = np.sqrt(dx**2 + dy**2) # Calculate the length of the translation
rotation_1 = np.arctan2(dy, dx) - pose[2] # Calculate the rotation angle for aligning the heading
rotation_2 = dtheta - rotation_1 # Calculate the rotation angle for aligning final heading
print('Translation : {}'.format(translation))
print('rotation_1 : {}'.format(rotation_1))
print('rotation_2 : {}'.format(rotation_2))
# Rotation with Gaussian Noise
rotation_est = np.random.normal(loc=rotation_1, scale=rotation_variance_scale * abs(rotation_1), size=100)
####################################################################################
# Odometry Pose Estimation Accumulation under Gaussian Noise
idx_odom = 0
for sample_odom in zip(sample_odom_est.T): # Iterate over all odometry pose estimation with gaussian noise
for rotation in zip(rotation_est): # Iterate over all rotation estimation with gaussian noise
# For each noisy pose estimate, apply the translation and sample from a Gaussian distribution.
# The covariance of the distribution depends on the value of the translation.
# This is because the farther the robot travels, the more noise its odometry accumulates.
mean = [translation * np.cos(pose[2] + rotation[0]) + sample_odom[0][0], translation * np.sin(pose[2] + rotation[0]) + sample_odom[0][1]]
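# A minimal sketch (an assumption) of the rot1-translate-rot2 odometry model the loop
# above draws samples from: given pose (x, y, theta) and a (noisy) command decomposed
# into rotation_1, translation, rotation_2, the updated pose is
#   x'     = x + translation * cos(theta + rotation_1)
#   y'     = y + translation * sin(theta + rotation_1)
#   theta' = theta + rotation_1 + rotation_2
def _odometry_step_sketch(pose, rotation_1, translation, rotation_2):
    import numpy as np
    x, y, theta = pose
    return [x + translation * np.cos(theta + rotation_1),
            y + translation * np.sin(theta + rotation_1),
            theta + rotation_1 + rotation_2]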
"""
Linear dynamical system model for the AP text dataset.
Each document is modeled as a draw from an LDS with
categorical observations.
"""
import os
import gzip
import time
import pickle
import collections
import numpy as np
from scipy.misc import logsumexp
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from hips.plotting.layout import create_figure
import brewer2mpl
from pgmult.lds import MultinomialLDS
from pgmult.particle_lds import LogisticNormalMultinomialLDS, ParticleSBMultinomialLDS
from pgmult.hmm import MultinomialHMM
from pgmult.utils import pi_to_psi
from pylds.models import NonstationaryLDS
from pybasicbayes.distributions import GaussianFixed, Multinomial, Regression
from pybasicbayes.util.text import progprint_xrange
from autoregressive.distributions import AutoRegression
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
goodcolors = np.array([0,1,4,6,7,8,])
colors = np.array(colors)[goodcolors]
np.seterr(invalid="warn")
np.random.seed(0)
# Model parameters
K = 1000 # Number of words
# Data handling
def load(filename=os.path.join("data", "alice", "alice.txt")):
with open(filename,'r') as infile:
bigstr = infile.read()
docs = [bigstr.replace('\n', ' ').translate(str.maketrans('', '', "\n\'\":?,!.;"))]
vectorizer = CountVectorizer(stop_words='english',max_features=K).fit(docs)
docs = [make_onehot_seq(doc, vectorizer) for doc in docs]
# words = vectorizer.get_feature_names()
words = list(vectorizer.vocabulary_.keys())
# Sort by usage
usage = np.array([doc.sum(0) for doc in docs]).sum(0)
perm = np.argsort(usage)[::-1]
docs = [doc[:,perm] for doc in docs]
words = np.array(words)[perm]
return docs, words
def filter_wordseq(doc, vectorizer):
return [w for w in doc if w in vectorizer.vocabulary_]
def make_onehot_seq(doc, vectorizer):
lst = filter_wordseq(vectorizer.build_analyzer()(doc), vectorizer)
indices = {word:idx for idx, word in enumerate(vectorizer.vocabulary_.keys())}
out = np.zeros((len(lst),len(indices)))
for wordidx, word in enumerate(lst):
out[wordidx, indices[word]] = 1
return out
# Inference stuff
# model, lls, test_lls, pred_lls, pis, psis, zs, timestamps
Results = collections.namedtuple("Results", ["lls", "test_lls", "pred_lls", "samples", "timestamps"])
def fit_lds_model(Xs, Xtest, D, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
mus = [X.sum(0) + 0.1 for X in Xs]
mus = [mu/mu.sum() for mu in mus]
# mus = [np.ones(K)/float(K) for _ in Xs]
models = [MultinomialLDS(K, D,
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
sigma_C=1., mu_pi=mus[i]) for i in range(Nx)]
for X, model in zip(Xs, models):
model.add_data(X)
[model.resample_parameters() for model in models]
def compute_pred_ll():
pred_ll = 0
for Xt, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xt, M=1)[0]
return pred_ll
init_results = (0, models, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
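# (times holds the wall-clock duration of each resampling sweep, so the cumulative sum
# gives the elapsed time at which each sample's predictive log-likelihood was recorded.)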
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 8 14:36:04 2021
@author: sgboakes
"""
import numpy as np
import matplotlib.pyplot as plt
from pysatellite import Transformations, Functions, Filters
import pysatellite.config as cfg
import pandas as pd
if __name__ == "__main__":
plt.close('all')
# ~~~~ Variables
sin = np.sin
cos = np.cos
pi = np.float64(np.pi)
sensLat = np.float64(28.300697)
sensLon = np.float64(-16.509675)
sensAlt = np.float64(2390)
sensLLA = np.array([[sensLat * pi / 180], [sensLon * pi / 180], [sensAlt]], dtype='float64')
# sensLLA = np.array([[pi/2], [0], [1000]], dtype='float64')
sensECEF = Transformations.LLAtoECEF(sensLLA)
sensECEF.shape = (3, 1)
simLength = cfg.simLength
stepLength = cfg.stepLength
mu = cfg.mu
trans_earth = False
# ~~~~ Satellite Conversion
# Define sat pos in ECI and convert to AER
# radArr: radii for each sat metres
# omegaArr: orbital rate for each sat rad/s
# thetaArr: inclination angle for each sat rad
# kArr: normal vector for each sat metres
radArr = np.array([7e6, 8e6, 6.8e6, 7.5e6], dtype='float64')
omegaArr = 1 / np.sqrt(radArr ** 3 / mu)
thetaArr = np.array([[0], [2 * pi / 3], [3 * pi / 2], [3 * pi / 4]], dtype='float64')
kArr = np.array([[0, 0, 0],
[0, 0, 1],
[1 / np.sqrt(2), 1 / np.sqrt(2), 0],
[1 / np.sqrt(3), 1 / np.sqrt(3), 1 / np.sqrt(3)]],
dtype='float64')
num_sats = len(radArr)
# Make data structures
satECI = {chr(i + 97): np.zeros((3, simLength)) for i in range(num_sats)}
satAER = {chr(i + 97): np.zeros((3, simLength)) for i in range(num_sats)}
for i in range(num_sats):
c = chr(i + 97)
for j in range(simLength):
v = np.array([[radArr[i] * sin(omegaArr[i] * (j + 1) * stepLength)],
[0],
[radArr[i] * cos(omegaArr[i] * (j + 1) * stepLength)]], dtype='float64')
satECI[c][:, j] = (v @ cos(thetaArr[i])) + (np.cross(kArr[i, :].T, v.T) * sin(thetaArr[i])) + (
kArr[i, :].T * np.dot(kArr[i, :].T, v) * (1 - cos(thetaArr[i])))
satAER[c][:, j:j + 1] = Transformations.ECItoAER(satECI[c][:, j], stepLength, j + 1, sensECEF, sensLLA[0],
sensLLA[1])
if not trans_earth:
if satAER[c][1, j] < 0:
satAER[c][:, j:j + 1] = np.array([[np.nan], [np.nan], [np.nan]])
if np.isnan(satAER[c]).all():
print('Satellite {s} is not observable'.format(s=c))
# Add small deviations for measurements
# Using calculated max measurement deviations for LT:
# Based on 0.15"/pixel, sat size = 2m, max range = 1.38e7
# sigma = 1/2 * 0.15" for it to be definitely on that pixel
# Add angle devs to Az/Elev, and range devs to Range
angMeasDev, rangeMeasDev = 1e-6, 20
satAERMes = {chr(i + 97): np.zeros((3, simLength)) for i in range(num_sats)}
for i in range(num_sats):
c = chr(i + 97)
satAERMes[c][0, :] = satAER[c][0, :] + (angMeasDev * np.random.randn(1, simLength))
satAERMes[c][1, :] = satAER[c][1, :] + (angMeasDev * np.random.randn(1, simLength))
satAERMes[c][2, :] = satAER[c][2, :] + (rangeMeasDev * np.random.randn(1, simLength))
satECIMes = {chr(i + 97): np.zeros((3, simLength)) for i in range(num_sats)}
for i in range(num_sats):
c = chr(i + 97)
for j in range(simLength):
satECIMes[c][:, j:j + 1] = Transformations.AERtoECI(satAERMes[c][:, j], stepLength, j+1, sensECEF, sensLLA[0],
sensLLA[1])
# ~~~~ Temp ECI measurements from MATLAB
# satECIMes['a'] = pd.read_csv('ECI_mes.txt', delimiter=' ').to_numpy(dtype='float64')
# #satECIMes.to_numpy(dtype='float64')
# satECIMes['a'] = satECIMes['a'].T
# np.reshape(satECIMes['a'], (3, simLength))
satState = {chr(i + 97): np.zeros((6, 1)) for i in range(num_sats)}
for i in range(num_sats):
c = chr(i + 97)
for j in range(simLength):
if np.all(np.isnan(satECIMes[c][:, j])):
continue
else:
satState[c][0:3] = np.reshape(satECIMes[c][:, j], (3, 1))
break
# Process noise
stdAng = np.float64(1e-5)
coefA = np.float64(0.25 * stepLength ** 4.0 * stdAng ** 2.0)
coefB = np.float64(stepLength ** 2.0 * stdAng ** 2.0)
coefC = np.float64(0.5 * stepLength ** 3.0 * stdAng ** 2.0)
procNoise = np.array([[coefA, 0, 0, coefC, 0, 0],
[0, coefA, 0, 0, coefC, 0],
[0, 0, coefA, 0, 0, coefC],
[coefC, 0, 0, coefB, 0, 0],
[0, coefC, 0, 0, coefB, 0],
[0, 0, coefC, 0, 0, coefB]],
dtype='float64')
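# coefA/coefB/coefC follow the standard white-noise-acceleration process-noise model:
# for step length T and acceleration standard deviation sigma, each axis contributes
#   Q_axis = sigma^2 * [[T^4/4, T^3/2],
#                       [T^3/2, T^2  ]]
# (position-position, position-velocity, and velocity-velocity terms, respectively).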
covState = {chr(i + 97): np.zeros((6, 6)) for i in range(num_sats)}
for i in range(num_sats):
c = chr(i + 97)
covState[c] = np.float64(1e10) * np.identity(6)
covAER = np.array([[(angMeasDev * 180 / pi) ** 2, 0, 0],
[0, (angMeasDev * 180 / pi) ** 2, 0],
[0, 0, rangeMeasDev ** 2]],
dtype='float64'
)
measureMatrix = np.append(np.identity(3), np.zeros((3, 3)), axis=1)  # assumed H = [I3 | 0]: only the position components are measured
"""
Recent upgrade of keras versions in TF 2.5+, keras has been moved to tf.keras
This has resulted in certain exceptions when keras models are attacked in parallel
This script fixes this behavior by adding an official hotfix for this situation detailed here:
https://github.com/tensorflow/tensorflow/issues/34697
All models/dataset are similar to keras attack tutorial at :
https://textattack.readthedocs.io/en/latest/2notebook/Example_3_Keras.html#
NOTE: This fix might be deprecated in future TF releases
NOTE: This script is not designed to run in a Jupyter notebook due to conflicting namespace issues
We recommend running it as a script only
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.utils import to_categorical
from tensorflow.python.keras.layers import deserialize, serialize
from tensorflow.python.keras.saving import saving_utils
import torch
from textattack import AttackArgs, Attacker
from textattack.attack_recipes import PWWSRen2019
from textattack.datasets import HuggingFaceDataset
from textattack.models.wrappers import ModelWrapper
NUM_WORDS = 1000
def unpack(model, training_config, weights):
restored_model = deserialize(model)
if training_config is not None:
restored_model.compile(
**saving_utils.compile_args_from_training_config(training_config)
)
restored_model.set_weights(weights)
return restored_model
# Hotfix function
def make_keras_picklable():
def __reduce__(self):
model_metadata = saving_utils.model_metadata(self)
training_config = model_metadata.get("training_config", None)
model = serialize(self)
weights = self.get_weights()
return (unpack, (model, training_config, weights))
cls = Model
cls.__reduce__ = __reduce__
# Run the function
make_keras_picklable()
def transform(x):
x_transform = []
for i, word_indices in enumerate(x):
BoW_array = np.zeros((NUM_WORDS,))
for index in word_indices:
if index < len(BoW_array):
BoW_array[index] += 1
x_transform.append(BoW_array)
return np.array(x_transform)
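# Hedged usage sketch (assumption, not part of the original script): two short
# index sequences map to a (2, NUM_WORDS) bag-of-words matrix.
def _demo_transform():
    print(transform([[1, 5, 7], [2, 2, 999]]).shape)  # -> (2, NUM_WORDS)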
class CustomKerasModelWrapper(ModelWrapper):
def __init__(self, model):
self.model = model
def __call__(self, text_input_list):
x_transform = []
for i, review in enumerate(text_input_list):
tokens = [x.strip(",") for x in review.split()]
            BoW_array = np.zeros((NUM_WORDS,))
import pandas as pd
import numpy as np
from pylab import rcParams
import glob
from natsort import natsorted
import re
from numpy import linalg as LA
import matplotlib.pyplot as plt
import datetime
import os
import matplotlib.gridspec as gridspec
import seaborn as sns
def dir_check(now_time):
if not os.path.exists('ticc/data/{}'.format(now_time)):
os.mkdir('ticc/data/{}/'.format(now_time))
if not os.path.exists('image/{}/'.format(now_time)):
os.mkdir('image/{}/'.format(now_time))
def convert_rad(df):
df1 = df[(df['human'] ==1) & (df['point'] == 2)]
df2 = df[(df['human'] ==1) & (df['point'] == 3)]
df3 = df[(df['human'] ==1) & (df['point'] == 4)]
df1_x = df1['x'];df1_y = df1['y']
df2_x = df2['x'];df2_y = df2['y']
df3_x = df3['x'];df3_y = df3['y']
p1_x = df1_x.to_numpy();p1_y = df1_y.to_numpy()
p2_x = df2_x.to_numpy();p2_y = df2_y.to_numpy()
p3_x = df3_x.to_numpy();p3_y = df3_y.to_numpy()
rad_list = [];frame_count = []
for j in range(len(p3_x)):
u = np.array([p1_x[j] - p2_x[j], p1_y[j] - p2_y[j]])
v = np.array([p3_x[j] - p2_x[j], p3_y[j] - p2_y[j]])
i = np.inner(u, v)
n = LA.norm(u) * LA.norm(v)
if n == 0:
a = 0
else:
c = i / n
a = np.rad2deg(np.arccos(np.clip(c, -1.0, 1.0)))
rad_list.append(a)
frame_count.append(j)
return frame_count,rad_list
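# Hedged usage sketch (assumption, not part of the original script): a single
# synthetic pose in which keypoints 2, 3 and 4 form a right angle.
def _demo_convert_rad():
    df = pd.DataFrame({'human': [1, 1, 1], 'point': [2, 3, 4],
                       'x': [0.0, 0.0, 1.0], 'y': [1.0, 0.0, 0.0]})
    frames, degs = convert_rad(df)
    print(frames, degs)  # -> [0] [90.0]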
def normalization(p):
min_p = p.min()
max_p = p.max()
nor = (p - min_p) / (max_p - min_p)
return nor
def rad_convert_nor(rad_list):
rad = np.array(rad_list)
# count = np.array(frame_count)
nor_list = normalization(rad)
# con_list = np.stack([count, nor_list],1)
return nor_list
def save_dataframe(rad_list,con_list):
df = pd.DataFrame({'frame':con_list[:,0],'rad':con_list[:,1],'nor_rad':rad_list[:,0]})
print(df)
return df
d = lambda a,b: (a - b)**2
first = lambda x: x[0]
second = lambda x: x[1]
def minVal(v1, v2, v3):
if first(v1) <= min(first(v2), first(v3)):
return v1, 0
elif first(v2) <= first(v3):
return v2, 1
else:
return v3, 2
def calc_dtw(A,B):
S = len(A)
T = len(B)
m = [[0 for j in range(T)] for i in range(S)]
m[0][0] = (d(A[0],B[0]), (-1,-1))
for i in range(1,S):
m[i][0] = (m[i-1][0][0] + d(A[i], B[0]), (i-1,0))
for j in range(1,T):
m[0][j] = (m[0][j-1][0] + d(A[0], B[j]), (0,j-1))
for i in range(1,S):
for j in range(1,T):
minimum, index = minVal(m[i-1][j], m[i][j-1], m[i-1][j-1])
indexes = [(i-1,j), (i,j-1), (i-1,j-1)]
m[i][j] = (first(minimum)+d(A[i], B[j]), indexes[index])
return m
def backward(m):
path = []
path.append([len(m)-1, len(m[0])-1])
while True:
path.append(m[path[-1][0]][path[-1][1]][1])
if path[-1]==(0,0):
break
    path = np.array(path)
    # Assumed completion of the truncated function: return the backtracked path.
    return path
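# Hedged usage sketch (assumption, not part of the original script): run the
# DTW recursion on two short toy sequences and backtrack the warping path.
def _demo_dtw():
    A = [1, 2, 3, 4, 3, 2]
    B = [1, 1, 2, 4, 2]
    table = calc_dtw(A, B)
    print('DTW cost:', first(table[-1][-1]))
    print('warping path (end to start):', backward(table))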
#!/usr/bin/env python
from mpi4py import MPI
import sys
sys.path.append( '../stochastic')
from st_utils.coords import *
import vtk
import numpy as np
class Args(object):
pass
def transform_back(pt,pd):
#The reconstructed surface is transformed back to where the
#original points are. (Hopefully) it is only a similarity
#transformation.
#1. Get bounding box of pt, get its minimum corner (left, bottom, least-z), at c0, pt_bounds
#2. Get bounding box of surface pd, get its minimum corner (left, bottom, least-z), at c1, pd_bounds
#3. compute scale as:
# scale = (pt_bounds[1] - pt_bounds[0])/(pd_bounds[1] - pd_bounds[0]);
#4. transform the surface by T := T(pt_bounds[0], [2], [4]).S(scale).T(-pd_bounds[0], -[2], -[4])
pt_bounds=pt.GetBounds()
pd_bounds=pd.GetBounds()
scale = (pt_bounds[1] - pt_bounds[0])/(pd_bounds[1] - pd_bounds[0]);
transp = vtk.vtkTransform()
transp.Translate(pt_bounds[0], pt_bounds[2], pt_bounds[4]);
transp.Scale(scale, scale, scale);
transp.Translate(- pd_bounds[0], - pd_bounds[2], - pd_bounds[4]);
tpd = vtk.vtkTransformPolyDataFilter();
tpd.SetInput(pd);
tpd.SetTransform(transp);
tpd.Update();
return tpd.GetOutput();
class rgbPainter:
def __init__(self):
self.values=[]
def setValue(self,val):
self.values.append(float(val))
def getMinValue(self):
        a = np.array(self.values)
        # Assumed completion of the truncated method.
        return np.min(a)
from time import sleep
import numpy as np
from scipy.fft import fft
from scipy.integrate import simps
NUM_SAMPLES = 1024
SAMPLING_RATE = 44100.
MAX_FREQ = SAMPLING_RATE / 2
FREQ_SAMPLES = NUM_SAMPLES / 8
TIMESLICE = 100 # ms
NUM_BINS = 16
data = {'values': None}
try:
import pyaudio
def update_audio_data():
pa = pyaudio.PyAudio()
stream = pa.open(
format=pyaudio.paInt16,
channels=1,
rate=int(SAMPLING_RATE),
input=True,
frames_per_buffer=NUM_SAMPLES
)
while True:
try:
raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)
signal = raw_data / 32768.0
spectrum = fft(signal)
spectrum = abs(spectrum)[:int(NUM_SAMPLES/2)]
power = spectrum**2
bins = simps(np.split(power, NUM_BINS))
data['values'] = signal, spectrum, bins
except:
continue
except ImportError:
print()
print(" *** Pyaudio package not installed, using synthesized audio data ***")
print()
def fm_modulation(x, f_carrier = 220, f_mod =220, Ind_mod = 1):
y = np.sin(2*np.pi*f_carrier*x + Ind_mod*np.sin(2*np.pi*f_mod*x))
return y
# These are basically picked out of a hat to show something vaguely interesting
_t = np.arange(0, NUM_SAMPLES/SAMPLING_RATE, 1.0/SAMPLING_RATE)
_f_carrier = 2000
_f_mod = 1000
_ind_mod = 1
def update_audio_data():
while True:
# Generate FM signal with drifting carrier and mod frequencies
global _f_carrier, _f_mod, _ind_mod
_f_carrier = max([_f_carrier+np.random.randn()*50, 0])
_f_mod = max([_f_mod+np.random.randn()*20, 0])
            _ind_mod = max([_ind_mod + np.random.randn() * 0.1, 0])
            # Assumed completion of the truncated loop (the 0.1 drift scale is
            # a guess), mirroring the pyaudio branch above.
            signal = fm_modulation(_t, _f_carrier, _f_mod, _ind_mod)
            spectrum = abs(fft(signal))[:int(NUM_SAMPLES / 2)]
            power = spectrum ** 2
            bins = simps(np.split(power, NUM_BINS))
            data['values'] = signal, spectrum, bins
            sleep(TIMESLICE / 1000.)
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 <NAME>, <NAME>, and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
import time
import warnings
import sys
from sklearn.base import BaseEstimator
from joblib import Parallel, delayed
class ReliefF(BaseEstimator):
"""Feature selection using data-mined expert knowledge.
Based on the ReliefF algorithm as introduced in:
Kononenko, Igor et al. Overcoming the myopia of inductive learning
algorithms with RELIEFF (1997), Applied Intelligence, 7(1), p39-55
"""
def __init__(self, n_features_to_select=10, n_neighbors=100, discrete_threshold=10, verbose=False, n_jobs=1):
"""Sets up ReliefF to perform feature selection.
Parameters
----------
n_features_to_select: int (default: 10)
the number of top features (according to the relieff score) to
retain after feature selection is applied.
n_neighbors: int (default: 100)
The number of neighbors to consider when assigning feature
importance scores. More neighbors results in more accurate scores,
but takes longer.
discrete_threshold: int (default: 10)
Value used to determine if a feature is discrete or continuous.
If the number of unique levels in a feature is > discrete_threshold, then it is
considered continuous, or discrete otherwise.
verbose: bool (default: False)
If True, output timing of distance array and scoring
n_jobs: int (default: 1)
The number of cores to dedicate to computing the scores with joblib.
Assigning this parameter to -1 will dedicate as many cores as are available on your system.
We recommend setting this parameter to -1 to speed up the algorithm as much as possible.
"""
self.n_features_to_select = n_features_to_select
self.n_neighbors = n_neighbors
self.discrete_threshold = discrete_threshold
self.verbose = verbose
self.n_jobs = n_jobs
#=========================================================================#
def fit(self, X, y):
"""Computes the feature importance scores from the training data.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
Copy of the ReliefF instance
"""
self._X = X
self._y = y
# Disallow parallelization in Python 2
if self.n_jobs != 1 and sys.version_info[0] < 3:
warnings.warn('Parallelization is currently not supported in Python 2. Settings n_jobs to 1.', RuntimeWarning)
self.n_jobs = 1
# Set up the properties for ReliefF
self._datalen = len(self._X)
self._label_list = list(set(self._y))
discrete_label = (len(self._label_list) <= self.discrete_threshold)
if discrete_label:
self._class_type = 'discrete'
else:
self._class_type = 'continuous'
# Training labels standard deviation -- only used if the training labels are continuous
self._labels_std = 0.
if len(self._label_list) > self.discrete_threshold:
self._labels_std = np.std(self._y, ddof=1)
self._num_attributes = len(self._X[0])
self._missing_data_count = np.isnan(self._X).sum()
# Assign internal headers for the features
xlen = len(self._X[0])
mxlen = len(str(xlen + 1))
self._headers = ['X{}'.format(str(i).zfill(mxlen)) for i in range(1, xlen + 1)]
# Determine the data type
C = D = False
attr = self._get_attribute_info()
for key in attr.keys():
if attr[key][0] == 'discrete':
D = True
if attr[key][0] == 'continuous':
C = True
if C and D:
self.data_type = 'mixed'
print('mix')
elif D and not C:
self.data_type = 'discrete'
elif C and not D:
self.data_type = 'continuous'
print('cont')
else:
raise ValueError('Invalid data type in data set.')
# Compute the distance array between all data points
start = time.time()
attr = self._get_attribute_info()
diffs, cidx, didx = self._dtype_array(attr)
print (didx)
cdiffs = diffs[cidx]
xc = self._X[:,cidx]
xd = self._X[:,didx]
if self._missing_data_count > 0:
self._distance_array = self._distarray_missing(xc, xd, cdiffs)
else:
self._distance_array = self._distarray_no_missing(xc, xd)
if self.verbose:
elapsed = time.time() - start
print('Created distance array in {} seconds.'.format(elapsed))
print('Feature scoring under way ...')
start = time.time()
self.feature_importances_ = self._run_algorithm()
if self.verbose:
elapsed = time.time() - start
print('Completed scoring in {} seconds.'.format(elapsed))
# Compute indices of top features
self.top_features_ = np.argsort(self.feature_importances_)[::-1]
# Delete the internal distance array because it is no longer needed
del self._distance_array
return self
#=========================================================================#
def transform(self, X):
"""Reduces the feature set down to the top `n_features_to_select` features.
Parameters
----------
X: array-like {n_samples, n_features}
Feature matrix to perform feature selection on
Returns
-------
X_reduced: array-like {n_samples, n_features_to_select}
Reduced feature matrix
"""
return X[:, self.top_features_[:self.n_features_to_select]]
#=========================================================================#
def fit_transform(self, X, y):
"""Computes the feature importance scores from the training data, then reduces the feature set down to the top `n_features_to_select` features.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
X_reduced: array-like {n_samples, n_features_to_select}
Reduced feature matrix
"""
self.fit(X, y)
return self.transform(X)
######################### SUPPORTING FUNCTIONS ###########################
def _get_attribute_info(self):
attr = dict()
d = 0
limit = self.discrete_threshold
w = self._X.transpose()
for idx in range(len(w)):
h = self._headers[idx]
z = w[idx]
if self._missing_data_count > 0:
z = z[np.logical_not(np.isnan(z))]
zlen = len(np.unique(z))
if zlen <= limit:
attr[h] = ('discrete', 0, 0, 0)
d += 1
else:
mx = np.max(z)
mn = np.min(z)
attr[h] = ('continuous', mx, mn, mx - mn)
return attr
#==================================================================#
def _distarray_no_missing(self, xc, xd):
"""Distance array for data with no missing values"""
from scipy.spatial.distance import pdist, squareform
attr = self._get_attribute_info()
#------------------------------------------#
def pre_normalize(x):
"""Normalizes continuous features so they are in the same range"""
idx = 0
print(x.shape)
x = x.transpose()
for i in range(len(x)):
cmin = np.min(x[i])
diff = np.max(x[i])-cmin
x[i] -= cmin
x[i] /= diff
idx += 1
return x.transpose()
#------------------------------------------#
if self.data_type == 'discrete':
return squareform(pdist(self._X, metric='hamming'))
elif self.data_type == 'mixed':
d_dist = squareform(pdist(xd, metric='hamming'))
c_dist = squareform(pdist(pre_normalize(xc), metric='cityblock'))
return np.add(d_dist, c_dist) / self._num_attributes
else:
self._X = pre_normalize(self._X)
return squareform(pdist(self._X, metric='cityblock'))
#==================================================================#
def _dtype_array(self, attr):
"""Return mask for discrete(0)/continuous(1) attributes and their indices. Return array of max/min diffs of attributes."""
attrtype = []
attrdiff = []
for key in self._headers:
if attr[key][0] == 'continuous':
attrtype.append(1)
else:
attrtype.append(0)
attrdiff.append(attr[key][3])
attrtype = np.array(attrtype)
cidx = np.where(attrtype == 1)[0]
didx = np.where(attrtype == 0)[0]
attrdiff = np.array(attrdiff)
return attrdiff, cidx, didx
#==================================================================#
def _distarray_missing(self, xc, xd, cdiffs):
"""Distance array for data with missing values"""
cindices = []
dindices = []
for i in range(self._datalen):
cindices.append(np.where(np.isnan(xc[i]))[0])
dindices.append(np.where(np.isnan(xd[i]))[0])
dist_array = Parallel(n_jobs=self.n_jobs)(delayed(self._get_row_missing)(xc, xd, cdiffs, index, cindices, dindices) for index in range(self._datalen))
return np.array(dist_array)
#==================================================================#
def _get_row_missing(self, xc, xd, cdiffs, index, cindices, dindices):
row = np.empty(0, dtype=np.double)
cinst1 = xc[index]
dinst1 = xd[index]
can = cindices[index]
dan = dindices[index]
for j in range(index):
dist = 0
dinst2 = xd[j]
cinst2 = xc[j]
# continuous
cbn = cindices[j]
idx = np.unique(np.append(can, cbn)) # create unique list
c1 = np.delete(cinst1, idx) # remove elements by idx
c2 = np.delete(cinst2, idx)
cdf = np.delete(cdiffs, idx)
# discrete
dbn = dindices[j]
idx = np.unique(np.append(dan, dbn))
d1 = np.delete(dinst1, idx)
d2 = np.delete(dinst2, idx)
# discrete first
dist += len(d1[d1 != d2])
# now continuous
dist += np.sum(np.absolute(np.subtract(c1, c2)) / cdf)
row = np.append(row, dist)
return row
############################# ReliefF ############################################
def _find_neighbors(self, inst):
dist_vect = []
for j in range(self._datalen):
if inst != j:
locator = [inst, j]
if inst < j:
locator.reverse()
dist_vect.append(self._distance_array[locator[0]][locator[1]])
else:
dist_vect.append(sys.maxsize)
dist_vect = np.array(dist_vect)
nn_list = []
match_count = 0
miss_count = 0
        for nn_index in np.argsort(dist_vect):
            # Assumed completion of the truncated method: gather the nearest
            # hits (same class) and misses (other classes), n_neighbors of each.
            if self._y[inst] == self._y[nn_index]:
                if match_count >= self.n_neighbors:
                    continue
                nn_list.append(nn_index)
                match_count += 1
            else:
                if miss_count >= self.n_neighbors:
                    continue
                nn_list.append(nn_index)
                miss_count += 1
            if match_count >= self.n_neighbors and miss_count >= self.n_neighbors:
                break
        return np.array(nn_list)
import numpy as np
import cv2
from scipy.ndimage import label
from .vistools import norm_atten_map
import torch.nn.functional as F
def get_topk_boxes(logits, cam_map, im_file, input_size, crop_size, topk=(1, ), threshold=0.2, mode='union', gt=None):
maxk = max(topk)
    maxk_cls = np.argsort(logits)
import tensorflow as tf
from keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam
import os, sys
import errno
import json
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.misc
from scipy.ndimage import rotate
from scipy.stats import bernoulli
# Some useful constants
DRIVING_LOG_FILE = './data/driving_log.csv'
IMG_PATH = './data/'
STEERING_COEFFICIENT = 0.229
#number_of_epochs = 8
#number_of_samples_per_epoch = 20032
#number_of_validation_samples = 6400
number_of_epochs = 1
number_of_samples_per_epoch = 200
number_of_validation_samples = 64
learning_rate = 1e-4
activation_relu = 'relu'
#tf.python.control_flow_ops = tf
def crop(image, top_percent, bottom_percent):
assert 0 <= top_percent < 0.5, 'top_percent should be between 0.0 and 0.5'
    assert 0 <= bottom_percent < 0.5, 'bottom_percent should be between 0.0 and 0.5'
top = int(np.ceil(image.shape[0] * top_percent))
bottom = image.shape[0] - int(np.ceil(image.shape[0] * bottom_percent))
return image[top:bottom, :]
def resize(image, new_dim):
return scipy.misc.imresize(image, new_dim)
def random_flip(image, steering_angle, flipping_prob=0.5):
head = bernoulli.rvs(flipping_prob)
if head:
return np.fliplr(image), -1 * steering_angle
else:
return image, steering_angle
def random_gamma(image):
gamma = np.random.uniform(0.4, 1.5)
inv_gamma = 1.0 / gamma
table = np.array([((i / 255.0) ** inv_gamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
def random_shear(image, steering_angle, shear_range=200):
rows, cols, ch = image.shape
dx = np.random.randint(-shear_range, shear_range + 1)
random_point = [cols / 2 + dx, rows / 2]
pts1 = np.float32([[0, rows], [cols, rows], [cols / 2, rows / 2]])
pts2 = np.float32([[0, rows], [cols, rows], random_point])
dsteering = dx / (rows / 2) * 360 / (2 * np.pi * 25.0) / 6.0
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, (cols, rows), borderMode=1)
steering_angle += dsteering
return image, steering_angle
def random_rotation(image, steering_angle, rotation_amount=15):
angle = np.random.uniform(-rotation_amount, rotation_amount + 1)
rad = (np.pi / 180.0) * angle
return rotate(image, angle, reshape=False), steering_angle + (-1) * rad
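# Hedged usage sketch (assumption, not part of the original script): chain the
# augmentations on a synthetic frame to illustrate shapes and steering updates.
def _demo_augmentation():
    frame = np.zeros((160, 320, 3), dtype=np.uint8)
    angle = 0.1
    frame, angle = random_shear(frame, angle)
    frame, angle = random_flip(frame, angle)
    frame = random_gamma(frame)
    frame, angle = random_rotation(frame, angle)
    print(frame.shape, angle)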
def min_max(data, a=-0.5, b=0.5):
    data_max = np.max(data)
    data_min = np.min(data)
    # Assumed completion of the truncated helper: rescale to the interval [a, b].
    return a + (b - a) * (data - data_min) / (data_max - data_min)
import OpenEXR
import Imath
import numpy as np
import time
import data.util_exr as exr_utils
import os
def _crop(img, pos, size):
ow, oh = img.shape[0], img.shape[1]
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
# return img.crop((x1, y1, x1 + tw, y1 + th)) #CHANGED
return img[x1:(x1 + tw), y1:(y1 + th), :]
return img
def get_distinct_prefix(dir_path):
names = set()
for f in os.listdir(dir_path):
if os.path.isfile(os.path.join(dir_path, f)):
names.add(f.split(".")[0].rsplit("-",1)[0])
return list(names)
# Divide variance by mean^2 to get relative variance
def CalcRelVar(data, var, calcLog, calcLum=True, calcMean=False):
if calcLum:
denom = np.expand_dims(CalcLuminance(data), axis=2)
elif calcMean:
denom = np.expand_dims(CalcMean(data), axis=2)
else:
denom = data
var = var / ((denom * denom) + 1.0e-5)
if calcLog:
var = LogTransform(var)
return var
# Calculate log transform (with an offset to map zero to zero)
def LogTransform(data):
assert(np.sum(data < 0) == 0)
return np.log(data + 1.0)
# Calculate luminance (3 channels in and 1 channel out)
def CalcLuminance(data):
return (0.2126*data[:,:,0] + 0.7152*data[:,:,1] + 0.0722*data[:,:,2])
# Calculate mean (3 channels in and 1 channel out)
def CalcMean(data):
return (0.3333*data[:,:,0] + 0.3333*data[:,:,1] + 0.3333*data[:,:,2])
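# Hedged usage sketch (assumption, not part of the original module): relative
# variance of a small random RGB buffer, using luminance as the denominator.
def _demo_rel_var():
    rgb = np.random.rand(4, 4, 3).astype(np.float32)
    var = np.random.rand(4, 4, 3).astype(np.float32) * 0.01
    print(CalcRelVar(rgb, var, calcLog=True).shape)  # -> (4, 4, 3)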
# for shading
def loadDisneyEXR_feature_shading(path, FEATURE_LIST):
# time0 = time.time()
prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
variance_path = prefix + "_variance.exr"
normal_path = prefix + "_normal.exr"
depth_path = prefix + "_depth.exr"
texture_path = prefix + "_texture.exr"
visibility_path = prefix + "_visibility.exr"
diffuse_path = prefix + "_diffuse.exr"
specular_path = prefix + "_specular.exr"
# inFile = exr_utils.open(variance_path)
# variance = inFile.get_all()["default"]
if "normal" in FEATURE_LIST:
try:
inFile = exr_utils.open(normal_path)
normal = inFile.get_all()["default"]
normal = _crop(normal, (1,1), 128)
except Exception:
normal = np.zeros((128,128,3))
if "depth" in FEATURE_LIST:
try:
inFile = exr_utils.open(depth_path)
depth = inFile.get_all()["default"]
depth = _crop(depth, (1,1), 128)
except Exception:
depth = np.zeros((128,128,1))
# if "albedo" in FEATURE_LIST: //always load in albedo
try:
inFile = exr_utils.open(texture_path)
texture = inFile.get_all()["default"]
texture = _crop(texture, (1,1), 128)
except Exception:
        texture = np.zeros((128,128,3))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PyDDSBB @ GT - DDPSE
@author: JianyuanZhai
"""
import numpy as np
from PyDDSBB._utilis import LHS
import PyDDSBB._problem as _problem
import PyDDSBB._underestimators
import time
from PyDDSBB._node import Node
from PyDDSBB._splitter import Splitter
from PyDDSBB._machine_learning import LocalSVR
import pyomo.environ as pe
UNDERESTIMATORS = {'Quadratic': PyDDSBB._underestimators.DDCU_Nonuniform}
INFINITY = np.inf
class Tree:
def __init__(self):
self.Tree = {}
self.current_level = 0
self.Tree[self.current_level] = {}
self.flb_current = INFINITY
self.yopt_global = INFINITY
self.xopt_global = None
self.min_xrange = INFINITY
def _activate_node(self):
pass
def _add_level(self):
self.current_level += 1
self.Tree[self.current_level] = {}
self.lowerbound_global = self.flb_current
self.flb_current = INFINITY
self._xopt_hist.append(self.xopt_global)
def _add_node(self, node):
if node.yopt_local <= self.yopt_global:
self.yopt_global = node.yopt_local
self.best_node = node.node
self.best_level = node.level
self.xopt_global = node.xopt_local
if node.flb > self.yopt_global:
node.set_decision(0)
else:
if node.yopt_local == INFINITY:
if node.level == 1:
if self.Tree[node.level - 1][node.pn].yopt_local == INFINITY:
node.set_decision(0)
if node.level > 1:
parent = self.Tree[node.level - 1][node.pn]
if parent.yopt_local == INFINITY and self.Tree[parent.level - 1][parent.pn].yopt_local == INFINITY:
node.set_decision(0)
else:
node.set_decision(1)
if node.flb < self.flb_current:
self.flb_current = node.flb
if node.min_xrange < self.min_xrange:
self.min_xrange = node.min_xrange
self.Tree[self.current_level][node.node] = node
class NodeOperation:
"""
Parent class for all node operation
"""
def __init__(self, multifidelity, split_method, variable_selection, underestimator_option, minimum_bd):
"""
Inputs
------
multifidelity: bool
True to turn on multifidelity option
False to turn off multifidelity option
split_method: str
variable_selection: str
underestimator_option: str
minimum_bd: float
"""
self._underestimate = UNDERESTIMATORS[underestimator_option]()._underestimate
self.multifidelity = multifidelity
self.split = Splitter(split_method, variable_selection, minimum_bd).split
self.variable_selection = variable_selection
if multifidelity is not False or self.variable_selection == 'svr_var_select':
self.MF = LocalSVR()
self.time_underestimate = 0.
def _set_adaptive(self, adaptive_number):
"""
Set adaptive sampling rule
Input
-----
adaptive_number: int
"""
self.adaptive_number = adaptive_number
def _adaptive_sample(self):
"""
Use augmented latin hypercube strategy to add more samples
"""
x_corner = np.zeros((2,self.dim))
x_corner[1,:] = 1.0
self._update_sample(x_corner)
if self.adaptive_number - len(self.y) > 0:
Xnew = LHS.augmentLHS(self.X, self.adaptive_number - len(self.y))
self._update_sample(Xnew)
## Check if cornor points the samples already, if not sample them
def _min_max_rescaler(self, Xnew):
"""
Scale Xnew by the original bounds
Input
------
Xnew: ndarry of shape (n_samples, n_variables)
Return
------
xnew: ndarray of shape (n_samples, n_variables)
"""
xnew = Xnew*self.xrange + self.bounds[0, :]
return xnew
def _split_node(self, parent):
"""
Split a node into two child node apply split method
Input
-----
parent: node
Returns
-------
child1, child2: node
"""
child_bound1, child_bound2 = self.split(parent)
child1 = self._create_child(child_bound1, parent)
child2 = self._create_child(child_bound2, parent)
return child1, child2
class BoxConstrained(NodeOperation):
"""
Node operations for box-constrained problems
Derived class from NodeOperation
"""
def __init__(self, multifidelity, split_method, variable_selection, underestimator_option, minimum_bd):
super().__init__(multifidelity, split_method, variable_selection, underestimator_option, minimum_bd)
def _add_problem(self, problem):
"""
Add problem to node operator
Input
-----
problem: DDSBBModel
"""
self.simulator = _problem.BoundConstrainedSimulation(problem)
self.bounds = self.simulator._bounds
self.dim = self.simulator._dim
def _min_max_single_scaler(self):
"""
Scale one sample between 0 and 1 based on the variable bounds and range of y
"""
self.ymin_local = float(self.y)
self.ymax_local = float(self.y)
self.xrange = (self.bounds[1, :] - self.bounds[0, :])
self.X = (self.x - self.bounds[0, :])/self.xrange
if self.valid_ind != []:
self.yopt_local = float(self.y)
self.xopt_local = self.x
else:
self.yopt_local = INFINITY
self.xopt_local = None
self.yrange = self.ymax_local - self.ymin_local
if self.yrange== 0. :
self.Y = 1.
else:
self.Y = (self.y - self.ymin_local)/ self.yrange
def _min_max_scaler(self):
"""
Scale current samples between 0 and 1 based on the variable bounds and range of y
"""
if self.valid_ind != []:
self.yopt_local = min(self.y[self.valid_ind])
min_ind = np.where(self.y == self.yopt_local)
self.xopt_local = self.x[min_ind]
self.ymin_local = min(self.y[self.valid_ind])
self.ymax_local = max(self.y[self.valid_ind])
else:
self.yopt_local = INFINITY
self.xopt_local = None
self.ymin_local = min(self.y)
self.ymax_local = max(self.y)
self.yrange = self.ymax_local - self.ymin_local
self.xrange = self.bounds[1, :] - self.bounds[0, :]
if self.yrange== 0. :
self.Y = np.ones(self.y.shape)
else:
self.Y = (self.y - self.ymin_local)/ self.yrange
self.X = (self.x - self.bounds[0, :])/self.xrange
def _create_child(self, child_bounds, parent):
"""
create a child node
Inputs
------
child_bounds: ndarray of shape (2, n_variables)
bounds of the search space of the child node
lower bound in row 1
upper bound in row 2
parent: node
parent node
Return
------
child: node
child node with added samples, LB and UB informations
"""
self.level = parent.level + 1
ind1 = np.where((parent.x <= child_bounds[1, :]).all(axis=1) == True)
ind2 = np.where((parent.x >= child_bounds[0, :]).all(axis=1) == True)
ind = np.intersect1d(ind1,ind2)
self.x = parent.x[ind, :]
self.y = parent.y[ind]
self.valid_ind = [i for i in range(len(ind)) if self.y[i] != INFINITY]
self.bounds = child_bounds
self._min_max_scaler()
self._adaptive_sample()
flb = self._training_DDCU()
self.node += 1
child = Node(parent.level + 1, self.node, self.bounds, parent.node)
child.add_data(self.x, self.y)
child.set_opt_flb(flb)
child.set_opt_local(self.yopt_local, self.xopt_local)
if self.variable_selection == 'svr_var_selection':
child.add_score(self.MF.rank())
child.add_valid_ind(self.valid_ind)
return child
def _update_sample(self, Xnew):
"""
Update current sample set with new samples Xnew
Input
-----
Xnew: ndarray of shape (n_samples, n_variables)
new samples scaled between 0 and 1
"""
index = [i for i in range(len(Xnew)) if (np.round(abs(self.X - Xnew[i, :]), 3) != 0.).all()]
if index != []:
Xnew = Xnew[index, :]
xnew = self._min_max_rescaler(Xnew)
ynew = self.simulator._simulate(xnew)
            self.X = np.concatenate((self.X, Xnew), axis=0)
import os
import numpy as np
from numpy.core.fromnumeric import ptp
import raisimpy as raisim
import time
import sys
import datetime
import matplotlib
import matplotlib.pyplot as plt
from xbox360controller import Xbox360Controller
xbox = Xbox360Controller(0, axis_threshold=0.02)
# v_ref = xbox.trigger_r.value * (-4) - 3
# v_ref = xbox.trigger_r.value * (-7) - 5
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/utils")
print(os.path.abspath(os.path.dirname(__file__))) # get current file path
from ParamsCalculate import ControlParamCal
import visualization
import FileSave
raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/activation.raisim")
ball1_urdf_file = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/urdf/ball.urdf"
# ball_file = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/urdf/meshes/ball/ball.obj"
# ball1_urdf_file = "/home/stylite-y/Documents/Raisim/raisim_workspace/raisimLib/rsc/anymal/urdf/anymal.urdf"
print(ball1_urdf_file)
world = raisim.World()
ground = world.addGround(0)
t_step = 0.0001
world.setTimeStep(t_step)
gravity = world.getGravity()
# print(1)
ball1 = world.addArticulatedSystem(ball1_urdf_file)
print(ball1.getDOF())
ball1.setName("ball1")
gravity = world.getGravity()
print(gravity)
print(ball1.getGeneralizedCoordinateDim())
jointNominalConfig = np.array([0.0, 0.0, 0.15, 1.0, 0.0, 0.0, 0.0])
jointVelocityTarget = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
"""Tools for Loop-detection analysis."""
from multiprocessing import Pool
from typing import Tuple, Sequence, Iterator
from dataclasses import dataclass
import numpy as np
import pandas as pd
from scipy import ndimage, stats, sparse
from sklearn.cluster import DBSCAN
from statsmodels.stats import multitest
from .utils.utils import CPU_CORE, suppress_warning
from .utils.numtools import mask_array, index_array, Toeplitz
from .chrommatrix import ChromMatrix, Array
HKernels = Tuple[Sequence[np.ndarray], Tuple[int, int]]
@dataclass
class HiccupsPeaksFinder(object):
chrom_ma: ChromMatrix
inner_radius: int = 2
outer_radius: int = 5
band_width: int = 600
fdrs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
sigs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
fold_changes: Tuple[float, float, float, float] = (1.5, 1.5, 1.5, 1.5)
num_cpus: int = max(1, CPU_CORE - 2)
def __post_init__(self):
self.kernels: HKernels = self.fetch_kernels(self.inner_radius, self.outer_radius)
def __call__(self) -> pd.DataFrame:
observed = sparse.csr_matrix(self.chrom_ma.ob(sparse=True))
decay = self.chrom_ma.decay()
weights = self.chrom_ma.weights
# fetch chunk slices
chunks: Iterator[Tuple[slice, slice]] = self.get_chunk_slices(
length=self.chrom_ma.shape[0],
band_width=self.band_width,
height=self.band_width,
ov_length=2 * self.outer_radius
)
# fetching backgrounds model for nonzero pixles for each chunk for 4 kernels
with Pool(processes=self.num_cpus) as pool:
params = (
(observed[s1, s2], (decay[s1], decay[s2]), (1 / weights[s1], 1 / weights[s2]),
self.kernels, self.band_width)
for s1, s2 in chunks
)
backgounds = pool.starmap(self.calculate_chunk, params)
# indices are 0-based, plus onto the start index in the original matrix
for (indices, *_), chunk in zip(backgounds, chunks):
x_st, y_st = chunk[0].start, chunk[1].start
indices += np.array([[x_st], [y_st]])
# 1. gathering backgrounds info of all nonzero pixels
indices = np.concatenate([b[0] for b in backgounds], axis=1)
contacts_array = np.concatenate([b[1] for b in backgounds])
lambda_array = np.concatenate([b[2] for b in backgounds], axis=1)
enrich_ratio = np.concatenate([b[3] for b in backgounds])
# print(f'Before multiple test: {indices[0].size}')
# 2. Multiple test. Filtering insignificant point after calculating padj using fdr_bh multiple test method.
pvals, padjs, rejects = self.multiple_test(contacts_array, lambda_array, fdrs=self.fdrs, sigs=self.sigs)
peaks = (indices, contacts_array, lambda_array, enrich_ratio, pvals, padjs)
peaks = tuple(mask_array(np.all(rejects, axis=0), *peaks))
# print(f'After multiple test: {peaks[0][0].size}')
        # 3. Apply greedy clustering to merge points into confident peaks.
peak_indexs, shapes = self.cluster(peaks[0], peaks[1], peaks[2])
peaks = (*tuple(index_array(peak_indexs, *peaks)), shapes)
# print(f'After cluster: {peaks[0][0].size}')
# 4. Filter by gap_region, fold changes(enrichment) and singlet peak's sum-qvalue.
valid_mask = self.filter(peaks, gap_mask=~self.chrom_ma.mask, fold_changes=self.fold_changes)
peaks = tuple(mask_array(valid_mask, *peaks))
# indices, contacts_array, lambda_array, enrich_ratio, pvals, padjs, shape = peaks
# print(f'After filter: {peaks[0][0].size}')
        peaks_df = self.build_results(peaks, binsize=self.chrom_ma.binsize)
        return peaks_df
@staticmethod
def fetch_kernels(p: int, w: int) -> HKernels:
"""Return kernels of four regions: donut region, vertical, horizontal, lower_left region.
"""
def region_to_kernel(*regions) -> np.ndarray:
for region in regions:
kernel = np.full((2 * w + 1, 2 * w + 1), 0, dtype=np.int)
for i, j in region:
kernel[i + w, j + w] = 1
yield kernel
def rect(x_start, x_len, y_start, y_len):
return {
(i, j)
for i in range(x_start, x_start + x_len)
for j in range(y_start, y_start + y_len)
}
length = 2 * w + 1
center = rect(-p, 2 * p + 1, -p, 2 * p + 1)
strips = rect(-w, length, 0, 1) | rect(0, 1, -w, length)
donut = rect(-w, length, -w, length) - (center | strips)
vertical = rect(-w, length, -1, 3) - center
horizontal = rect(-1, 3, -w, length) - center
lower_left = rect(1, w, -w, w) - center
return tuple(region_to_kernel(donut, vertical, horizontal, lower_left)), (p, w)
@staticmethod
def get_chunk_slices(length: int,
band_width: int,
height: int,
ov_length: int) -> Iterator[Tuple[slice, slice]]:
"""Return slices of all chunks along the digonal that ensure the band region with specified width is fully covered.\n
Band region's left border is the main diagonal.
"""
band_width *= 2
start = 0
while 1:
y_end = start + band_width
x_end = start + height
if (y_end < length) and (x_end < length):
yield slice(start, x_end), slice(start, y_end)
start += height - ov_length
else:
yield slice(start, length), slice(start, length)
break
@staticmethod
@suppress_warning
def calculate_chunk(observed: Array,
exps: Tuple[np.ndarray, np.ndarray],
factors: Tuple[np.ndarray, np.ndarray],
kernels: HKernels,
band_width: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""For a given chunk, calculate lambda values and contact(true counts) values of each pixel in regions specified in kernels.
"""
ks, (r1, r2) = kernels
num_kernels = len(ks)
try:
if isinstance(observed, sparse.spmatrix):
observed = observed.toarray()
expected = Toeplitz(*exps)[:]
observed[np.isnan(observed)] = 0
zero_region = observed == 0
expected[zero_region] = 0
# calculate lambda array for all nonzero pixels in valid region under each kernel
x, y = observed.nonzero()
dis = y - x
mask = ((dis <= (band_width - 2 * r2))
& (x < (observed.shape[0] - r2))
& (dis >= r2)
& (x >= r2))
x, y = x[mask], y[mask]
if x.size == 0:
return np.empty((2, 0)), np.empty(0), np.empty((num_kernels, 0)), np.empty(0)
ratio_array = np.full((num_kernels, x.size), 0, dtype=np.float)
oe_matrix = observed / expected
for index, kernel in enumerate(ks):
# ob_sum = ndimage.convolve(observed, kernel)
# ex_sum = ndimage.convolve(expected, kernel)
# ratio_array[index] = (ob_sum / ex_sum)[(x, y)]
# Another option
# counts = ndimage.convolve(valid_mat, kernel)
ratio = ndimage.convolve(oe_matrix, kernel) / kernel.sum()
ratio_array[index] = ratio[x, y]
lambda_array = (ratio_array
* expected[x, y]
* factors[0][x]
* factors[1][y])
inner_len = 2 * r1 + 1
outer_len = 2 * r2 + 1
inner_num = inner_len ** 2
percentage = (inner_num / outer_len ** 2)
plateau_ma = oe_matrix - ndimage.percentile_filter(
oe_matrix,
int((1 - percentage) * 100),
(outer_len, outer_len)
)
plateau_region = (plateau_ma > 0).astype(np.int16)
enrich_ratio = ndimage.convolve(
plateau_region,
np.ones((inner_len, inner_len))
)[x, y] / inner_num
nan_mask = np.isnan(lambda_array)
lambda_array[nan_mask] = 0
contacts_array = observed[x, y] * factors[0][x] * factors[1][y]
non_nan_mask = ~(np.any(nan_mask, axis=0) | np.isnan(contacts_array))
indices = np.vstack((x, y))
# Another option is to prefilter by fold changes
return (indices[:, non_nan_mask],
contacts_array[non_nan_mask],
lambda_array[:, non_nan_mask],
enrich_ratio[non_nan_mask])
except Exception as e:
return np.empty((2, 0)), np.empty(0), np.empty((num_kernels, 0)), np.empty(0)
@staticmethod
def multiple_test(contact_array: np.ndarray,
lambda_array: np.ndarray,
fdrs: Tuple[float, float, float, float],
sigs: Tuple[float, float, float, float],
method: str = "fdr_bh") -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Conduct poisson test on each pixel and multiple test correction for all tests.
"""
def lambda_chunks(lambda_array: np.ndarray,
full: bool = False,
base: float = 2,
exponent: float = 1 / 3) -> Iterator[Tuple[float, float, np.ndarray]]:
"""Assign values in lambda_array to logarithmically spaced chunks of every base**exponent range.
"""
min_value = np.min(lambda_array)
num = int(np.ceil(np.log2(np.max(lambda_array)) / exponent) + 1)
lambda_values = np.logspace(
start=0,
stop=(num - 1) * exponent,
num=num,
base=base
)
for start, end in zip(lambda_values[:-1], lambda_values[1:]):
if not full and min_value > end:
continue
mask = (start < lambda_array) & (lambda_array <= end)
yield start, end, mask
num_test, len_test = lambda_array.shape
pvals = np.full((num_test, len_test), 1, np.float)
padjs = np.full((num_test, len_test), 1, np.float)
rejects = np.full((num_test, len_test), False, np.bool)
for test_i in range(num_test):
for _, end, lambda_mask in lambda_chunks(lambda_array[test_i]):
chunk_size = lambda_mask.sum()
if chunk_size == 0:
continue
# poisson_model = stats.poisson(np.ones(chunk_size) * end)
poisson_model = stats.poisson(lambda_array[test_i, lambda_mask])
_pvals = 1 - poisson_model.cdf(contact_array[lambda_mask])
reject, _padjs, _, _ = multitest.multipletests(
pvals=_pvals,
alpha=fdrs[test_i],
method=method
)
rejects[test_i][lambda_mask] = reject
padjs[test_i][lambda_mask] = _padjs
pvals[test_i][lambda_mask] = _pvals
rejects = rejects & (padjs < np.array(sigs)[:, None])
return pvals, padjs, rejects
@staticmethod
def cluster(indices: np.ndarray,
contacts: np.ndarray,
lambda_array: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
dbscan = DBSCAN(2)
dbscan.fit(indices.T)
peak_indexs, shapes = [], []
for cluster_id in set(dbscan.labels_) - {-1}:
            point_indexs = np.where(dbscan.labels_ == cluster_id)
import scipy
import scipy.misc
import numpy as np
def load(path):
img = scipy.misc.imread(path)
## TODO check what is the possible returned shapes
if img.shape[-1] == 1: # grey image
img = np.array([img, img, img])
elif img.shape[-1] == 4: # alpha component
img = img[:,:,:3]
return img
def save(path, img):
    img = np.clip(img, 0, 255)
    # Assumed completion of the truncated helper; scipy.misc.imsave is removed
    # in newer SciPy (see the imageio variant below).
    scipy.misc.imsave(path, img.astype(np.uint8))
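# Hedged alternative (assumption, not part of the original module):
# scipy.misc.imread/imsave were removed in SciPy >= 1.3, so on newer
# environments the same helpers can be written with imageio instead.
import imageio

def load_imageio(path):
    img = np.asarray(imageio.imread(path))
    if img.ndim == 2:  # grey image -> replicate to three channels
        img = np.stack([img, img, img], axis=-1)
    elif img.shape[-1] == 4:  # drop the alpha channel
        img = img[:, :, :3]
    return img

def save_imageio(path, img):
    imageio.imwrite(path, np.clip(img, 0, 255).astype(np.uint8))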
# pylint: disable=E1101
"""
Generic classes and utility functions
"""
from datetime import timedelta
import numpy as np
class FlopyBinaryData:
"""
The FlopyBinaryData class is a class to that defines the data types for
integer, floating point, and character data in MODFLOW binary
files. The FlopyBinaryData class is the super class from which the
specific derived classes are formed. This class should not be
instantiated directly.
"""
def __init__(self):
self.integer = np.int32
self.integerbyte = self.integer(1).nbytes
self.character = np.uint8
self.textbyte = 1
return
def set_float(self, precision):
self.precision = precision
if precision.lower() == "double":
self.real = np.float64
self.floattype = "f8"
else:
self.real = np.float32
self.floattype = "f4"
self.realbyte = self.real(1).nbytes
return
def read_text(self, nchar=20):
bytesvalue = self._read_values(self.character, nchar).tobytes()
return bytesvalue.decode().strip()
def read_integer(self):
return self._read_values(self.integer, 1)[0]
def read_real(self):
return self._read_values(self.real, 1)[0]
def read_record(self, count, dtype=None):
if dtype is None:
dtype = self.dtype
return self._read_values(dtype, count)
def _read_values(self, dtype, count):
        return np.fromfile(self.file, dtype, count)
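# Hedged usage sketch (assumption, not part of flopy): write three float32
# values to a scratch file and read them back through the reader.
def _demo_binary_read(path='scratch.bin'):
    np.array([1.0, 2.0, 3.0], dtype=np.float32).tofile(path)
    reader = FlopyBinaryData()
    reader.set_float('single')
    with open(path, 'rb') as fh:
        reader.file = fh
        print(reader.read_record(3, dtype=reader.real))  # -> [1. 2. 3.]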
import datetime
from dateutil.relativedelta import *
from fuzzywuzzy import fuzz
import argparse
import glob
import numpy as np
import pandas as pd
from scipy.stats import ttest_1samp
import sys
import xarray as xr
from paths_bra import *
sys.path.append('./..')
from refuelplot import *
setup()
from utils import *
gen_path = bra_path + '/generation'
# get GWA version
parser = argparse.ArgumentParser(description='Insert optionally GWA')
parser.add_argument('-GWA')
args = parser.parse_args()
if(args.GWA == None):
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path2 = results_path
results_path = results_path + '/results_GWA2'
# load generation data
print('load generation data')
# load usinas hourly
if gen_path + '/hourly/usinas.pkl' not in glob.glob(gen_path + '/hourly/*.pkl'):
USIh = pd.read_csv(gen_path + '/hourly/Comparativo_Geração_de_Energia_Semana_data_usinas.csv',
sep = ';', index_col = 0, parse_dates = True, dayfirst = True).iloc[1:,[6,8]].sort_index()
# remove missing values
USIh = USIh.loc[USIh.index.notnull()].dropna()
USIh.columns = ['usina','prod_GWh']
# in RIO DO FOGO there is one duplicate hour after one missing hour -> change timestamps of those hours
idxUSIh = USIh.index.values
midxUSIh = USIh.reset_index().set_index(['usina','Data Escala de Tempo 1 GE Comp 3']).index
idxUSIh[midxUSIh.duplicated(keep='last')] = idxUSIh[midxUSIh.duplicated(keep='first')] - np.timedelta64(1,'h')
USIh.index = pd.DatetimeIndex(idxUSIh)
USIhs = USIh.reset_index().set_index(['usina','index']).unstack(level=0).prod_GWh
USIhs.to_csv(gen_path + '/hourly/usinas.csv')
USIhs.to_pickle(gen_path + '/hourly/usinas.pkl')
wpUSIhs = pd.read_pickle(gen_path + '/hourly/usinas.pkl')
# load and match aneel and ons windparks
def get_cap_df(cap,comdate):
com = pd.DataFrame({'capacity': cap}).groupby(comdate).sum()
cap_cum = com.capacity.cumsum()
# if only years given for commissioning dates -> gradual capacity increase over year, full capacity at end of year
if type(cap_cum.index.values[0]) == np.int64:
cap_cum.index = [np.datetime64(str(int(year))+"-12-31 23:00:00") for year in cap_cum.index.values]
# create yearly dates at yearends
drcc = pd.date_range(np.datetime64('2005-12-31 23:00:00'),
np.datetime64('2019-12-31 23:00:00'),freq= 'y')
cap_cum = pd.Series(drcc.map(cap_cum),index = drcc)
        # if first year empty: either year before or 0 if nothing before
if(sum(com.index<2000) > 0):
cap_cum[0] = com.cumsum()[com.index<2000].max()
else:
cap_cum[0] = 0
# if missing years -> put capacity of year before
cap_cum = cap_cum.ffill()
dr = pd.date_range('1/1/2006','31/12/2019 23:00:00',freq = 'h')
cap_ts = pd.Series(dr.map(cap_cum),index = dr)
cap_ts[0] = cap_cum[cap_cum.index<=pd.Timestamp('2006-01-01')].max()
if type(comdate[0]) == np.int64:
return(cap_ts.interpolate(method='linear'))
else:
return(cap_ts.fillna(method='ffill'))
def matchWords(word, statements):
# function to match a word to different statements
# output: ratio of matching (0-100) for all provided statements
results = []
for s in statements:
r = fuzz.ratio(word, s)
results.append(r)
return results
def match_string(string, array):
# function for matching casefolded strings
Slc = string.strip().casefold()
Alc = [arr.casefold() for arr in array.str.strip().unique()]
scores = matchWords(Slc, Alc)
mscore = max(scores)
    strarr = array.unique()[np.where(np.array(scores) == mscore)[0][0]]
    # Assumed completion of the truncated function: return the best-matching string.
    return strarr
import itertools
from typing import Union, Sequence, Optional
import numpy as np
_RealArraylike = Union[np.ndarray, float]
def _single_qubit_unitary(
theta: _RealArraylike, phi_d: _RealArraylike, phi_o: _RealArraylike
) -> np.ndarray:
"""Single qubit unitary matrix.
Args:
theta: cos(theta) is magnitude of 00 matrix element. May be a scalar
or real ndarray (for broadcasting).
phi_d: exp(i phi_d) is the phase of 00 matrix element. May be a scalar
or real ndarray (for broadcasting).
phi_o: i exp(i phi_o) is the phase of 10 matrix element. May be a scalar
or real ndarray (for broadcasting).
Notes:
The output is vectorized with respect to the angles. I.e, if the angles
are (broadcastable) arraylike objects whose sum would have shape (...),
the output is an array of shape (...,2,2), where the final two indices
correspond to unitary matrices.
"""
U00 = np.cos(theta) * np.exp(1j * np.asarray(phi_d))
U10 = 1j * np.sin(theta) * np.exp(1j * np.asarray(phi_o))
# This implementation is agnostic to the shapes of the angles, as long
# as they can be broadcast together.
Udiag = np.array([[U00, np.zeros_like(U00)], [np.zeros_like(U00), U00.conj()]])
Udiag = np.moveaxis(Udiag, [0, 1], [-2, -1])
Uoff = np.array([[np.zeros_like(U10), -U10.conj()], [U10, np.zeros_like(U10)]])
Uoff = np.moveaxis(Uoff, [0, 1], [-2, -1])
return Udiag + Uoff
def random_qubit_unitary(
shape: Sequence[int] = (),
randomize_global_phase: bool = False,
rng: Optional[np.random.RandomState] = None,
) -> np.ndarray:
"""Random qubit unitary distributed over the Haar measure.
The implementation is vectorized for speed.
Args:
shape: The broadcasted shape of the output. This is used to generate
a tensor of random unitaries with dimensions tuple(shape) + (2,2).
randomize_global_phase: (Default False) If True, a global phase is also
sampled randomly. This corresponds to sampling over U(2) instead of
SU(2).
rng: Random number generator to be used in sampling. Default is
numpy.random.
"""
rng = np.random if rng is None else rng
theta = np.arcsin(np.sqrt(rng.rand(*shape)))
phi_d = rng.rand(*shape) * np.pi * 2
phi_o = rng.rand(*shape) * np.pi * 2
out = _single_qubit_unitary(theta, phi_d, phi_o)
if randomize_global_phase:
out = np.moveaxis(out, (-2, -1), (0, 1))
out *= np.exp(1j * np.pi * 2 * rng.rand(*shape))
out = np.moveaxis(out, (0, 1), (-2, -1))
return out
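# Hedged sanity check (assumption, not part of the original file): sampled
# matrices should be unitary with unit determinant when the global phase is
# not randomized.
def _demo_random_unitary():
    u = random_qubit_unitary((5,), rng=np.random.RandomState(0))
    prod = np.einsum('...ab,...cb->...ac', u, u.conj())
    print(np.allclose(prod, np.eye(2)), np.allclose(np.linalg.det(u), 1))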
def vector_kron(first: np.ndarray, second: np.ndarray) -> np.ndarray:
"""Vectorized implementation of kron for square matrices."""
s_0, s_1 = first.shape[-2:], second.shape[-2:]
assert s_0[0] == s_0[1]
assert s_1[0] == s_1[1]
out = np.einsum('...ab,...cd->...acbd', first, second)
s_v = out.shape[:-4]
return out.reshape(s_v + (s_0[0] * s_1[0],) * 2)
# Encode all possible local operations that produce equivalent KAK vectors
# and which can also be detected by the entanglement fidelity function
# These operations can be decomposed as s_x^a s_y^b s_z^c n_j p, where
# s_j denotes a pi/2 shift in index j (a,b,c are 0 or 1), n_j is a pi rotation
# about the j axis, and p is a permutation of the three indices.
# all permutations of (1,2,3)
_perms_123 = np.zeros((6, 3, 3), int)
for ind, perm in enumerate(itertools.permutations((0, 1, 2))):
_perms_123[ind, (0, 1, 2), perm] = 1
_negations = np.zeros((4, 3, 3), int)
_negations[0, (0, 1, 2), (0, 1, 2)] = 1
_negations[1, (0, 1, 2), (0, 1, 2)] = (1, -1, -1)
_negations[2, (0, 1, 2), (0, 1, 2)] = (-1, 1, -1)
_negations[3, (0, 1, 2), (0, 1, 2)] = (-1, -1, 1)
_offsets = np.zeros((8, 3))
_offsets[1, 0] = np.pi / 2
_offsets[2, 1] = np.pi / 2
_offsets[3, 2] = np.pi / 2
_offsets[4, (1, 2)] = np.pi / 2
_offsets[5, (0, 2)] = np.pi / 2
_offsets[6, (0, 1)] = np.pi / 2
_offsets[7, (0, 1, 2)] = np.pi / 2
def _kak_equivalent_vectors(kak_vec) -> np.ndarray:
"""Generates all KAK vectors equivalent under single qubit unitaries."""
# Technically this is not all equivalent vectors, but a subset of vectors
# which are not guaranteed to give the same answer under the infidelity
# formula.
kak_vec = np.asarray(kak_vec, dtype=float)
# Apply all permutations, then all negations, then all shifts.
out = np.einsum('pab,...b->...pa', _perms_123, kak_vec) # (...,6,3)
out = np.einsum('nab,...b->...na', _negations, out) # (...,6,4,3)
# (...,8,6,4,3)
out = out[..., np.newaxis, :, :, :] + _offsets[:, np.newaxis, np.newaxis, :]
# Merge indices
return np.reshape(out, out.shape[:-4] + (192, 3))
def kak_vector_infidelity(
k_vec_a: np.ndarray, k_vec_b: np.ndarray, ignore_equivalent_vectors: bool = False
) -> np.ndarray:
r"""The locally invariant infidelity between two KAK vectors.
This is the quantity
$$
\min 1 - F_e( \exp(i k_a · (XX,YY,ZZ)) kL \exp(i k_b · (XX,YY,ZZ)) kR)
$$
where $F_e$ is the entanglement (process) fidelity and the minimum is taken
over all 1-local unitaries kL, kR.
Args:
k_vec_a: A 3-vector or tensor of 3-vectors with shape (...,3).
k_vec_b: A 3-vector or tensor of 3-vectors with shape (...,3). If both
k_vec_a and k_vec_b are tensors, their shapes must be compatible
for broadcasting.
ignore_equivalent_vectors: If True, the calculation ignores any other
KAK vectors that are equivalent to the inputs under local unitaries.
The resulting infidelity is then only an upper bound to the true
infidelity.
Returns:
An ndarray storing the locally invariant infidelity between the inputs.
If k_vec_a or k_vec_b is a tensor, the result is vectorized.
"""
k_vec_a, k_vec_b = np.asarray(k_vec_a), np.asarray(k_vec_b)
if ignore_equivalent_vectors:
k_diff = k_vec_a - k_vec_b
out = 1 - np.product(np.cos(k_diff), axis=-1) ** 2
out -= np.product(np.sin(k_diff), axis=-1) ** 2
return out
# We must take the minimum infidelity over all possible locally equivalent
# and nontrivial KAK vectors. We need only consider equivalent vectors
# of one input.
# Ensure we consider equivalent vectors for only the smallest input.
if k_vec_a.size < k_vec_b.size:
k_vec_a, k_vec_b = k_vec_b, k_vec_a # coverage: ignore
k_vec_a = k_vec_a[..., np.newaxis, :] # (...,1,3)
k_vec_b = _kak_equivalent_vectors(k_vec_b) # (...,192,3)
k_diff = k_vec_a - k_vec_b
out = 1 - np.product(np.cos(k_diff), axis=-1) ** 2
out -= np.product(np.sin(k_diff), axis=-1) ** 2 # (...,192)
return out.min(axis=-1)
def in_weyl_chamber(kak_vec: np.ndarray) -> np.ndarray:
"""Whether a given collection of coordinates is within the Weyl chamber.
Args:
kak_vec: A numpy.ndarray tensor encoding a KAK 3-vector. Input may be
broadcastable with shape (...,3).
Returns:
np.ndarray of boolean values denoting whether the given coordinates
are in the Weyl chamber.
"""
kak_vec = np.asarray(kak_vec)
assert kak_vec.shape[-1] == 3, 'Last index of input must represent a 3-vector.'
# For convenience
xp, yp, zp = kak_vec[..., 0], kak_vec[..., 1], kak_vec[..., 2]
pi_4 = np.pi / 4
x_inside = np.logical_and(0 <= xp, xp <= pi_4)
y_inside = np.logical_and(0 <= yp, yp <= pi_4)
y_inside = np.logical_and(y_inside, xp >= yp)
z_inside = np.abs(zp) <= yp
return np.logical_and.reduce((x_inside, y_inside, z_inside))
def weyl_chamber_mesh(spacing: float) -> np.ndarray:
"""Cubic mesh of points in the Weyl chamber.
Args:
spacing: Euclidean distance between neighboring KAK vectors.
Returns:
np.ndarray of shape (N,3) corresponding to the points in the Weyl
chamber.
"""
if spacing < 1e-3: # memory required ~ 1 GB
raise ValueError(f'Generating a mesh with spacing {spacing} may cause system to crash.')
# Uniform mesh
disps = np.arange(-np.pi / 4, np.pi / 4, step=spacing)
mesh_points = np.array([a.ravel() for a in np.array(np.meshgrid(*(disps,) * 3))])
mesh_points = np.moveaxis(mesh_points, 0, -1)
# Reduce to points within Weyl chamber
return mesh_points[in_weyl_chamber(mesh_points)]
_XX = np.zeros((4, 4))
_XX[(0, 1, 2, 3), (3, 2, 1, 0)] = 1
_ZZ = np.diag([1, -1, -1, 1])
_YY = -_XX @ _ZZ
_kak_gens = np.array([_XX, _YY, _ZZ])
def kak_vector_to_unitary(vector: np.ndarray) -> np.ndarray:
r"""Convert a KAK vector to its unitary matrix equivalent.
Args:
vector: A KAK vector shape (..., 3). (Input may be vectorized).
Returns:
unitary: Corresponding 2-qubit unitary, of the form
$exp( i k_x \sigma_x \sigma_x + i k_y \sigma_y \sigma_y
+ i k_z \sigma_z \sigma_z)$.
matrix or tensor of matrices of shape (..., 4,4).
"""
vector = np.asarray(vector)
    gens = np.einsum('...a,abc->...bc', vector, _kak_gens)
    # Assumed completion of the truncated function: gens is Hermitian, so
    # exp(i * gens) follows from its eigendecomposition.
    evals, evecs = np.linalg.eigh(gens)
    return np.einsum('...ab,...b,...cb->...ac', evecs, np.exp(1j * evals), evecs.conj())
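# Hedged sanity check (assumption, not part of the original file): the zero
# KAK vector should map to the 4x4 identity, and any KAK vector should give a
# unitary matrix.
def _demo_kak_to_unitary():
    print(np.allclose(kak_vector_to_unitary(np.zeros(3)), np.eye(4)))
    u = kak_vector_to_unitary(np.array([np.pi / 8, np.pi / 16, 0.05]))
    print(np.allclose(u @ u.conj().T, np.eye(4)))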
"""
Created on June 6th, 2019. This script compares surface ozone data with hourly resolution from Summit (SUM) station
in Greenland with Summit GC ethane and acetylene data. 'ozone.py' compares the ozone with the residual values
The ozone data used here is courtesy of NOAA ESRL GMD. See the citation below.
<NAME>., <NAME>., <NAME>., (2014) NOAA Global Monitoring Surface Ozone Network.
Summit, 2012-2018. National Oceanic and Atmospheric Administration, Earth Systems Research Laboratory
Global Monitoring Division. Boulder, CO. 5/30/2019. http://dx.doi.org/10.7289/V57P8WBF
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
def ozonePlot2():
# Import Data
root = r'C:\Users\ARL\Desktop\J_Summit\analyses\HarmonicFit\textfiles' # root source
ozone = pd.read_csv(root + r'\ozone.txt', encoding='utf8', delim_whitespace=True,
header=None)
ozone.columns = ['date', 'value', 'function', 'resid', 'resid_smooth']
ethane = pd.read_csv(root + r'\ethane.txt', encoding='utf8', delim_whitespace=True)
ace = pd.read_csv(root + r'\acetylene.txt', encoding='utf8', delim_whitespace=True)
sns.set()
f, ax = plt.subplots(ncols=2, nrows=2)
sns.despine(f)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5) # adjust plot spacing
# ----
# Background Ozone Data and Fitted Harmonic Functions
ax1 = sns.scatterplot(x='date', y='value', data=ozone, ax=ax[0, 0],
alpha=0.7, s=10, legend='brief', label='Ozone Data')
ax2 = sns.lineplot(x='date', y='function', data=ozone, ax=ax[0, 0],
linewidth=2, label='Fitted Curve')
ax1.set_title('Ozone Surface MR from Summit', fontsize=12)
ax1.set_xlabel('Decimal Year')
ax1.set_ylabel('Mixing Ratio [ppb]')
ax1.legend()
print('Plot 1 Created')
# ----
# Ozone Residuals with smoothed residual curve
ax3 = sns.scatterplot(x='date', y='resid', data=ozone, ax=ax[0, 1], alpha=0.7, s=10,
label='Ozone Residuals', legend='brief')
ax4 = sns.lineplot(x='date', y='resid_smooth', data=ozone, ax=ax[0, 1], linewidth=2,
label='Smoothed Residual Curve')
ax3.set_title('Ozone Residuals w/ Smoothed Curve')
ax3.set_xlabel('Decimal Year')
ax3.set_ylabel('Mixing Ratio [ppb]')
ax3.legend()
print('Plot 2 Created')
# ----
# Create new dataframes for residual comparison
titles_old = ['date', 'value', 'residuals']
ace_titles_new = ['date_ace', 'value_ace', 'resid_ace']
eth_titles_new = ['date_eth', 'value_eth', 'resid_eth']
# drop unused columns for speed
ethDrop = ethane.drop(labels=(ethane.columns[np.logical_and((np.logical_and(ethane.columns != titles_old[0],
ethane.columns != titles_old[1])),
ethane.columns != titles_old[2])]), axis=1)
aceDrop = ace.drop(labels=(ace.columns[np.logical_and((np.logical_and(ace.columns != titles_old[0],
ace.columns != titles_old[1])),
ace.columns != titles_old[2])]), axis=1)
ethDrop.columns = eth_titles_new # rename columns
aceDrop.columns = ace_titles_new
# Trim the data and assign equivalent date conditions
earlyVals = ~(ozone['date'] < ethDrop['date_eth'][0]) # early ozone vals
ozone = ozone[earlyVals] # remove those vals
ozone = ozone.reset_index() # reset index
    ozone = ozone.drop('index', axis=1) # remove the unneeded index column
ozoneEthane = pd.concat([ozone, ethDrop], sort=False, axis=1) # combine datasets
ozoneData = pd.concat([ozoneEthane, aceDrop], sort=False, axis=1)
ozoneData['date_ace'] = ozoneData['date_eth']
dataClean = []
    tolerence = 1 / 365 # ozone values within one day
for index, value in ozoneData.iterrows():
high = value.date_eth + tolerence # upper date lim
low = value.date_eth - tolerence # lower date lim
indices = (ozoneData['date'] <= high) & (ozoneData['date'] >= low) # indices between
ozoneAv = np.nanmean(ozoneData['resid'][indices].values)
# Client --> ./templates/index.html
# -*- coding: utf-8 -*-
# Import common libraries
from flask import Flask, jsonify, render_template, request
from utils import Config, Logger, CharsetMapper
import torchvision.transforms as transforms
from PIL import Image
import torch.nn.functional as F
import numpy as np
import cv2
import PIL
import torch
import logging
import time
import os
# Import the Flask class and the request object from the flask library
app = Flask(__name__)
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
# /home/std2022/zhaoxu/disk/ABINet
def get_model(config):
# load the model according to the config.file
import importlib
names = config.model_name.split('.')
module_name, class_name = '.'.join(names[:-1]), names[-1]
# getattr() returns the named attribute of an object
cls = getattr(importlib.import_module(module_name), class_name)
model = cls(config)
logging.info(model)
model = model.eval()
return model
def load(model, file, device=None, strict=True):
if device is None:
device = 'cpu'
elif isinstance(device, int):
device = torch.device('cuda', device)
assert os.path.isfile(file)
state = torch.load(file, map_location=device)
if set(state.keys()) == {'model', 'opt'}:
state = state['model']
model.load_state_dict(state, strict=strict)
return model
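# A minimal, hypothetical usage sketch of the two helpers above; the config object,
# checkpoint path and device id below are assumptions, not values taken from this file:
#   model = get_model(config)                          # build the network from a loaded Config
#   model = load(model, 'workdir/best.pth', device=0)  # restore the checkpoint onto GPU 0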
def preprocess(img, width, height):
img = cv2.resize(np.array(img), (width, height))
import os
from pathlib import Path
import numpy as np
from sklearn.ensemble import RandomForestClassifier
print('hi')
os.getpid()
Path('/')
np.array([1, 2, 3])
import numpy as np
import torch
from L96sim.L96_base import f1, f2, pf2
def init_torch_device():
if torch.cuda.is_available():
print('using CUDA !')
device = torch.device("cuda")
torch.set_default_tensor_type("torch.cuda.FloatTensor")
else:
print("CUDA not available")
device = torch.device("cpu")
torch.set_default_tensor_type("torch.FloatTensor")
return device
device, dtype, dtype_np = init_torch_device(), torch.float32, np.float32
def as_tensor(x):
return torch.as_tensor(x, dtype=dtype, device=device)
def sortL96intoChannels(x, J):
shape = x.shape
K = shape[-1]//(J+1)
assert shape[-1]/(J+1) == K
if isinstance(x, torch.Tensor):
out = torch.cat((x[...,:K].reshape(*shape[:-1],K,1), x[...,K:].reshape(*shape[:-1], K, J)),
axis=-1).permute(*range(len(shape)-1),-1,-2)
elif isinstance(x, np.ndarray):
out = np.concatenate((x[...,:K].reshape(*shape[:-1],K,1), x[...,K:].reshape(*shape[:-1], K, J)),
axis=-1).transpose(*range(len(shape)-1),-1,-2)
return out
def sortL96fromChannels(x):
shape = x.shape
J, K = shape[-2]-1, shape[-1]
if isinstance(x, torch.Tensor):
out = torch.cat((x[...,0,:],
x[...,1:,:].permute(*range(len(shape)-2),-1,-2).reshape(*shape[:-2], K*J)),
axis=-1)
elif isinstance(x, np.ndarray):
out = np.concatenate((x[...,0,:],
x[...,1:,:].transpose(*range(len(shape)-2),-1,-2).reshape(*shape[:-2], K*J)),
axis=-1)
return out
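# Hedged sanity check for the two reshaping helpers above (the shapes below are assumptions):
# sortL96fromChannels should undo sortL96intoChannels for any x of shape (..., K*(J+1)).
#   x = np.random.randn(5, 4 * (2 + 1)).astype(dtype_np)   # K=4, J=2
#   assert np.allclose(sortL96fromChannels(sortL96intoChannels(x, J=2)), x)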
def predictor_corrector(fun, y0, times, alpha=0.5):
y = np.zeros((len(times), *y0.shape), dtype=y0.dtype)
y[0] = y0.copy()
for i in range(1,len(times)):
dt = times[i] - times[i-1]
f0 = fun(times[i-1], y[i-1]).copy()
f1 = fun(times[i], y[i-1] + dt*f0)
y[i] = y[i-1] + dt * (alpha*f0 + (1.-alpha)*f1)
return y
def rk4_default(fun, y0, times):
y = np.zeros((len(times), *y0.shape), dtype=y0.dtype)
y[0] = y0.copy()
for i in range(1,len(times)):
dt = times[i] - times[i-1]
f0 = fun(times[i-1], y[i-1]).copy()
f1 = fun(times[i-1] + dt/2., y[i-1] + dt*f0/2.).copy()
f2 = fun(times[i-1] + dt/2., y[i-1] + dt*f1/2.).copy()
f3 = fun(times[i], y[i-1] + dt*f2).copy()
y[i] = y[i-1] + dt/6. * (f0 + 2.*f1 + 2.*f2 + f3)
return y
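# Illustrative use of the fixed-step integrators above on a toy problem (not from the
# original code): integrate dy/dt = -y from y0 = 1 over [0, 1]; ys[-1] is close to exp(-1).
#   ts = np.linspace(0., 1., 101)
#   ys = rk4_default(lambda t, y: -y, np.array([1.], dtype=dtype_np), ts)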
def calc_jakobian_onelevelL96_tendencies(inputs, n):
inputs_m1 = np.concatenate((inputs[-1:], inputs[:-1]))
inputs_m2 = np.concatenate((inputs[-2:], inputs[:-2]))
inputs_p1 = np.concatenate((inputs[1:], inputs[:1]))
dfdx = - 1. * np.eye(n, dtype=dtype_np)
dfdx += np.diag(inputs_m1[:-1], 1) + np.diag(inputs_m1[-1:], -n+1)
dfdx -= np.diag(inputs_p1[:-2],-2) + np.diag(inputs_p1[-2:], n-2)
dfdx += np.diag(inputs_p1[1:]-inputs_m2[1:],-1) + np.diag(inputs_p1[:1]-inputs_m2[:1], n-1)
return dfdx
def calc_jakobian_rk4(inputs, calc_f, calc_J_f, dt, n):
I = np.eye(n, dtype=dtype_np)
f0 = calc_f(inputs)
f1 = calc_f(inputs + dt/2. * f0)
f2 = calc_f(inputs + dt/2. * f1)
J0 = calc_J_f(inputs=inputs, n=n)
J1 = calc_J_f(inputs=inputs+dt/2.*f0, n=n).dot(dt/2*J0+I)
J2 = calc_J_f(inputs=inputs+dt/2.*f1, n=n).dot(dt/2*J1+I)
J3 = calc_J_f(inputs=inputs+dt *f2, n=n).dot(dt*J2+I)
J = I + dt/6. * (J0 + 2 * J1 + 2 * J2 + J3)
return J
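# Hedged note: for a hypothetical linear system f(x) = A @ x, the step Jacobian returned
# above reduces to the truncated matrix exponential
#   I + dt*A + (dt*A)^2/2! + (dt*A)^3/3! + (dt*A)^4/4!,
# which gives a convenient closed form for unit-testing this routine.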
def get_jacobian_torch(model, inputs, n):
J = np.zeros((n,n), dtype=dtype_np)
for i in range(n):
inputs.grad = None
L = model(inputs).flatten()[i]
L.backward()
J[i,:] = inputs.grad.detach().cpu().numpy()
return J
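# Hedged usage note (the model and input shape are assumptions): `inputs` must be a leaf
# tensor created with requires_grad=True so that inputs.grad is populated by backward(), e.g.
#   x = torch.zeros(1, n, requires_grad=True)
#   J = get_jacobian_torch(model, x, n)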
def get_data(K, J, T, dt, N_trials=1, F=10., h=1., b=10., c=10.,
resimulate=True, solver=rk4_default, save_sim=False, data_dir=None):
if N_trials > 1:
fn_data = f'out_K{K}_J{J}_T{T}_N{N_trials}_dt0_{str(dt)[2:]}'
else:
fn_data = f'out_K{K}_J{J}_T{T}_dt0_{str(dt)[2:]}'
if J > 0:
if N_trials > 1:
def fun(t, x):
return pf2(x, F, h, b, c, dX_dt, K, J)
else:
def fun(t, x):
return f2(x, F, h, b, c, dX_dt, K, J)
else:
def fun(t, x):
return f1(x, F, dX_dt, K)
times = np.linspace(0, T, int(np.floor(T/dt)+1))
if resimulate:
print('simulating data')
X_init = F * (0.5 + np.random.randn(K*(J+1), N_trials))
# coding: utf-8
'''
from: examples/tutorial/fifth.cc
to: fifth.py
time: 20101110.1948.
//
// node 0 node 1
// +----------------+ +----------------+
// | ns-3 TCP | | ns-3 TCP |
// +----------------+ +----------------+
// | 10.1.1.1 | | 10.1.1.2 |
// +----------------+ +----------------+
// | point-to-point | | point-to-point |
// +----------------+ +----------------+
// | |
// +---------------------+
// 5 Mbps, 2 ms
//
//
// We want to look at changes in the ns-3 TCP congestion window. We need
// to crank up a flow and hook the CongestionWindow attribute on the socket
// of the sender. Normally one would use an on-off application to generate a
// flow, but this has a couple of problems. First, the socket of the on-off
// application is not created until Application Start time, so we wouldn't be
// able to hook the socket (now) at configuration time. Second, even if we
// could arrange a call after start time, the socket is not public so we
// couldn't get at it.
//
// So, we can cook up a simple version of the on-off application that does what
// we want. On the plus side we don't need all of the complexity of the on-off
// application. On the minus side, we don't have a helper, so we have to get
// a little more involved in the details, but this is trivial.
//
// So first, we create a socket and do the trace connect on it; then we pass
// this socket into the constructor of our simple application which we then
// install in the source node.
'''
import sys
import ns.applications
import ns.core
import ns.internet
import ns.network
import ns.point_to_point
import ns3
import pandas as pd
import numpy as np
import scipy
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
import seaborn as sns
import statsmodels as sm
import scipy.stats as stats
import matplotlib.pyplot as plt
import os
import statsmodels.distributions.empirical_distribution as edf
from scipy.interpolate import interp1d
from scipy.stats.distributions import chi2
import random
# Disable warnings
import warnings
warnings.filterwarnings("ignore")
# Generation options: by "Trace" or by "PD" (Probability Distribution)
mt_RG = "PD"
# Random-number generation from a trace: "tcdf" or "ecdf"
tr_RG = "tcdf"
# Global variables
# Helper for generating packet times in the network
aux_global_time = 0
# Flag indicating whether the trace files are ready to be read
# tr_reader = True
# Network parameter used by the functions
parameter = ""
# np.arrays() that store the trace data
t_time = np.empty(1)
t_size = np.empty(1)
# Variables that store the parameters of the probability distributions
# time
dist_time = ""
arg_time = []
loc_time = 0
scale_time = 0
# size
dist_size = ""
arg_size = []
loc_size = 0
scale_size = 0
# Helper flag that stops repeated calls to the tcdf function
first_tcdf_time = 0
first_tcdf_size = 0
# Helper flag that stops repeated calls to the read_trace function
first_trace_time = 0
first_trace_size = 0
# Trace file format: ".txt" or "xml"
reader = "txt"
size_xml = 0
stop_xml = 0
# Function that reads the xml trace files
def read_xml(parameter):
global size_xml
global stop_xml
ifile = open('scratch/results-http-docker.pdml','r')
print(ifile)
columns = ["length", "time"]
df = pd.DataFrame(columns = columns)
data0 = []
data1 = []
for line in ifile.readlines():
if ("httpSample" in line and "</httpSample>" not in line):
data0.append(line)
if ("httpSample" in line and "</httpSample>" not in line):
data1.append(line)
ifile.close()
# Save parameters in DataFrames and Export to .txt
df = pd.DataFrame(list(zip(data0, data1)), columns=['length', 'time'])
df['length'] = df['length'].str.split('by="').str[-1]
df['time'] = df['time'].str.split('ts="').str[-1]
df['length'] = df['length'].str.split('"').str[0]
df['time'] = df['time'].str.split('"').str[0]
df["length"] = pd.to_numeric(df["length"],errors='coerce')
df["time"] = pd.to_numeric(df["time"],errors='coerce')
print("DF: ", df)
size_xml = len(df["time"])
stop_xml = df["time"]
print("STOP: ", len(stop_xml))
stop_xml = stop_xml[len(stop_xml)-1]
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = np.array(df['length'])
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = np.array(df['time'])
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.append(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = np.array(sub)
# print("Trace Time: ", t_time)
# Plot histograma t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
# Function that reads the trace files and assigns their data to the vectors
def read_txt(parameter):
if parameter == "Size":
# Chamando variáveis globais
global t_size
global first_trace_size
# Abrindo arquivos .txt
t_size = np.loadtxt("scratch/size.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
plt.hist(t_size)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# y_size_df = pd.DataFrame(y_size, columns=['Size'])
# y_size_df.describe()
# Definindo que o parametro size pode ser lido apenas uma vez.
first_trace_size = 1
if parameter == "Time":
# Chamando variáveis globais
global t_time
global first_trace_time
# Abrindo arquivos .txt
t_time = np.loadtxt("scratch/time.txt", usecols=0)
# Obtendo os tempos entre pacotes do trace
sub = []
i=0
for i in range(len(t_time)-1):
sub.append(t_time[i+1] - t_time[i])
# Passando valores resultantes para a variável padrão t_time
t_time = np.array(sub)
# print("Trace Time: ", t_time)
# Plot histograma t_time:
plt.hist(t_time)
plt.title("Histogram of trace ("+parameter+")")
plt.show()
# Com ajuda da lib Pandas pode-se encontrar algumas estatísticas importantes.
# t_time_df = pd.DataFrame(t_time, columns=['Time'])
# t_time_df.describe()
# Definindo que o parametro time pode ser lido apenas uma vez.
first_trace_time = 1
# Function that generates random variables through the ECDF
def ecdf(y, parameter):
# Criando listas para os dados utilizados
Fx = []
Fx_ = []
# Realizando ajustes para os vetores que selecionaram os valores gerados
for i in range(len(y)):
Fx.append(i/(len(y)+1))
if i != 0:
Fx_.append(i/(len(y)+1))
# Adicionando 1 no vetor Fx_
Fx_.append(1)
# print ("Fx: ", len(Fx))
# print ("Fx_: ", len(Fx_))
# Organizando o vetor com os dados do trace
y.sort()
# print ("Y: ", len(y))
# Gerando um valor aleatório entre 0 e 1 uniforme
rand = np.random.uniform(0,1)
# print("Rand: ", rand)
# Pecorrer todos os valores do vetor com dados do trace
# para determinar o valor a ser gerado de acordo com o resultado da distribuição uniforme
for i in range(len(y)):
# Condição que define em qual classe o valor é encontrado
if rand > Fx[i] and rand < Fx_[i]:
# Determinando o valor resultante
r_N = y[i]
# Condição para retorno do valor de acordo com o parametro de rede.
if parameter == "Size":
# print ("ECDF SIZE: ", r_N)
return(int(r_N))
if parameter == "Time":
# print ("ECDF TIME: ", r_N)
return(r_N)
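# Illustrative calls (assume the trace vectors were already filled by read_txt/read_xml):
#   next_interarrival = ecdf(t_time, "Time")   # one inter-arrival time drawn from the ECDF
#   next_packet_size = ecdf(t_size, "Size")    # one packet size drawn from the ECDF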
# Function that selects the probability distribution compatible with the
# trace values, used to generate random values via TCDF
def tcdf(y, parameter):
# Indexar o vetor y pelo vetor x
x = np.arange(len(y))
# Definindo o tamanho da massa de dados
size = len(x)
# Definindo a quantidade de bins (classes) dos dados
nbins = int(np.sqrt(size))
# Normalização dos dados
sc=StandardScaler()
yy = y.reshape (-1,1)
sc.fit(yy)
y_std = sc.transform(yy)
y_std = y_std.flatten()
del yy
# O python pode relatar avisos enquanto executa as distribuições
# Mais distribuições podem ser encontradas no site da lib "scipy"
# Veja https://docs.scipy.org/doc/scipy/reference/stats.html para mais detalhes
dist_names = ['erlang',
'expon',
'gamma',
'lognorm',
'norm',
'pareto',
'triang',
'uniform',
'dweibull',
'weibull_min',
'weibull_max']
# Obter os métodos de inferência KS test e Chi-squared
# Configurar listas vazias para receber os resultados
chi_square = []
ks_values = []
#--------------------------------------------------------#
# Chi-square
# Configurar os intervalos de classe (nbins) para o teste qui-quadrado
# Os dados observados serão distribuídos uniformemente em todos os inervalos de classes
percentile_bins = np.linspace(0,100,nbins)
percentile_cutoffs = np.percentile(y, percentile_bins)
observed_frequency, bins = (np.histogram(y, bins=percentile_cutoffs))
cum_observed_frequency = np.cumsum(observed_frequency)
# Repetir para as distribuições candidatas
for distribution in dist_names:
# Configurando a distribuição e obtendo os parâmetros ajustados da distribuição
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = np.linspace(0,100,len(y))
percentile_cut = np.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.append((i-1)/len(y))
# ecdf i/n
Fe_.append(i/len(y))
# Transformando listas em np.arrays()
Fe = np.array(Fe)
Fe_ = np.array(Fe_)
Ft = np.array(Ft)
Ft_ = np.array(Ft_)
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
Fe.sort()
Fe_.sort()
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = np.subtract(Ft, Fe_)
Fe_Ft = np.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_max = np.maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = np.max(Dobs_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.90
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/np.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/np.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/np.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/np.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/np.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/np.sqrt(len(y))
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Imprimindo resultados do KS Test
print(" ")
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Obtém a estatística do teste KS e arredonda para 5 casas decimais
Dobs = np.around(Dobs, 5)
ks_values.append(Dobs)
#
# CHI-SQUARE
#
# Obter contagens esperadas nos percentis
# Isso se baseia em uma 'função de distribuição acumulada' (cdf)
cdf_fitted = dist.cdf(percentile_cutoffs, *param[:-2], loc=param[-2], scale=param[-1])
# Definindo a frequência esperada
expected_frequency = []
for bin in range(len(percentile_bins)-1):
expected_cdf_area = cdf_fitted[bin+1] - cdf_fitted[bin]
expected_frequency.append(expected_cdf_area)
# Calculando o qui-quadrado
expected_frequency = np.array(expected_frequency) * size
cum_expected_frequency = np.cumsum(expected_frequency)
ss = sum (((cum_expected_frequency - cum_observed_frequency) ** 2) / cum_observed_frequency)
chi_square.append(ss)
# Set x² with IC
IC = IC/100
x2 = chi2.ppf(IC, nbins-1)
# Imprimindo resultados do teste Chi-square
print(" ")
print("Chi-square test: ")
print("Confidence degree: ", IC,"%")
print("CS: ", ss)
print("X²: ", x2)
# Condição para aceitar a hipótese nula do teste Chi-square
if x2 > ss:
print("Fails to Reject the Null Hipothesis of ", distribution)
else:
print("Rejects the Null Hipothesis of ", distribution)
print(" ")
# Agrupar os resultados e classificar por qualidade de ajuste de acordo com o teste KS (melhor na parte superior)
results = pd.DataFrame()
results['Distribution'] = dist_names
results['ks_value'] = ks_values
results['chi_square'] = chi_square
results.sort_values(['ks_value'], inplace=True, ascending=True)
# Apresentar os resultados em uma tabela
print ('\nDistributions sorted by KS Test:')
print ('----------------------------------------')
print (results)
# Divida os dados observados em N posições para plotagem (isso pode ser alterado)
bin_cutoffs = np.linspace(np.percentile(y,0), np.percentile(y,99), nbins)
# Crie o gráfico
h = plt.hist(y, bins = bin_cutoffs, color='0.75')
# Receba as principais distribuições da fase anterior
# e seleciona a quantidade de distribuições.
number_distributions_to_plot = 1
dist_names = results['Distribution'].iloc[0:number_distributions_to_plot]
# Crie uma lista vazia para armazenar parâmetros de distribuição ajustada
parameters = []
# Faça um loop pelas distribuições para obter o ajuste e os parâmetros da linha
for dist_name in dist_names:
# Chamando variáveis globais
global arg_time
global loc_time
global scale_time
global dist_time
global arg_size
global loc_size
global scale_size
global dist_size
# Obtendo distribuições e seus parametros de acordo com o trace
dist = getattr(scipy.stats, dist_name)
param = dist.fit(y)
parameters.append(param)
arg = param[:-2]
loc = param[-2]
scale = param[-1]
print(parameters)
if parameter == "Time":
dist_time = dist_name
loc_time = loc
scale_time = scale
arg_time = arg
if parameter == "Size":
dist_size = dist_name
loc_size = loc
scale_size = scale
arg_size = arg
# Obter linha para cada distribuição (e dimensionar para corresponder aos dados observados)
pdf_fitted = dist.pdf(x, *param[:-2], loc=param[-2], scale=param[-1])
scale_pdf = np.trapz (h[0], h[1][:-1]) / np.trapz (pdf_fitted, x)
pdf_fitted *= scale_pdf
# Adicione a linha ao gráfico
plt.plot(pdf_fitted, label=dist_name)
# Defina o eixo gráfico x para conter 99% dos dados
# Isso pode ser removido, mas, às vezes, dados fora de padrão tornam o gráfico menos claro
plt.xlim(0,np.percentile(y,99))
plt.title("Histogram of trace (" + parameter + ") + theorical distribuition " + dist_name)
# Adicionar legenda
plt.legend()
plt.show()
# Armazenar parâmetros de distribuição em um quadro de dados (isso também pode ser salvo)
dist_parameters = pd.DataFrame()
dist_parameters['Distribution'] = (
results['Distribution'].iloc[0:number_distributions_to_plot])
dist_parameters['Distribution parameters'] = parameters
# Printar os parâmetros
print ('\nDistribution parameters:')
print ('------------------------')
for row in dist_parameters.iterrows():
print ('\nDistribution:', row[0])
print ('Parameters:', row[1] )
# Plotando gráficos de inferência
data = y_std.copy()
# data = y
data.sort()
# Loop through selected distributions (as previously selected)
for distribution in dist_names:
# Set up distribution
dist = getattr(scipy.stats, distribution)
param = dist.fit(y)
#
# KS TEST
#
# Criando percentil
percentile = np.linspace(0,100,len(y))
percentile_cut = np.percentile(y, percentile)
# Criando CDF da teórica
Ft = dist.cdf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Criando CDF Inversa
Ft_ = dist.ppf(percentile_cut, *param[:-2], loc=param[-2], scale=param[-1])
# Adicionando dados do trace
t_Fe = y
# Ordenando dados
t_Fe.sort()
Ft.sort()
Ft_.sort()
# Criando listas para armazenar as ECDFs
Fe = []
Fe_ = []
# Criando ECDFs
for i in range(len(y)):
# ecdf i-1/n
Fe.append((i-1)/len(y))
# ecdf i/n
Fe_.append(i/len(y))
# Transformando listas em np.arrays()
Fe = np.array(Fe)
Fe_ = np.array(Fe_)
Ft = np.array(Ft)
Ft_ = np.array(Ft_)
# Inicio cálculo de rejeição
#
# Ft(t)-FE-(i),FE+(i)-Ft(t)
Ft_Fe_ = np.subtract(Ft, Fe_)
Fe_Ft = np.subtract(Fe, Ft)
# Max(Ft(t)-FE-(i),FE+(i)-Ft(t))
Dobs_max = np.maximum(Ft_Fe_, Fe_Ft)
# Dobs= Max(Max (Ft(t)-FE-(i),FE+(i)-Ft(t)))
Dobs = np.max(Dobs_max)
#
# Fim cálculo de rejeição
# Definir intervalo de confiança
# IC = 99.90 -> alpha = 0.10
# IC = 99.95 -> alpha = 0.05
# IC = 99.975 -> alpha = 0.025
# IC = 99.99 -> alpha = 0.01
# IC = 99.995 -> alpha = 0.005
# IC = 99.999 -> alpha = 0.001
IC = 99.95
# Condição para definir o D_critico de acordo com o tamanho dos dados
if size > 35:
if IC == 99.90:
D_critico = 1.22/np.sqrt(len(y))
if IC == 99.95:
D_critico = 1.36/np.sqrt(len(y))
if IC == 99.975:
D_critico = 1.48/np.sqrt(len(y))
if IC == 99.99:
D_critico = 1.63/np.sqrt(len(y))
if IC == 99.995:
D_critico = 1.73/np.sqrt(len(y))
if IC == 99.999:
D_critico = 1.95/np.sqrt(len(y))
# Condição para aceitar a hipótese nula do teste KS
if Dobs > D_critico:
rejects = "Reject the Null Hypothesis"
else:
rejects = "Fails to Reject the Null Hypothesis"
# Imprimindo resultados do KS Test
print("KS TEST:")
print("Confidence degree: ", IC,"%")
print(rejects, " of ", distribution)
print("D observed: ", Dobs)
print("D critical: ", D_critico)
print(" ")
# Plotando resultados do teste KS
plt.plot(t_Fe, Ft, 'o', label='Teorical Distribution')
plt.plot(t_Fe, Fe, 'o', label='Empirical Distribution')
# plt.plot(t_Fe, Fe, 'o', label='Real Trace')
# plt.plot(Ft, Fe, 'o', label='Syntatic Trace')
# Definindo titulo
plt.title("KS Test of Real Trace with " + distribution + " Distribution (" + parameter + ")")
plt.legend()
plt.show()
global first_tcdf_time
global first_tcdf_size
if parameter == "Size":
first_tcdf_size = 1
if parameter == "Time":
first_tcdf_time = 1
# Function that generates random variables through the TCDF
def tcdf_generate(dist, loc, scale, arg, parameter):
# Setar distribuição escolhida.
dist_name = getattr(scipy.stats, dist)
# Gerar número aleatório de acordo com a distribuição escolhida e seus parametros.
r_N = dist_name.rvs(loc=loc, scale=scale, *arg)
# Condição para retorno do valor de acordo com o parametro de rede.
if parameter == "Size":
# print("SIZE R_N:", r_N)
return(int(abs(r_N)))
if parameter == "Time":
# print("TIME R_N:", r_N)
return(float(abs(r_N)))
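# Illustrative call (assumes tcdf() has already been run once, so dist_size/loc_size/
# scale_size/arg_size hold the fitted parameters, exactly as done in MyApp.SendPacket below):
#   pkt_size = tcdf_generate(dist_size, loc_size, scale_size, arg_size, "Size")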
# Function that generates random variables from user-defined probability
# distributions and parameters
def wgwnet_PD(parameter):
# Mais distribuições podem ser encontradas no site da lib "scipy"
# Veja https://docs.scipy.org/doc/scipy/reference/stats.html para mais detalhes
if parameter == "Size":
# Selecionando distribuição de probabilidade para o parametro Size
dist_name = 'uniform'
# Definindo parametros da distribuição
loc = 500
scale = 500
arg = []
# Setando distribuição a escolhida e seus parametros
dist = getattr(scipy.stats, dist_name)
# Gerando número aleatório de acordo com a distribuiução e os parametros definidos
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
print("Size: ", r_N)
return(int(r_N))
if parameter == "Time":
# Selecionando distribuição de probabilidade para o parametro Size
dist_name = 'uniform'
# Definindo parametros da distribuição
loc = 0.5
scale = 0.8
arg = []
# Setando distribuição a escolhida e seus parametros
dist = getattr(scipy.stats, dist_name)
# Gerando número aleatório de acordo com a distribuiução e os parametros definidos
r_N = dist.rvs(loc=loc, scale=scale, *arg, size=1)
return(float(r_N))
# Class that builds the NS3 application
class MyApp(ns3.Application):
# Criando variáveis auxiliares
tid = ns3.TypeId("MyApp")
tid.SetParent(ns3.Application.GetTypeId())
m_socket = m_packetSize = m_nPackets = m_dataRate = m_packetsSent = 0
m_peer = m_sendEvent = None
m_running = False
count_Setup = count_Start = count_Stop = count_SendPacket = count_ScheduleTx = count_GetSendPacket = count_GetTypeId = 0
# Simulation initialiser
def __init__(self):
super(MyApp, self).__init__()
# def Setup(self, socket, address, packetSize, nPackets, dataRate):
# Application setup function
def Setup(self, socket, address, nPackets):
self.count_Setup = self.count_Setup + 1
self.m_socket = socket
self.m_peer = address
# self.m_packetSize = packetSize
self.m_nPackets = nPackets
# self.m_dataRate = dataRate
# Application start function
def StartApplication(self):
self.count_Start = self.count_Start + 1
if self.m_nPackets > 0 and self.m_nPackets > self.m_packetsSent:
self.m_running = True
self.m_packetsSent = 0
self.m_socket.Bind()
self.m_socket.Connect(self.m_peer)
self.SendPacket()
else:
self.StopApplication()
# Application stop function
def StopApplication(self):
self.count_Stop = self.count_Stop + 1
self.m_running = False
if self.m_sendEvent != None and self.m_sendEvent.IsRunning() == True:
ns3.Simulator.Cancel(self.m_sendEvent)
if self.m_socket:
self.m_socket.Close()
# Packet sending function
def SendPacket(self):
# Contabiliza a quantidade de pacotes enviados
self.count_SendPacket = self.count_SendPacket + 1
# Chamando variáveis globais
# Método de Geração de RN
global mt_RG
# Metodo de geração de RN por trace
global tr_RG
# Vetor com dados do parametro de tamanho dos pacotes obtidos do trace
global t_size
global parameter
global arg_size
global scale_size
global loc_size
global dist_size
global first_tcdf_size
global first_trace_size
global reader
parameter = "Size"
# Condição de escolha do método de geração de variáveis aleatórias
# diretamente por uma distribuição de probabiidade
if mt_RG == "PD":
# Chamando a função wgwnet_PD() e retornando valor gerado para uma variável auxiliar
aux_packet = wgwnet_PD(parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Condição de escolha do método de geração de variáveis aleatórias
# baseado nos dados do trace
if mt_RG == "Trace":
if first_trace_size == 0:
# Definindo o método de leitura do arquivo trace
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Condição de escolha do método por distribuições teórica equivalentes aos dados do trace
if tr_RG == "tcdf":
# Condição de chamada única da função tcdf()
if first_tcdf_size == 0:
# Chamando a função tcdf para definir a distribuição de probabilidade compatível ao trace e
# seus respectivos parametros para geração de números aleatórios
tcdf(t_size, parameter)
# Chamando a função tcdf_generate e retornando valor gerado para uma variável auxiliar
aux_packet = tcdf_generate(dist_size, loc_size, scale_size, arg_size, parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Condição de escolha do método pela distribuição empírica dos dados do trace
if tr_RG == "ecdf":
# Chamando a função ecdf e retornando valor gerado para uma variável auxiliar
aux_packet = ecdf(t_size, parameter)
# Transformando a variávei auxiliar em um metadado de pacote
packet = ns3.Packet(aux_packet)
# Imprimindo o tempo de envio do pacote e a quantidade de pacotes enviados
print ("SendPacket(): ", str(ns3.Simulator.Now().GetSeconds()), "s,\t send ", str(self.m_packetsSent), " Size ", packet.GetSize(), "#")
# Configurando o socket da rede para enviar o pacote
self.m_socket.Send(packet, 0)
# Incrementando a quantidade de pacotes enviados
self.m_packetsSent = self.m_packetsSent + 1
# Condição de parada da aplicação pela quantidade máxima de pacotes
if self.m_packetsSent < self.m_nPackets:
self.ScheduleTx()
else:
self.StopApplication()
# Function that schedules the packet-sending events
def ScheduleTx(self):
# Contabiliza a quantidade eventos que ocorrem na simulação
self.count_ScheduleTx = self.count_ScheduleTx + 1
# Condição que define se a aplicação ainda terá eventos
if self.m_running:
# Chamando variáveis globais
# Auxiliar de tempo
global aux_global_time
# Método de Geração de RN
global mt_RG
# Metodo de geração de RN por trace
global tr_RG
# Vetor com dados do parametro de tamanho dos pacotes obtidos do trace
global t_time
global parameter
global arg_time
global scale_time
global loc_time
global dist_time
global first_tcdf_time
global first_trace_time
global reader
parameter = "Time"
# Condição de escolha do método de geração de variáveis aleatórias
# diretamente por uma distribuição de probabiidade
if mt_RG == "PD":
# Chamando a função wgwnet_PD() e retornando valor gerado para uma variável auxiliar
aux_global_time = wgwnet_PD(parameter)
# Condição de escolha do método de geração de variáveis aleatórias
# baseado nos dados do trace
if mt_RG == "Trace":
# Definindo o método de leitura do arquivo trace
if first_trace_time == 0:
if reader == "txt":
read_txt(parameter)
if reader == "xml":
read_xml(parameter)
# Condição de escolha do método por distribuições teórica equivalentes aos dados do trace
if tr_RG == "tcdf":
# Condição de chamada única da função tcdf()
if first_tcdf_time == 0:
# Chamando a função tcdf para definir a distribuição de probabilidade compatível ao trace e
# seus respectivos parametros para geração de números aleatórios
tcdf(t_time, parameter)
# Chamando a função tcdf_generate e retornando valor gerado para uma variável auxiliar
aux_global_time = tcdf_generate(dist_time, loc_time, scale_time, arg_time, parameter)
# Condição de escolha do método pela distribuição empírica dos dados do trace
if tr_RG == "ecdf":
# Chamando a função ecdf e retornando valor gerado para uma variável auxiliar
aux_global_time = ecdf(t_time, parameter)
# Transformando a variávei auxiliar em um metadado de tempo
tNext = ns3.Seconds(aux_global_time)
# dataRate = "1Mbps"
# packetSize = 1024
# tNext = ns3.Seconds(packetSize * 8.0 / ns3.DataRate(dataRate).GetBitRate())
# print("tNEXT: ", tNext)
# Criando evento de envio de pacote
self.m_sendEvent = ns3.Simulator.Schedule(tNext, MyApp.SendPacket, self)
def GetSendPacket(self):
self.count_GetSendPacket = self.count_GetSendPacket + 1
return self.m_packetsSent
def GetTypeId(self):
self.count_GetTypeId = self.count_GetTypeId + 1
return self.tid
# Congestion window tracking function
def CwndChange(app):
# CwndChange():
# n = app.GetSendPacket()
# print ('CwndChange(): ' + str(ns3.Simulator.Now().GetSeconds()) + 's, \t sum(send packets) = ' + str(n))
ns3.Simulator.Schedule(ns3.Seconds(1), CwndChange, app)
# def ChangeRate(self, ns3.DataRate newrate):
# newrate = "1Mbps"
# self.m_dataRate = newrate
# def IncRate(self, app):
# app.ChangeRate(self.m_dataRate)
# Function that prints the NS3 simulation results
def print_stats(os, st):
# os = open("stats.txt", "w")
print (os, " Duration: ", (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds()))
print (os, " Last Packet Time: ", st.timeLastRxPacket.GetSeconds(), " Seconds")
print (os, " Tx Bytes: ", st.txBytes)
print (os, " Rx Bytes: ", st.rxBytes)
print (os, " Tx Packets: ", st.txPackets)
print (os, " Rx Packets: ", st.rxPackets)
print (os, " Lost Packets: ", st.lostPackets)
if st.rxPackets > 0:
print (os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets))
print (os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets)))
print (os, " Throughput ", (st.rxBytes * 8.0 / (st.timeLastRxPacket.GetSeconds()-st.timeFirstTxPacket.GetSeconds())/1024/1024), "MB/S")
print (os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1)
# std::cout<<"Duration : "<<()<<std::endl;
# std::cout<<"Last Received Packet : "<< stats->second.timeLastRxPacket.GetSeconds()<<" Seconds"<<std::endl;
# std::cout<<"Throughput: " << stats->second.rxBytes * 8.0 / (stats->second.timeLastRxPacket.GetSeconds()-stats->second.timeFirstTxPacket.GetSeconds())/1024/1024 << " Mbps"<<std::endl;
if st.rxPackets == 0:
print (os, "Delay Histogram")
for i in range(st.delayHistogram.GetNBins()):
print (os, " ", i, "(", st.delayHistogram.GetBinStart(i), "-", st.delayHistogram.GetBinEnd(i), "): ", st.delayHistogram.GetBinCount(i))
print (os, "Jitter Histogram")
for i in range(st.jitterHistogram.GetNBins()):
print (os, " ", i, "(", st.jitterHistogram.GetBinStart(i), "-", st.jitterHistogram.GetBinEnd(i), "): ", st.jitterHistogram.GetBinCount(i))
print (os, "PacketSize Histogram")
for i in range(st.packetSizeHistogram.GetNBins()):
print (os, " ", i, "(", st.packetSizeHistogram.GetBinStart(i), "-", st.packetSizeHistogram.GetBinEnd(i), "): ", st.packetSizeHistogram.GetBinCount(i))
for reason, drops in enumerate(st.packetsDropped):
print (" Packets dropped by reason ", reason ,": ", drops)
# for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
# Function that compares the results obtained with NS3 against the trace data
# This function is only used when the selected random-variable generation method is "Trace"
def compare(app_protocol):
compare = ""
# Chamando variáveis globais
global t_time
global t_size
# global time_ns3
# global size_ns3
if app_protocol == "tcp":
############################# SIZE #############################
# Abrindo arquivos .txt
rd_size_ns3 = np.loadtxt("scratch/tcp_size.txt", usecols=0)
rd_tsval_ns3 = np.loadtxt("scratch/tcp_tsval.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
# plt.hist(size_ns3)
# plt.title("Histogram of trace (size) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# size_ns3_df = pd.DataFrame(size_ns3, columns=['TSVAL','Size'])
size_ns3_df = pd.DataFrame(list(zip(rd_tsval_ns3,rd_size_ns3)), columns=['TSVAL','Size'])
size_ns3_df = size_ns3_df[size_ns3_df.Size != 0]
size_ns3_df = size_ns3_df.groupby("TSVAL").sum()
size_ns3_df["Size"] = pd.to_numeric(size_ns3_df["Size"])
# print(size_ns3_df)
# print(size_ns3_df.describe())
size_ns3 = np.array(size_ns3_df['Size'])
# print(size_ns3)
############################# END SIZE #############################
############################# TIME #############################
# Abrindo arquivos .txt
rd_time_ns3 = np.loadtxt("scratch/tcp_time.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
# plt.hist(time_ns3)
# plt.title("Histogram of trace (time) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
time_ns3_df = pd.DataFrame(rd_time_ns3, columns=['Time'])
time_ns3_df["Time"] = pd.to_numeric(time_ns3_df["Time"])
# print(time_ns3_df)
# print(time_ns3_df.describe())
# Métodos de comparação dos traces
# Opções: "qq_e_pp", "Graphical" ou "KS"
time_ns3 = np.array(time_ns3_df['Time'])
# print(time_ns3)
############################# END TIME #############################
if app_protocol == "udp":
############################# SIZE #############################
# Abrindo arquivos .txt
rd_size_ns3 = np.loadtxt("scratch/udp_size.txt", usecols=0)
# rd_tsval_ns3 = np.loadtxt("scratch/tcp_tsval.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
# plt.hist(size_ns3)
# plt.title("Histogram of trace (size) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
# size_ns3_df = pd.DataFrame(size_ns3, columns=['TSVAL','Size'])
# size_ns3_df = pd.DataFrame(list(zip(rd_tsval_ns3,rd_size_ns3)), columns=['TSVAL','Size'])
size_ns3_df = pd.DataFrame(rd_size_ns3, columns=['Size'])
size_ns3_df["Size"] = pd.to_numeric(size_ns3_df["Size"])
# print(size_ns3_df)
# print(size_ns3_df.describe())
size_ns3 = np.array(size_ns3_df['Size'])
# print(size_ns3)
############################# END SIZE #############################
############################# TIME #############################
# Abrindo arquivos .txt
rd_time_ns3 = np.loadtxt("scratch/udp_time.txt", usecols=0)
# print("Trace Size: ", t_size)
# Plot histograma de t_size:
# plt.hist(time_ns3)
# plt.title("Histogram of trace (time) in NS3")
# plt.show()
# Com ajuda da lib Pandas podemos encontrar algumas estatísticas importantes.
time_ns3_df = pd.DataFrame(rd_time_ns3, columns=['Time'])
time_ns3_df["Time"] = pd.to_numeric(time_ns3_df["Time"])
# print(time_ns3_df)
# print(time_ns3_df.describe())
time_ns3 = np.array(time_ns3_df['Time'])
# print(time_ns3)
############################# END TIME #############################
# Métodos de comparação dos traces
# Opções: "qq_e_pp", "Graphical" ou "KS"
# compare = "qq_e_pp"
if compare == "qq_e_pp":
#
# qq and pp plots
#
# Dados do Traces:
# Time
sc_time = StandardScaler()
# Tornando dados do vetor np.array()
t_time = np.array(t_time)
# Normalizando valores
yy_time = t_time.reshape (-1,1)
sc_time.fit(yy_time)
y_std_time = sc_time.transform(yy_time)
y_std_time = y_std_time.flatten()
data_time = y_std_time.copy()
data_time.sort()
# Size
sc_size = StandardScaler()
# Tornando dados do vetor np.array()
t_size = np.array(t_size)
# Normalizando valores
yy_size = t_size.reshape (-1,1)
sc_size.fit(yy_size)
y_std_size = sc_size.transform(yy_size)
y_std_size = y_std_size.flatten()
data_size = y_std_size.copy()
data_size.sort()
# Dados gerados no NS3:
# Time
sc_time_ns3 = StandardScaler()
time_ns3 = np.array(time_ns3)
yy_time_ns3 = time_ns3.reshape (-1,1)
sc_time_ns3.fit(yy_time_ns3)
y_std_time_ns3 = sc_time_ns3.transform(yy_time_ns3)
y_std_time_ns3 = y_std_time_ns3.flatten()
data_time_ns3 = y_std_time_ns3.copy()
data_time_ns3.sort()
# Size
sc_size_ns3 = StandardScaler()
size_ns3 = np.array(size_ns3)
yy_size_ns3 = size_ns3.reshape (-1,1)
sc_size_ns3.fit(yy_size_ns3)
y_std_size_ns3 = sc_size_ns3.transform(yy_size_ns3)
y_std_size_ns3 = y_std_size_ns3.flatten()
data_size_ns3 = y_std_size_ns3.copy()
data_size_ns3.sort()
#
# SIZE
#
# Definindo o parametro da rede a ser comparado
parameter = "Size"
distribution = 'real trace of '+ parameter
# Adicionando valores gerados pelo NS3
x = size_ns3
# x = data_size_ns3
# Adicionando valores do trace
y = t_size
# y = data_size
# Ordenando dados
x.sort()
y.sort()
# Tornando vetores do mesmo tamanho
if len(x) > len(y):
x = x[0:len(y)]
if len(x) < len(y):
y = y[0:len(x)]
# Criando variável com tamanho dos dados
S_size = len(x)
# Criando variável com o número de bins (classes)
S_nbins = int(np.sqrt(S_size))
# Criando figura
fig = plt.figure(figsize=(8,5))
# Adicionando subplot com método "qq plot"
ax1 = fig.add_subplot(121) # Grid of 2x2, this is suplot 1
# Plotando dados comparados
ax1.plot(x,y,"o")
# Definindo valor máximo e mínimo dos dados
min_value = np.floor(min(min(x),min(y)))
max_value = np.ceil(max(max(x),max(y)))
# Plotando linha qua segue do minimo ao máximo
ax1.plot([min_value,max_value],[min_value,max_value],'r--')
# Setando limite dos dados dentro do valor máximo e mínimo
ax1.set_xlim(min_value,max_value)
# Definindo os títulos dos eixos x e y
ax1.set_xlabel('Real Trace quantiles')
ax1.set_ylabel('Observed quantiles in NS3')
# Definindo o título do gráfico
title = 'qq plot for ' + distribution +' distribution'
ax1.set_title(title)
# Adicionando subplot com método "pp plot"
ax2 = fig.add_subplot(122)
# Calculate cumulative distributions
# Criando classes dos dados por percentis
S_bins = np.percentile(x,range(0,100))
# Obtendo conunts e o número de classes de um histograma dos dados
y_counts, S_bins = np.histogram(y, S_bins)
x_counts, S_bins = np.histogram(x, S_bins)
# print("y_COUNTS: ",y_counts)
# print("x_Counts: ",x_counts)
# print("y_Counts: ",y_counts)
# Gerando somatória acumulada dos dados
cum_y = np.cumsum(y_counts)
from .mcmcposteriorsamplernorm import fit
from scipy.stats import norm
import pandas as pd
import numpy as np
import pickle as pk
from sklearn.cluster import KMeans
from ..shared_functions import *
class mcmcsamplernorm:
"""
Class for the mcmc sampler of the deconvolution gaussian model
"""
def __init__(self, K=1, Kc=1):
"""
Constructor of the class
Parameters
-------------
K: int, Number of components of the noise distribution
Kc: int, Number of components of the convolved distribution
**kwargs:
alpha: float, parameter to determine the hyperprior of the noise weight components
alphac: float, parameter to determine the hyperprior of the target weight components
"""
self.K = K
self.Kc = Kc
self.fitted = False
return
def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1, priors = None, method_initialisation = "kmeans", initial_conditions = [], show_progress = True, seed = 0):
"""
Fit the model to the posterior distribution
Parameters
-------------
dataNoise: list/npArray, 1D array with the data of the noise
dataConvolution: list/npArray, 1D array with the data of the convolution
iterations: int, number of samples to be drawn and stored for each chain during the sampling
ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
chains: int, number of independently initialised realisations of the markov chain
priors: array, parameters of the prior gamma distribution according to the Wikipedia definition
kconst: float, parameter k of the prior gamma distribution
initial_conditions: list, 1D array with all the parameters required to manually initialise all the components of all the chains
show_progress: bool, indicate if the method should show the progress in the generation of the new data
seed: int, value to initialise the random generator and obtain reproducible results
Returns
---------------
Nothing
"""
self.data = dataNoise
self.datac = dataConvolution
self.iterations = iterations
self.ignored_iterations = ignored_iterations
self.chains = chains
if priors is None:
self.priors = np.zeros(10)
self.priors[0] = 1/self.K
self.priors[1] = (np.max(dataNoise)+np.min(dataNoise))/2
self.priors[2] = 3*(np.max(dataNoise)-np.min(dataNoise))
self.priors[3] = 10*(np.max(dataNoise)-np.min(dataNoise))
self.priors[4] = 1.1
self.priors[5] = 1/self.Kc
self.priors[6] = (np.max(dataConvolution)+np.min(dataConvolution))/2
self.priors[7] = 3*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[8] = 10*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[9] = 1.1
else:
self.priors = priors
if initial_conditions != []:
self.initial_conditions = initial_conditions
elif method_initialisation == "kmeans":
K =self.K
Kc = self.Kc
y = np.zeros([chains,(K+Kc)*3])
model = KMeans(n_clusters=K)
model.fit(dataNoise.reshape(-1,1))
ids = model.predict(dataNoise.reshape(-1,1))
#Add weights autofluorescence
for i in range(K):
for j in range(chains):
y[j,i] = np.sum(ids==i)
import numpy
import sys
import math
import logic
from scipy.integrate import odeint
import scipy.optimize as optim
import NNEX_DEEP_NETWORK as NNEX
import NNEX_DEEP_NETWORKY as NNEXY
#import NNEX
def DISCON(avrSWAP_py, from_SC_py, to_SC_py):
if logic.counter == 0:
import globalDISCON
import OBSERVER
import yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 1:
import globalDISCON1 as globalDISCON
import OBSERVER1 as OBSERVER
import yawerrmeas1 as yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 2:
import globalDISCON2 as globalDISCON
import OBSERVER2 as OBSERVER
import yawerrmeas2 as yawerrmeas
logic.counter = 0
#print("SIAMO ENTRATI IN DISCON.py")
#print("from_SC_py in DISCON.py: ", from_SC_py)
#print(avrSWAP_py[95], avrSWAP_py[26])
VS_RtGnSp = 121.6805
VS_SlPc = 10.00
VS_Rgn2K = 2.332287
VS_Rgn2Sp = 91.21091
VS_CtInSp = 70.16224
VS_RtPwr = 5296610.0
CornerFreq = 1.570796 #1.570796
PC_MaxPit = 1.570796 # ERA 1.570796 rad
PC_DT = 0.000125
VS_DT = 0.000125
OnePlusEps = 1 + sys.float_info.epsilon
VS_MaxTq = 47402.91
BlPitch = numpy.zeros(3)
PitRate = numpy.zeros(3)
VS_Rgn3MP = 0.01745329
PC_KK = 0.1099965
PC_KI = 0.008068634
PC_KP = 0.01882681
PC_RefSpd = 122.9096
VS_MaxRat = 15000.0
PC_MaxRat = 0.1396263 #0.1396263
YawSpr = 9.02832e9
YawDamp = 1.916e7
YawIn = 2.60789e6
kdYaw = 1e7
kpYaw = 5e7
kiYaw = 1e9
tauF = (1/3) * ((2 * numpy.pi) / 1.2671)
Ts = 0.005
iStatus = int(round(avrSWAP_py[0]))
NumBl = int(round(avrSWAP_py[60]))
PC_MinPit = 0.0
#print("PC_MinPit in DISCON.py: ", PC_MinPit)
#print("NumBl in DISCON.py: ", NumBl)
#print("OnePLUSEps ", OnePlusEps)
BlPitch[0] = min( max( avrSWAP_py[3], PC_MinPit ), PC_MaxPit )
BlPitch[1] = min( max( avrSWAP_py[32], PC_MinPit ), PC_MaxPit )
BlPitch[2] = min( max( avrSWAP_py[33], PC_MinPit ), PC_MaxPit )
GenSpeed = avrSWAP_py[19]
HorWindV = avrSWAP_py[26]
Time = avrSWAP_py[1]
aviFAIL_py = 0
if iStatus == 0:
globalDISCON.VS_SySp = VS_RtGnSp/( 1.0 + 0.01*VS_SlPc )
globalDISCON.VS_Slope15 = ( VS_Rgn2K*VS_Rgn2Sp*VS_Rgn2Sp )/( VS_Rgn2Sp - VS_CtInSp )
globalDISCON.VS_Slope25 = ( VS_RtPwr/VS_RtGnSp )/( VS_RtGnSp - globalDISCON.VS_SySp )
if VS_Rgn2K == 0:
globalDISCON.VS_TrGnSp = globalDISCON.VS_SySp
else:
globalDISCON.VS_TrGnSp = ( globalDISCON.VS_Slope25 - math.sqrt(globalDISCON.VS_Slope25*( globalDISCON.VS_Slope25 - 4.0*VS_Rgn2K*globalDISCON.VS_SySp ) ) )/( 2.0*VS_Rgn2K )
globalDISCON.GenSpeedF = GenSpeed
globalDISCON.PitCom = BlPitch
#print("PitCom: ", globalDISCON.PitCom)
#print("BlPitch: ", BlPitch)
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
globalDISCON.IntSpdErr = globalDISCON.PitCom[0]/( GK*PC_KI )
globalDISCON.LastTime = Time
globalDISCON.LastTimePC = Time - PC_DT
globalDISCON.LastTimeVS = Time - VS_DT
print("0")
if iStatus >= 0 and aviFAIL_py >= 0:
avrSWAP_py[35] = 0.0
avrSWAP_py[40] = 0.0
avrSWAP_py[45] = 0.0
avrSWAP_py[47] = 0.0
avrSWAP_py[64] = 0.0
avrSWAP_py[71] = 0.0
avrSWAP_py[78] = 0.0
avrSWAP_py[79] = 0.0
avrSWAP_py[80] = 0.0
Alpha = math.exp( ( globalDISCON.LastTime - Time )*CornerFreq )
globalDISCON.GenSpeedF = ( 1.0 - Alpha )*GenSpeed + Alpha*globalDISCON.GenSpeedF
ElapTime = Time - globalDISCON.LastTimeVS
print("1 ", ElapTime)
print("globalDISCON.LastTimeVS: ", globalDISCON.LastTimeVS)
print("Time*OnePlusEps - globalDISCON.LastTimeVS: ", Time*OnePlusEps - globalDISCON.LastTimeVS)
if ( Time*OnePlusEps - globalDISCON.LastTimeVS ) >= VS_DT:
print("GenSPeedF: ", globalDISCON.GenSpeedF)
print("PitCom: ", globalDISCON.PitCom[0])
if globalDISCON.GenSpeedF >= VS_RtGnSp or globalDISCON.PitCom[0] >= VS_Rgn3MP:
GenTrq = VS_RtPwr/globalDISCON.GenSpeedF
print("A")
print("GenTrq: ", GenTrq)
elif globalDISCON.GenSpeedF <= VS_CtInSp:
GenTrq = 0.0
print("B")
elif globalDISCON.GenSpeedF < VS_Rgn2Sp:
GenTrq = globalDISCON.VS_Slope15*( globalDISCON.GenSpeedF - VS_CtInSp )
print("C")
elif globalDISCON.GenSpeedF < globalDISCON.VS_TrGnSp:
GenTrq = VS_Rgn2K*globalDISCON.GenSpeedF*globalDISCON.GenSpeedF
print("D")
else:
GenTrq = globalDISCON.VS_Slope25*( globalDISCON.GenSpeedF - globalDISCON.VS_SySp )
print("E")
GenTrq = min(GenTrq, VS_MaxTq)
print("2: ", GenTrq)
if iStatus == 0:
globalDISCON.LastGenTrq = GenTrq
TrqRate = ( GenTrq - globalDISCON.LastGenTrq )/ElapTime
TrqRate = min( max( TrqRate, -VS_MaxRat ), VS_MaxRat )
GenTrq = globalDISCON.LastGenTrq + TrqRate*ElapTime
globalDISCON.LastTimeVS = Time
globalDISCON.LastGenTrq = GenTrq
print("3")
avrSWAP_py[34] = 1.0
avrSWAP_py[55] = 0.0
avrSWAP_py[46] = globalDISCON.LastGenTrq
print("Time ", Time)
ElapTime = Time - globalDISCON.LastTimePC
print("ELAP Time ", ElapTime)
print("LASTTIMEPC Time ", globalDISCON.LastTimePC)
if ( Time*OnePlusEps - globalDISCON.LastTimePC ) >= PC_DT:
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
SpdErr = globalDISCON.GenSpeedF - PC_RefSpd
globalDISCON.IntSpdErr = globalDISCON.IntSpdErr + SpdErr*ElapTime
globalDISCON.IntSpdErr = min( max( globalDISCON.IntSpdErr, PC_MinPit/( GK*PC_KI ) ), PC_MaxPit/( GK*PC_KI ) )
PitComP = GK*PC_KP* SpdErr
PitComI = GK*PC_KI*globalDISCON.IntSpdErr
PitComT = PitComP + PitComI
PitComT = min( max( PitComT, PC_MinPit ), PC_MaxPit )
for i in range(NumBl):
PitRate[i] = ( PitComT - BlPitch[i] )/ElapTime
PitRate[i] = min( max( PitRate[i], -PC_MaxRat ), PC_MaxRat )
globalDISCON.PitCom[i] = BlPitch[i] + PitRate[i]*ElapTime
globalDISCON.PitCom[i] = min( max( globalDISCON.PitCom[i], PC_MinPit ), PC_MaxPit )
globalDISCON.LastTimePC = Time
print("4")
#print("PitCom: ", globalDISCON.PitCom)
avrSWAP_py[54] = 0.0
avrSWAP_py[41] = globalDISCON.PitCom[0]
avrSWAP_py[42] = globalDISCON.PitCom[1]
avrSWAP_py[43] = globalDISCON.PitCom[2]
avrSWAP_py[44] = globalDISCON.PitCom[0]
# COMMANDING YAW RATE
globalDISCON.YawAngleGA = from_SC_py
#if Time > 70.0:
if logic.counter < 4:
if Time > 40.0 and Time < 55.0:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if not numpy.isclose(abs(avrSWAP_py[36]), 0.174533) and globalDISCON.flagyaw == False:
#if (not numpy.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not numpy.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
if abs(globalDISCON.PosYawRef) < 0.174533:
globalDISCON.VelYawRef = 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
if Time > 54.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
else:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if globalDISCON.counterY >= 2.0:
avrSWAP_py[28] = 1
if not numpy.isclose(abs(avrSWAP_py[36]), abs(globalDISCON.PosYawRef - globalDISCON.PosFin)) and globalDISCON.flagyaw == False:
#if (not numpy.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not numpy.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#if numpy.sign(globalDISCON.PosFin - globalDISCON.PosYawRef) == globalDISCON.signold:
if abs(globalDISCON.PosYawRef - globalDISCON.PosFin) > 0.004:
globalDISCON.VelYawRef = globalDISCON.signold * 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
#if Time > 72.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#globalDISCON.signold = numpy.sign(globalDISCON.PosFin - globalDISCON.PosYawRef)
print("TOTAL TORQUE TERM PASSED TO SERVODYN FOR YAW CONTROL ----> ", avrSWAP_py[47])
'''if Time > 70.0 and Time < 85.0:
avrSWAP_py[47] = 0.0349066/3
else:
avrSWAP_py[47] = 0.0'''
else:
avrSWAP_py[28] = 0
#else:
# avrSWAP_py[28] = 0
'''avrSWAP_py[28] = 0 # DOPO LEVALO
avrSWAP_py[47] = 0.0'''
# END OF COMMANDED YAW RATE ON TURBINE 1
#YAW LOGIC BLOCK
globalDISCON.LastTime = Time
print("globalDISCON.LastTime: ", globalDISCON.LastTime)
# INPUTS FOR SUPERCONTROLLER
to_SC_py = avrSWAP_py[14] # MEASURED POWER OUTPUT
avrSWAP_py = numpy.append(avrSWAP_py,to_SC_py)
to_SC_py = avrSWAP_py[36] # ACTUAL YAW ANGLE
avrSWAP_py = numpy.append(avrSWAP_py,to_SC_py)
# END OF SECTION
# WIND SPEED OBSERVER SECTION
file = open("Bl1outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[29], avrSWAP_py[68], Time))
file.close()
file = open("Bl2outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[30], avrSWAP_py[69], Time))
file.close()
file = open("Bl3outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[31], avrSWAP_py[70], Time))
file.close()
#file = open("Azimuth.txt","a+")
#file.write("%f, %f, %f, %f \n" % (avrSWAP_py[59], avrSWAP_py[20], avrSWAP_py[26], Time))
#file.close()
#if from_SC_py == 0:
tmp = float(OBSERVER.tmp) #POSG
acc = float(OBSERVER.acc) #POSR
OBSERVER.y = avrSWAP_py[19]
#print("tmp: ", OBSERVER.tmp)
#print("acc: ", OBSERVER.acc)
#print("y: ", OBSERVER.y)
OBSERVER.Qg = avrSWAP_py[22]
#print("Qg: ", avrSWAP_py[22])
if numpy.isclose(Time, 0.0):
x0 = numpy.array([1.5, 120, 0, 0])
xsol = numpy.array([1.5, 120, 0, 0])
OBSERVER.xsol = xsol
xppsolin = numpy.array([0, 0, 1.5, 120])
#print(xsol)
Qasol = OBSERVER.Qacalc(xppsolin, xsol, float(OBSERVER.y), float(OBSERVER.tmp))
error = 0.0
errorposg = 0.0
errorposr = 0.0
errorwr = 0.0
errorwg = 0.0
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(numpy.pi*OBSERVER.rho*(xsol[0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[0], xsol[1], xsol[2], xsol[3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[2], xsol[0], vento_obs, Time))
file.close()
else:
x0 = OBSERVER.xsol
if numpy.isclose(ElapTime, 0.0):
ElapTime = 0.005
#print(OBSERVER.xsolold)
#input("ELAP TIME = 0.0 PROBLEM")
ts = numpy.linspace(Time - ElapTime, Time, 2)
xsol = odeint(OBSERVER.dx_dt, x0, ts, args=(float(OBSERVER.y), float(OBSERVER.tmp)))
#print("SOL SHAPE: ", numpy.shape(xsol))
OBSERVER.xsol = xsol[-1,:]
OBSERVER.xsolold = numpy.vstack((OBSERVER.xsolold, OBSERVER.xsol))
xppsolin = numpy.gradient(OBSERVER.xsolold, ElapTime, axis=0)
#print("SOL: ", xsol)
#print("XOLD: ", OBSERVER.xsolold)
xppsol = OBSERVER.xpp(xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
#print("INERTIA: ", xppsol)
#print("INERTIA: ", xppsolin[-1,:])
Qasol = OBSERVER.Qacalc(xppsolin[-1,:], xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
error = (Qasol - (avrSWAP_py[13]/avrSWAP_py[20]))/(avrSWAP_py[13]/avrSWAP_py[20])
errorposg = (OBSERVER.tmp-xsol[-1,3])/xsol[-1,3]
errorposr = (OBSERVER.acc-xsol[-1,2])/xsol[-1,2]
errorwr = (avrSWAP_py[20]-xsol[-1,0])/avrSWAP_py[20]
errorwg = (avrSWAP_py[19]-xsol[-1,1])/avrSWAP_py[19]
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(numpy.pi*OBSERVER.rho*(xsol[-1,0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[-1,0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[-1,0], xsol[-1,1], xsol[-1,2], xsol[-1,3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[-1,2], xsol[-1,0], vento_obs, Time))
file.close()
if vento_obs > 25:
vento_obs = 25
elif vento_obs < 3:
vento_obs = 3
file = open("Error.txt","a+")
file.write("%f, %f \n" % (error, Time))
file.close()
file = open("ErrorPosg.txt","a+")
file.write("%f, %f \n" % (errorposg, Time))
file.close()
file = open("ErrorPosr.txt","a+")
file.write("%f, %f \n" % (errorposr, Time))
file.close()
file = open("ErrorWG.txt","a+")
file.write("%f, %f \n" % (errorwg, Time))
file.close()
file = open("ErrorWR.txt","a+")
file.write("%f, %f \n" % (errorwr, Time))
file.close()
file = open("EWR.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[20], Time))
file.close()
file = open("EWG.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[19], Time))
file.close()
file = open("EPOSG.txt","a+")
file.write("%f, %f \n" % (tmp, Time))
file.close()
file = open("EPOSR.txt","a+")
file.write("%f, %f \n" % (acc, Time))
file.close()
file = open("EPitch.txt","a+")
file.write("%f, %f, %f \n" % ((avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi), pitch_obs, Time))
file.close()
file = open("EWIND.txt","a+")
file.write("%f, %f, %f \n" % (vento_obs, Time, HorWindV))
file.close()
file = open("EQasol.txt","a+")
file.write("%f, %f \n" % (Qasol, Time))
file.close()
file = open("ENum.txt","a+")
file.write("%f, %f \n" % (num, Time))
file.close()
OBSERVER.tmp = float(avrSWAP_py[19]*ElapTime + tmp)
OBSERVER.acc = float(avrSWAP_py[20]*ElapTime + acc)
#print("ERROR: ", error)
#print("Qa: ", Qasol)
#print("Qareal: ", avrSWAP_py[13]/avrSWAP_py[20])
#print("POWER: ", avrSWAP_py[13])
#WIND YAW ERROR OBSERVER SECTION
blmom1 = numpy.array([avrSWAP_py[29], avrSWAP_py[68]])
blmom2 = numpy.array([avrSWAP_py[30], avrSWAP_py[69]])
blmom3 = numpy.array([avrSWAP_py[31], avrSWAP_py[70]])
N = 1
if numpy.isclose(Time, 0.0):
azimuth = numpy.array([xsol[2],xsol[2] + 2*numpy.pi/3, xsol[2] + 4*numpy.pi/3])
wryaw = xsol[0]
globalDISCON.wr_old = wryaw # (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.m_out1f_old + Ts*(m_out1 + globalDISCON.m_out1_old))
globalDISCON.wrf_old = wryaw
globalDISCON.azimuth_old = azimuth
globalDISCON.azimuthf_old = azimuth
m_out1 = 1
m_out2 = 0
m_out3 = 0
m_in1 = 1
m_in2 = 0
m_in3 = 0
yawerrmeas.bl1_old = blmom1
yawerrmeas.bl2_old = blmom2
yawerrmeas.bl3_old = blmom3
yawerrmeas.azimuth_old = azimuth[0]
else:
#azimuth = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.azimuthf_old + Ts*(numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3]) + globalDISCON.azimuth_old))
#wryaw = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.wrf_old + Ts*(xsol[-1,0] + globalDISCON.wr_old))
azimuth = numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3])
wryaw = xsol[-1,0]
globalDISCON.wr_old = xsol[-1,0]
globalDISCON.azimuth_old = numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3])
globalDISCON.wrf_old = wryaw
globalDISCON.azimuthf_old = azimuth
yawerrmeas.bl1_old = numpy.vstack((yawerrmeas.bl1_old, blmom1))
yawerrmeas.bl2_old = numpy.vstack((yawerrmeas.bl2_old, blmom2))
yawerrmeas.bl3_old = numpy.vstack((yawerrmeas.bl3_old, blmom3))
yawerrmeas.azimuth_old = numpy.hstack((yawerrmeas.azimuth_old, azimuth[0]))
#if ((azimuth[0] - 2*N*numpy.pi) > yawerrmeas.azimuth_old[0]) and ((azimuth[0] - 2*N*numpy.pi) > yawerrmeas.azimuth_old[1]):
inddel = numpy.where(yawerrmeas.azimuth_old < azimuth[0] - 2*N*numpy.pi)
#print("INDDEL: ", inddel[0])
if inddel[0].size > 1:
#print(yawerrmeas.azimuth_old.size)
yawerrmeas.bl1_old = numpy.delete(yawerrmeas.bl1_old, [inddel[0][:-2]], 0)
yawerrmeas.bl2_old = numpy.delete(yawerrmeas.bl2_old, [inddel[0][:-2]], 0)
yawerrmeas.bl3_old = | numpy.delete(yawerrmeas.bl3_old, [inddel[0][:-2]], 0) | numpy.delete |
import numpy as np
import numpy.linalg as npl
from dipy.core.triangle_subdivide import create_half_unit_sphere
from dipy.reconst.dti import design_matrix, lower_triangular
from nose.tools import assert_equal, assert_raises, assert_true, assert_false
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.core.geometry import cart2sphere
from dipy.reconst.shm import real_sph_harm, \
sph_harm_ind_list, _closest_peak, SlowAdcOpdfModel, \
normalize_data, ClosestPeakSelector, QballOdfModel, hat, lcr_matrix, \
smooth_pinv, bootstrap_data_array, bootstrap_data_voxel, \
ResidualBootstrapWrapper
def test_sph_harm_ind_list():
m_list, n_list = sph_harm_ind_list(8)
assert_equal(m_list.shape, n_list.shape)
assert_equal(m_list.shape, (45,))
assert_true(np.all(np.abs(m_list) <= n_list))
assert_array_equal(n_list % 2, 0)
assert_raises(ValueError, sph_harm_ind_list, 1)
def test_real_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
# where real spherical harmonic $Y^m_n$ is defined to be:
# Real($Y^m_n$) * sqrt(2) if m > 0
# $Y^m_n$ if m == 0
# Imag($Y^m_n$) * sqrt(2) if m < 0
rsh = real_sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
assert_array_almost_equal(rsh(0,0,0,0),
0.5/sqrt(pi))
assert_array_almost_equal(rsh(2,2,pi/3,pi/5),
0.25*sqrt(15./(2.*pi))*
(sin(pi/5.))**2.*cos(0+2.*pi/3)*sqrt(2))
assert_array_almost_equal(rsh(-2,2,pi/3,pi/5),
0.25*sqrt(15./(2.*pi))*
(sin(pi/5.))**2.*sin(0-2.*pi/3)*sqrt(2))
assert_array_almost_equal(rsh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi))*
cos(2.*pi)*sin(pi/2.)**2.*sqrt(2))
assert_array_almost_equal(rsh(-2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi))*
sin(0-2.*pi/4.)*
sin(pi/3.)**2.*
(7.*cos(pi/3.)**2.-1)*sqrt(2))
assert_array_almost_equal(rsh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi))*
cos(0+4.*pi/8.)*sin(pi/6.)**4.*sqrt(2))
assert_array_almost_equal(rsh(-4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi))*
sin(0-4.*pi/8.)*sin(pi/6.)**4.*sqrt(2))
aa = np.ones((3,1,1,1))
bb = np.ones((1,4,1,1))
cc = np.ones((1,1,5,1))
dd = np.ones((1,1,1,6))
assert_equal(rsh(aa, bb, cc, dd).shape, (3, 4, 5, 6))
def test_closest_peak():
peak_values = np.array([1, .9, .8, .7, .6, .2, .1])
peak_points = np.array([[1., 0., 0.],
[0., .9, .1],
[0., 1., 0.],
[.9, .1, 0.],
[0., 0., 1.],
[1., 1., 0.],
[0., 1., 1.]])
norms = np.sqrt((peak_points*peak_points).sum(-1))
peak_points = peak_points/norms[:, None]
prev = np.array([1, -.9, 0])
prev = prev/np.sqrt(np.dot(prev, prev))
cp = _closest_peak(peak_points, prev, .5)
assert_array_equal(cp, peak_points[0])
cp = _closest_peak(peak_points, -prev, .5)
assert_array_equal(cp, -peak_points[0])
assert_raises(StopIteration, _closest_peak, peak_points, prev, .75)
def test_set_angle_limit():
bval = np.ones(100)
bval[0] = 0
bvec = np.ones((3, 100))
sig = np.zeros(100)
v = np.ones((200, 3)) / np.sqrt(3)
e = None
opdf_fitter = SlowAdcOpdfModel(bval, bvec.T, 6, odf_vertices=v,
odf_edges=e)
norm_sig = sig[..., 1:]
stepper = ClosestPeakSelector(opdf_fitter, norm_sig, angle_limit=55)
assert_raises(ValueError, stepper._set_angle_limit, 99)
assert_raises(ValueError, stepper._set_angle_limit, -1.1)
def test_smooth_pinv():
v, e, f = create_half_unit_sphere(3)
m, n = sph_harm_ind_list(4)
r, pol, azi = cart2sphere(*v.T)
B = real_sph_harm(m, n, azi[:, None], pol[:, None])
L = np.zeros(len(m))
C = smooth_pinv(B, L)
D = np.dot(npl.inv(np.dot(B.T, B)), B.T)
assert_array_almost_equal(C, D)
L = n*(n+1)*.05
C = smooth_pinv(B, L)
L = np.diag(L)
D = np.dot(npl.inv(np.dot(B.T, B) + L*L), B.T)
assert_array_almost_equal(C, D)
L = np.arange(len(n))*.05
C = smooth_pinv(B, L)
L = np.diag(L)
D = np.dot(npl.inv(np.dot(B.T, B) + L*L), B.T)
assert_array_almost_equal(C, D)
def test_normalize_data():
sig = np.arange(1, 66)[::-1]
bval = np.repeat([0, 1000], [2, 20])
assert_raises(ValueError, normalize_data, sig, bval)
bval = np.ones(65)*1000
assert_raises(ValueError, normalize_data, sig, bval)
bval = np.repeat([0, 1], [1, 64])
d = normalize_data(sig, bval, 1)
assert_raises(ValueError, normalize_data, None, bval, 0)
bval[[0, 1]] = [0, 1]
norm_sig = normalize_data(sig, bval, min_signal=1)
assert_array_equal(norm_sig, sig/65.)
norm_sig = normalize_data(sig, bval, min_signal=5)
assert_array_equal(norm_sig[-5:], 5/65.)
bval[[0, 1]] = [0, 0]
norm_sig = normalize_data(sig, bval, min_signal=1)
assert_array_equal(norm_sig, sig/64.5)
norm_sig = normalize_data(sig, bval, min_signal=5)
assert_array_equal(norm_sig[-5:], 5/64.5)
sig = sig*np.ones((2,3,1))
bval[[0, 1]] = [0, 1]
norm_sig = normalize_data(sig, bval, min_signal=1)
assert_array_equal(norm_sig, sig/65.)
norm_sig = normalize_data(sig, bval, min_signal=5)
assert_array_equal(norm_sig[..., -5:], 5/65.)
bval[[0, 1]] = [0, 0]
norm_sig = normalize_data(sig, bval, min_signal=1)
assert_array_equal(norm_sig, sig/64.5)
norm_sig = normalize_data(sig, bval, min_signal=5)
assert_array_equal(norm_sig[..., -5:], 5/64.5)
def make_fake_signal():
v, e, f = create_half_unit_sphere(4)
vecs_xy = v[np.flatnonzero(v[:, 2] == 0)]
evals = np.array([1.8, .2, .2])*10**-3*1.5
evecs_moveing = np.empty((len(vecs_xy), 3, 3))
evecs_moveing[:, :, 0] = vecs_xy
evecs_moveing[:, :, 1] = [0, 0, 1]
evecs_moveing[:, :, 2] = np.cross(evecs_moveing[:, :, 0],
evecs_moveing[:, :, 1])
assert ((evecs_moveing * evecs_moveing).sum(1) - 1 < .001).all()
assert ((evecs_moveing * evecs_moveing).sum(2) - 1 < .001).all()
gtab = np.empty((len(v) + 1, 3))
bval = np.empty(len(v) + 1)
bval[0] = 0
bval[1:] = 2000
gtab[0] = [0, 0, 0]
gtab[1:] = v
bvec = gtab.T
B = design_matrix(bvec, bval)
tensor_moveing = np.empty_like(evecs_moveing)
for ii in xrange(len(vecs_xy)):
tensor_moveing[ii] = np.dot(evecs_moveing[ii]*evals,
evecs_moveing[ii].T)
D_moveing = lower_triangular(tensor_moveing, 1)
tensor_fixed = np.diag(evals)
D_fixed = lower_triangular(tensor_fixed, 1)
sig = .45*np.exp(np.dot(D_moveing, B.T)) + .55*np.exp(np.dot(B, D_fixed))
assert sig.max() <= 1
assert sig.min() > 0
return v, e, vecs_xy, bval, bvec, sig
def test_ClosestPeakSelector():
v, e, vecs_xy, bval, bvec, sig = make_fake_signal()
opdf_fitter = SlowAdcOpdfModel(bval, bvec.T, 6, odf_vertices=v, odf_edges=e)
opdf_fitter.angular_distance_threshold = 0.
norm_sig = sig
stepper = ClosestPeakSelector(opdf_fitter, norm_sig, angle_limit=49)
C = opdf_fitter.fit_data(norm_sig)
S = opdf_fitter.evaluate_odf(norm_sig)
for ii in xrange(len(vecs_xy)):
if np.dot(vecs_xy[ii], [0, 1., 0]) < .56:
assert_raises(StopIteration, stepper.next_step, ii, [0, 1., 0])
else:
step = stepper.next_step(ii, [0, 1., 0])
s2 = stepper.next_step(ii, vecs_xy[ii])
assert_array_equal(vecs_xy[ii], step)
step = stepper.next_step(ii, [1., 0, 0.])
assert_array_equal([1., 0, 0.], step)
norm_sig.shape = (2, 2, 4, -1)
stepper = ClosestPeakSelector(opdf_fitter, norm_sig, angle_limit=49)
step = stepper.next_step((0, 0, 0), [1, 0, 0])
assert_array_equal(step, [1, 0, 0])
def testQballOdfModel():
v, e, vecs_xy, bval, bvec, sig = make_fake_signal()
qball_fitter = QballOdfModel(bval, bvec.T, 6, odf_vertices=v,
odf_edges=e)
qball_fitter.angular_distance_threshold = 0.
norm_sig = sig
C = qball_fitter.fit_data(norm_sig)
S = qball_fitter.evaluate_odf(norm_sig)
stepper = ClosestPeakSelector(qball_fitter, norm_sig, angle_limit=39)
for ii in xrange(len(vecs_xy)):
if np.dot(vecs_xy[ii], [0, 1., 0]) < .84:
assert_raises(StopIteration, stepper.next_step, ii, [0, 1., 0])
else:
step = stepper.next_step(ii, [0, 1., 0])
s2 = stepper.next_step(ii, vecs_xy[ii])
assert step is not None
assert np.dot(vecs_xy[ii], step) > .98
step = stepper.next_step(ii, [1., 0, 0.])
assert_array_equal([1., 0, 0.], step)
def test_hat_and_lcr():
v, e, f = create_half_unit_sphere(6)
m, n = sph_harm_ind_list(8)
r, pol, azi = cart2sphere(*v.T)
B = real_sph_harm(m, n, azi[:, None], pol[:, None])
H = hat(B)
B_hat = np.dot(H, B)
assert_array_almost_equal(B, B_hat)
R = lcr_matrix(H)
d = np.arange(len(azi))
r = d - np.dot(H, d)
lev = np.sqrt(1-H.diagonal())
r /= lev
r -= r.mean()
r2 = np.dot(R, d)
assert_array_almost_equal(r, r2)
r3 = np.dot(d, R.T)
assert_array_almost_equal(r, r3)
def test_bootstrap_array():
B = np.array([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
H = hat(B.T)
R = np.zeros((5,5))
d = np.arange(1, 6)
dhat = np.dot(H, d)
assert_array_almost_equal(bootstrap_data_voxel(dhat, H, R), dhat)
assert_array_almost_equal(bootstrap_data_array(dhat, H, R), dhat)
H = np.zeros((5,5))
def test_ResidualBootstrapWrapper():
B = np.array([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
B = B.T
H = hat(B)
d = | np.arange(10) | numpy.arange |
#!/usr/bin/python
# coding:utf-8
import numpy as np
import random
import string
from requests import Request, Session
from MyDecision import Decision
from MyWord2Vec import Word2Vec
PROXY = {'http': '127.0.0.1:8083'}
# Column information for the CredentialsTBL table
str_col_credentialstbl = "site_id, " \
"type, " \
"credential_info1, " \
"credential_info2, " \
"credential_info3, " \
"credential_info4, " \
"credential_info5, " \
"secret_question, " \
"secret_answer, " \
"temp"
# Column information for the WordSimilarityTBL table
str_col_wordsimilaritytbl = "page_type, " \
"word, " \
"value"
# Definition of the environment
class Environment(object):
    int_max_learning_episode = 30 # number of learning episodes
#int_max_learning_episode = 100
int_which_episode = 0
int_learning_episode = 0
int_total_reward = 0
int_total_step = 0
    # Dummy file for multipart requests
str_dummy_file_path = ".\\dummy.png"
    # Set of candidate parameter values (16 kinds)
str_3num = str(random.randint(100, 999))
str_6num = str(random.randint(100000, 999999))
str_8num = str(random.randint(10000000, 99999999))
str_9num = str(random.randint(100000000, 999999999))
str_12num = str(random.randint(100000000000, 999999999999))
str_16num = str(random.randint(1000000000000000, 9999999999999999))
#lst_param_value_collections = [str_3num, str_6num, str_9num, str_12num, str_16num,
# "abc", "abcdef", "abcdefghi",
# str_3num + "abc", str_6num + "abcdef",
# str_3num + "@@@", str_6num + "@@@@@@",
# "abc@@@", str_16num + "@hoge.com",
# str_3num + "ab@@", str_6num + "abcd@@@@"]
    # Set of candidate parameter values (4 kinds)
lst_param_value_collections = [str_8num, str_16num, str_3num + "abc", str_16num + "@hoge.com"]
def __init__(self, str_train_action='TRAIN', obj_browser=None):
self.obj_browser = obj_browser
self.bol_use_dqn_flag = False
self.lst_create_param = []
self.lst_default_param = []
self.str_train = str_train_action
    # Set the target URL
def set_url(self, str_url):
self.str_target_url = str_url
    # Update the set of candidate parameter values
def update_random_params(self):
self.str_3num = str(random.randint(100, 999))
self.str_6num = str(random.randint(100000, 999999))
self.str_8num = str(random.randint(10000000, 99999999))
self.str_9num = str(random.randint(100000000, 999999999))
self.str_12num = str(random.randint(100000000000, 999999999999))
self.str_16num = str(random.randint(1000000000000000, 9999999999999999))
#self.lst_param_value_collections = [self.str_3num, self.str_6num, self.str_9num, self.str_12num, self.str_16num,
# "abc", "abcdef", "abcdefghi",
# self.str_3num + "abc", self.str_6num + "abcdef",
# self.str_3num + "@@@", self.str_6num + "@@@@@@",
# "abc@@@", self.str_16num + "@hoge.com",
# self.str_3num + "ab@@", self.str_6num + "abcd@@@@"]
        # Set of candidate parameter values (4 kinds)
self.lst_param_value_collections = [self.str_8num, self.str_16num,
self.str_3num + "abc", self.str_16num + "@hoge.com"]
    # Get the optimal parameter values
def index_to_action(self, int_index_action, dic_post_param):
lst_param_name = dic_post_param.keys()
for str_param_name in lst_param_name:
            # If the value has not been set
if dic_post_param[str_param_name] == '':
                # Use the value selected by the epsilon-greedy policy as the parameter value
dic_post_param[str_param_name] = self.lst_param_value_collections[int_index_action]
return dic_post_param
    # Define the parameter configuration for the first access
def create_init_param(self, dic_post_params=''):
        # Compute the action patterns: combinations of parameters and all candidate parameter values
        # However, do not assign candidate values to parameters that already hold an original value
lst_param_name = dic_post_params.keys()
lst_param_collections = []
lst_value_collections2 = []
lst_param_collections_no_value = []
int_idx = 0
int_repeat = 0
        # Set the possible values for each parameter
for str_param_name in lst_param_name:
            # Extract parameters whose value is not set
if dic_post_params[str_param_name] == '':
lst_param_collections_no_value.append(str_param_name)
int_repeat += 1
            # Extract parameters that already have a value
else:
lst_param_collections.append(tuple([str_param_name, dic_post_params[str_param_name]]))
int_idx += 1
        # If there are parameters whose value is not set
if int_repeat != 0:
lst_param_temp = []
for str_value_collection in self.lst_param_value_collections:
for int_idx in range(int_repeat):
lst_param_temp.append(tuple([lst_param_collections_no_value[int_idx], str_value_collection]))
lst_value_collections2.append(lst_param_temp)
lst_param_temp = []
            # Use all parameter combinations as the action patterns
for int_idx in range(len(lst_value_collections2)):
lst_value_collections2[int_idx] += lst_param_collections
self.bol_use_dqn_flag = True
return lst_value_collections2, self.bol_use_dqn_flag
        # If all existing parameters already have values, or there are no POST parameters
else:
# print "Not Use DQN."
self.lst_default_param = lst_param_collections
self.bol_use_dqn_flag = False
return lst_param_collections, self.bol_use_dqn_flag
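    # Illustrative example (a sketch) of create_init_param above: with
    # dic_post_params = {'id': '', 'token': 'abc'} and the 4 candidate values,
    # it returns 4 action patterns such as [('id', '12345678'), ('token', 'abc')]
    # together with bol_use_dqn_flag = True; if every parameter already has a
    # value, the original (name, value) pairs are returned and the flag is False.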
    # Determine the state from the HTTP response content
def judge_state(self, obj_response, int_group_id, int_parent_seq):
obj_decision = Decision()
        # Decide whether the transition succeeded
#int_result, int_score = obj_decision.decide_flow_okng(obj_response, int_group_id, int_parent_seq)
int_result = obj_decision.decide_flow_okng(obj_response, int_group_id, int_parent_seq)
int_next_state = 0
if int_result == 1:
            int_reward = 1000 # give a reward of 1000 for a successful transition
            int_next_state = 10 # set the state to "success" (10) for a successful transition
        elif int_result == 0:
            int_reward = -1000 # give a reward of -1000 for an unsuccessful transition
            int_next_state = 1 # set the state to "no change" (1) for an unsuccessful transition
        else:
            int_reward = -1000 # give a reward of -1000 when an error occurs
            int_next_state = 0 # set the state to "error" (0) when an error occurs
return int_reward, int_next_state
    # Learn how to transition to the target page
def flow_learning(self,
obj_db_control,
obj_session,
obj_agent,
obj_web_inspect,
lst_row,
lst_flow,
dic_post_params):
obj_decision = Decision()
lst_temp_action = []
int_reward = 0
        # Check whether trained data already exists
        # Train the agent
int_learning_episode = 0
        int_total_cost = 0.0 # added 10/1
        int_total_reward = 0.0 # added 10/1
        int_frame = 0 # added 10/1
while int_learning_episode < self.int_max_learning_episode:
int_frame += 1
int_reward = 0
obj_request = None
            # Send the POST request
if lst_row[19] == "multipart/form-data":
                # Multipart request
dic_post_files = {}
dic_post_data = {}
                # Separate the parameters that hold files from the ordinary parameters
dic_post_files, dic_post_data = obj_decision.divide_params(lst_row[21],
dic_post_params,
self.str_dummy_file_path)
obj_request = Request("POST",
obj_decision.assemble_url(lst_row),
files=dic_post_files,
data=dic_post_data
)
else:
                # Ordinary request
if lst_row[8].upper() == 'POST':
obj_request = Request("POST",
obj_decision.assemble_url(lst_row),
data=dic_post_params
)
else:
obj_request = Request("GET",
obj_decision.assemble_url(lst_row),
params=dic_post_params
)
obj_prepped = obj_session.prepare_request(obj_request)
obj_response = obj_session.send(obj_prepped,
verify=True,
timeout=60,
proxies=PROXY,
allow_redirects=False
)
            # Show the response in the browser (for demo purposes)
if obj_response is not None:
self.obj_browser.write_response_to_html(obj_response.text, obj_response.encoding)
self.obj_browser.refresh_browser()
            # If the response is a redirect,
            # keep following redirects until no more redirects occur
while obj_response.is_redirect is True:
                # Get the destination URL from the Location header
dic_res_headers = obj_response.headers._store
tpl_location_header = dic_res_headers["location"]
str_redirect_url = tpl_location_header[1]
if 'http://' not in str_redirect_url and 'https://' not in str_redirect_url:
str_fqdn = lst_row[9].encode() + "://" + lst_row[10].encode() + ":" + str(lst_row[11])
if str_redirect_url.startswith('/') is False:
str_redirect_url = '/' + str_redirect_url
str_redirect_url = str_fqdn + str_redirect_url
                # Redirect
obj_request = Request("GET", str_redirect_url)
obj_prepped = obj_session.prepare_request(obj_request)
obj_response = obj_session.send(obj_prepped,
verify=True,
timeout=60,
proxies=PROXY,
allow_redirects=False
)
                # Show the response in the browser (for demo purposes)
if obj_response is not None:
self.obj_browser.write_response_to_html(obj_response.text, obj_response.encoding)
self.obj_browser.refresh_browser()
            # Determine the reward from the response content
int_reward, int_next_state = self.judge_state(obj_response, lst_row[2], lst_row[6])
            # If the target is a member-information change page and the transition succeeded, replace the credentials
if lst_row[3] == 5 and int_reward > 0:
                # Get the credentials
lst_credentials, str_temp_label_value = obj_decision.decide_get_credentials(
obj_decision.change_dictionary_to_list(dic_post_params),
lst_row[22])
                # If the credentials could be obtained, replace them
if lst_credentials[0] is not None and lst_credentials[1] is not None:
str_sql = "DELETE FROM CredentialsTBL WHERE site_id = 1;"
obj_db_control.delete(str_sql)
str_sql = "INSERT INTO CredentialsTBL(" + str_col_credentialstbl + ") " \
"VALUES (1, 1, ?, ?, ?, ?, ?, '', '', '')"
lst_value = [lst_credentials[0],
lst_credentials[1],
lst_credentials[2],
lst_credentials[3],
lst_credentials[4]]
obj_db_control.insert(str_sql, lst_value)
                # If the credentials could not be obtained, temporarily store the parameter information in the temp field
else:
str_sql = "DELETE FROM CredentialsTBL WHERE site_id = 1;"
obj_db_control.delete(str_sql)
str_sql = "INSERT INTO CredentialsTBL(" + str_col_credentialstbl + ") " \
"VALUES (1, 1, '', '', '', '', '', '', '', ?)"
lst_value = [str_temp_label_value]
obj_db_control.insert(str_sql, lst_value)
            # Set the next state
lst_state_dash = [lst_row[6], lst_row[2], int_next_state]
int_total_cost += obj_agent.observe(int_reward)
int_total_reward += int_reward
obj_agent.new_episode()
nd_state = np.array(lst_state_dash, dtype=np.uint8)
int_index_action, nd_values = obj_agent.act(nd_state)
print('frame:%d / total reward:%d / total cost:%f / action:%d / reward:%d' %
(int_frame, int_total_reward, int_total_cost, int_index_action, int_reward))
            # Prepare for the next learning step: transition from the root to the page just before the target
obj_session = None
            obj_session = Session() # replay the transition flow with a new session
if obj_web_inspect.flow_replay(obj_session, obj_db_control, obj_agent, lst_row, lst_flow) is False:
continue
            # Prepare for the next learning step: update the parameter values
            # If POST parameters exist
if lst_row[13] != '':
                # Get the latest parameter configuration
self.update_random_params()
dic_post_params, bol_skip_flag = obj_decision.update_post_parameter(obj_db_control, lst_row)
dic_post_params = self.index_to_action(int_index_action, dic_post_params)
int_learning_episode += 1
        # Store the learned word/value pairs in the word-similarity table
dic_post_params, bol_skip_flag = obj_decision.update_post_parameter(obj_db_control, lst_row)
lst_state = [lst_row[6], lst_row[2], 0]
int_action_index, nd_values = obj_agent.act(np.array(lst_state, dtype=np.uint8))
lst_params = np.argsort(nd_values)[0].tolist()
int_action_index = len(lst_params) - 1
self.update_random_params()
dic_post_params = self.index_to_action(int_action_index, dic_post_params)
int_idx = 0
lst_param_names = lst_row[13].split('&')
lst_param_types = lst_row[21].split(',')
lst_label_names = lst_row[22].split(',')
while int_idx < int(lst_row[14]):
if lst_label_names[int_idx] != '@':
lst_param_names_tmp = lst_param_names[int_idx].split('=')
str_param_value = dic_post_params[lst_param_names_tmp[0]]
str_sql = "INSERT INTO WordSimilarityTBL(" + str_col_wordsimilaritytbl + ") " \
"VALUES (?, ?, ?)"
lst_value = [int(lst_row[3]), lst_label_names[int_idx], str_param_value]
obj_db_control.insert(str_sql, lst_value)
int_idx += 1
    # Send a POST request to the target URL and return whether the transition succeeded.
def send_message(self, obj_db_control, obj_session, obj_agent, obj_web_inspect, lst_state, lst_row, lst_flow):
obj_decision = Decision()
        # If DQN is used and this is not a login process
if self.bol_use_dqn_flag is True:
            # Get the latest parameter configuration
dic_post_params, bol_skip_flag = obj_decision.update_post_parameter(obj_db_control, lst_row)
int_idx = 0
bol_relearn_flag = False
lst_param_names = lst_row[13].split('&')
lst_param_types = lst_row[21].split(',')
lst_label_names = lst_row[22].split(',')
while int_idx < int(lst_row[14]):
if lst_label_names[int_idx] != '@':
str_sql = "SELECT value from WordSimilarityTBL where word like '%"\
+ lst_label_names[int_idx] + "%';"
obj_cursor = obj_db_control.select(str_sql)
lst_all_row = obj_cursor.fetchall()
lst_param_names_tmp = lst_param_names[int_idx].split('=')
                    # A similar word exists in the word-similarity table
if len(lst_all_row) != 0:
lst_candidate = list(lst_all_row[0])
str_candidate_value = str(lst_candidate[0])
int_find_idx = str_candidate_value.find('@')
if int_find_idx > 0:
str_random_value = ''.join([random.choice(string.digits) for i in range(int_find_idx)])
str_candidate_value = str_random_value + str_candidate_value[int_find_idx:]
dic_post_params[lst_param_names_tmp[0]] = str_candidate_value
                    # No similar word exists in the word-similarity table
else:
obj_word2vec = Word2Vec()
obj_result = obj_word2vec.get_candidate_word(lst_label_names[int_idx])
if obj_result is not False:
for r in obj_result:
str_sql = "SELECT value from WordSimilarityTBL where word like '%" \
+ r[0] + "%';"
obj_cursor = obj_db_control.select(str_sql)
lst_all_row = obj_cursor.fetchall()
if len(lst_all_row) != 0:
lst_candidate = list(lst_all_row[0])
str_candidate_value = str(lst_candidate[0])
int_find_idx = str_candidate_value.find('@')
if int_find_idx > 0:
str_random_value = \
''.join([random.choice(string.digits) for i in range(int_find_idx)])
str_candidate_value = str_random_value + str_candidate_value[int_find_idx:]
dic_post_params[lst_param_names_tmp[0]] = str_candidate_value
break
if dic_post_params[lst_param_names_tmp[0]] == '':
bol_relearn_flag = True
else:
bol_relearn_flag = True
int_idx += 1
if bol_relearn_flag is True and self.str_train == 'TRAIN':
                # Train the agent
obj_response = self.flow_learning(obj_db_control,
obj_session,
obj_agent,
obj_web_inspect,
lst_row,
lst_flow,
dic_post_params
)
            # Based on the learning results, set the parameter values best suited for the transition
int_action_index, nd_values = obj_agent.act(np.array(lst_state, dtype=np.uint8))
lst_params = | np.argsort(nd_values) | numpy.argsort |
import numpy as np
import scipy
from scipy import optimize as opt
from sklearn.decomposition import PCA
from utils import *
from functools import partial
class PNS(object):
"""
    Fit nested spheres to data. This is a Python port of the PNS MATLAB code.
See Sungkyu Jung et al, 2012 for the original PNS.
For Kurtosis test, see Byungwon Kim et al., 2020 for reference.
For an application on shape analysis, refer to Liu et al., Non-Euclidean Analysis of Joint Variations in Multi-object Shapes.
    There might be some small differences from the MATLAB implementation due to the different optimization methods and other numerical issues (e.g., non-uniqueness of singular vectors from SVD).
Author: <NAME>
    Date: Oct. 10, 2020
"""
def __init__(self, data=None, itype=9, alpha=0.05):
"""
Initialize an object of PNS model for data with fitting type itype.
Args:
data (numpy.ndarray): A 2D matrix of dimension d-by-N, where d is the number of features and N is the number of cases
itype (integer): The type of the fitting strategy
################### test type ######################
## itype = 1: always small circle
## itype = 2: always great circle
## itype = 9 (default): apply Kurtosis test to automatically choose between great and small circle fitting
alpha (float): significance level for testing of whether great or small circle
Attributes:
output (tuple): The result of PNS fitting, including
resmat (numpy.ndarray): The Euclideanized features in a matrix of dimension (k-1)-by-k, where k = min(d, N)
PNS (tuple): The configuration of the fitted coordinate system, which is composed of
0. radii (list): denote the size of each fitted subsphere for the use of normalize residuals
1. orthaxis (list): one of the parameters of every fitted subsphere. Centers of fitted subspheres
2. dist (list): another parameter (geodesic distance in radians) of every fitting subsphere
3. pvalues (list): intermediate results from hypothesis testing for every subsphere fitting. It's empty if itype != 9
4. gsphere (list): types (great sphere:1 or small sphere:0) of fitted subspheres
5. basisu (list): the loadings in the embedding feature space
6. mean (vector): PNS mean. This is the center of the distribution
7. itype (list): user-selected types (great sphere:2 or small sphere:1)
Returns:
An instance of a PNS model
"""
## Input: d x n matrix, where d is the number of features
self.data = data
self.itype = itype
## significance level for testing of whether great or small circle
self.alpha = alpha
## output: (resmat, PNS)
self.output = None
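        # Minimal usage sketch (illustrative; assumes `X` is a d-by-N array whose
        # columns are unit vectors):
        #   model = PNS(X, itype=9, alpha=0.05)
        #   model.fit()
        #   resmat, coords = model.output        # Euclideanized scores and fitted subspheres
        #   X_back = PNS.inv(resmat, coords)     # map the scores back onto the sphere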
def emsvd(self, Y, k=None, tol=1E-3, maxiter=None):
"""
Approximate SVD on data with missing values via expectation-maximization
Inputs:
-----------
Y: (nobs, ndim) data matrix, missing values denoted by NaN/Inf
k: number of singular values/vectors to find (default: k=ndim)
tol: convergence tolerance on change in trace norm
maxiter: maximum number of EM steps to perform (default: no limit)
Returns:
-----------
Y_hat: (nobs, ndim) reconstructed data matrix
mu_hat: (ndim,) estimated column means for reconstructed data
U, s, Vt: singular values and vectors (see np.linalg.svd and
scipy.sparse.linalg.svds for details)
"""
if k is None:
svdmethod = partial(np.linalg.svd, full_matrices=False)
else:
svdmethod = partial(svds, k=k)
if maxiter is None:
maxiter = np.inf
# initialize the missing values to their respective column means
mu_hat = np.nanmean(Y, axis=0, keepdims=1)
valid = np.isfinite(Y)
Y_hat = np.where(valid, Y, mu_hat)
halt = False
ii = 1
v_prev = 0
while not halt:
# SVD on filled-in data
U, s, Vt = svdmethod(Y_hat - mu_hat)
# impute missing values
Y_hat[~valid] = (U.dot(np.diag(s)).dot(Vt) + mu_hat)[~valid]
# update bias parameter
mu_hat = Y_hat.mean(axis=0, keepdims=1)
# test convergence using relative change in trace norm
v = s.sum()
if ii >= maxiter or ((v - v_prev) / v_prev) < tol:
halt = True
ii += 1
v_prev = v
return Y_hat, mu_hat, U, s, Vt
def fit(self):
"""
This is the main entry of fitting PNS to data
"""
## 0. make sure the data are distributed on a unit sphere
d, n = self.data.shape
if not is_on_unit_sphere(self.data):
print("Mapping data to preshape space")
data_in_3d = np.reshape(self.data, (-1, 3, n))
_, k_landmarks, _ = data_in_3d.shape
from geomstats.geometry.pre_shape import PreShapeSpace
preshape = PreShapeSpace(m_ambient=3, k_landmarks=k_landmarks)
data_preshape = preshape.projection(data_in_3d)
base_point = data_preshape[0]
data_shape = preshape.align(point=data_preshape, base_point=base_point)
self.data = np.reshape(data_shape, (d, n))
## 1. rotate data to get a tight space, excluding the null space
eps = 1e-15
u, s, _ = np.linalg.svd(self.data, full_matrices=False)
small_singular_val = np.where(s < eps)[0]
maxd = len(small_singular_val)
if maxd == 0:
maxd = np.min([d, n]) + 1
## the dimension of null space
nullspdim = d - maxd + 1
## 2. intrinsic dimension of sphere is 1 dimension lower than extrinsic_dim
dm = maxd - 2
basisu = []
if nullspdim > 0:
basisu = u[:, :dm+1]
## extract the signal by projecting to the kernel space (complementary of the null space)
currentSphere = np.matmul(u[:, :dm+1].T, self.data)
else:
currentSphere = self.data
if self.itype == 9:
## Use hypothesis testing (Kurtosis test) to decide whether great or small circle for EACH subsphere
self.output = self.automatic_fit_subspheres(currentSphere, dm, nullspdim, basisu)
else:
## Otherwise, always fit data with one particular circle type (great or small)
self.output = self.fit_with_subspheres(currentSphere, dm, nullspdim, basisu)
def automatic_fit_subspheres(self, data, dm, nullspdim, basisu=[]):
"""
Automatically decide which type (great or small) spheres to fit the data
Args:
data (numpy.ndarray): A 2D matrix of dimension d-by-N, where d is the number of features and N is the number of cases
dm (integer): the intrinsic dimension of the hypersphere
nullspdim (integer): the dimension of the null space
basisu (list): the input basis
Returns:
resmat (numpy.ndarray): The Euclideanized features in a matrix of dimension (k-1)-by-k, where k = min(d, N)
PNS (tuple): The configuration of the fitted coordinate system, which is composed of
0. radii (list): denote the size of each fitted subsphere for the use of normalize residuals
1. orthaxis (list): one of the parameters of every fitted subsphere. Centers of fitted subspheres
2. dist (list): another parameter (geodesic distance in radians) of every fitting subsphere
3. pvalues (list): intermediate results from hypothesis testing for every subsphere fitting. It's empty if itype != 9
4. gsphere (list): types (great sphere:1 or small sphere:0) of fitted subspheres
5. basisu (list): the loadings in the embedding feature space
6. mean (vector): PNS mean. This is the center of the distribution
7. itype (list): user-selected types (great sphere:2 or small sphere:1)
"""
def LRTpval(res_great, res_small, n):
chi2 = n * np.log(np.sum(res_great ** 2) / np.sum(res_small ** 2))
chi2 = max(chi2, 0)
return 1 - scipy.stats.chi2.cdf(chi2, 1)
def decide_circle_type(dim, small_circle=True):
circle_type = 'SMALL' if small_circle else 'GREAT'
print(str(dim) + '-sphere to ' + str(dim-1) + '-sphere by fitting a '+ circle_type +' sphere')
dist = []
resmat = []
orthaxis = []
gsphere = []
pvalues = []
iso = []
_, num_cases = data.shape
nan = float('nan')
print('Testing with kurtosis using alpha: ' + str(self.alpha))
is_isotropic = False
for i in range(dm - 1):
center, r = None, None
if is_isotropic:
decide_circle_type(dm-i, False)
center, r = self.get_subsphere(data)
gsphere.append(1)
pvalues.append((nan, nan))
else:
center_small, r_small = self.get_subsphere(data, True)
small_rot_data = np.matmul(center_small.T, data)
res_small = np.arccos(np.clip(small_rot_data, -1, 1)) - r_small
center_great, r_great = self.get_subsphere(data)
great_rot_data = np.matmul(center_great.T, data)
res_great = np.arccos(np.clip(great_rot_data, -1, 1)) - r_great
## Chi-squared statistic for a likelihood test
pval1 = LRTpval(res_great, res_small, num_cases)
if pval1 > self.alpha:
center, r = center_great, r_great
pvalues.append((pval1, nan))
gsphere.append(1)
decide_circle_type(dm-i, False)
else:
## Kurtosis test
data_centered_around_np = rotate_to_north_pole(center_small.squeeze()) @ data
data_in_tangent = log_north_pole(data_centered_around_np)
d, n = data_in_tangent.shape
norm_data = np.sum(data_in_tangent ** 2, axis=0)
kurtosis = np.sum(norm_data ** 2) / float(n) / (np.sum(norm_data) / (d*(n-1))) ** 2
M_kurt = d * (d + 2) ** 2 / (d + 4)
                    V_kurt = (1/n) * (128*d*(d+2)**4) / ((d+4)**3*(d+6)*(d+8))
pval2 = scipy.stats.norm.cdf((kurtosis - M_kurt) / np.sqrt(V_kurt))
pvalues.append((pval1, pval2))
if pval2 > self.alpha:
center, r = center_great, r_great
gsphere.append(1)
decide_circle_type(dm - i, False)
is_isotropic = True
else:
center, r = center_small, r_small
gsphere.append(0)
decide_circle_type(dm - i)
res_angle = np.matmul(center.T, data)
res = np.arccos(np.clip(res_angle, -1, 1)) - r
orthaxis.append(center)
dist.append(r)
resmat.append(res.squeeze())
iso.append(is_isotropic)
nested_sphere = np.matmul(rotate_to_north_pole(center.squeeze()), data)
data = nested_sphere[:dm-i, :] / np.sqrt(1-nested_sphere[dm-i, :] ** 2)[np.newaxis,:]
## parameterize 1-sphere to angles
if True: #nullspdim + 1 - (dm - 1) <= 0:
s1_to_radian = np.arctan2(data[1, :], data[0, :])
mean_theta, _ = self.geod_mean_s1(s1_to_radian.T)
orthaxis.append(mean_theta)
last_res = (s1_to_radian - mean_theta + np.pi) % (2*np.pi) - np.pi
resmat.append(last_res)
## scale resmat according to the sizes of subspheres
radii = [1.0]
for i in range(1, dm):
radii.append(np.prod(np.sin(dist[:i])))
resmat = np.flipud(np.array(radii)[:, np.newaxis] * resmat)
PNS = {'radii': radii, 'orthaxis': orthaxis, 'dist': dist, 'pvalues': pvalues, \
'gsphere': gsphere, 'basisu': basisu, 'mean': [], 'itype': self.itype}
PNS['mean'] = self.inv(np.zeros((dm, 1)), PNS)
return (resmat, PNS)
def fit_with_subspheres(self, data, dm, nullspdim, basisu=[]):
"""
Fit the data with user-selected types (great or small sphere) of subspheres
Args:
data (numpy.ndarray): A 2D matrix of dimension d-by-N, where d is the number of features and N is the number of cases
dm (integer): the intrinsic dimension of the hypersphere
nullspdim (integer): the dimension of the null space
basisu (list): the input basis
Returns:
resmat (numpy.ndarray): The Euclideanized features in a matrix of dimension (k-1)-by-k, where k = min(d, N)
PNS (tuple): The configuration of the fitted coordinate system, which is composed of
0. radii (list): denote the size of each fitted subsphere for the use of normalize residuals
1. orthaxis (list): one of the parameters of every fitted subsphere. Centers of subspheres.
2. dist (list): another parameter (geodesic distance in radians) of every fitting subsphere
3. pvalues (list): intermediate results from hypothesis testing for every subsphere fitting. It's empty if itype != 9
4. gsphere (list): types (great sphere:1 or small sphere:0) of fitted subspheres
5. basisu (list): the loadings in the embedding feature space
6. mean (vector): PNS mean. This is the center of the distribution
7. itype (list): user-selected types (great sphere:2 or small sphere:1)
"""
dist = []
resmat = []
orthaxis = []
gsphere = []
pvalues = []
for i in range(dm-1):
circle_type = 'SMALL' if self.itype == 1 else 'GREAT'
print(str(dm-i) + '-sphere to ' + str(dm-i-1) + '-sphere by fitting a ' + circle_type +' sphere')
center, r = self.get_subsphere(data, small_circle=(self.itype==1))
curr_angle = np.matmul(center.T, data)
res = np.arccos(np.clip(curr_angle, -1, 1)) - r
orthaxis.append(center)
dist.append(r)
resmat.append(res.squeeze())
nested_sphere = np.matmul(rotate_to_north_pole(center.squeeze()), data)
data = nested_sphere[:dm-i, :] / np.sqrt(1-nested_sphere[dm-i, :] ** 2)[np.newaxis,:]
gsphere.append(self.itype - 1)
## parameterize 1-sphere to angles
if True: #nullspdim + 1 - (dm - 1) <= 0:
s1_to_radian = np.arctan2(data[1, :], data[0, :])
mean_theta, _ = self.geod_mean_s1(s1_to_radian.T)
orthaxis.append(mean_theta)
last_res = (s1_to_radian - mean_theta + np.pi) % (2*np.pi) - np.pi
resmat.append(last_res)
## scale resmat according to the sizes of subspheres
radii = [1.0]
for i in range(1, dm):
radii.append(np.prod(np.sin(dist[:i])))
resmat = np.flipud(np.array(radii)[:, np.newaxis] * resmat)
PNS = {'radii': radii, 'orthaxis': orthaxis, 'dist': dist, 'pvalues': pvalues, \
'gsphere': gsphere, 'basisu': basisu, 'mean': [], 'itype': self.itype}
PNS['mean'] = self.inv(np.zeros((dm, 1)), PNS)
return (resmat, PNS)
def geod_mean_sk(self, data, tol=1e-10):
"""
Geodesic mean of data on S^k (Sphere) use Log map and Exp
Args:
data (numpy.ndarray): a matrix (k+1)-by-n: a column vector represents a point on S^k
tol (float): tolerance that stops the iteration
Returns:
vini (numpy.ndarray): A vector of dimension (k-1)-by-1, geodesic mean on the hypersphere S^(k-1)
"""
vini = data[:, 0]
diff = 1
        while diff > tol:
rot = rotate_to_north_pole(vini)
rot_data = rot @ data
mean_in_tangent = np.mean(rot_data, axis=1)
v_new = exp_north_pole(mean_in_tangent)
pull_back_v_new = np.linalg.inv(rot) @ v_new
diff = np.linalg.norm(pull_back_v_new - vini)
vini = pull_back_v_new
return vini
def geod_mean_s1(self, theta):
"""
Geodesic mean of data on S^1 (Circle) by <NAME> and <NAME>
method - gives all multiples of geodesic mean set.
Args:
theta (float): a column vector of angles
Returns:
geod_mean (numpy.ndarray): geodesic mean on S^1
            geod_var (numpy.ndarray): geodesic variance on S^1
"""
n = len(theta.squeeze())
mean_cand = (abs(np.mean(theta)) + 2*np.pi*np.arange(n) / n) % (2*np.pi)
theta = theta % (2*np.pi)
geod_var = np.zeros((n, 1))
for i in range(n):
v = mean_cand[i]
var1 = (theta - v) ** 2
var2 = (theta - v + 2 * np.pi) ** 2
var3 = (v - theta + 2 * np.pi) ** 2
dist2 = np.min(np.vstack((var1[None,:], var2[None,:], var3[None,:])), axis=0)
geod_var[i] = np.sum(dist2)
ind = np.argmin(geod_var)
geod_mean = mean_cand[ind] % (2*np.pi)
geod_var = geod_var[ind] / n
return geod_mean, geod_var
def get_subsphere(self, data, small_circle=False):
"""
The least square estimates of the best fitting subsphere
to the data on the unit hyper-sphere.
[center, r]= getSubSphere(data), with d x n data matrix with each
column having unit length, returns the center and the
radius.
Args:
data (numpy.ndarray): A 2D matrix of dimension d-by-N, where d is the number of features and N is the number of cases
small_circle (bool): True if the subsphere is parameterized by small circle
Returns:
center (numpy.ndarray): the vector of the center of the fitted subsphere
r (float): the radius of the fitted subsphere
"""
def obj_fun(center, r, data):
"""
the objective function that we want to minimize: sum of squared distances
from the data to the subsphere
"""
test = np.matmul(center.T, data)
test = np.clip(test, -1, 1)
return np.mean((np.arccos(test) - r) ** 2)
def est_subsphere(data, c0):
tol = 1e-9
cnt = 0
err = 1
d, n = data.shape
g_now = 1e10
center = None
r = None
while err > tol:
c0 = c0 / np.linalg.norm(c0)
rot = rotate_to_north_pole(c0)
tp_data = log_north_pole(np.matmul(rot, data))
new_center_tp, r = self.least_square_fit_sphere(tp_data, np.zeros(d-1), small_circle)
if r > np.pi:
r = np.pi / 2
u, s, _ = scipy.linalg.svd(tp_data, lapack_driver='gesvd')
## add minus sign to keep consistent with the results from MATLAB
last_singular_vect = u[:, -1]
new_center_tp = last_singular_vect * np.pi / 2
new_center = exp_north_pole(x=new_center_tp[:, np.newaxis])
center = np.matmul(np.linalg.inv(rot), new_center)
g_next = obj_fun(center, r, data)
err = abs(g_now - g_next)
g_now = g_next
c0 = center.squeeze()
cnt += 1
if cnt > 30:
print('Fit subspheres iteration reached 30th with residuals: {}'.format(err))
break
return (g_now, center, r)
if np.any(np.isnan(data)):
#Y_hat, mu_hat, u, s, Vt = self.emsvd(data)
data = np.nan_to_num(data)
u, s, _ = scipy.linalg.svd(data, lapack_driver='gesvd')
initial_center = u[:, -1]
### Zhiyuan: Keep initial_center in north hemisphere
north_pole = np.zeros_like(initial_center)
north_pole[-1] = 1
# if np.inner(north_pole, initial_center) < 0:
# initial_center = -initial_center
c0 = initial_center
i1_save = est_subsphere(data, c0)
pca = PCA()
pca.fit(data.T)
u = pca.components_.T
### Zhiyuan: Here find the last "effective" eigenvector of COVARIANCE matrix
initial_center = u[:, -1]
for i_vector in range(len(pca.explained_variance_) - 1, -1, -1):
if pca.explained_variance_[i_vector] > 1e-15:
initial_center = u[:, i_vector]
break
# if np.inner(north_pole, initial_center) < 0:
# initial_center = -initial_center
c0 = initial_center
i2_save = est_subsphere(data, c0)
if i1_save[0] <= i2_save[0]:
center = i1_save[1]
r = i1_save[2]
else:
center = i2_save[1]
r = i2_save[2]
if r > np.pi / 2:
center = -center
r = np.pi - r
return center, r
# def geodesic_dist(self, r1, r2):
# """
# Geodesic distance
# Input r1, r2: n x 1 vector
# """
# k = (np.linalg.norm(r1)) ** 2 + (np.linalg.norm(r2)) ** 2
# theta = 2 * np.inner(r1, r2) / k
# if theta < -1:
# theta = -1
# elif theta > 1:
# theta = 1
# return np.abs(np.arccos(theta))
def least_square_fit_sphere(self, data, initial_center=None, small_circle=False):
"""
The least square estimates of the sphere to the data.
the Levenberg-Marquardt method in Fletcher's modification
(<NAME>., (1971): A Modified Marquardt Subroutine for
Nonlinear Least Squares. Rpt. AERE-R 6799, Harwell)
and implemented for MATLAB by <NAME>'s "LMFnlsq.m"
Args:
data (numpy.ndarray): A 2D matrix of dimension d-by-N, where d is the number of features and N is the number of cases
initial_center (numpy.ndarray): The intial guess of the center
small_circle (bool): True if the subsphere is parameterized by small circle
Returns:
center (numpy.ndarray): the vector of the center of the fitted subsphere
r (float): the radius of the fitted subsphere
"""
if initial_center is None:
initial_center = np.mean(data, axis=1)
def compute_residuals(x):
x = x[:, np.newaxis]
di = np.sqrt(np.sum((data - x) ** 2, axis=0))
r = np.pi / 2
if small_circle:
r = np.sum(di) / len(di)
di = di - r
return di
opt_solution = None
opt_solution = opt.least_squares(compute_residuals, initial_center, method='lm', max_nfev=50, xtol=1e-15)
# if small_circle:
# opt_solution = opt.least_squares(compute_residuals, initial_center, max_nfev=50, xtol=1e-9)
# else:
# opt_solution = opt.least_squares(compute_residuals, initial_center, method='lm', max_nfev=50, xtol=1e-9)
center = opt_solution.x
di = np.sqrt(np.sum((data - center[:, np.newaxis]) ** 2, axis=0))
if small_circle:
r = np.mean(di)
else:
r = np.pi / 2
return center, r
@staticmethod
def inv(resmat, coords):
"""
Invert PNS that converts Euclidean representation from PNS to coords in extrinsic coords
Args:
resmat (numpy.ndarray): Euclideanized features of dimension (k-1)-by-k from PNS.fit
coords (tuple): PNS configurations (subspheres) from PNS.fit
Returns:
T (numpy.ndarray): A d-by-N matrix representing with extrinsic coords, where d is the number of features in the embedding space and N is the number of cases
"""
d, n = resmat.shape
ns_orthaxis = np.flipud(np.array(coords['orthaxis'][:-1], dtype="object"))
ns_radius = np.flipud(np.array(coords['dist'], dtype="object"))
geodmean = coords['orthaxis'][-1]
res = resmat / np.flipud(coords['radii'])[:, np.newaxis]
## convert coords for S^1 (i.e., a circle)
## by adding the mean value to each residual (also degrees)
if d > 0:
T = np.vstack(( | np.cos(geodmean + res[0, :]) | numpy.cos |
# ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
)
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957),
pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957),
pytest.param(np.int16, | np.int16(-12345) | numpy.int16 |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
XC functional, the interface to xcfun (https://github.com/dftlibs/xcfun)
U. Ekstrom et al, J. Chem. Theory Comput., 6, 1971
'''
import copy
import ctypes
import math
import numpy
from pyscf import lib
_itrf = lib.load_library('libxcfun_itrf')
XC = XC_CODES = {
'SLATERX' : 0, # Slater LDA exchange
'VWN5C' : 1, # VWN5 LDA Correlation functional
'BECKEX' : 2, # Becke 88 exchange
'BECKECORRX' : 3, # Becke 88 exchange correction
'BECKESRX' : 4, # Short range Becke 88 exchange
'OPTX' : 5, # OPTX Handy & Cohen exchange
'LYPC' : 6, # LYP correlation
'PBEX' : 7, # PBE Exchange Functional
'REVPBEX' : 8, # Revised PBE Exchange Functional
'RPBEX' : 9, # RPBE Exchange Functional
'PBEC' : 10, # PBE correlation functional
'SPBEC' : 11, # sPBE correlation functional
'VWN_PBEC' : 12, # PBE correlation functional using VWN LDA correlation.
#'RANGESEP_MU' : 16, # Error function range separation parameter (1/a0)
'KTX' : 17, # KT exchange GGA correction
#'TFK' : 18, # Thomas-Fermi Kinetic Energy Functional
'PW91X' : 19, # Perdew-Wang 1991 GGA Exchange Functional
#'PW91K' : 20, # PW91 GGA Kinetic Energy Functional
'PW92C' : 21, # PW92 LDA correlation
'M05X' : 22, # M05 exchange
'M05X2X' : 23, # M05-2X exchange
'M06X' : 24, # M06 exchange
'M06X2X' : 25, # M06-2X exchange
'M06LX' : 26, # M06-L exchange
'M06HFX' : 27, # M06-HF exchange
'BRX' : 28, # BR exchange. Becke-Roussels exchange functional.
'M05X2C' : 29, # M05-2X Correlation
'M05C' : 30, # M05 Correlation
'M06C' : 31, # M06 Correlation
'M06LC' : 32, # M06-L Correlation
'M06X2C' : 33, # M06-2X Correlation
'TPSSC' : 34, # TPSS original correlation functional
'TPSSX' : 35, # TPSS original exchange functional
'REVTPSSC' : 36, # Revised TPSS correlation functional
'REVTPSSX' : 37, # Reviewed TPSS exchange functional
#
# alias
#
'SLATER' : 0, # SLATERX
'LDA' : 0, # SLATERX
'VWN' : 1, # VWN5C
'VWN5' : 1, # VWN5C
'B88' : 2, # BECKEX
'LYP' : 6, # LYP correlation
'P86' : None,
'BLYP' : 'BECKEX + LYP',
'BP86' : None,
'BPW91' : 'BECKEX + PW91C',
'BPW92' : 'BECKEX + PW92C',
'OLYP' : '2.4832*SLATER - 1.43169*OPTX + LYP', # CPL, 341, 319
'KT1' : '1.006*SLATER - .006*KTX + VWN5', # JCP, 119, 3015
'KT2' : '1.07773*SLATER - .006*KTX + 0.576727*VWN5', # JCP, 119, 3015
'KT3' : '2.021452*SLATER - .004*KTX - .925452*OPTX + .864409*LYP', # JCP, 121, 5654
'PBE0' : '.25*HF + .75*PBEX + PBEC', # JCP, 110, 6158
'PBE1PBE' : 'PBE0',
'B3PW91' : None,
'B3P86' : None,
# Note, use VWN5 for B3LYP. It is different to the libxc default B3LYP
'B3LYP' : 'B3LYP5',
'B3LYP5' : '.2*HF + .08*SLATER + .72*BECKE + .81*LYP + .19*VWN5',
'B3LYPG' : None, # B3LYP-VWN3 used by Gaussian and libxc
'O3LYP' : '.1161*HF + .1129*SLATER + .8133*OPTX + .81*LYP + .19*VWN5', # Mol. Phys. 99 607
'M062X' : 'M06X2X, M062XC',
'CAMB3LYP' : None,
}
LDA_IDS = set([0, 1, 13, 14, 15, 16, 18, 21])
GGA_IDS = set([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 17, 19, 20])
MGGA_IDS = set([22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37])
MLGGA_IDS = set([28])
HYB_XC = set(('PBE0' , 'PBE1PBE' , 'B3PW91' , 'B3P86' , 'B3LYP' ,
'B3LYPG' , 'O3LYP' , 'M062X' , 'CAMB3LYP',))
MAX_DERIV_ORDER = 3
def xc_type(xc_code):
if isinstance(xc_code, str):
hyb, fn_facs = parse_xc(xc_code)
else:
fn_facs = [(xc_code, 1)] # mimic fn_facs
if not fn_facs:
return 'HF'
elif all(xid in LDA_IDS for xid, val in fn_facs):
return 'LDA'
elif any(xid in MGGA_IDS or xid in MLGGA_IDS for xid, val in fn_facs):
return 'MGGA'
else:
# all((xid in GGA_IDS or xid in LDA_IDS for xid, val in fn_fns)):
# include hybrid_xc
return 'GGA'
def is_lda(xc_code):
return xc_type(xc_code) == 'LDA'
def is_hybrid_xc(xc_code):
if isinstance(xc_code, str):
return ('HF' in xc_code or xc_code in HYB_XC or
hybrid_coeff(xc_code) != 0)
elif isinstance(xc_code, int):
return False
else:
return any((is_hybrid_xc(x) for x in xc_code))
def is_meta_gga(xc_code):
return xc_type(xc_code) == 'MGGA'
def is_gga(xc_code):
return xc_type(xc_code) == 'GGA'
def max_deriv_order(xc_code):
hyb, fn_facs = parse_xc(xc_code)
return MAX_DERIV_ORDER
def test_deriv_order(xc_code, deriv, raise_error=False):
support = deriv <= max_deriv_order(xc_code)
if not support and raise_error:
raise NotImplementedError('xcfun library does not support derivative '
'order %d for %s' % (deriv, xc_code))
return support
def hybrid_coeff(xc_code, spin=0):
return parse_xc(xc_code)[0]
def parse_xc_name(xc_name):
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
def parse_xc(description):
'''Rules to input functional description:
* The given functional description must be a one-line string.
* The functional description is case-insensitive.
* The functional description string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
- If "," was not appeared in string, the entire string is considered as
X functional.
- To neglect X functional (just apply C functional), leave blank in the
first part, eg description=',vwn' for pure VWN functional
* The functional name can be placed in arbitrary order. Two name needs to
be separated by operators "+" or "-". Blank spaces are ignored.
NOTE the parser only reads operators "+" "-" "*". / is not in support.
* A functional name is associated with one factor. If the factor is not
given, it is assumed equaling 1.
* String "HF" stands for exact exchange (HF K matrix). It is allowed to
put in C functional part.
* Be careful with the xcfun convention on GGA functional, in which the LDA
contribution is included.
'''
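    # Illustrative examples of the rules above (a sketch; the returned ids are the
    # integer codes from XC_CODES, written here by name for readability):
    #   parse_xc('lda,vwn')              -> hyb 0,   [(SLATERX, 1.0), (VWN5C, 1.0)]
    #   parse_xc('b3lyp')                -> hyb 0.2, weighted SLATERX/BECKEX/LYPC/VWN5C terms
    #   parse_xc('0.5*HF + 0.5*b88,lyp') -> hyb 0.5, [(BECKEX, 0.5), (LYPC, 1.0)]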
if isinstance(description, int):
        return 0, ((description, 1.),)
elif not isinstance(description, str): #isinstance(description, (tuple,list)):
return parse_xc('%s,%s' % tuple(description))
if ',' in description:
x_code, c_code = description.replace(' ','').replace('_','').upper().split(',')
else:
x_code, c_code = description.replace(' ','').replace('_','').upper(), ''
hyb = [0]
fn_facs = []
def parse_token(token, suffix):
if token:
if '*' in token:
fac, key = token.split('*')
if fac[0].isalpha():
fac, key = key, fac
fac = float(fac)
else:
fac, key = 1, token
if key == 'HF':
hyb[0] += fac
elif key.isdigit():
fn_facs.append((int(key), fac))
else:
if key in XC_CODES:
x_id = XC_CODES[key]
elif key+suffix in XC_CODES:
x_id = XC_CODES[key+suffix]
else:
raise KeyError('Unknown key %s' % key)
if isinstance(x_id, str):
hyb1, fn_facs1 = parse_xc(x_id)
# Recursively scale the composed functional, to support '0.5*b3lyp'
hyb[0] += hyb1 * fac
fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])
elif x_id is None:
raise NotImplementedError(key)
else:
fn_facs.append((x_id, fac))
def remove_dup(fn_facs):
fn_ids = []
facs = []
n = 0
for key, val in fn_facs:
if key in fn_ids:
facs[fn_ids.index(key)] += val
else:
fn_ids.append(key)
facs.append(val)
n += 1
return list(zip(fn_ids, facs))
for token in x_code.replace('-', '+-').split('+'):
parse_token(token, 'X')
for token in c_code.replace('-', '+-').split('+'):
parse_token(token, 'C')
return hyb[0], remove_dup(fn_facs)
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
r'''Interface to call xcfun library to evaluate XC functional, potential
and functional derivatives.
See also :func:`pyscf.dft.libxc.eval_xc`
'''
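    # Minimal usage sketch (an assumption based on the libxc-compatible interface
    # referenced above, not a verified return signature):
    #   rho = numpy.random.rand(4, ngrids)              # (rho, grad_x, grad_y, grad_z) for a GGA
    #   exc, vxc, fxc, kxc = eval_xc('b88,lyp', rho, spin=0, deriv=1)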
hyb, fn_facs = parse_xc(xc_code)
return _eval_xc(fn_facs, rho, spin, relativity, deriv, verbose)
XC_D0 = 0
XC_D1 = 1
XC_D2 = 2
XC_D3 = 3
XC_D4 = 4
XC_D00 = 0
XC_D10 = 1
XC_D01 = 2
XC_D20 = 3
XC_D11 = 4
XC_D02 = 5
XC_D30 = 6
XC_D21 = 7
XC_D12 = 8
XC_D03 = 9
XC_D40 = 10
XC_D31 = 11
XC_D22 = 12
XC_D13 = 13
XC_D04 = 14
XC_D000 = 0
XC_D100 = 1
XC_D010 = 2
XC_D001 = 3
XC_D200 = 4
XC_D110 = 5
XC_D101 = 6
XC_D020 = 7
XC_D011 = 8
XC_D002 = 9
XC_D300 = 10
XC_D210 = 11
XC_D201 = 12
XC_D120 = 13
XC_D111 = 14
XC_D102 = 15
XC_D030 = 16
XC_D021 = 17
XC_D012 = 18
XC_D003 = 19
XC_D400 = 20
XC_D310 = 21
XC_D301 = 22
XC_D220 = 23
XC_D211 = 24
XC_D202 = 25
XC_D130 = 26
XC_D121 = 27
XC_D112 = 28
XC_D103 = 29
XC_D040 = 30
XC_D031 = 31
XC_D022 = 32
XC_D013 = 33
XC_D004 = 34
XC_D00000 = 0
XC_D10000 = 1
XC_D01000 = 2
XC_D00100 = 3
XC_D00010 = 4
XC_D00001 = 5
XC_D20000 = 6
XC_D11000 = 7
XC_D10100 = 8
XC_D10010 = 9
XC_D10001 = 10
XC_D02000 = 11
XC_D01100 = 12
XC_D01010 = 13
XC_D01001 = 14
XC_D00200 = 15
XC_D00110 = 16
XC_D00101 = 17
XC_D00020 = 18
XC_D00011 = 19
XC_D00002 = 20
XC_D30000 = 21
XC_D21000 = 22
XC_D20100 = 23
XC_D20010 = 24
XC_D20001 = 25
XC_D12000 = 26
XC_D11100 = 27
XC_D11010 = 28
XC_D11001 = 29
XC_D10200 = 30
XC_D10110 = 31
XC_D10101 = 32
XC_D10020 = 33
XC_D10011 = 34
XC_D10002 = 35
XC_D03000 = 36
XC_D02100 = 37
XC_D02010 = 38
XC_D02001 = 39
XC_D01200 = 40
XC_D01110 = 41
XC_D01101 = 42
XC_D01020 = 43
XC_D01011 = 44
XC_D01002 = 45
XC_D00300 = 46
XC_D00210 = 47
XC_D00201 = 48
XC_D00120 = 49
XC_D00111 = 50
XC_D00102 = 51
XC_D00030 = 52
XC_D00021 = 53
XC_D00012 = 54
XC_D00003 = 55
XC_D40000 = 56
XC_D31000 = 57
XC_D30100 = 58
XC_D30010 = 59
XC_D30001 = 60
XC_D22000 = 61
XC_D21100 = 62
XC_D21010 = 63
XC_D21001 = 64
XC_D20200 = 65
XC_D20110 = 66
XC_D20101 = 67
XC_D20020 = 68
XC_D20011 = 69
XC_D20002 = 70
XC_D13000 = 71
XC_D12100 = 72
XC_D12010 = 73
XC_D12001 = 74
XC_D11200 = 75
XC_D11110 = 76
XC_D11101 = 77
XC_D11020 = 78
XC_D11011 = 79
XC_D11002 = 80
XC_D10300 = 81
XC_D10210 = 82
XC_D10201 = 83
XC_D10120 = 84
XC_D10111 = 85
XC_D10102 = 86
XC_D10030 = 87
XC_D10021 = 88
XC_D10012 = 89
XC_D10003 = 90
XC_D04000 = 91
XC_D03100 = 92
XC_D03010 = 93
XC_D03001 = 94
XC_D02200 = 95
XC_D02110 = 96
XC_D02101 = 97
XC_D02020 = 98
XC_D02011 = 99
XC_D02002 = 100
XC_D01300 = 101
XC_D01210 = 102
XC_D01201 = 103
XC_D01120 = 104
XC_D01111 = 105
XC_D01102 = 106
XC_D01030 = 107
XC_D01021 = 108
XC_D01012 = 109
XC_D01003 = 110
XC_D00400 = 111
XC_D00310 = 112
XC_D00301 = 113
XC_D00220 = 114
XC_D00211 = 115
XC_D00202 = 116
XC_D00130 = 117
XC_D00121 = 118
XC_D00112 = 119
XC_D00103 = 120
XC_D00040 = 121
XC_D00031 = 122
XC_D00022 = 123
XC_D00013 = 124
XC_D00004 = 125
XC_D0000000 = 0
XC_D1000000 = 1
XC_D0100000 = 2
XC_D0010000 = 3
XC_D0001000 = 4
XC_D0000100 = 5
XC_D0000010 = 6
XC_D0000001 = 7
XC_D2000000 = 8
XC_D1100000 = 9
XC_D1010000 = 10
XC_D1001000 = 11
XC_D1000100 = 12
XC_D1000010 = 13
XC_D1000001 = 14
XC_D0200000 = 15
XC_D0110000 = 16
XC_D0101000 = 17
XC_D0100100 = 18
XC_D0100010 = 19
XC_D0100001 = 20
XC_D0020000 = 21
XC_D0011000 = 22
XC_D0010100 = 23
XC_D0010010 = 24
XC_D0010001 = 25
XC_D0002000 = 26
XC_D0001100 = 27
XC_D0001010 = 28
XC_D0001001 = 29
XC_D0000200 = 30
XC_D0000110 = 31
XC_D0000101 = 32
XC_D0000020 = 33
XC_D0000011 = 34
XC_D0000002 = 35
XC_D3000000 = 36
XC_D2100000 = 37
XC_D2010000 = 38
XC_D2001000 = 39
XC_D2000100 = 40
XC_D2000010 = 41
XC_D2000001 = 42
XC_D1200000 = 43
XC_D1110000 = 44
XC_D1101000 = 45
XC_D1100100 = 46
XC_D1100010 = 47
XC_D1100001 = 48
XC_D1020000 = 49
XC_D1011000 = 50
XC_D1010100 = 51
XC_D1010010 = 52
XC_D1010001 = 53
XC_D1002000 = 54
XC_D1001100 = 55
XC_D1001010 = 56
XC_D1001001 = 57
XC_D1000200 = 58
XC_D1000110 = 59
XC_D1000101 = 60
XC_D1000020 = 61
XC_D1000011 = 62
XC_D1000002 = 63
XC_D0300000 = 64
XC_D0210000 = 65
XC_D0201000 = 66
XC_D0200100 = 67
XC_D0200010 = 68
XC_D0200001 = 69
XC_D0120000 = 70
XC_D0111000 = 71
XC_D0110100 = 72
XC_D0110010 = 73
XC_D0110001 = 74
XC_D0102000 = 75
XC_D0101100 = 76
XC_D0101010 = 77
XC_D0101001 = 78
XC_D0100200 = 79
XC_D0100110 = 80
XC_D0100101 = 81
XC_D0100020 = 82
XC_D0100011 = 83
XC_D0100002 = 84
XC_D0030000 = 85
XC_D0021000 = 86
XC_D0020100 = 87
XC_D0020010 = 88
XC_D0020001 = 89
XC_D0012000 = 90
XC_D0011100 = 91
XC_D0011010 = 92
XC_D0011001 = 93
XC_D0010200 = 94
XC_D0010110 = 95
XC_D0010101 = 96
XC_D0010020 = 97
XC_D0010011 = 98
XC_D0010002 = 99
XC_D0003000 = 100
XC_D0002100 = 101
XC_D0002010 = 102
XC_D0002001 = 103
XC_D0001200 = 104
XC_D0001110 = 105
XC_D0001101 = 106
XC_D0001020 = 107
XC_D0001011 = 108
XC_D0001002 = 109
XC_D0000300 = 110
XC_D0000210 = 111
XC_D0000201 = 112
XC_D0000120 = 113
XC_D0000111 = 114
XC_D0000102 = 115
XC_D0000030 = 116
XC_D0000021 = 117
XC_D0000012 = 118
XC_D0000003 = 119
def _eval_xc(fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
assert(deriv < 4)
if spin == 0:
rho_u = rho_d = numpy.asarray(rho, order='C')
else:
rho_u = numpy.asarray(rho[0], order='C')
rho_d = numpy.asarray(rho[1], order='C')
if rho_u.ndim == 2:
ngrids = rho_u.shape[1]
else:
ngrids = len(rho_u)
fn_ids = [x[0] for x in fn_facs]
facs = [x[1] for x in fn_facs]
if all((is_lda(x) for x in fn_ids)): # LDA
if spin == 0:
nvar = 1
else:
nvar = 2
elif any((is_meta_gga(x) for x in fn_ids)):
raise RuntimeError('xcfun MGGA interface not correct')
if spin == 0:
nvar = 3
else:
nvar = 7
else: # GGA
if spin == 0:
nvar = 2
else:
nvar = 5
outlen = (math.factorial(nvar+deriv) //
(math.factorial(nvar) * math.factorial(deriv)))
    outbuf = numpy.empty((ngrids,outlen))
import skimage.feature
import skimage.transform
import skimage.filters
import scipy.interpolate
import scipy.ndimage
import scipy.spatial
import scipy.optimize
import numpy as np
import pandas
import plot
class ParticleFinder:
def __init__(self, image):
"""
Class for finding circular particles
:param image:
"""
self.image = image
self.n = 100
self.size_range = (5, 30)
self.mean = np.mean(self.image)
self.min = np.min(self.image)
self.max = np.max(self.image)
def locate_particles(self, n=100, size_range=(5, 30)):
"""
Find circular particles in the image
:param size_range:
:rtype : pandas.DataFrame
:param n:
:return:
"""
self.n = int(np.round(n))
self.size_range = size_range
# 1. Detect blobs in image
blobs = self.locate_circles()
if blobs.empty:
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# 2. Find circles
fit = pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
for i, blob in blobs.iterrows():
fit = pandas.concat([fit, self.find_circle(blob)], ignore_index=True)
return fit
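    # Illustrative usage (a sketch; `img` is assumed to be a 2-D grayscale numpy array):
    #   finder = ParticleFinder(img)
    #   particles = finder.locate_particles(n=50, size_range=(5, 30))
    #   particles[['x', 'y', 'r']]   # fitted centres and radii in pixels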
def locate_circles(self):
"""
Locate blobs in the image by using a Laplacian of Gaussian method
:rtype : pandas.DataFrame
:return:
"""
radii = np.linspace(self.size_range[0], self.size_range[1],
num=min(abs(self.size_range[0] - self.size_range[1]) * 2.0, 30), dtype=np.float)
# Find edges
edges = skimage.feature.canny(self.image)
circles = skimage.transform.hough_circle(edges, radii)
fit = pandas.DataFrame(columns=['r', 'y', 'x', 'accum'])
for radius, h in zip(radii, circles):
peaks = skimage.feature.peak_local_max(h, threshold_rel=0.5, num_peaks=self.n)
accumulator = h[peaks[:, 0], peaks[:, 1]]
fit = pandas.concat(
[fit, pandas.DataFrame(data={'r': [radius] * peaks.shape[0], 'y': peaks[:, 0], 'x': peaks[:, 1],
'accum': accumulator})], ignore_index=True)
fit = self.merge_hough_same_values(fit)
return fit
@staticmethod
def flatten_multi_columns(col):
"""
:param col:
:param sep:
:return:
"""
if not type(col) is tuple:
return col
else:
return col[0]
def merge_hough_same_values(self, data):
"""
:param data:
:return:
"""
while True:
# Rescale positions, so that pairs are identified below a distance
# of 1. Do so every iteration (room for improvement?)
positions = data[['x', 'y']].values
mass = data['accum'].values
duplicates = scipy.spatial.cKDTree(positions, 30).query_pairs(np.mean(data['r']), p=2.0, eps=0.1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
# Drop the dimmer one.
if np.equal(*mass.take(pair, 0)):
# Rare corner case: a tie!
# Break ties by sorting by sum of coordinates, to avoid
# any randomness resulting from cKDTree returning a set.
dimmer = np.argsort(np.sum(positions.take(pair, 0), 1))[0]
else:
dimmer = np.argmin(mass.take(pair, 0))
to_drop.append(pair[dimmer])
data.drop(to_drop, inplace=True)
# Keep only brightest n circles
data = data.sort_values(by=['accum'], ascending=False)
data = data.head(self.n)
return data
def find_circle(self, blob):
"""
Find a circle based on the blob
:rtype : pandas.DataFrame
:param blob:
:return:
"""
# Get intensity in spline representation
rad_range = (-blob.r, blob.r)
intensity, (x, y, step_x, step_y) = self.get_intensity_interpolation(blob, rad_range)
if not self.check_intensity_interpolation(intensity):
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# Find the coordinates of the edge
edge_coords = self.find_edge(intensity)
if np.isnan(edge_coords.x).any():
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# Set outliers to mean of rest of x coords
edge_coords = self.remove_outliers(edge_coords)
# Convert to cartesian
coords = self.spline_coords_to_cartesian(edge_coords, rad_range, x, y, step_x, step_y)
# Fit the circle
fit = self.fit_circle(coords)
return fit
def get_intensity_interpolation(self, blob, rad_range):
"""
Create a spline representation of the intensity
:param r:
:param xc:
:param yc:
:param n:
:param rad_range:
:param spline_order:
:return:
"""
n = int(np.round(2 * np.pi * np.sqrt(blob.r ** 2)))
spline_order = 3
t = np.linspace(-np.pi, np.pi, n, endpoint=False)
normal_angle = np.arctan2(blob.r * np.sin(t), blob.r * np.cos(t))
x = blob.r * np.cos(t) + blob.x
        y = blob.r * np.sin(t) + blob.y
from __future__ import division
import torch
import torch.nn.functional as F
from utils import setup_logger
from model import agentNET
from torch.autograd import Variable
from env import *
import numpy as np
import time
import random
S_INFO = 6 # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end
S_LEN = 8 # take how many frames in the past
A_DIM = 6
NUM_AGENTS = 1
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
HD_REWARD = [1, 2, 3, 12, 15, 20]
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
M_IN_K = 1000.0
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps
SMOOTH_PENALTY = 1
DEFAULT_QUALITY = 0 # default video quality without agent
TEST_INTERVAL = 70
def test(args, shared_model, all_cooked_time, all_cooked_bw):
logger = setup_logger("test_log", "./logs/test_log")
torch.manual_seed(args.seed)
env = Environment(all_cooked_time=all_cooked_time,
all_cooked_bw=all_cooked_bw,
random_seed=50
)
model = agentNET()
model.eval()
test_time = 0
reward_num = 0
max_reward = 0
time_stamp = 0
last_bit_rate = DEFAULT_QUALITY
bit_rate = DEFAULT_QUALITY
while True:
model.load_state_dict(shared_model.state_dict())
if args.gpu:
model = model.cuda()
cx = Variable(torch.zeros(1, 96).cuda())
hx = Variable(torch.zeros(1, 96).cuda())
else:
cx = Variable(torch.zeros(1, 96))
hx = Variable(torch.zeros(1, 96))
state = np.zeros([S_INFO, S_LEN])
for i in range(S_LEN):
            # do a default action
bit_rate = random.randint(0, 5)
delay, sleep_time, buffer_size, rebuf, \
video_chunk_size, next_video_chunk_sizes, \
end_of_video, video_chunk_remain = \
env.get_video_chunk(bit_rate)
time_stamp += delay # in ms
time_stamp += sleep_time # in ms
# get new state
            state[0][i] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE))
#Core Imports for experiments
import shap
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
import random
import itertools
from statistics import mean
from sklearn.datasets import make_blobs
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
from operator import truediv
from collections import defaultdict
import time
#Imports for saving exp files
from datetime import datetime
import json
import pickle
#Imports to parallelize experiment iterations
from multiprocessing import cpu_count
from multiprocessing import Pool
#DIFFI Imports
import utils
import sklearn_mod_functions
import interpretability_module as interp
#Suppress deprecation warnings
import warnings
warnings.filterwarnings("ignore")
#Local DIFFI
def generate_diffi_explanations(model, dataset):
"""
Returns a list of Local DIFFI explanation vectors.
Also, returns lists of ordering indices (for plotting) and execution time
per instance, for the passed dataset.
Parameters
----------
model : Isolation Forest
The fit Isolation Forest instance
dataset : numpy array
The instances that need to be explained
"""
#Produce the explanation vectors as per official DIFFI function
diffi_l, ord_idx_l, exec_time_l = utils.local_diffi_batch(model, dataset)
return diffi_l, ord_idx_l, exec_time_l
#SHAP
def generate_shap_explanations(model, dataset):
"""
Returns a list of SHAP explanation vectors.
Also, returns lists of ordering indices (for plotting) and execution time
per instance, for the passed dataset.
Parameters
----------
model : Isolation Forest
The fit Isolation Forest instance
dataset : numpy array
The instances that need to be explained
"""
##Initialize the result lists
shap_values_l = []
ord_idx_l= []
exec_time_l = []
#Iterate over each passed instance
for i, data in enumerate(dataset):
#Start tracking execution time
start = time.time()
#Generate the SHAP explanation for an instance
explainer = shap.TreeExplainer(model, feature_perturbation='tree_path_dependent')
shap_values = explainer.shap_values(data)
#Stop tracking execution time
end = time.time()
exec_time = end - start
#Append the results to the results list
shap_values_l.append(shap_values)
ord_idx_l.append(np.argsort(shap_values)[::-1])
exec_time_l.append(exec_time)
return shap_values_l, ord_idx_l, exec_time_l
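#Illustrative usage of the two explainers (a sketch; `X` and `anomalies` are
#assumed to be 2-D numpy arrays of training data and instances to explain):
#   iforest = IsolationForest(n_estimators=100).fit(X)
#   diffi_vals, diffi_order, diffi_times = generate_diffi_explanations(iforest, anomalies)
#   shap_vals, shap_order, shap_times = generate_shap_explanations(iforest, anomalies)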
"""
Experiment Functions
As per the methodology outlined in the overview, this section houses the relevant functions to:
1. Generate normal datasets.
2. Convert randomly chosen normal instances from the dataset to anomalies.
3. Compute and normalize the difference between expected and actual explanation vectors.
4. Generate random explanations as a baseline
5. Generate ground truth for explanations of selected anomalies
6. Evaluate RMSE loss between actual and expected explanation vectors.
7. Run the experiments.
8. Save the results.
9. Plot the results.
"""
#Generating the normal dataset
def generate_normal_points(points=1000, dimensionality=2, clusters=1, max=20, random_state=42):
"""
Returns a dataset of normal instances.
Normal instances are grouped into a certain number of clusters.
Parameters
----------
points : int
The number of normal instances to be produced
dimensionality : int
The number of features of the dataset
clusters : int
The number of clusters normal instances are grouped into
max : int
A variable that controls range of values from which samples are drawn
random_state: int
Setting the random seed for the sampling process
"""
X, _ = make_blobs(n_samples=points, n_features=dimensionality,
centers=clusters, random_state=random_state,
center_box=(0, max))
return X
#Anomalising a passed dataset
def anomaliser(dataset, indices, n=1, r_mode='t'):
"""
Converts the passed normal dataset into an anomalised dataset.
For the passed instances, certain attributes are randomly but systematically
picked and their values along these attributes are set to be anomalous.
Parameters
----------
dataset: numpy array
The normal dataset
indices: numpy vector
The indices of the normal instances, picked to be anomalised
n : int
The features of the normal dataset
r_mode : str
(Ignore) A flag used to control the behaviour of the anomaliser
"""
#For each index, randomly pick a feature OR a combination of n features
N = range(0, n)
if r_mode == 't':
setting_l = random.choices(N, k=len(indices))
else:
setting_l = [n-1] * len(indices)
#Generate a list of list of attributes, where each inner list contains
#the attributes along which the instances will be anomalised across
picked_l = [random.sample(range(dataset.shape[1]), setting+1) for setting in setting_l]
#Make the instances anomalous across those feature(s). We use noise in the
#multiplicand to ensure that the anomalous points do not have same values
ano_rand_points = np.array([[np.random.normal(3, 0.3) * np.amax(dataset[:,el]) if el in picked
else dataset[index, el] for el in range(dataset.shape[1])]
for (index, picked) in zip(indices, picked_l)])
#Create a copy of the dataset and save the points
new_dataset=np.copy(dataset)
new_dataset[indices]=ano_rand_points
return new_dataset, setting_l, picked_l
#Generating the normalised difference vector
def generate_normed_diff(m1, m2):
"""
Returns the normalised row-wise difference of 2 matrix.
Parameters
----------
m1 : numpy matrix
The first matrix
m2 : numpy matrix
The second matrix, which is to be subtracted from the first matrix
"""
#Calculate the difference between the 2 matrices
diff = m2 - m1
#We make negative elements in the difference matrix to be positive.
#This is to make normalization possible as otherwise, all positive
#elements will be set to 1 and all negative elements will be set to 0.
pos_diff = np.abs(diff)
#Normalize the difference matrix
row_sum = np.sum(pos_diff, axis=1)
norm_diff = pos_diff / row_sum[:, np.newaxis]
return diff, pos_diff, norm_diff
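#Worked example (hypothetical values): for m1 = [[0.2, 0.8]] and m2 = [[0.5, 0.5]],
#diff = [[0.3, -0.3]], pos_diff = [[0.3, 0.3]] and norm_diff = [[0.5, 0.5]].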
#Model a random explainer
def random_explainer(rand_indices, n_features):
"""
Returns a matrix of row-normalized random vectors.
Parameters
----------
rand_indices: numpy vector
The indices of the normal instances, picked to be anomalised
n_features: int
The number of features of the instances
"""
#Sample from a uniform distribution to get values between 0 and 1
uniform = np.random.uniform(0, 1, size=(len(rand_indices), n_features))
#Normalise them to get a probability distribution
random_explanations = uniform/np.sum(uniform, axis=1)[:, np.newaxis]
return random_explanations
#Generate ground truth
def generate_ground_truth(rand_indices, picked_feature_l, n_features):
"""
Generate ground truth from the list of picked features.
If only 1 feature is picked, that receives a value of 1, rest 0
If 2 features are picked, both receive a value of 0.5, rest 0. So on.
Parameters
----------
rand_indices : numpy vector
The indices of the normal instances, picked to be anomalised
picked_feature_l: list
The list of picked (list of) attributes for each picked normal instance
n_features: int
The number of features of the instances
"""
ground_truth = np.zeros(shape=(len(rand_indices), n_features))
for i, picked in enumerate(picked_feature_l):
ground_truth[i, picked] = 1/len(picked)
return ground_truth
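#Worked example (hypothetical values): with n_features = 3 and
#picked_feature_l = [[0], [1, 2]], the ground truth rows are
#[1, 0, 0] and [0, 0.5, 0.5].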
#Evaluate loss
def evaluate_loss(model_outputs, ground_truth, n_ano_points):
"""
Return the RMSE loss between Isolation Forest outputs and ground truth.
Parameters
----------
model_outputs: numpy matrix
The anomaly scores for those anomalous instances by the fit IF model
ground_truth: numpy matrix
The generated ground truth
n_ano_points: int
The number of normal instances, picked to be anomalised
"""
#Initialize the result list
rmse_l = []
#Iterate over each instance and compare with ground truth
for (output, label) in zip(model_outputs, ground_truth):
#Calculate RMSE loss for the instance
rmse = np.linalg.norm(label - output)
rmse_l.append(rmse)
#Append the results to the results list
total_rmse_loss = np.sum(rmse_l)/np.sqrt(n_ano_points)
return total_rmse_loss
#Main experiment function per iteration
def run_experiment_single(iter_count, exp_string, n_points=None, n_features=None, n=None,
data=None, n_est=100, contam_rate=0.1, r_mode='t', max_samples=None):
"""
Runs an experiment for a single instance.
We are interested in running experiments that study the influence of
increasing dataset size, dimensionality and number of features. Additionally,
we also want to study how the AWS method fares to SHAP and DIFFI fordifferent feature subsets in real
datasets mentioned in the DIFFI paper.
Parameters
----------
iter_count : int
The iteration count for printing purposes.
exp_string : str
The codified flag that communicates the experiment we are executing.
Codebook:
syn1 - Experiment to study the effect of increasing dataset size
syn2 - Experiment to study the effect of increasing dimensionality
syn3 - Experiment to study the effect of increasing anomalised features
real1 & real2 - Experiment to study real-world datasets
n_points : int, bool
The size of the real world dataset. None for synthetic.
n_features : int, bool
The number of features of the real world dataset. None for synthetic.
n : int, bool
The number of features that can be picked to anomalise. None for synthetic.
data : Pandas dataframe
The real world dataset. None for synthetic.
n_est : int
The number of Isolation Trees in the Forest
contam_rate: float
The proportion of instances that are anomalous
r_mode : str
(Ignore) A flag used to control the behaviour of the anomaliser
max_samples: int
The bootstrap sample size for each Isolation Tree of the Forest
"""
#Initialize the result dictionary
single_result_dict = {'index': iter_count}
#Use iter_count to initialize subprocess RNG instance
rng = np.random.RandomState(iter_count)
#Print status updates
if exp_string == 'syn1':
print(f"[INFO] Iteration {iter_count+1}: Dataset Size - {n_points}...")
elif exp_string == 'syn2':
print(f"[INFO] Iteration {iter_count+1}: Dataset Dimensionality - {n_features} features...")
elif exp_string == 'syn3':
print(f"[INFO] Iteration {iter_count+1}: Upto {n} features anomalised for dataset of {n_points:,} samples with {n_features} features...")
elif exp_string[:5] == 'real1':
print(f"[INFO] Iteration {iter_count+1}: {exp_string[6:]} dataset ({data.shape[0]} samples and {data.shape[1]} features)...")
else:
print(f"[INFO] Iteration {iter_count+1}: Upto {n} features anomalised for {exp_string[6:]} dataset ({data.shape[0]} samples and {data.shape[1]} features)...")
#Generate synthetic dataset for synthetic experiments
    if data is None:
n_clusters= n_features // 2
max_box = 20 * (n_clusters//4 + 1)
data = generate_normal_points(points=n_points, dimensionality=n_features,
clusters=n_clusters, max=max_box, random_state=rng)
single_result_dict['orig_dataset'] = data
max_samples = min(256, data.shape[0])
n = data.shape[1]
data = pd.DataFrame(data)
else:
single_result_dict['orig_dataset'] = []
#Pick n_ano_points instances at random from the dataset
n_ano_points = int(np.ceil(contam_rate * data.shape[0]))
rand_indices = rng.randint(low=0, high=int(data.shape[0]), size=n_ano_points)
single_result_dict['rand_indices'] = rand_indices
#Fit the model to the dataset
clf_orig = IsolationForest(n_estimators=n_est, max_samples=max_samples, random_state=rng)
clf_orig.fit(data.values)
#Generate original explanation matrices
orig_aws_l, _, orig_aws_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices])
orig_aws_clem_l, _, orig_aws_clem_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices], mode='clement')
orig_aws_dif_l, _, orig_aws_dif_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices], mode='diffi')
orig_shap_l, _, orig_shap_exec_time_l = generate_shap_explanations(clf_orig, data.values[rand_indices])
orig_diffi_l, _, orig_diffi_exec_time_l = generate_diffi_explanations(clf_orig, data.values[rand_indices])
#Convert the list to numpy array
    orig_aws_exp_matrix = np.array(orig_aws_l)
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
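#Illustrative usage (placeholder value): hard-soft boundary period for a cluster
#with a 1 km/s velocity dispersion and the default component masses:
#   Phs = getPhs(1.0*units.km/units.s)  #astropy Quantity in days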
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
c2 = '#A62B1F' #Dai Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
                fb *= RagNormal(np.log10(Phs), cdf=True)  # cumulative fraction of binaries below the hard-soft period (cdf assumed)
import time
import sys
import json
import argparse
from tqdm import trange
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import numpy as np
from scipy.spatial.distance import jensenshannon
import gym
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
import pandemic_simulator as ps
from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType
from pandemic_simulator.environment.interfaces import InfectionSummary
from pandemic_simulator.viz import PandemicViz
from pandemic_simulator.environment import PandemicSimOpts
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
def hellinger(p, q):
# distance between p and q
# p and q are np array probability distributions
return (1.0 / np.sqrt(2.0)) * np.sqrt(np.sum(np.square(np.sqrt(p) - np.sqrt(q)), axis=1))
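# Quick sanity check (illustrative): identical distributions give distance 0,
# fully disjoint ones give 1:
#   hellinger(np.array([[1., 0.]]), np.array([[1., 0.]]))   # -> array([0.])
#   hellinger(np.array([[1., 0.]]), np.array([[0., 1.]]))   # -> array([1.])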
def evaluate_policy(
name: str,
model: "base_class.BaseAlgorithm",
base_model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 32,
deterministic: bool = True,
render: bool = False,
viz: Optional[PandemicViz] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
If a vector env is passed in, this divides the episodes to evaluate onto the
different elements of the vector env. This static division of work is done to
remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
details and discussion.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` calls. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before anything else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment or ``VecEnv`` environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
    :param base_model: The reference policy used for the KL/JS/Hellinger comparisons.
    :param viz: Optional PandemicViz instance used to record per-step summaries.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:return: Mean reward per episode, std of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
if not isinstance(env, VecEnv):
env = DummyVecEnv([lambda: env])
episode_rewards = []
reward_std = []
episode_true_rewards = []
true_reward_std = []
episode_true_rewards2 = []
true_reward_std2 = []
vfs = []
log_probs = []
ents = []
base_vfs = []
base_log_probs = []
base_ents = []
kls = []
js = []
h = []
numpy_obs = env.reset()
states = None
for t in range(200):
actions, states = model.predict(numpy_obs, state=states, deterministic=True)
vf, logp, ent = model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
base_vf, base_logp, base_ent = base_model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
vfs.append(torch.mean(vf).detach().item())
log_probs.append(torch.mean(logp).detach().item())
ents.append(torch.mean(ent).detach().item())
base_vfs.append(torch.mean(base_vf).detach().item())
base_log_probs.append(torch.mean(base_logp).detach().item())
base_ents.append(torch.mean(base_ent).detach().item())
# Distances
log_ratio = logp - base_logp
# Estimator of KL from http://joschu.net/blog/kl-approx.html
kls.append(torch.mean(torch.exp(log_ratio) - 1 - log_ratio).item())
latent_pi, _, latent_sde = model.policy._get_latent(torch.as_tensor(numpy_obs))
model_dist = model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
latent_pi, _, latent_sde = base_model.policy._get_latent(torch.as_tensor(numpy_obs))
base_dist = base_model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
js.append(np.mean(jensenshannon(model_dist, base_dist, axis=1)).item())
h.append(np.mean(hellinger(model_dist, base_dist)).item())
numpy_obs, _, done, info = env.step(actions)
rew = env.get_attr("last_reward")
true_rew = env.get_attr("get_true_reward")
true_rew2 = env.get_attr("get_true_reward2")
episode_rewards.append(np.mean(rew))
reward_std.append(rew)
episode_true_rewards.append(np.mean(true_rew))
true_reward_std.append(true_rew)
episode_true_rewards2.append(np.mean(true_rew2))
true_reward_std2.append(true_rew2)
obs = env.get_attr("observation")
infection_data = np.zeros((1, 5))
threshold_data = np.zeros(len(obs))
for o in obs:
infection_data += o.global_infection_summary[-1]
gis = np.array([o.global_infection_summary[-1] for o in obs]).squeeze(1)
gts = np.array([o.global_testing_summary[-1] for o in obs]).squeeze(1)
stage = np.array([o.stage[-1].item() for o in obs])
if viz:
viz.record_list(obs[0], gis, gts, stage, rew, true_rew, true_rew2=true_rew2)
reward = np.sum(episode_rewards).item()
true_reward = np.sum(episode_true_rewards).item()
    true_reward2 = np.sum(episode_true_rewards2).item()
from ..meshio import form_mesh
import numpy as np
import logging
def merge_meshes(input_meshes):
""" Merge multiple meshes into a single mesh.
Args:
input_meshes (``list``): a list of input :class:`Mesh` objects.
Returns:
A :py:class:`Mesh` consists of all vertices, faces and voxels
from ``input_meshes``. The following mesh attributes are defined:
* ``vertex_sources``: Indices of source vertices from the input mesh.
* ``face_sources``: Indices of source faces from the input mesh if the
output contains at least 1 face.
* ``voxel_sources``: Indices of source voxels from the input mesh if the
output contains at least 1 voxel.
"""
logger = logging.getLogger(__name__)
vertices = []
faces = []
voxels = []
vertex_count = 0
vertex_sources = []
face_sources = []
voxel_sources = []
for i,mesh in enumerate(input_meshes):
vertices.append(mesh.vertices)
vertex_sources.append(np.ones(mesh.num_vertices) * i)
if mesh.num_faces > 0:
faces.append(mesh.faces + vertex_count)
face_sources.append(np.ones(mesh.num_faces) * i)
if mesh.num_voxels > 0:
voxels.append(mesh.voxels + vertex_count)
voxel_sources.append(np.ones(mesh.num_voxels) * i)
vertex_count += mesh.num_vertices
if len(vertices) > 0:
vertices = np.vstack(vertices)
vertex_sources = np.concatenate(vertex_sources)
else:
vertices = np.zeros((0, 3), dtype=float)
vertex_sources = np.array([])
if len(faces) > 0:
faces = np.vstack(faces)
face_sources = np.concatenate(face_sources)
else:
faces = np.zeros((0, 3), dtype=int)
face_sources = np.array([])
if len(voxels) > 0 and len(voxels) == len(input_meshes):
        voxels = np.vstack(voxels)
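        # Sketch of the remainder of this routine (truncated here). It mirrors the
        # `faces` branch above and attaches the attributes promised in the docstring;
        # it assumes form_mesh(vertices, faces, voxels), the add_attribute/set_attribute
        # mesh API, and tetrahedral (4-node) voxels:
        #     voxel_sources = np.concatenate(voxel_sources)
        # else:
        #     voxels = np.zeros((0, 4), dtype=int)
        #     voxel_sources = np.array([])
        # output_mesh = form_mesh(vertices, faces, voxels)
        # output_mesh.add_attribute("vertex_sources")
        # output_mesh.set_attribute("vertex_sources", vertex_sources)
        # ... (face_sources and voxel_sources attached the same way)
        # return output_mesh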
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
======================================
Clustering by rotation of eigenvectors
======================================
cluster by rotating eigenvectors to align with the canonical coordinate system
usage:
nc = cluster_rotate(evecs, evals, group_num, method, verbose)
Input:
- evecs = array of eigenvectors
- evals = eigenvalues associated with eigenvectors
- group_num - an array of group numbers to test it is assumed to be a continuous set
- method - 1 gradient descent
2 approximate gradient descent
- verbose
Output:
- the highest number with best alignment quality
"""
# Code source by <NAME> (2018) adapted for Python from <NAME> (2005) (Matlab)
# License: ISC
import numpy as np
import os, sys
def evqual(X, ik, jk, dim, ndata, verbose):  # ik, jk are unused here but kept so the call sites below match
    #compute alignment quality
max_values = np.amax(abs(X), axis=1)
if verbose: print('Found max of each row')
#compute cost
if verbose: print('dim:', dim, 'ndata:', ndata)
J=0
for i in range(ndata):
R = X[i,:] / max_values[i]
J += np.sum(R**2)
J = 1 - (J/ ndata - 1) / dim
if np.isnan(J):
print('J is nan number')
sys.exit()
if verbose: print('computed quality:', J)
return J
def evqualitygrad(X, theta, ik, jk, angle_num, angle_index, dim, ndata, verbose):
#compute gradient quality
V = gradU(theta, angle_index, ik, jk, dim)
U1 = build_Uab(theta, 0, angle_index-1, ik, jk, dim)
U2 = build_Uab(theta, angle_index+1, angle_num-1, ik, jk, dim)
A = buildA(X, U1, V, U2)
#get rid of no longer needed arrays
del V, U1, U2
#rotate vecs according to current angles
Y = rotate_givens(X, theta, ik, jk, angle_num, dim)
#find max of each row
max_values = np.amax(abs(Y), axis=1)
max_index = np.argmax(abs(Y), axis=1)
if verbose: print('Found max of each row')
#compute cost # Yij = Zij et Aij = Aij with mi = max_index[i]
dJ = 0
for j in range(dim): # loop over all columns
for i in range(ndata): #loop over all rows
tmp1 = A[i,j]*Y[i,j] / (max_values[i] * max_values[i])
tmp2 = A[i, max_index[i]]*Y[i,j]*Y[i,j] / (max_values[i] * max_values[i] * max_values[i])
dJ += 2*(tmp1 - tmp2)
dJ = - dJ / (ndata *dim) # on normalize
if verbose: print('Computed gradient:', dJ)
del Y, A, max_values, max_index
return dJ
def cluster_assign(X, ik, jk, dim, ndata):
#take the square of all entries and find max of each row
max_values = np.zeros(shape=[ndata], dtype=float)
max_index = np.zeros(shape=[ndata], dtype=int)
cluster_count = np.zeros(shape=[dim], dtype=int)
for j in range(dim): #loop over columns
for i in range(ndata): #loop over rows
if j == 0:
max_index[i] = -1
if max_values[i] <= X[i,j]*X[i,j]:
if max_index[i] >= 0:
cluster_count[max_index[i]] -= 1
cluster_count[j] += 1
max_values[i] = X[i,j] * X[i,j]
max_index[i] = j
#allocate memory for cluster assignements
cluster_cell_array = np.empty(shape=[dim], dtype=object)
for j in range(dim): #loop over all columns
cluster = np.empty(shape=[cluster_count[j]], dtype=float)
cind = 0
for i in range(ndata): # loop over all rows
if max_index[i] == j:
cluster[cind] = i+1
cind += 1
cluster_cell_array[j] = cluster
del max_values, max_index, cluster_count
return cluster_cell_array
def gradU(theta, k, ik, jk, dim):
#compute V as the Gradient of a single Givens rotation
V = np.zeros(shape=[dim, dim], dtype=float)
V[ik[k], ik[k]] = -np.sin(theta[k])
V[ik[k], jk[k]] = -np.cos(theta[k])
V[jk[k], ik[k]] = np.cos(theta[k])
V[jk[k], jk[k]] = -np.sin(theta[k])
return V
# Givens rotation for angles a to b
def build_Uab(theta, a, b, ik, jk, dim):
if not (type(a) is int) & (type(b) is int):
print('Angles are not integers')
sys.exit()
#set Uab to be an identity matrix
Uab = np.identity(dim, dtype=float)
if b < a:
return Uab
else:
for k in range(a,b+1):
#tt = theta[k]
c = np.cos(theta[k])
s = np.sin(theta[k])
for i in range(dim):
                # compute both rotated entries from the original values before overwriting them
                u_ik = Uab[ik[k],i] * c - Uab[jk[k],i] * s
                u_jk = Uab[ik[k],i] * s + Uab[jk[k],i] * c
                Uab[ik[k],i] = u_ik
                Uab[jk[k],i] = u_jk
return Uab
def buildA(X, U1, Vk, U2): # A(k) = X U(1,k-1) V(k) U(k+1,K) indexes correspond to angles.
A1 = np.dot(Vk, U2)
A2 = np.dot(U1, A1)
A = np.dot(X, A2)
del A1, A2
return A
def rotate_givens(X, theta, ik, jk, angle_num, dim):
#Rotate vectors in X with Givens rotation according to angles in theta
G = build_Uab(theta, 0, angle_num -1, ik, jk, dim)
#print(G)
Y = np.dot(X,G)
del G
return Y
def evrot(evecs, method, verbose):
#get the number and length of eigenvectors dimensions
ndata, dim = evecs.shape
if verbose: print('Got {0} vectors of length {1}'.format(dim, ndata))
#get the number of angles
angle_num = int(dim* (dim -1) /2) #K
angle_step = np.pi/angle_num
if verbose: print('Angle number is:', angle_num)
#print(angle_step)
#build index mapping
ik = np.empty(shape=[angle_num], dtype=int)
jk = np.empty(shape=[angle_num], dtype=int)
#print('shapes:', theta.shape, ik.shape)
k=0
for i in range(dim):
for j in range(i+1,dim):
ik[k] = i
jk[k] = j
k += 1
theta = np.random.uniform(-np.pi/2, np.pi/2-0.001, size=angle_num)
if verbose: print('Built index mapping for {0} angles'.format(k))
#definitions
max_it = 20
#evaluate intial quality
Q = evqual(evecs,ik,jk,dim,ndata, verbose)
Q_old1 = Q
Q_old2 = Q
it = 0
while it < max_it: #iterate to refine quality
it += 1
for d in range(angle_num):
td = theta[d]
if verbose: print('----------------------d=', d, it)
if method == 1: # descend through true derivative
alpha = 1
dQ = evqualitygrad(evecs, theta, ik, jk, angle_num, d, dim, ndata, verbose)
theta_new = np.array([td - alpha * dQ if k == d else t for k,t in enumerate(theta) ])
if theta_new[d]-theta[d] == 0:
print('(it, d)', it, d, theta_new[d]-theta[d])
sys.exit()
evecsRot = rotate_givens(evecs, theta_new, ik, jk, angle_num, dim)
Q_new = evqual(evecsRot, ik, jk, dim, ndata, verbose)
#print(Q_new)
if Q_new > Q:
#we need to maximize quality (minimize cost function). Then if running k improves quality we keep the changes else, we do not change anything.
theta = np.array([td - alpha * dQ if k == d else t for k,t in enumerate(theta) ])
Q = Q_new
else:
theta_new = np.array([td if k ==d else t for k,t in enumerate(theta_new)])
del evecsRot
elif method == 2:
alpha = 0.1
#move up
theta_new = np.array([(td + alpha) if k == d else t for k,t in enumerate(theta) ])
evecsRot = rotate_givens(evecs, theta_new, ik, jk, angle_num, dim)
Q_up = evqual(evecsRot, ik, jk, dim, ndata, verbose)
del evecsRot
#move down
theta_new = np.array([(td - alpha) if k == d else t for k,t in enumerate(theta) ])
evecsRot = rotate_givens(evecs, theta_new, ik, jk, angle_num, dim)
Q_down = evqual(evecsRot, ik, jk, dim, ndata, verbose)
#update only if at least one of them is better
if (Q_up > Q) | (Q_down > Q):
if Q_up > Q_down:
theta = np.array([(td + alpha) if k == d else t for k,t in enumerate(theta) ])
Q = Q_up
else:
theta = np.array([(td - alpha) if k == d else t for k,t in enumerate(theta) ])
Q = Q_down
theta_new = np.array([td if k == d else t for k,t in enumerate(theta_new) ])
else:
theta_new = np.array([t for t in theta])
del evecsRot
#stopping criteria
if it > 2:
            if Q - Q_old2 < 0.01: #stop once the quality gain over the last two iterations is negligible and keep the current assignment
break
Q_old2 = Q_old1
Q_old1 = Q
if verbose: print('Done after {0} iterations, quality Q={1}'.format(it,Q))
evecsRot = rotate_givens(evecs, theta_new, ik, jk, angle_num, dim)
clusts = cluster_assign(evecsRot, ik, jk, dim, ndata)
return Q, clusts, evecsRot
def cluster_rotate(evecs, evals, group_num, method, verbose):
group_num = sorted(group_num)
#find
group_num2 = [k for k in group_num if k!= 1]
if verbose: print('group_num:', group_num)
ndata, dim = evecs.shape #dim will correspond to the number of clusters
mag = np.zeros(shape=[dim], dtype=float)
for i in range(dim):
mag[i] = np.linalg.norm(evecs[:,i])
    ind_mag = np.argsort(mag)
import networkx as nx
import numpy as np
import pandas as pd
import itertools
from functools import reduce
import operator as op
import string
# Returns product of all the element in a list/tuple
def Prod(v):
return reduce(op.mul, v, 1)
# Transposes a list of lists
def TransposeLists(l):
return [list(x) for x in zip(*l)]
# Converts negative axis to positive
def AdjustAxis(tsr, axis):
if axis < 0:
axis = len(tsr) + axis
assert axis >=0 and axis < len(tsr)
return axis
# A tensor object is a list of tuples containing the dimensions of the tensor.
# The flag 'is_input' is set for input tensors.
class Tensor(tuple):
def SetAsInput(self):
self.is_input = True
# Function to create an input tensor
def InputTensor(x):
t = Tensor(x)
t.SetAsInput()
return t
# Normalizes the cost of communicating 'words' no. of words in terms of the
# computation cost.
# 'WordsToFlops.bw_to_flops' is the ratio of an architecture's peak flop to peak
# inter-network bandwidth. It specifies the amount of computation that can be
# performed in the same time a word of data is transferred.
# We multiple 'words' with this parameter to obtain flops count equivalent to
# communication 'words' amount of data.
def WordsToFlops(words, flops=None, bw=None):
try:
return WordsToFlops.bw_to_flops * words
except AttributeError:
assert (flops is not None) and (bw is not None)
peak_flop = flops * 1000.0 # GFLOPS
bw = bw / 8.0 # GWords/sec
WordsToFlops.bw_to_flops = float(peak_flop / bw)
return WordsToFlops.bw_to_flops * words
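# Illustrative usage (units inferred from the comments above: 'flops' in TFLOPS,
# 'bw' in GB/s). The first call caches the flop-to-bandwidth ratio; later calls
# simply scale a word count by it.
#   WordsToFlops(1.0, flops=10.0, bw=100.0)   # initialise and convert one word
#   WordsToFlops(1024.0)                      # reuses the cached ratio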
# Returns a list of all factors of a number 'n'
def factors(n):
assert(n > 0)
return set(reduce(list.__add__, ([i, n//i] for i in range(
1, int(n**0.5) + 1) if n % i == 0)))
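# Example: factors(12) returns {1, 2, 3, 4, 6, 12}.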
# Converts 'v' into a tuple (v, v) if 'v' is a scalar
def MakePair(v):
if hasattr(v, "__len__"):
assert len(v) == 2
return v
else:
return (v, v)
# Generates a list of configurations for an operation.
# We can set a 'cutoff' to ignore configurations that end up splitting the 'dom'
# too small.
def GetConfigs(dom, n_procs, cutoff):
dim = len(dom)
proc_set = []
for d in dom:
s = factors(d)
l = [e for e in s if d/e >= cutoff]
if len(l) <= 0:
l = [1]
proc_set.append(l)
configs = [c for c in itertools.product(*proc_set) if Prod(c) <= n_procs]
return configs
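# Example: GetConfigs((4, 4), 4, 1) yields, in some order, the configurations
# (1,1), (1,2), (1,4), (2,1), (2,2) and (4,1) -- every per-axis factorization
# whose total processor count stays within 4.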
# Cost of performing all-reduce communication of 'words' words among 'procs'
# processors
def GetAllReduceCost(words, procs):
# All-reduce cost = 2*((n*k)/P)*(P-1)
chunks = words / procs # The elements are split into 'procs' chunks
steps = 2.0 * (procs - 1)
return WordsToFlops(chunks * steps) # When procs = 1, the cost is 0
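# Worked example: all-reducing 1e6 words over 4 processors moves
# (1e6 / 4) * 2 * (4 - 1) = 1.5e6 words per processor, which is then
# converted to a flop-equivalent cost by WordsToFlops.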
# 'dom_per_proc' is a list of configurations that specify different ways to
# parallelize a GEMM computation. This function calculates the computation cost
# (per processor) for each of these configurations.
def GetGemmCompCosts(dom_per_proc, pw_op_cnt):
m_idx, n_idx, k_idx = 0, 1, 2
# 1 GEMM in fwd phase, 2 GEMMs in bwd phase
costs = np.prod(dom_per_proc, axis=1) * 3.0
# For pointwise ops, we have 1 fwd differentiation, 1 bwd differentiation,
# and 1 hadamard product (from chain rule) per pw_op
if pw_op_cnt > 0:
costs += ((3 * pw_op_cnt) * np.prod(dom_per_proc[:, m_idx:n_idx+1],
axis=1))
return costs
# 'dom_per_proc' is a list of configurations that specify different ways to
# parallelize a GEMM computation. This function calculates the intra-layer
# communication cost for each of these configurations.
def GetGemmCommCosts(dom_per_proc, dom_configs):
m_idx, n_idx, k_idx = 0, 1, 2
# Cost for reducing the output during fwd phase. Reduction dim: k
words = np.prod(dom_per_proc[:, m_idx:n_idx+1], axis=1)
costs = GetAllReduceCost(words, dom_configs[:, k_idx])
# Cost for reducing the output during input gradient computation. Reduction
# dim: n
words = np.prod(dom_per_proc[:, (m_idx, k_idx)], axis=1)
costs += GetAllReduceCost(words, dom_configs[:, n_idx])
# Cost for reducing the output during weights gradient computation.
# Reduction dim: m
words = np.prod(dom_per_proc[:, n_idx:k_idx+1], axis=1)
costs += GetAllReduceCost(words, dom_configs[:, m_idx])
return costs
# 'dom_per_proc' is a list of configurations that specify different ways to
# parallelize a GEMM computation. This function calculates the intra-layer
# computation + communication cost for each of these configurations.
def ComputeGemmCosts(dom, dom_configs, pw_op_cnt):
assert len(dom) == dom_configs.shape[-1] >= 3
assert len(dom_configs.shape) == 2 and len(dom) == dom_configs.shape[-1]
dom_per_proc = dom / dom_configs
batches_per_proc = np.prod(dom_per_proc[:,:-3], axis=1)
gemm_per_proc = dom_per_proc[:,-3:]
return (batches_per_proc * (GetGemmCompCosts(gemm_per_proc, pw_op_cnt) +
GetGemmCommCosts(gemm_per_proc, dom_configs[:,-3:])))
# Ghost/Halo communication costs for convolution, pooling
def ComputeGhostCommCosts(tsr, configs, r, s):
assert len(tsr) == configs.shape[1] == 4
b_idx, c_idx, h_idx, w_idx = range(4)
tsr_per_proc = tsr / configs
tsr_per_proc_with_ghosts = np.copy(tsr_per_proc[:, h_idx:w_idx+1])
# Add ghost elements along h and w dims if the dimension is split among more
# than one proc
np.add(tsr_per_proc_with_ghosts[:, 0], r, where=(configs[:, h_idx] > 1),
out=tsr_per_proc_with_ghosts[:, 0])
np.add(tsr_per_proc_with_ghosts[:, 1], s, where=(configs[:, w_idx] > 1),
out=tsr_per_proc_with_ghosts[:, 1])
# Get the ghost element count
inner_elems = np.prod(tsr_per_proc[:, h_idx:w_idx+1], axis=1)
outer_elems = np.prod(tsr_per_proc_with_ghosts, axis=1)
ghost_elems = outer_elems - inner_elems
# Multiply it by other dimensions
ghost_elems *= tsr_per_proc[:, b_idx]
ghost_elems *= tsr_per_proc[:, c_idx]
return WordsToFlops(ghost_elems)
# 'arr1' and 'arr2' are 2D arrays. This function replicates the rows of arr1 and
# arr2, so that all combinations (cross-product) of rows in arr1 and arr2 are
# formed.
def RowCartesianProd(arr1, arr2):
shape1 = arr1.shape[0]
shape2 = arr2.shape[0]
tile_shape = [shape1] + ([1] * (arr2.ndim - 1))
arr1 = np.repeat(arr1, repeats=shape2, axis=0)
arr2 = np.tile(arr2, tile_shape)
return arr1, arr2
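# Example: arr1 = [[1], [2]] and arr2 = [[10], [20], [30]] become
#   arr1 -> [[1], [1], [1], [2], [2], [2]]
#   arr2 -> [[10], [20], [30], [10], [20], [30]]
# so that corresponding rows enumerate all (row of arr1, row of arr2) pairs.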
# Returns the volume of tensor data to be communicated b/w two neighboring
# layors 'src' and 'tgt' for different parallelization configurations. This
# volume is used later to calculate inter-layer communication costs.
def GetAreaNeeded(src_data_sizes, tgt_data_sizes, src_procs, tgt_procs,
ignore_area_intersection=False):
if len(src_procs.shape) > 1:
src_procs = np.prod(src_procs, axis=1)
if len(tgt_procs.shape) > 1:
tgt_procs = np.prod(tgt_procs, axis=1)
# Area needed by the target vertex
tgt_area = np.prod(tgt_data_sizes, axis=1)
if ignore_area_intersection:
return tgt_area
# Intersection of area computed by source, and needed by target.
# If no. of target procs is more than src procs, then at least one proc
# contains no source data. So set it to 0.
area_intersection = np.where(tgt_procs > src_procs, 0,
np.prod(np.minimum(tgt_data_sizes, src_data_sizes), axis=1))
# Area that needs to be communicated
return (tgt_area - area_intersection).clip(min=0)
# Returns edge costs (inter-layer communicatoin costs) for different configs.
# Edge cost is computed using the difference b/w tensor volume needed per proc
# by the target vertex and the tensor volume held per proc by the source vertex.
def GetEdgeCosts(tsr, src_cfgs, tgt_cfgs, cross_prod=True):
# Calculate the domains per processor
src_tsr_per_proc = tsr / src_cfgs
tgt_tsr_per_proc = tsr / tgt_cfgs
# Get the no. of procs used for each config
src_procs = np.prod(src_cfgs, axis=1)
tgt_procs = np.prod(tgt_cfgs, axis=1)
if cross_prod:
src_tsr_per_proc, tgt_tsr_per_proc = RowCartesianProd(src_tsr_per_proc,
tgt_tsr_per_proc)
src_procs, tgt_procs = RowCartesianProd(src_procs, tgt_procs)
# Cost of communicating input matrix from src to tgt during fwd phase, and
# from tgt to src during bwd phase
# Multiply the area by 2 for forward and backward phases
area_needed = GetAreaNeeded(src_tsr_per_proc, tgt_tsr_per_proc, src_procs,
tgt_procs) * 2.0
return WordsToFlops(area_needed)
# Returns the output tensor height and width after applying convolution/pooling
# with 'stride' and 'pad' padding.
def GetConvolutedSize(h, w, r, s, stride, pad):
stride_r, stride_s = MakePair(stride)
pad_r, pad_s = MakePair(pad)
h_o = int((h - r + 2*pad_r) / stride_r) + 1
w_o = int((w - s + 2*pad_s) / stride_s) + 1
return h_o, w_o
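# Example: a 32x32 input with a 3x3 kernel, stride 1 and padding 1 keeps its
# spatial size: GetConvolutedSize(32, 32, 3, 3, 1, 1) -> (32, 32).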
# Parent operator class
class Ops():
# Static variables
G = None # Default graph
default_procs = 0 # Can be set once and reused for the entire graph.
tsr_to_node_id = {}
cutoff = 4 # Cutoff for getconfigs()
def SetDefaultArch(flops, bw):
WordsToFlops(1, flops, bw)
def SetCutoff(cutoff):
Ops.cutoff = cutoff
def AddVertex(self):
try:
node_id = Ops.G.number_of_nodes()
except AttributeError:
assert Ops.G is None
Ops.G = nx.DiGraph()
node_id = 0
print("Node: " + str(node_id) + "; Type: " +
str(self.__class__.__name__) + "; Configs: " +
str(self.dom_configs.shape[0]))
costs = pd.Series(self.costs, index=self.dom_config_tuples, name='cost')
Ops.G.add_node(node_id, op=self, costs=costs)
self.node_id = node_id
for i, t in enumerate(self.out_tsrs):
Ops.tsr_to_node_id[id(t)] = (node_id, i)
def AddEdge(self, src_tsr, tgt_tsr_idx):
try:
src, src_tsr_idx = Ops.tsr_to_node_id[id(src_tsr)]
tgt = self.node_id
except KeyError:
# Source is an input tensor
assert src_tsr.is_input == True
return
assert (src in Ops.G) and (tgt in Ops.G)
src_op = Ops.G.nodes[src]['op']
tgt_op = self
src_cfgs = src_op.GetOutTensorConfigs(src_tsr_idx)
tgt_cfgs = tgt_op.GetInTensorConfigs(tgt_tsr_idx)
costs = GetEdgeCosts(src_tsr, src_cfgs, tgt_cfgs)
idx = pd.MultiIndex.from_product([src_op.dom_config_tuples,
tgt_op.dom_config_tuples], names=[str(src), str(tgt)])
costs = pd.Series(costs, index=idx, name='cost')
Ops.G.add_edge(src, tgt, costs=costs)
def __init__(self, dom, in_tsrs, out_tsrs, n_procs):
has_right_type = lambda t: isinstance(t, Tensor) or all(isinstance(e,
Tensor) for e in t)
n_procs = n_procs or Ops.default_procs
assert n_procs > 0
assert has_right_type(in_tsrs)
assert has_right_type(out_tsrs)
self.dom = tuple(dom)
self.in_tsrs = in_tsrs
self.out_tsrs = out_tsrs
self.n_procs = n_procs
regularize_tsrs = lambda x: (x,) if isinstance(x, Tensor) else x
regularize_configs = lambda x: (x,) if isinstance(x, np.ndarray) else x
self.in_tsrs = regularize_tsrs(self.in_tsrs)
self.out_tsrs = regularize_tsrs(self.out_tsrs)
self.out_tsrs = tuple(Tensor(t) for t in self.out_tsrs) # Make sure out_tsrs
# are fresh copies
self.in_tsrs_cnt = len(self.in_tsrs)
self.out_tsrs_cnt = len(self.out_tsrs)
self.ComputeCosts()
self.in_tsr_configs = regularize_configs(self.in_tsr_configs)
self.out_tsr_configs = regularize_configs(self.out_tsr_configs)
assert len(self.in_tsr_configs) == self.in_tsrs_cnt
assert len(self.out_tsr_configs) == self.out_tsrs_cnt
# Add a vertex to the graph for the current op
self.AddVertex()
# Add edges to the predecessors
for i, t in enumerate(self.in_tsrs):
self.AddEdge(t, i)
def ComputeCosts(self):
try:
self.dom_configs = np.array(self.dom_config_tuples)
except AttributeError:
self.dom_config_tuples = GetConfigs(self.dom, self.n_procs,
self.cutoff)
self.dom_configs = np.array(self.dom_config_tuples)
assert self.dom_configs.ndim == 2
self.in_tsr_configs = None
self.out_tsr_configs = None
self.costs = 0
def GetInTensors(self):
return self.in_tsrs
def GetOutTensors(self):
return self.out_tsrs
def GetInTensor(self, idx):
assert idx < self.in_tsrs_cnt
return self.in_tsrs[idx]
def GetOutTensor(self, idx):
assert idx < self.out_tsrs_cnt
return self.out_tsrs[idx]
def GetInTensorConfigs(self, idx):
assert idx < self.in_tsrs_cnt
return self.in_tsr_configs[idx]
def GetOutTensorConfigs(self, idx):
assert idx < self.out_tsrs_cnt
return self.out_tsr_configs[idx]
def __call__(self, idx=None):
return self.GetOutTensors() if idx is None else self.GetOutTensor(idx)
class Variable(Ops):
def __init__(self, tsr, n_procs=None):
super().__init__(tuple(tsr), tsr, tsr, n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs
# Elementwise ops such as add, mul, etc.,
class Elementwise(Ops):
def __init__(self, tsr1, tsr2, n_procs=None, pw_op_cnt=0):
# Both the inputs should have same rank and shape
assert len(tsr1) == len(tsr2)
assert all(t1 == t2 for t1, t2 in zip(tsr1, tsr2))
self.pw_op_cnt = pw_op_cnt
out_tsr = Tensor(tsr1)
super().__init__(tsr1, (tsr1, tsr2), out_tsr, n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = (self.dom_configs, self.dom_configs)
self.out_tsr_configs = self.dom_configs
dom_per_proc = np.prod(self.dom / self.dom_configs, axis=1)
self.costs = (1 + self.pw_op_cnt) * dom_per_proc
# Get communication cost of converting a multi-dimensional 'tsr1' to a 1D 'tsr2'
def GetFlatteningCost(tsr1, tsr2, tsr1_configs, tsr2_configs):
assert len(tsr2) == 1
assert tsr2[0] == Prod(tsr1)
tsr1_per_proc = tsr1 / tsr1_configs
tsr2_per_proc = tsr2 / tsr2_configs
# Take a single processor slice of tsr2, and reshape it into tsr1's shape.
# This provides the single processor slice of tsr1 split using 'tsr2_config'
new_tsr1_per_proc = np.empty_like(tsr1_per_proc)
tsr2_per_proc_copy = np.copy(tsr2_per_proc[:,0])
for i, d in enumerate(tsr1[::-1], start=1):
np.minimum(tsr2_per_proc_copy, d, out=new_tsr1_per_proc[:,-i])
tsr2_per_proc_copy = (tsr2_per_proc_copy // d).clip(min=1)
# Get amount of words to be transferred
src_procs = np.prod(tsr1_configs, axis=1)
tgt_procs = np.prod(tsr2_configs, axis=1)
words = GetAreaNeeded(tsr1_per_proc, new_tsr1_per_proc, src_procs,
tgt_procs) * 2.0
return WordsToFlops(words)
def ConfigureReshape(op):
def is_contiguous(cfg):
it = zip(op.dom, cfg)
# Skip the most significant axes until we reach an axis that is
# partially split
for d, c in it:
if d != c:
break
# All the axes after partial split should be unsplit
for _, c in it:
if c != 1:
return False
return True
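    # Illustrative example of the rule above: for dom = (4, 4, 4), the config
    # (4, 2, 1) is contiguous (axis 0 fully split, axis 1 partially split, later
    # axes unsplit), whereas (2, 2, 1) is not, because axis 1 is split even though
    # axis 0 is only partially split.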
# Pick only the configs that split the domain contiguously
op.dom_config_tuples = list(filter(is_contiguous, GetConfigs(op.dom,
op.n_procs, op.cutoff)))
op.dom_configs = np.array(op.dom_config_tuples)
[in_tsr] = op.in_tsrs
[out_tsr] = op.out_tsrs
assert (len(in_tsr) == 1) or (len(out_tsr) == 1)
assert Prod(in_tsr) == Prod(out_tsr)
assert len(op.dom) == max(len(in_tsr), len(out_tsr))
if in_tsr == op.dom:
op.in_tsr_configs = op.dom_configs
op.out_tsr_configs = np.prod(op.dom_configs, axis=1, keepdims=True)
else:
assert out_tsr == op.dom
op.out_tsr_configs = op.dom_configs
op.in_tsr_configs = np.prod(op.dom_configs, axis=1, keepdims=True)
class Ravel(Ops):
def __init__(self, tsr, n_procs=None):
out_tsr = Tensor((Prod(tsr),))
super().__init__(tsr, tsr, out_tsr, n_procs)
def ComputeCosts(self):
ConfigureReshape(self)
self.costs = 0
class Unravel(Ops):
def __init__(self, tsr, shape, n_procs=None):
# Input should be a flattened array
assert len(tsr) == 1
super().__init__(shape, tsr, Tensor(shape), n_procs)
def ComputeCosts(self):
ConfigureReshape(self)
self.costs = 0
def Reshape(tsr, shape, n_procs=None):
ravel = Ravel(tsr, n_procs)
unravel = Unravel(ravel.GetOutTensor(0), shape, n_procs)
return unravel
class Transpose(Ops):
def __init__(self, in_tsr, perm, n_procs=None):
assert len(in_tsr) == len(perm)
self.perm = perm
out_tsr = Tensor(tuple(in_tsr[p] for p in perm))
super().__init__(in_tsr, in_tsr, out_tsr, n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs[:, tuple(p for p in self.perm)]
self.costs = 0
class Stack(Ops):
def __init__(self, in_tsrs, axis=0, n_procs=None):
assert all(isinstance(t, Tensor) for t in in_tsrs)
assert all(in_tsrs[0] == t for t in in_tsrs[1:])
self.axis = AdjustAxis(in_tsrs[0], axis)
self.num = len(in_tsrs)
dom = list(in_tsrs[0])
dom.insert(self.axis, 1) # This prevents distributing the stacking axis
out_tsr = list(in_tsrs[0])
out_tsr.insert(self.axis, self.num)
super().__init__(dom, in_tsrs, Tensor(out_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = (np.delete(self.dom_configs, self.axis, axis=1),)
self.in_tsr_configs *= self.num
self.out_tsr_configs = self.dom_configs
self.costs = 0
class Unstack(Ops):
def __init__(self, in_tsr, axis=0, n_procs=None):
axis = self.axis = AdjustAxis(in_tsr, axis)
self.num = in_tsr[axis]
dom = list(in_tsr)
dom[axis] = 1 # This prevents distributing the stacking axis
out_tsr = in_tsr[:axis] + in_tsr[axis+1:]
out_tsrs = tuple(Tensor(out_tsr) for _ in range(self.num))
super().__init__(dom, in_tsr, out_tsrs, n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.dom_configs[:, self.axis] = 1 # Don't distribute along stack axis
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = (np.delete(
self.dom_configs, self.axis, axis=1),) * self.num
self.costs = 0
# Fully connected layer
class FC(Ops):
def __init__(self, in_tsr, n_units, n_procs=None, pw_op_cnt=0):
assert len(in_tsr) >= 2
self.pw_op_cnt = pw_op_cnt
m_idx, n_idx, k_idx = range(3)
# Domain and input/output tensors
dom = in_tsr[:-1] + (n_units, in_tsr[-1])
out_tsr = Tensor(dom[:-1])
super().__init__(dom, in_tsr, out_tsr, n_procs)
def ComputeCosts(self):
m_idx, n_idx, k_idx = range(3)
# Configurations
super().ComputeCosts()
self.in_tsr_configs = np.delete(self.dom_configs, -2, axis=1)
self.out_tsr_configs = self.dom_configs[:, :-1]
# Compute the costs for configs
gemm_dom = (Prod(self.dom[:-2]),) + self.dom[-2:]
gemm_configs = np.concatenate((
np.prod(self.dom_configs[:,:-2], axis=1, keepdims=True),
self.dom_configs[:,-2:]), axis=1)
self.costs = ComputeGemmCosts(gemm_dom, gemm_configs, self.pw_op_cnt)
class Einsum(Ops):
def __init__(self, eq, tsr1, tsr2, n_procs=None, pw_op_cnt=0):
in_dims, out_dims = eq.split('->')
in1_dims, in2_dims = in_dims.split(',')
self.pw_op_cnt = pw_op_cnt
# Dimension sets
in1_dims_set, in2_dims_set, out_dims_set = (
set(d) for d in (in1_dims, in2_dims, out_dims))
in_dims_set = (in1_dims_set | in2_dims_set)
# Sanity checks
assert out_dims_set.issubset(in_dims_set)
assert in_dims_set.issubset(set(string.ascii_letters))
assert len(in1_dims) == len(in1_dims_set)
assert len(in2_dims) == len(in2_dims_set)
assert len(out_dims) == len(out_dims_set)
common_dims_set = in1_dims_set & in2_dims_set
batch_dims_set = common_dims_set & out_dims_set
reduction_dims_set = common_dims_set - out_dims_set
m_dims_set = in1_dims_set - (batch_dims_set | reduction_dims_set)
n_dims_set = in2_dims_set - (batch_dims_set | reduction_dims_set)
assert (m_dims_set.issubset(out_dims_set)) and (
n_dims_set.issubset(out_dims_set))
assert not (m_dims_set & in2_dims_set) and not (
n_dims_set & in1_dims_set)
# TODO: This can be relaxed by inserting dummy 'm' & 'n' dims of size 1
if len(m_dims_set) < 1 or len(n_dims_set) < 1:
raise NotImplementedError
# Dimension maps
dims_to_sizes_map = {}
for d, s in zip(in1_dims, tsr1):
dims_to_sizes_map[d] = s
for d, s in zip(in2_dims, tsr2):
if d in dims_to_sizes_map:
assert dims_to_sizes_map[d] == s
else:
dims_to_sizes_map[d] = s
# Keep the convention of (out_dims, reduction_dims) for dom
reduction_dims = ''.join(reduction_dims_set)
dom = tuple(dims_to_sizes_map[d] for d in out_dims) + tuple(
dims_to_sizes_map[d] for d in reduction_dims)
dom_dims = out_dims + reduction_dims
print(f'Einsum domain: {dom_dims}; '
f'in_tsr1: {in1_dims}; in_tsr2: {in2_dims}; '
f'out_tsr: {out_dims}.')
out_tsr = tuple(dims_to_sizes_map[d] for d in out_dims)
# Indices
dims_to_indices = lambda dims: [dom_dims.index(d) for d in dims]
self.batch_indices = dims_to_indices(batch_dims_set)
self.m_indices = dims_to_indices(m_dims_set)
self.n_indices = dims_to_indices(n_dims_set)
self.reduction_indices = dims_to_indices(reduction_dims)
self.in1_tsr_indices = dims_to_indices(in1_dims)
self.in2_tsr_indices = dims_to_indices(in2_dims)
self.out_tsr_indices = dims_to_indices(out_dims)
super().__init__(dom, (tsr1, tsr2), Tensor(out_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
gemm_dom = [self.dom[d] for d in self.batch_indices] + [
Prod(self.dom[d] for d in self.m_indices),
Prod(self.dom[d] for d in self.n_indices),
Prod(self.dom[d] for d in self.reduction_indices)]
gemm_configs = (self.dom_configs[:,self.batch_indices],
np.prod(self.dom_configs[:,self.m_indices], axis=1, keepdims=True),
np.prod(self.dom_configs[:,self.n_indices], axis=1, keepdims=True),
np.prod(self.dom_configs[:,self.reduction_indices], axis=1,
keepdims=True))
gemm_configs = np.concatenate(gemm_configs, axis=1)
self.in_tsr_configs = (self.dom_configs[:,self.in1_tsr_indices],
self.dom_configs[:,self.in2_tsr_indices])
self.out_tsr_configs = self.dom_configs[:,self.out_tsr_indices]
self.costs = ComputeGemmCosts(gemm_dom, gemm_configs, self.pw_op_cnt)
# Batched matmul
def MatMul(tsr1, tsr2, n_procs=None, pw_op_cnt=0):
# Both tensors should be of same rank and >=2, inner most two dimensions
# correspond to valid GEMM, and outer dimensions should match.
assert len(tsr1) == len(tsr2) >= 2
assert tsr1[-1] == tsr2[-2]
assert all(t1 == t2 for t1, t2 in zip(tsr1[:-2], tsr2[:-2]))
dims = string.ascii_letters[:len(tsr1)+1]
batch_dims = dims[:-3]
m, n, k = dims[-3:]
eq = f'{batch_dims}{m}{k},{batch_dims}{k}{n}->{batch_dims}{m}{n}'
return Einsum(eq, tsr1, tsr2, n_procs, pw_op_cnt)
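# Illustrative example: for rank-3 inputs of shapes (b, m, k) and (b, k, n), the
# equation built above is 'abd,adc->abc' -- 'a' is the batch axis and the result
# has shape (b, m, n).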
# Convolution
class Conv(Ops):
def __init__(self, img, fltr, stride=1, pad=0, n_procs=None, pw_op_cnt=0):
assert len(img) == 4
assert len(fltr) == 4
assert img[1] == fltr[1]
self.pw_op_cnt = pw_op_cnt
b, c, h, w = img
n, _, r, s = fltr
h_o, w_o = GetConvolutedSize(h, w, r, s, stride, pad)
# Domain
dom = (b, c, h_o, w_o, r, s, n)
out_tsr = Tensor((b, n, h_o, w_o))
super().__init__(dom, img, out_tsr, n_procs)
def ConvertToGemmDom(self):
b_idx, c_idx, h_idx, w_idx, r_idx, s_idx, n_idx = range(7)
b, c, h_o, w_o, r, s, n = self.dom
gemm_dom = (b * h_o * w_o, n, c * r * s)
gemm_m = np.prod(self.dom_configs[:, (b_idx, h_idx, w_idx)],
axis=1, keepdims=True)
gemm_n = self.dom_configs[:, n_idx:n_idx+1]
gemm_k = np.prod(self.dom_configs[:, (c_idx, r_idx, s_idx)],
axis=1, keepdims=True)
gemm_configs = np.concatenate((gemm_m, gemm_n, gemm_k), axis=1)
return gemm_dom, gemm_configs
def ComputeCosts(self):
b_idx, c_idx, h_idx, w_idx, r_idx, s_idx, n_idx = range(7)
# Configurations
no_halo_exchange = True
if no_halo_exchange:
config_dom = list(self.dom)
config_dom[h_idx] = 1
config_dom[w_idx] = 1
self.dom_config_tuples = GetConfigs(config_dom, self.n_procs,
self.cutoff)
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs[:, b_idx:w_idx+1]
self.out_tsr_configs = self.dom_configs[:, (b_idx, n_idx, h_idx, w_idx)]
# Represent convolution as GEMM computation, and compute the cost for
# GEMM op
gemm_dom, gemm_configs = self.ConvertToGemmDom()
self.costs = ComputeGemmCosts(gemm_dom, gemm_configs, self.pw_op_cnt)
# Add costs for ghost communications
if not no_halo_exchange:
self.costs += ComputeGhostCommCosts(self.GetInTensor(0),
self.in_tsr_configs, self.dom[r_idx], self.dom[s_idx])
# Pooling - Maxpool, Avgpool
class Pooling(Ops):
def __init__(self, img, fltr, stride=1, pad=0, n_procs=None):
assert len(img) == 4
assert len(fltr) == 2
b, c, h, w = img
self.r, self.s = fltr
h_o, w_o = GetConvolutedSize(h, w, self.r, self.s, stride, pad)
dom = (b, c, h_o, w_o)
super().__init__(dom, img, Tensor(dom), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs
dom_per_proc = self.dom / self.dom_configs
self.costs = np.prod(dom_per_proc, axis=1)
# Add costs for ghost communications
self.costs += ComputeGhostCommCosts(self.GetInTensor(0),
self.in_tsr_configs, self.r, self.s)
class Concat(Ops):
def __init__(self, in_tsrs, axis, n_procs=None):
tsr0 = in_tsrs[0]
rank = len(tsr0)
axis = AdjustAxis(tsr0, axis)
assert len(in_tsrs) >= 2
# All tensors should be of same rank, and concat axis should be valid
assert all(len(t) == rank for t in in_tsrs)
# All tensors should have same dimensions along non-concatenated axes
assert all(t[i] == tsr0[i] for t in in_tsrs for i in range(rank) if i !=
axis)
concatenated_size = reduce(op.add, (t[axis] for t in in_tsrs))
dom = list(tsr0)
dom[axis] = 1 # This prevents distribution along 'axis'
in_tsrs = tuple(t for t in in_tsrs)
out_tsr = list(tsr0)
out_tsr[axis] = concatenated_size
super().__init__(dom, in_tsrs, Tensor(out_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = (self.dom_configs,) * len(self.in_tsrs)
self.out_tsr_configs = self.dom_configs
self.costs = 0
class Split(Ops):
def __init__(self, in_tsr, num_splits, axis, n_procs=None):
axis = AdjustAxis(in_tsr, axis)
assert in_tsr[axis] % num_splits == 0
self.num_splits = num_splits
out_tsr = list(in_tsr)
out_tsr[axis] = int(out_tsr[axis] / num_splits)
out_tsrs = tuple(Tensor(out_tsr) for _ in range(num_splits))
dom = list(in_tsr)
dom[axis] = 1 # This prevents distribution along 'axis'
super().__init__(dom, in_tsr, out_tsrs, n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = (self.dom_configs,) * self.num_splits
self.costs = 0
class Norm(Ops):
def __init__(self, in_tsr, axis=-1, n_procs=None):
assert len(in_tsr) > 1
self.axis = AdjustAxis(in_tsr, axis)
super().__init__(in_tsr, in_tsr, Tensor(in_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs
# Computation cost for fwd phase. Same cost for bwd phase too.
dom_per_proc = self.dom / self.dom_configs
elems = np.prod(dom_per_proc, axis=1)
self.costs = (2 * 8.0) * elems
# Communication cost for fwd phase: Reduction and broadcast of mean and
# variance. 2 reductions for fwd phase, and 2 for bwd phase.
self.costs += 4.0 * GetAllReduceCost(elems / dom_per_proc[:, self.axis],
self.dom_configs[:, self.axis])
# Communication cost for broadcast/reduction of Weight vectors - scale
# and bias - in fwd/bwd phases
procs = np.prod(np.delete(self.dom_configs, self.axis, axis=1), axis=1)
self.costs += 4.0 * GetAllReduceCost(dom_per_proc[:, self.axis], procs)
def BatchNorm(in_tsr, n_procs=None):
return Norm(in_tsr, 0, n_procs)
class ReduceMean(Ops):
def __init__(self, in_tsr, axis=None, keepdims=False, n_procs=None):
if axis is None:
axis = list(range(len(in_tsr)))
elif not hasattr(axis, "__len__"):
            axis = [axis]
assert len(axis) <= len(in_tsr) and all(a < len(in_tsr) for a in axis)
self.axis = axis
self.keepdims = keepdims
out_tsr = []
axis = set(axis)
for i, t in enumerate(in_tsr):
if i not in axis:
out_tsr.append(t)
elif keepdims:
out_tsr.append(1)
if not out_tsr:
out_tsr = (1,)
super().__init__(in_tsr, in_tsr, Tensor(out_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
cols = list(set(range(len(self.GetInTensorConfigs(0)))) -
set(self.axis))
if not cols:
if self.keepdims == True:
self.out_tsr_configs = np.ones(self.dom_configs.shape)
else:
self.out_tsr_configs = np.ones((self.dom_configs.shape[0], 1))
else:
if self.keepdims == True:
self.out_tsr_configs = np.ones(self.dom_configs.shape)
self.out_tsr_configs[:, cols] = self.dom_configs[:, cols]
else:
self.out_tsr_configs = self.dom_configs[:, cols]
if self.out_tsr_configs.ndim == 1:
self.out_tsr_configs = self.out_tsr_configs.reshape((-1,1))
dom_per_proc = self.dom / self.dom_configs
words = np.prod(dom_per_proc, axis=1)
procs = np.prod(self.dom_configs[:, self.axis], axis=1)
self.costs = GetAllReduceCost(words, procs)
class Softmax(Ops):
def __init__(self, in_tsr, axis=1, n_procs=None):
assert axis < len(in_tsr)
self.axis = axis
super().__init__(in_tsr, in_tsr, Tensor(in_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs
# Softmax computation costs - Taking exponent and summation in forward
# pass + cost of performing N multiplications per input in backward pass.
exp_cost = 3.0 # Cost of computing a single exponent
dom_per_proc = self.dom / self.dom_configs
self.costs += (exp_cost + 1) * np.prod(dom_per_proc, axis=1)
self.costs += np.prod(dom_per_proc, axis=1) * self.dom[self.axis]
# Softmax communication costs - Adding partial sums: 1 word per input
# per proc => batchsize / proc in forward pass.
# Cost of gathering the rows in backward pass + reduction.
elems = np.prod(np.delete(dom_per_proc, self.axis, 1), axis=1)
self.costs += GetAllReduceCost(elems, self.dom_configs[:, self.axis])
self.costs += GetAllReduceCost(elems * self.dom[self.axis],
self.dom_configs[:, self.axis])
self.costs += GetAllReduceCost(elems, self.dom_configs[:, self.axis])
class SoftmaxCrossEntropy(Ops):
# TODO: Currently softmax axis is -1 by default. Add an axis parameter to
# support other axes.
def __init__(self, in_tsr, n_procs=None):
super().__init__(in_tsr, in_tsr, Tensor(in_tsr), n_procs)
def ComputeCosts(self):
super().ComputeCosts()
self.in_tsr_configs = self.dom_configs
self.out_tsr_configs = self.dom_configs
dom_per_proc = self.dom / self.dom_configs
batch_size = | np.prod(dom_per_proc[:, :-1], axis=1) | numpy.prod |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 20:17:38 2021
@author: lucasmurtinho
"""
import numpy as np
from ExKMC.Tree import Node
import time
import random
from find_cut import get_distances
def get_best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,
n, k,phi_data, phi_centers):
start = time.time()
dim = len(data[0])
phi_data = phi_data[valid_data]
    valid_n = data.shape[0] #rows
data_count = data_count[valid_data]
    valid_k = centers.shape[0] #rows
phi_centers = phi_centers[valid_centers]
##### <NAME>
    #for each dimension we have an ordering of the centers that have not been separated yet
    #given that ordering, the union of the cuts that separate centers in this ordering is [c1,cn[
    #let cij be the j-th (sorted) center in dimension i
    #At will have the form [[1,c11,c1m],[2,c21,c2m],....,[d,cd1,cdm]], where m is the number of centers not yet separated
At = []
for i in range(dim):
        #a cut is possible if it separates at least 2 centers
        #if there is some center to its right, it separates them
        # the cut with no center to its right is last_center
At.append([i])
phi_centers_dim = phi_centers[:,i]
phi_centers_dim_sort = np.argsort(phi_centers_dim)
last_phi_center = phi_centers_dim[phi_centers_dim_sort[-1]]
# for j in range(valid_k):
# if(centers_dim[j] < last_center):
# At[-1].append([centers_dim[j]])
first_phi_center = phi_centers_dim[phi_centers_dim_sort[0]]
if(last_phi_center > first_phi_center):
At[-1].append(first_phi_center)
At[-1].append(last_phi_center)
total_length =0
for i in range(dim):
if(len(At[i])==3):
total_length += At[i][2] - At[i][1]
rand = random.uniform(0,total_length)
# print(total_length)
# print(rand)
# print(At)
auxiliar_length = rand
best_dim = -1
best_cut = -1
for i in range(dim):
if(len(At[i])==3):
auxiliar_length = auxiliar_length -(At[i][2] - At[i][1])
if(auxiliar_length<0):
auxiliar_length+=At[i][2] - At[i][1]
best_cut = At[i][1] + auxiliar_length
best_dim = At[i][0]
# print('dim',best_dim)
# print(best_cut)
break
if(best_dim ==-1):
#in which case the draw gives total_length.
#As the interval is open, I define that it will be the same as when the draw gives 0.
#This happens with probability 0
for i in range(dim):
if(len(At[i])==3):
                best_dim = At[i][0]
                best_cut = At[i][1]
                break
# Dt = 0
# for i in range(valid_k):
# for j in range(i+1,valid_k):
# dist = np.linalg.norm((centers[i]-centers[j]),ord = 1)
# if(dist>Dt):
# Dt = dist
# Bt =[]
# print("Dt = ",Dt)
# print("k=",k)
# for i in range(dim):
# centers_dim = centers[:,i]
# order_dim_index = np.argsort(centers_dim)
# for j in range(valid_k):
# count = 0 #quantidade de centros a uma distancia menor que Dw/k*3
# idx_1 = ordem_dim_index[j]
# w = j+1
# idx2 = ordem_dim_index[w]
# while(np.linalg.norm((centers[idx1]-centers[idx2]),ord = 1)<= Dt/(k**3))
# while(np.linalg.norm((centers[idx1]-centers[idx2]),ord = 1)<= Dt/(k**3))
# for w in range(j+1,valid_k):
# #percorrer os pontos depois dele na ordem crescente dessa dim
# if():
# count += 1
# if(count > 0):
# Bt.append([i,centers_dim[j]])
# Ct = []
# for i in range(len(At)):
# if At[i] not in Bt:
# Ct.append(At[i])
# print("At=",At)
# # print("Bt=",Bt)
# # print("Ct=",Ct)
# rand_index = random.randint(0,len(At)-1)
# best_dim = Ct[rand_index][0]
# best_cut = Ct[rand_index][1]
end = time.time()
return best_dim,best_cut
def best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,phi_data, phi_centers,cuts_matrix):
"""
Finds the best cut across any dimension of data.
"""
dim = centers.shape[1]
best_cut = -np.inf
best_dim = -1
best_cost = np.inf
n = valid_data.sum()
k = valid_centers.sum()
terminal = False
ans = get_best_cut_makarychev(data, data_count, valid_data, centers, valid_centers,
n, k,phi_data, phi_centers)
best_dim, best_cut = ans
if best_cut == -np.inf:
terminal = True
return best_dim, best_cut, terminal
def build_tree_makarychev(data, data_count, centers, cur_height,
valid_centers, valid_data, phi_data, phi_centers,cuts_matrix ):
"""
Builds a tree that induces an explainable partition (from axis-aligned
cuts) of the data, based on the centers provided by an unrestricted
partition.
"""
node = Node()
k = valid_centers.sum()
n = valid_data.sum()
if k == 1:
node.value = | np.argmax(valid_centers) | numpy.argmax |
#
# works with polynomial (linear) fit
#
"""
functions:
goFromTo: calculates the phase shift matrix
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__copyright__ = "ESRF, 2012"
import numpy, math
#from scipy import stats
import Shadow as sh
import Shadow.ShadowTools as st
def goFromTo(source,image,distance=1.0,lensF=None,wavelength=1e-10):
#distance = numpy.array(distance)
x1 = numpy.outer(source,numpy.ones(image.size))
x2 = numpy.outer(numpy.ones(source.size),image)
r = numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(distance,2) ) - distance
# add lens at the image plane
if lensF != None:
x10 = numpy.outer(source*0,numpy.ones(image.size))
#print 'r: ',r
# exact value
rf = numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(lensF,2) ) - lensF
# approx value
#rf = numpy.power(x10-x2,2)/(2*lensF)
r = r - rf
#print 'rf: ',rf
#print 'rnew: ',r
wavenumber = numpy.pi*2/wavelength
return numpy.exp(1.j * wavenumber * r)
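# Illustrative usage (hypothetical values): build the free-space propagator between
# a source plane and a detector plane and apply it to a uniform input field.
#   src = numpy.linspace(-70e-6, 70e-6, 100) # source coordinates [m]
#   det = numpy.linspace(-100e-6, 100e-6, 100) # detector coordinates [m]
#   phases = goFromTo(src, det, distance=1.38, wavelength=1e-10) # 100x100 complex
#   field = numpy.dot(numpy.ones(src.size), phases) # field at the detector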
def goFromToShadow(source,image,distance=1.0,lensF=None,wavelength=1e-10):
#distance = numpy.array(distance)
x1 = numpy.outer(source,numpy.ones(image.size))
x2 = numpy.outer(numpy.ones(source.size),image)
r0 = numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(distance,2) ) #- distance
# add lens at the image plane
if lensF != None:
print('r0: ',r0)
useshadowlocal = 1
if useshadowlocal == 1:
#rf = -0.5*numpy.outer(numpy.ones(source.size),lensF)
# using fit alla mirone...
#rf = (-2.5144013e-07*x2 -0.0012614668/2*x2*x2)
#fit: [ -1.25898614e-03 -5.97183893e-08]
#print 'shapes image lensF: ',image.shape,lensF.shape
zz = numpy.polyfit(image, lensF, 1)
rf = zz[1]*x2 +zz[0]/2*x2*x2
#print 'fit: ',zx
#rf = -0.5*numpy.outer(numpy.ones(source.size),lensF)
else:
# applying phase change
focal = distance/2
# exact
#rf = -numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(focal,2) ) - focal
# paraxial
rf = -numpy.power(x2,2)/(2*focal)
r = r0 + rf
print('rf: ',rf)
print('r: ',r)
else:
r = r0
wavenumber = numpy.pi*2/wavelength
return numpy.exp(1.j * wavenumber * r)
def main():
# inputs (working in m)
useshadow = 1
slitdistance = 30.9 # m
detdistance = 1.38 # m
detsize = 200e-6 # m
energy = 14.0 # keV
realisations = 1000
lensF = None # detdistance/2 # focal distance
shadowunits2m = 1e-2
wavelength = 12.398/(energy)*1e-10 # m
#wavelength = 500.0e-6 # mm
# open output file
f = open('twoslitsLeitenberger.spec', 'w')
header="#F twoslitsLeitenberger.spec \n"
f.write(header)
# read shadow files
#
flag=st.getshcol("star.01",10)
igood = numpy.where(flag >= 0)
igood = numpy.array(igood)
igood.shape = -1
print(flag.size)
print('igood: ',igood.size)
print('--------------')
# use shadow's number of points
#sourcepoints = 200
sourcepoints = igood.size
    slitpoints = sourcepoints // 2
detpoints = sourcepoints
if useshadow == 1:
#shadow
position1x = st.getshcol("begin.dat",3) * shadowunits2m
position1x = position1x[igood]
position1x.shape = -1
else:
#grid
sourcesize = 140e-6
position1x = numpy.linspace(-sourcesize/2,sourcesize/2,sourcepoints)
#position1x = st.getshcol("begin.dat",3) # * shadowunits2m
#position1x = position1x[igood]
#position1x.shape = -1
#sourcesize = 140e-6
#position1x = numpy.linspace(-sourcesize/2,sourcesize/2,sourcepoints)
print('>>> maxmin: ',position1x.min(), position1x.max())
if useshadow == 1:
#shadow
position2x = st.getshcol("screen.0101",3) * shadowunits2m
position2x = position2x[igood]
position2x.shape = -1
else:
#grid
slitsize = 2e-6
slitgap = 11.3e-6
tmp = numpy.linspace(-slitsize/2,slitsize/2,slitpoints)
position2x = numpy.concatenate((tmp-slitgap/2,tmp+slitgap/2))
#position3x = st.getshcol("star.02",3)
#position3x = position3x[igood]
#position3x.shape = -1
#direction3x = st.getshcol("star.02",6)
#direction3x = direction3x[igood]
#direction3x.shape = -1
#vz0101 = st.getshcol("screen.0101",6)
#vz0201 = st.getshcol("screen.0201",6)
# working with angles...
#tmp3 = -numpy.cos(numpy.arcsin(vz0201 -vz0101))
#tmp3 = (tmp3-tmp3.min()) * 1590.0
#tmp3 = tmp3[igood]
#tmp3.shape = -1
# working with differences
#tmp3 = (vz0201 -vz0101)
#tmp3 = tmp3[igood]
#tmp3.shape = -1
position3x = numpy.linspace(-detsize/2,detsize/2,igood.size)
print('igood: ',igood.size,position1x.size,position2x.size,position3x.size)
print('shape: ',igood.shape)
#for j in range(detpoints):
# print j,igood[j],position1x[j],position2x[j],position3x[j]
#direction3x = None
if useshadow == 0:
fields12 = goFromToShadow(position1x,position2x,slitdistance, lensF=None,wavelength=wavelength)
fields23 = goFromToShadow(position2x,position3x,detdistance, lensF=None,wavelength=wavelength)
else:
fields12 = goFromTo(position1x,position2x,slitdistance, lensF=None,wavelength=wavelength)
fields23 = goFromTo(position2x,position3x,detdistance, lensF=None,wavelength=wavelength)
# from 1 to 3, matrix multiplication
fields13 = numpy.dot(fields12,fields23)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#fields13 = fields23
print('shape 12: ',fields12.shape)
print('shape 23: ',fields23.shape)
print('shape 13: ',fields23.shape)
#sourcepoints = igood.size
fieldComplexAmplitude = numpy.dot(numpy.ones(sourcepoints),fields13)
fieldIntensity = numpy.power(numpy.abs(fieldComplexAmplitude),2)
    fieldPhase = numpy.arctan2(numpy.imag(fieldComplexAmplitude), numpy.real(fieldComplexAmplitude))
print('fields: ',fields12.shape, fields23.shape)
# do the ensemble average
tmpSource = numpy.exp(1.j*2*numpy.pi* numpy.random.mtrand.rand(sourcepoints))
fieldSource=tmpSource
fieldIntensityEA = numpy.power(numpy.abs(fieldComplexAmplitude),2)
for i in range(realisations-1):
#tmpSource = numpy.exp(1.j*2* numpy.pi*numpy.random.mtrand.rand(sourcepoints))
#fieldComplexAmplitude = numpy.dot( tmpSource, fields13)
#fieldIntensityEA = fieldIntensityEA + numpy.power(numpy.abs(fieldComplexAmplitude),2)
tmpSource = numpy.exp(1.j*2* \
numpy.pi*numpy.random.mtrand.rand(sourcepoints))
fieldComplexAmplitude = numpy.dot( tmpSource, fields13)
fieldIntensityEA = fieldIntensityEA + \
numpy.power( | numpy.abs(fieldComplexAmplitude) | numpy.abs |
#!/usr/bin/env python
import argparse, sys
from argparse import RawTextHelpFormatter
import numpy as np
import scipy.optimize
import scipy.sparse as sp
from scipy.stats import multinomial
from sklearn.preprocessing import quantile_transform
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
import networkx as nx
import prmf
from prmf import prmf_args
import copy
import datetime
import math
import os, os.path
import pandas as pd
import random
import csv
import datetime
np.seterr(divide='raise')
EPSILON = | np.finfo(np.float32) | numpy.finfo |
# -*- coding: utf-8 -*-
"""
The below functions can be used to import delimited data files into Numpy or
Matlab database format.
"""
import argparse
import copy
import glob
import math
import os
import re
from enum import Enum
import numpy as np
import pkg_resources
# pylint: disable=no-member
import scipy.io
class _Colors:
"""
A collection of colors that can be used to highlight terminal outputs.
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class _TextSnippets(Enum):
"""
Text snippets to be used when merging delimited files.
"""
header = "This file was automatically generated using the merge_del\n" \
"function of the Python tribology package, version {}.\n" \
"\n" \
"See here for more information:\n" \
"https://pypi.org/project/tribology/\n"\
"\n"\
"The file contains data from the following source files " \
"(in order):\n"
seperator = "\n" \
"Beginning of file:\n" \
"{}\n"
def __make_dir(dirpath):
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
return dirpath
def __get_outpath(outdir):
if outdir:
outpath = __make_dir(outdir)
else:
outpath = os.getcwd()
return outpath
def __get_outfile(in_file, idx, out_ext):
fname = ''.join(in_file.split('.')[:-1]).split(os.sep)[-1]
return '{}-{}.{}'.format(fname, str(idx), out_ext)
def __num_char(char):
return bool(char.isdigit() or char == '-')
def split_del(file, deli='\t', ext='txt', cmin=3, hspan=1, outdir=None,
force=False):
"""
Split a delimited data file into several separate data files, if the file
contains more than one block of data. Blocks of data are typically
separated by at least one line of column headers. The first data column
of each data block has to be numeric.
This function is meant to be used on data files where different blocks of
data have different numbers of columns or different column headers. After
splitting the data file into individual data files, import methods like
:code:`import_del` can be used on the individual files. If all data should
be merged into a single database afterwards, the :code:`merge_npz` function
can be used.
Parameters
----------
file: str
Path to the data file.
deli: str, optional
Delimiter used to separate data columns in :code:`file`
ext: str, optional
File extension of output files. Default is :code:`txt`
cmin: int, optional
Minimum number of columns that a line of data needs to have in order to
be classified as data.
hspan: int, optional
Maximum number of non-data lines above each data block that should be
written to individual data files (usually equal to number of lines
spanned by the column headers).
outdir: str, optional
Path to output directory. Default is current working directory.
force: bool
If True, existing output files will be overwritten. Will raise an
exception if file exists and force is False.
Returns
-------
outfiles: list
Paths to output files.
"""
outpath = __get_outpath(outdir)
outfiles = []
idx = 0
f_out = None
write = False
to_write = []
with open(file) as infile:
for line in infile:
# if first character of line is not numeric
if not __num_char(line[0]):
write = False
to_write.append(line)
while len(to_write) > hspan:
del to_write[0]
else:
# if numeric line has at least 'cmin' columns
if len(line.split(deli)) >= cmin and not write:
write = True
idx += 1
f_out = os.sep.join([outpath,
__get_outfile(file, idx, ext)])
if f_out not in outfiles:
outfiles.append(f_out)
if os.path.isfile(f_out):
if force:
os.remove(f_out)
else:
raise OSError("output file exists. "
"use argument 'force' to overwrite.")
if write and f_out:
with open(f_out, "a") as out:
for element in to_write:
out.write(element)
to_write = []
out.write(line)
return outfiles
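# Illustrative usage (hypothetical paths): split a tab-delimited rig log into one
# file per data block, assuming two header lines per block, and write the pieces
# to a separate directory.
#   blocks = split_del('logs/test-rig.txt', deli='\t', ext='txt', cmin=3,
#                      hspan=2, outdir='logs/split', force=True)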
def __verify_merge(in_files, accum):
"""
Check if all npz files have same set of keys and contain all keys in accum.
Throw exception if not.
Parameters
----------
in_files: list
Paths to database files to merge. Files are merged in order.
accum: list
Database keys for which values should be accumulated. Values must be
numeric.
"""
ref_keys = []
for idx, file in enumerate(in_files):
keys = sorted(np.load(file).keys())
if idx == 0:
ref_keys = copy.deepcopy(keys)
if keys != ref_keys:
raise KeyError('keys in npz databases 0 and {} differ'.format(idx))
if accum and not all(key in keys for key in accum):
raise KeyError('key(s) defined in accum not in npz database {}'
.format(file))
def merge_npz(in_files, accum=None, safe=True):
"""
Merge npz databases by concatenating all databases in :code:`in_files`.
Databases are concatenated in the order given in :code:`in_files`.
Database keys for which values are to be accumulated can be given as a list
using the :code:`accum` argument. For examples, if all databases have the
key :code:`time`, then :code:`accum=['time']` will produce a continuous
time axis, adding the last time value of the first database to all time
values of the second database (and so on).
Parameters
----------
in_files: list
Paths to database files to merge. Files are merged in order.
accum: list
Database keys for which values should be accumulated. Values must be
numeric.
safe: bool
If True, checks will be performed to ensure that all databases share the
exact same set of keys and that all keys in :code:`accum` are in all
databases. An exception (type KeyError) will be raised if not.
Returns
-------
merged: dict
Merged data.
"""
if safe:
__verify_merge(in_files, accum)
merged = {}
for file in in_files:
in_dat = np.load(file)
for key in in_dat.keys():
if key in merged:
if accum and key in accum:
merged[key] = np.append(merged[key],
in_dat[key] + merged[key][-1])
else:
merged[key] = np.append(merged[key], in_dat[key])
else:
merged[key] = in_dat[key]
return merged
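# Illustrative usage (hypothetical files): concatenate per-step databases into one
# dictionary, accumulating the 'time' key so the merged time axis is continuous.
#   merged = merge_npz(['step-1.npz', 'step-2.npz', 'step-3.npz'], accum=['time'])
#   np.savez('merged.npz', **merged)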
def __get_version(package):
"""
Get the version of a Python package.
Parameters
----------
package: str
The name of the package
Returns
-------
Version number as string.
"""
return pkg_resources.get_distribution(package).version
def __long_substr(strings):
"""
Returns longest common substring of list of strings. taken from:
# https://stackoverflow.com/questions/2892931/longest-common-substring-
from-more-than-two-strings-python
Parameters
----------
strings: list
A list of strings.
Returns
-------
substr: str
The longest common substring of all list elements. For a list with only
one element, the list element is returned; for an empty list, and empty
string is returned.
"""
substr = ''
if len(strings) > 1 and len(strings[0]) > 0:
for i in range(len(strings[0])):
for j in range(len(strings[0]) - i + 1):
if j > len(substr) and all(strings[0][i:i + j] in x for x in
strings):
substr = strings[0][i:i + j]
return substr
def merge_del(in_files, out_file=None):
"""
Merge several delimited data files into a single file. The merged
file contains all data from the data files, in the order given in the
:code:`in_files` argument.
No checks are performed to ensure that the data files
have a compatible format, for example the same number of data columns.
Parameters
----------
in_files: list
File paths to the files to be merged. Files will be merged in order.
out_file: str, optional
Path to output file, including file extension. If no path is provided,
a file name is generated based on the input file names.
Returns
-------
out_file_abs: str
Absolute path to the merged file.
"""
if len(in_files) == 0:
raise ValueError('need at least one file to merge')
in_files_abs = [os.path.abspath(file) for file in in_files]
if out_file:
out_file_abs = os.path.abspath(out_file)
else:
out_file = __long_substr(in_files_abs).split('.')[0]
out_file_abs = out_file + 'xxx-merged.txt'
max_len_path = max(len(file) for file in in_files_abs)
with open(out_file_abs, "w") as txt_file:
# write header
txt_file.write(str(_TextSnippets.header.value).format(
__get_version("tribology")))
for in_file in in_files_abs:
txt_file.write(in_file + "\n")
# write files
for in_file in in_files_abs:
txt_file.write('\n' + '#' * max_len_path)
txt_file.write(str(_TextSnippets.seperator.value).format(in_file))
txt_file.write('#' * max_len_path + '\n')
with open(in_file) as file:
for line in file:
txt_file.write(line)
return out_file_abs
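# Illustrative usage (hypothetical paths): concatenate two raw text exports into a
# single annotated file.
#   combined = merge_del(['run-a.txt', 'run-b.txt'], out_file='runs-merged.txt')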
def __print_status(message, status_color=_Colors.ENDC):
"""
Print a color-coded message to the terminal.
Parameters
----------
message: str
The message to print to the terminal.
status_color:
The color in which to print the message.
"""
print(status_color + message + _Colors.ENDC)
def __is_floatable(num):
"""
Check if 'num' can be converted to float. If yes, return :code:`True`, else
return :code:`False`.
"""
try:
float(num)
return True
except ValueError:
return False
def __to_float(num):
"""
Try to convert 'num' to float, return 'num' if it's not possible, else
return converted :code:`num`.
"""
try:
float(num)
return float(num)
except ValueError:
return num
def __assemble_data_table(num_data_tables, max_num_data_length):
"""
Assemble the complete data table from a list of data tables.
"""
num_data = np.zeros((
(len(num_data_tables) - 1) * max_num_data_length +
num_data_tables[-1].shape[0],
num_data_tables[-1].shape[1]), dtype=object)
for idx, data_table in enumerate(num_data_tables):
# do this for all but the last data table
if idx + 1 < len(num_data_tables):
num_data[idx * max_num_data_length:
(idx + 1) * max_num_data_length, :] = data_table
# do this for the last data table
else:
num_data[idx * max_num_data_length:, :] = data_table
return num_data
def __write_to_out_dict(num_data, column_headers, pcs=False):
"""
Extract the data columns from the num_data array and write them to a
dictionary.
Parameters
----------
num_data: ndarray
The data extracted from the delimited file, stored in a single table.
column_headers: ndarray
The column headers corresponding to the columns in :code:`num_data`
Returns
-------
out_dict: dict
A dictionary containing all data that is to be saved to the output
database. Keys are based on column headers, values are data columns of
num_data.
"""
out_dict = {'column_headers': column_headers}
for idx, column in enumerate(column_headers):
# explicitly take care of the fact that PCS forgot a '\tab' character in
# their data export implementation
if column == 'image_file_name' and \
math.isnan(float(num_data[0, idx])) and not \
column_headers[column_headers.tolist().index(column) - 1] and \
pcs is True:
out_dict[column] = num_data[:, idx - 1].astype(object)[:, None]
# take care of all other columns
# if empty data columns are not padded with tabs
elif column:
if idx >= num_data.shape[1]:
out_dict[column] = np.zeros(num_data.shape[1]) * float('nan')
else:
# if data is of numeric type
if __is_floatable(num_data[0, idx]):
out_dict[column] = num_data[:, idx].astype(float)[:, None]
# if data is of other type (string)
else:
out_dict[column] = num_data[:, idx].astype(object)[:, None]
return out_dict
def __process_header(heads):
"""
Process the column headers by removing special characters and converting to
Matlab-optimized data type.
Parameters
----------
    heads: list of strings
The column headers of the delimited file.
Returns
-------
col_heads: ndarray (dtype = object)
The re-formated column headers.
"""
merge = []
# merge colum headers if they span several lines
for i in range(len(heads[0])):
merge.extend([' '.join([heads[row][i] for row in range(len(heads))])])
# replace non-alphanumeric characters and trailing underscores
col_heads = [re.sub(r"\W+", '_', item.lower()).strip('_') for item in merge]
# convert data type for easy matlab export
col_heads = np.asarray(col_heads, dtype='object')
return col_heads
def __process_data(split_line, num_dat, max_len, num_data_tables):
"""
Append a data line to the current data table. If the length of the current
data table exceeds the maximum permitted data table length, save the current
data table to a list of data tables and initialise a new one.
Parameters
----------
    split_line: list
The data that is to be appended to the table.
num_dat: ndarray
The current data table to which the last line of data was appended.
max_len: positive int
The maximum length of a data table.
    num_data_tables: list
The complete list of data tables.
Returns
-------
num_dat: ndarray
The data table to which the current line of data was appended.
"""
# if data table becomes large, make new data table and add old
# table to table list (for speed)
if num_dat.shape[0] == max_len:
num_data_tables.append(num_dat)
num_dat = np.asarray(
[__to_float(item.rstrip('\n')) for item in
split_line]).reshape((1, len(split_line)))
# else simply append to data table
else:
num_dat = np.append(num_dat, np.asarray(
[__to_float(item.rstrip('\n')) for item in split_line])
.reshape((1, len(split_line))), axis=0)
return num_dat
def __process_file(in_file, dec_mark, deli, pad=0, colheadlines=1):
"""
Extract data from a delimited text file and return a dictionary containing
all data.
Parameters
----------
in_file: str
The file handle of the delimited file that is to be imported.
dec_mark: str
The decimal mark of the data file.
deli: str
The delimiter used to separate data columns in the delimited file.
pad: positive int
Ignore the first :code:`n` leading columns in the delimited file, where
:code:`n = pad`. For example, if pad = 8, the first 8 columns
are ignored.
Returns
-------
out_dict: dict
A dictionary containing all data that is to be saved to the output
database. Keys are based on column headers, values are data columns of
num_data.
"""
max_len = 1000
num_dat = []
col_heads = []
num_data_tables = []
prev_lines = []
with open(in_file) as dat_file:
for line in dat_file:
split_line = line.replace(dec_mark, '.').split(deli)
if len(split_line) > pad:
split_line = split_line[pad:]
# get rid of trailing newline characters
if split_line[-1] == '\n':
split_line[-1] = ''
# check if first character is not (digit or minus symbol (hyphen))
# to identify non-data lines. skip non-data lines.
if not (line[0].isdigit() or line[0] == '-') or \
len(split_line) <= 1:
if split_line != ['']:
prev_lines.append(split_line)
if len(prev_lines) > colheadlines:
del prev_lines[0]
continue
# if line contains data, split line into data fields, fill empty
# fields with 'nan'
split_line[:] = (item or 'nan' for item in split_line)
# if this is the first data-containing line...
if not len(col_heads):
# get the column headers
col_heads = __process_header(prev_lines)
# write the first line to the data table
num_dat = np.asarray(
[__to_float(item.rstrip('\n'))
for item in split_line]).reshape((1, len(split_line)))
else:
num_dat = __process_data(split_line, num_dat, max_len,
num_data_tables)
# assemble the complete data table and create output dictionary
num_data_tables.append(num_dat)
num_dat = __assemble_data_table(num_data_tables, max_len)
return num_dat, col_heads
def __get_file_handles(in_dir, ext, recursive=False):
"""
Get file handles for all delimited files that are to be imported.
Parameters
----------
in_dir: str
The directory in which the delimited files are stored.
ext: str
The file extension of the delimited files.
recursive: bool, optional
If :code:`True`, delimited files are imported for all child directories
of :code:`directory` (including :code:`directory`). If :code:`False`,
only files in :code:`directory` are imported. Default is :code:`False`.
Returns
-------
    in_files: list of strings
The file handles to all delimited files that are to be imported.
"""
if not recursive:
in_files = sorted(glob.glob('{}{}*.{}'.format(in_dir, os.sep, ext)))
else:
in_files = []
dir_list = [x[0] + os.sep for x in os.walk(in_dir)]
for directory in dir_list:
in_files.extend(sorted(glob.glob('{}*.{}'.format(directory, ext))))
# in_files = [f.replace(in_dir, '').lstrip(os.sep) for f in in_files]
return in_files
def __save_out_file(out_file, out_dict, out_ext):
"""
Save the imported data to an output database, either in Numpy or Matlab
format.
Parameters
----------
out_file: str
A handle to the output file that was generated during import.
out_dict: dict
The output data stored in a dictionary where keys correspond to column
headers, values correspond to data.
out_ext: str
The file extension (format) of the output file. Options are :code:`npz`
for Numpy format and :code:`mat` for Matlab database format.
Returns
-------
out_file: str
A handle to the output file that was generated after import.
"""
if out_ext == 'mat':
out_file = '{}.mat'.format(out_file)
scipy.io.savemat(out_file, out_dict)
elif out_ext == 'npz':
out_file = '{}.npz'.format(out_file)
np.savez(out_file, **out_dict)
return out_file
def __get_out_file(in_file, out_dir):
"""
Get the path of the output file.
Parameters
----------
in_file: str
Path to input file.
out_dir: str
Path to output directory.
Returns
-------
file_no_ext: str
The file name without extension.
out_dir: str
The path to the output directory.
out_file: str
The path of the output file.
"""
if out_dir == '':
out_dir = os.path.dirname(in_file)
file_no_ext = os.path.splitext(in_file)[0].split(os.sep)[-1]
if out_dir == '':
out_dir = '.'
out_file = '/'.join([out_dir, file_no_ext])
return file_no_ext, out_dir, out_file
def __import_file(in_file, out_file, out_ext, force=False, deli='\t',
dec_mark='.', pad=0, colheadlines=1):
import_status = None
num_dat = None
col_heads = None
out_file_exists = os.path.isfile('{}.{}'.format(out_file, out_ext))
if (not out_file_exists) or (force is True):
try:
num_dat, col_heads = __process_file(in_file, dec_mark, deli,
pad=pad,
colheadlines=colheadlines)
import_status = True
except (ValueError, AttributeError):
import_status = False
return num_dat, col_heads, import_status
def import_del(in_file, force=False, deli='\t', dec_mark='.', out_ext='npz',
out_dir='', pad=0, colheadlines=1):
"""
Import a delimited data file into Numpy or Matlab database format. The file
must have at least two data columns that are separated by :code:`deli`.
Parameters
----------
in_file: str
The file handle of the delimited file that is to be imported.
force: bool, optional
If :code:`True`, existing output files will be overwritten during
import. Default is :code:`False`.
deli: str, optional
The delimiter used to separate data columns in the delimited file.
Default is tab.
dec_mark: str, optional
The decimal mark of the data file. Default is dot.
out_ext: str, optional
The file extension (format) of the output file. Default is :code:`npz`
for Numpy database format. Alternative is :code:`mat` for Matlab
database format.
out_dir: str, optional
The absolute or relative path to the output directory. Default is the
current working directory.
pad: positive int
The numbers of data columns to skip. For :code:`pad = n`, the first
:code:`n` data columns will not be imported.
colheadlines: int, optional
The number of lines spanned by the column headers. If several lines are
spanned, the lines will be merged to generate the column keys in the
output dictionary.
Returns
-------
out_file: str
A handle to the output file that was generated during import.
import_status: str
The import status of :code:`in_file`. If :code:`True`, the file was
successfully imported. If :code:`False`, file import was attempted and
failed. If :code:`None`, file import was not attempted (most likely
because an output file with the same name already exists).
out_dict: dict
The data that was imported from :code:`in_file`.
"""
_, out_dir, out_file_no_ext = __get_out_file(in_file, out_dir)
out_dict = None
num_dat, col_heads, import_status = \
__import_file(in_file, out_file_no_ext, out_ext, force=force, deli=deli,
dec_mark=dec_mark, pad=pad, colheadlines=colheadlines)
if import_status is True:
out_dict = __write_to_out_dict(num_dat, col_heads)
out_file = __save_out_file(out_file_no_ext, out_dict, out_ext)
else:
out_file = None
return out_file, import_status, out_dict
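# Illustrative usage (hypothetical file): import a tab-delimited export that uses a
# comma as decimal mark into a Numpy database next to the source file.
#   outfile, ok, data = import_del('data/sliding-test.txt', deli='\t',
#                                  dec_mark=',', out_ext='npz')
#   # if ok is True, data keys are derived from the file's column headers
#   # (see data['column_headers'])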
def __gen_acc_time(step_time, steps, outformat='npz'):
"""
For files produced by PCS Instrument test rigs, generate a continuous time
axis by combining all step times from all steps.
"""
# get index of last data point of each step
current_step_end = np.where(np.subtract(step_time[1:], step_time[0:-1]) < 0)
step_end = | np.append(current_step_end[0], [step_time.shape[0] - 1]) | numpy.append |
import numpy as np
from scipy import ndimage as nd
from .pyudwt import Denoise2D1DHardMRS
b3spline = | np.array([1.,4.,6.,4.,1.]) | numpy.array |
# -*- coding: utf-8 -*-
#
#
# lssa.py
#
# purpose: Tutorial on lssa
# author: <NAME>
# e-mail: <EMAIL>
# web: http://ocefpaf.tiddlyspot.com/
# created: 16-Jul-2012
# modified: Fri 27 Jul 2012 05:32:29 PM BRT
#
# obs: Least-squares spectral analysis
# http://en.wikipedia.org/wiki/Least-squares_spectral_analysis
#
"""Lomb/Scargle Periodogram
It does not require evenly spaced data. Therefore, it is used for sediment and
ice core data."""
r"""Lomb (1976) and Scargle (1982) improve on the simple periodogram by a
slight alteration. What they showed is that if the cosine and sine
coefficients are normalized separately then the classic periodogram can be used
with unevenly spaced data, and yet the statistical behavior of the power is
identical to the behavior you would expect if you had evenly-spaced points.
To calculate the Lomb-Scargle periodogram of a data set $(t_k, y_k)$ we define,
for every frequency `f`, the time constant $\tau$ by:
$$ \tan(4\pi f\tau) = \frac{\sum\sin (4\pi f t_k)}{\sum\cos(4\pi f t_k)} $$,
Then the Lomb-Scargle periodogram estimate of the spectral power $P(f)$ at
frequency $f$ is given by:
$$ P(f) = \frac{1}{2\sigma^2}\left\{\frac{\left[ \sum_k(y_k -
\bar{y})\cos 2\pi f(t_k-\tau) \right]^2}{\sum_k\cos^2 2\pi f(t_k-\tau)} +
\frac{\left[ \sum_k(y_k - \bar{y})\sin 2\pi f(t_k-\tau) \right]^2}
{\sum_k\sin^2 2\pi f(t_k-\tau)}\right\}$$,
This equation is less imposing than it looks. It has two terms, one for the
cosine transform, the other for the sine transform. Each term is normalized
separately. The only complication is that each frequency uses a different
offset $\tau$. Other than these changes, the equation looks just like an
ordinary digital Fourier transform.
The Lomb-Scargle method has several advantages over the classic periodogram.
One, obviously, is that paleoclimate data are not evenly spaced. Although this
can be handle by interpolation, the statistical effects of such interpolation
can be complicated. Secondly, there is a limit to the ordinary periodogram
that comes about from a process called aliasing. What this means is that two
signals of different frequencies can have identical sets of values if the
samples are taken at exactly even spacing. With unevenly-spaced data, this
effect can be substantially reduced. The net result is that the Lomb-Scargle
periodogram can measure frequencies that would be aliased in evenly-spaced
data.
"""
import numpy as np
import matplotlib.pyplot as plt
def lomb(t, y, freq):
r"""Calculates Lomb periodogram."""
# Sets constants.
nfreq = len(freq)
fmax, fmin = freq[-1], freq[0]
power = np.zeros(nfreq)
f4pi = freq * 4 * np.pi
pi2 = np.pi * 2.
n = len(y)
cosarg = np.zeros(n)
sinarg = np.zeros(n)
argu = np.zeros(n)
var = np.cov(y) # Variance.
yn = y - y.mean()
# Do one Lomb loop.
for fi in range(nfreq):
        sinsum = np.sum(np.sin(f4pi[fi] * t))
        cossum = np.sum(np.cos(f4pi[fi] * t))
        tau = np.arctan2(sinsum, cossum) / f4pi[fi]
argu = pi2 * freq[fi] * (t - tau)
cosarg = np.cos(argu)
cfi = np.sum(yn * cosarg)
cosnorm = np.sum(cosarg ** 2)
sinarg = np.sin(argu)
sfi = np.sum(yn * sinarg)
sinnorm = np.sum(sinarg ** 2)
        power[fi] = (cfi ** 2 / cosnorm + sfi ** 2 / sinnorm) / (2 * var)
return power
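# Illustrative sanity check: a single sinusoid sampled at uneven times should give
# one dominant peak near its true frequency.
#   tt = np.sort(600 * np.random.rand(300))
#   yy = np.cos(2 * np.pi * tt / 95.)
#   ff = np.linspace(1. / 500, 1. / 20, 400)
#   pp = lomb(tt, yy, ff) # ff[pp.argmax()] should fall near 1/95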
# Tutorial
rand = np.random.rand
age = np.arange(0, 601)
ager = age + 0.3 * rand(age.size) - 0.15
ager[0] = age[0]
ager[600] = age[600]
depth = age / 10. # Creates depth between 0 and 60.
bkg = np.interp(ager, np.arange(0, 601, 10), rand(61))
# Fake Frequencies at 95 and 127 kyr.
f1, f2 = 1. / 95, 1. / 125
sig = | np.cos(2 * np.pi * f1 * ager) | numpy.cos |
import numpy as np
# we build some distributions and load them into a dict
mu, sigma = 0, 0.5
normal = np.random.normal(mu, sigma, 1000)
lognormal = | np.random.lognormal(mu, sigma, 1000) | numpy.random.lognormal |
import random
import numpy as np
import torch
import torch.utils.data
from io import BytesIO
from google.cloud import storage
client = storage.Client()
bucket = client.bucket('your-bucket-name')
class VocalRemoverCloudDataset(torch.utils.data.Dataset):
def __init__(self, dataset, vocal_dataset, num_training_items=None, force_voxaug=True, is_validation=False, mixup_alpha=1, mixup_rate=0.5):
self.num_training_items = num_training_items
self.force_voxaug = force_voxaug
self.is_validation = is_validation
self.mixup_alpha = mixup_alpha
self.mixup_rate = mixup_rate
blobs = list(client.list_blobs(bucket, prefix=dataset))
patch_list = []
for blob in blobs:
patch_list.append(blob.name)
vocal_blobs = list(client.list_blobs(bucket, prefix=vocal_dataset))
vocal_list = []
for blob in vocal_blobs:
vocal_list.append(blob.name)
self.full_list = patch_list
self.patch_list = patch_list
self.vocal_list = vocal_list
self.reset()
def reset(self):
if self.num_training_items is not None:
random.shuffle(self.full_list)
self.patch_list = self.full_list[:self.num_training_items]
def __len__(self):
return len(self.patch_list)
def __getitem__(self, idx):
path = self.patch_list[idx]
blob = bucket.get_blob(path)
blob_data = blob.download_as_bytes()
resource = BytesIO(blob_data)
data = np.load(resource)
aug = 'Y' not in data.files
X, Xc = data['X'], data['c']
Y = X if aug else data['Y']
if not self.is_validation:
if self.slide:
start = np.random.randint(0, X.shape[2] - self.cropsize)
stop = start + self.cropsize
X = X[:,:,start:stop]
Y = Y[:,:,start:stop]
if aug and np.random.uniform() > 0.02:
V, Vc = self._get_vocals()
X = Y + V
c = np.max([Xc, Vc, | np.abs(X) | numpy.abs |
#!/usr/bin/env python
#
# __init__.py -
#
# Author: <NAME> <<EMAIL>>
#
import os
import os.path as op
import gc
import re
import sys
import time
import shlex
import shutil
import logging
import tempfile
import traceback
import contextlib
import wx
import numpy as np
from io import StringIO
from unittest import mock
import matplotlib.image as mplimg
import fsleyes_props as props
from fsl.utils.tempdir import tempdir
import fsl.utils.idle as idle
import fsl.utils.image.resample as resample
import fsl.transform.affine as affine
import fsl.data.image as fslimage
import fsleyes
import fsleyes.frame as fslframe
import fsleyes.main as fslmain
import fsleyes.render as fslrender
import fsleyes.actions.frameactions as frameactions # noqa
import fsleyes.gl as fslgl
import fsleyes.gl.textures as textures
import fsleyes.colourmaps as colourmaps
import fsleyes.displaycontext as dc
import fsleyes.overlay as fsloverlay
from .compare_images import compare_images
def haveGL21():
try:
return float(fslgl.GL_COMPATIBILITY) >= 2.1
except:
return False
def haveFSL():
path = op.expandvars('$FSLDIR/data/standard/MNI152_T1_2mm.nii.gz')
return op.exists(path)
def touch(fname):
with open(fname, 'wt') as f:
pass
def waitUntilIdle():
called = [False]
def flag():
called[0] = True
idle.idle(flag)
while not called[0]:
realYield(50)
@contextlib.contextmanager
def mockFSLDIR(**kwargs):
from fsl.utils.platform import platform as fslplatform
oldfsldir = fslplatform.fsldir
oldfsldevdir = fslplatform.fsldevdir
try:
with tempdir() as td:
fsldir = op.join(td, 'fsl')
bindir = op.join(fsldir, 'bin')
os.makedirs(bindir)
for subdir, files in kwargs.items():
subdir = op.join(fsldir, subdir)
if not op.isdir(subdir):
os.makedirs(subdir)
for fname in files:
touch(op.join(subdir, fname))
fslplatform.fsldir = fsldir
fslplatform.fsldevdir = None
path = op.pathsep.join((bindir, os.environ['PATH']))
with mock.patch.dict(os.environ, {'PATH': path}):
yield fsldir
finally:
fslplatform.fsldir = oldfsldir
fslplatform.fsldevdir = oldfsldevdir
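# Illustrative usage: fake an FSL installation that contains a single (empty)
# binary, then check it exists in the mocked tree.
#   with mockFSLDIR(bin=['fslmaths']) as fsldir:
#       assert op.exists(op.join(fsldir, 'bin', 'fslmaths'))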
@contextlib.contextmanager
def exitMainLoopOnError(app):
oldhook = sys.excepthook
error = [None]
def myhook(type_, value, tb):
# some errors come from
# elsewhere (e.g. matplotlib),
# and are out of our control
        ignore = True
        cur = tb
        while cur is not None:
            frame = cur.tb_frame
            mod = frame.f_globals['__name__']
            if any([mod.startswith(m) for m in ('fsl', 'fsleyes')]):
                ignore = False
                break
            cur = cur.tb_next
if not ignore:
app.ExitMainLoop()
error[0] = value
        oldhook(type_, value, tb)
try:
sys.excepthook = myhook
yield error
finally:
app = None
sys.excepthook = oldhook
# Under GTK, a single call to
# yield just doesn't cut it
def realYield(centis=10):
for i in range(int(centis)):
wx.YieldIfNeeded()
time.sleep(0.01)
def yieldUntil(condition):
while not condition():
realYield()
class CaptureStdout(object):
"""Context manager which captures stdout and stderr. """
def __init__(self):
self.reset()
def reset(self):
self.__mock_stdout = StringIO('')
self.__mock_stderr = StringIO('')
def __enter__(self):
self.__real_stdout = sys.stdout
self.__real_stderr = sys.stderr
sys.stdout = self.__mock_stdout
sys.stderr = self.__mock_stderr
def __exit__(self, *args, **kwargs):
sys.stdout = self.__real_stdout
sys.stderr = self.__real_stderr
if args[0] is not None:
print('Error')
print('stdout:')
print(self.stdout)
print('stderr:')
print(self.stderr)
return False
@property
def stdout(self):
self.__mock_stdout.seek(0)
return self.__mock_stdout.read()
@property
def stderr(self):
self.__mock_stderr.seek(0)
return self.__mock_stderr.read()
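# Illustrative usage of CaptureStdout:
#   cap = CaptureStdout()
#   with cap:
#       print('hello')
#   assert 'hello' in cap.stdout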
@contextlib.contextmanager
def tempdir():
"""Returnsa context manager which creates and returns a temporary
directory, and then deletes it on exit.
"""
testdir = tempfile.mkdtemp()
prevdir = os.getcwd()
try:
os.chdir(testdir)
yield testdir
finally:
os.chdir(prevdir)
shutil.rmtree(testdir)
def testdir(contents=None, suffix=""):
"""Returnsa context manager which creates, changes to, and returns a
temporary directory, and then deletes it on exit.
"""
if contents is not None:
contents = [op.join(*c.split('/')) for c in contents]
class ctx(object):
def __init__(self, contents):
self.contents = contents
def __enter__(self):
self.testdir = tempfile.mkdtemp(suffix=suffix)
self.prevdir = os.getcwd()
os.chdir(self.testdir)
if self.contents is not None:
contents = [op.join(self.testdir, c) for c in self.contents]
for path in contents:
os.makedirs(op.dirname(path), exist_ok=True)
with open(path, 'wt') as f:
f.write('{}\n'.format(path))
return self.testdir
def __exit__(self, *a, **kwa):
os.chdir(self.prevdir)
shutil.rmtree(self.testdir)
return ctx(contents)
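# Illustrative usage of testdir (a sketch; the file names are made up): each
# entry in ``contents`` is created as a small text file inside the temporary
# directory, which becomes the current directory for the duration of the block.
#
#     with testdir(['data/a.txt', 'data/b.txt']) as td:
#         assert op.exists(op.join(td, 'data', 'a.txt'))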
def run_with_fsleyes(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` and run the given function. """
import fsleyes_widgets.utils.status as status
fsleyes.configLogging()
gc.collect()
idle.idleLoop.reset()
idle.idleLoop.allowErrors = True
propagateRaise = kwargs.pop('propagateRaise', True)
startingDelay = kwargs.pop('startingDelay', 500)
finishingDelay = kwargs.pop('finishingDelay', 250)
callAfterApp = kwargs.pop('callAfterApp', None)
class State(object):
pass
state = State()
state.result = None
state.raised = None
state.frame = None
state.app = None
state.dummy = None
state.panel = None
glver = os.environ.get('FSLEYES_TEST_GL', '2.1')
glver = [int(v) for v in glver.split('.')]
def init():
fsleyes.initialise()
props.initGUI()
colourmaps.init()
fslgl.bootstrap(glver)
wx.CallAfter(run)
def finish():
state.frame.Close(askUnsaved=False, askLayout=False)
state.dummy.Close()
waitUntilIdle()
realYield(100)
fslgl.shutdown()
state.app.ExitMainLoop()
def run():
overlayList = fsloverlay.OverlayList()
displayCtx = dc.DisplayContext(overlayList)
state.frame = fslframe.FSLeyesFrame(None,
overlayList,
displayCtx)
state.app.SetOverlayListAndDisplayContext(overlayList, displayCtx)
state.app.SetTopWindow(state.frame)
state.frame.Show()
while not state.frame.IsShownOnScreen():
realYield()
try:
if func is not None:
state.result = func(state.frame,
overlayList,
displayCtx,
*args,
**kwargs)
except Exception as e:
traceback.print_exc()
state.raised = e
finally:
wx.CallLater(finishingDelay, finish)
state.app = fslmain.FSLeyesApp()
state.dummy = wx.Frame(None)
state.panel = wx.Panel(state.dummy)
state.sizer = wx.BoxSizer(wx.HORIZONTAL)
state.sizer.Add(state.panel, flag=wx.EXPAND, proportion=1)
state.dummy.SetSizer(state.sizer)
if callAfterApp is not None:
callAfterApp()
state.dummy.SetSize((100, 100))
state.dummy.Layout()
state.dummy.Show()
if getattr(fslgl, '_glContext', None) is not None:
wx.CallLater(startingDelay, init)
else:
wx.CallLater(startingDelay,
fslgl.getGLContext,
ready=init,
raiseErrors=True)
with exitMainLoopOnError(state.app) as err:
state.app.MainLoop()
status.setTarget(None)
if status._clearThread is not None:
status._clearThread.die()
status._clearThread.clear(0.01)
status._clearThread.join()
status._clearThread = None
raised = state.raised
result = state.result
if err[0] is not None:
raise err[0]
time.sleep(1)
if raised and propagateRaise:
raise raised
state.app.Destroy()
state = None
return result
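# Illustrative usage of run_with_fsleyes (a sketch - the test body and file
# name are made up): the callable receives the FSLeyesFrame, the OverlayList,
# and the DisplayContext, and its return value is passed back to the caller.
#
#     def _check(frame, overlayList, displayCtx):
#         overlayList.append(fslimage.Image('testdata/3d.nii.gz'))
#         realYield()
#         return len(overlayList)
#
#     assert run_with_fsleyes(_check) == 1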
def run_render_test(
args,
outfile,
benchmark,
size=(640, 480),
scene='ortho',
threshold=50,
hook=None):
"""Runs fsleyes render with the given arguments, and compares the result
against the given benchmark.
"""
glver = os.environ.get('FSLEYES_TEST_GL', '2.1')
glver = [int(v) for v in glver.split('.')]
args = '-gl {} {}'.format(*glver) .split() + \
'-of {}' .format(outfile).split() + \
'-sz {} {}'.format(*size) .split() + \
'-s {}' .format(scene) .split() + \
list(args)
idle.idleLoop.reset()
idle.idleLoop.allowErrors = True
fslrender.main(args, hook)
# gaaargh, why is macos case insensitive??
if not op.exists(benchmark):
head, tail = op.split(benchmark)
benchmark = op.join(head, tail.lower())
testimg = mplimg.imread(outfile)
benchimg = mplimg.imread(benchmark)
result, diff = compare_images(testimg, benchimg, threshold)
assert result
def run_cli_tests(
prefix, tests, extras=None, scene='ortho', threshold=10, hook=None):
"""Calls run_render_test on every line in ``tests``. """
if extras is None:
extras = {}
glver = os.environ.get('FSLEYES_TEST_GL', '2.1')
glver = [int(v) for v in glver.split('.')]
if tuple(glver) < (2, 1):
exclude = ['tensor', ' sh', '_sh', 'spline', 'mip']
else:
exclude = []
tests = [t.strip() for t in tests.split('\n')]
tests = [t for t in tests if t != '' and t[0] != '#']
tests = [re.sub(r'\s+', ' ', t) for t in tests]
tests = [re.sub(r'#.*', '', t) for t in tests]
tests = [t.strip() for t in tests]
allpassed = True
datadir = op.join(op.dirname(__file__), 'testdata')
benchdir = op.join(op.dirname(__file__), 'testdata', 'cli_tests')
def fill_test(t):
templates = re.findall('{{(.*?)}}', t)
for temp in templates:
t = t.replace('{{' + temp + '}}', eval(temp, {}, extras))
return t
with tempdir() as td:
shutil.copytree(datadir, op.join(td, 'testdata'))
os.chdir('testdata')
for test in tests:
if any([exc in test for exc in exclude]):
print('CLI test skipped [{}] {}'.format(prefix, test))
continue
test = fill_test(test)
fname = test.replace(' ', '_').replace('/', '_')
fname = '{}_{}.png'.format(prefix, fname)
benchmark = op.join(benchdir, fname)
testfile = op.join(td, fname)
try:
run_render_test(list(test.split()), testfile, benchmark,
scene=scene, threshold=threshold, hook=hook)
print('CLI test passed [{}] {}'.format(prefix, test))
except Exception as e:
allpassed = False
print('CLI test failed [{}] {}: {}'.format(prefix, test, e))
traceback.print_exc()
if op.exists(testfile):
print('Copying {} to {}'.format(testfile, datadir))
shutil.copy(testfile, datadir)
assert allpassed
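# Illustrative usage of run_cli_tests (a sketch; the image names and options
# are made up): each non-empty, non-comment line in ``tests`` is one fsleyes
# render invocation, and {{...}} templates are evaluated against ``extras``.
#
#     tests = """
#     3d.nii.gz -cm hot
#     {{roi('3d.nii.gz', (0, 10, 0, 10, 0, 10))}} -ot mask
#     """
#     run_cli_tests('myprefix', tests, extras={'roi': roi})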
def run_with_viewpanel(func, vptype, *args, **kwargs):
def inner(frame, overlayList, displayCtx, *a, **kwa):
panel = frame.addViewPanel(vptype)
displayCtx = panel.displayCtx
try:
while not panel.IsShownOnScreen():
realYield()
result = func(panel, overlayList, displayCtx, *a, **kwa)
except Exception as e:
print(e)
traceback.print_exception(type(e), e, e.__traceback__)
raise
finally:
frame.removeViewPanel(panel)
return result
return run_with_fsleyes(inner, *args, **kwargs)
def run_with_orthopanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with an ``OrthoPanel`` and run the given
function.
"""
from fsleyes.views.orthopanel import OrthoPanel
return run_with_viewpanel(func, OrthoPanel, *args, **kwargs)
def run_with_lightboxpanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with a ``LightBoxPanel`` and run the given
function.
"""
from fsleyes.views.lightboxpanel import LightBoxPanel
return run_with_viewpanel(func, LightBoxPanel, *args, **kwargs)
def run_with_scene3dpanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with a ``Scene3DPanel`` and run the given
function.
"""
from fsleyes.views.scene3dpanel import Scene3DPanel
return run_with_viewpanel(func, Scene3DPanel, *args, **kwargs)
def run_with_timeseriespanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with a ``TimeSeriesPanel`` and run the given
function.
"""
from fsleyes.views.timeseriespanel import TimeSeriesPanel
return run_with_viewpanel(func, TimeSeriesPanel, *args, **kwargs)
def run_with_histogrampanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with a ``HistogramPanel`` and run the given
function.
"""
from fsleyes.views.histogrampanel import HistogramPanel
return run_with_viewpanel(func, HistogramPanel, *args, **kwargs)
def run_with_powerspectrumpanel(func, *args, **kwargs):
"""Create a ``FSLeyesFrame`` with a ``PowerSpectrumPanel`` and run the
given function.
"""
from fsleyes.views.powerspectrumpanel import PowerSpectrumPanel
return run_with_viewpanel(func, PowerSpectrumPanel, *args, **kwargs)
@contextlib.contextmanager
def MockFileDialog(dirdlg=False):
class MockDlg(object):
def __init__(self, *args, **kwargs):
pass
def ShowModal(self):
return MockDlg.ShowModal_retval
def GetPath(self):
return MockDlg.GetPath_retval
def GetPaths(self):
return MockDlg.GetPaths_retval
def Close(self):
pass
def Destroy(self):
pass
ShowModal_retval = wx.ID_OK
GetPath_retval = ''
GetPaths_retval = []
if dirdlg: patched = 'wx.DirDialog'
else: patched = 'wx.FileDialog'
with mock.patch(patched, MockDlg):
yield MockDlg
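# Illustrative usage of MockFileDialog (a sketch): pre-set the values that the
# mocked dialog should report before triggering the code under test.
#
#     with MockFileDialog() as dlg:
#         dlg.ShowModal_retval = wx.ID_OK
#         dlg.GetPath_retval = '/some/file.nii.gz'
#         # ... run the action that would normally open a file dialog ...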
# stype:
# 0 for single click
# 1 for double click
# 2 for separate mouse down/up events
def simclick(sim, target, btn=wx.MOUSE_BTN_LEFT, pos=None, stype=0):
GTK = any(['gtk' in p.lower() for p in wx.PlatformInfo])
class FakeEv(object):
def __init__(self, evo):
self.evo = evo
def GetEventObject(self):
return self.evo
parent = target.GetParent()
if GTK:
if type(target).__name__ == 'StaticTextTag' and \
type(parent).__name__ == 'TextTagPanel':
parent._TextTagPanel__onTagLeftDown(FakeEv(target))
realYield()
return
if type(target).__name__ == 'StaticText' and \
type(parent).__name__ == 'TogglePanel':
parent.Toggle(FakeEv(target))
realYield()
return
w, h = target.GetClientSize().Get()
x, y = target.GetScreenPosition()
if pos is None:
pos = [0.5, 0.5]
x += w * pos[0]
y += h * pos[1]
sim.MouseMove(round(x), round(y))
realYield()
if stype == 0: sim.MouseClick(btn)
elif stype == 1: sim.MouseDblClick(btn)
else:
sim.MouseDown(btn)
sim.MouseUp(btn)
realYield()
def simtext(sim, target, text, enter=True):
GTK = any(['gtk' in p.lower() for p in wx.PlatformInfo])
target.SetFocus()
parent = target.GetParent()
# The EVT_TEXT_ENTER event
# does not seem to occur
# under docker/GTK so we
# have to hack. EVT_TEXT
# does work though.
if GTK and type(parent).__name__ == 'FloatSpinCtrl':
if enter:
target.ChangeValue(text)
parent._FloatSpinCtrl__onText(None)
else:
target.SetValue(text)
elif GTK and type(parent).__name__ == 'AutoTextCtrl':
if enter:
target.ChangeValue(text)
parent._AutoTextCtrl__onEnter(None)
else:
target.SetValue(text)
else:
target.SetValue(text)
if enter:
sim.KeyDown(wx.WXK_RETURN)
realYield()
def fliporient(filename):
base = fslimage.removeExt(filename)
outfile = '{}_flipped'.format(base)
img = fslimage.Image(filename)
aff = img.voxToWorldMat
aff[0, 0] = -aff[0, 0]
aff[0, 3] = aff[0, 3] - (img.shape[0] - 1) * img.pixdim[0]
img.voxToWorldMat = aff
img[:] = img[::-1, ...]
img.save(outfile)
return outfile
def swapdim(filename, d0, d1, d2):
indices = { 'x' : 0, '-x' : 0,
'y' : 1, '-y' : 1,
'z' : 2, '-z' : 2}
base = fslimage.removeExt(filename)
outfile = '{}_swapdim_{}_{}_{}'.format(base, d0, d1, d2)
img = fslimage.Image(filename)
s0, s1, s2 = [-1 if d.startswith('-') else 1 for d in (d0, d1, d2)]
d0, d1, d2 = [indices[d] for d in (d0, d1, d2)]
if len(img.shape) == 3:
order = (d0, d1, d2)
else:
order = (d0, d1, d2) + tuple(range(3, len(img.shape)))
data = img.data
if s0 < 0: data = np.flip(data, d0)
if s1 < 0: data = np.flip(data, d1)
if s2 < 0: data = np.flip(data, d2)
data = data.transpose(order)
aff = np.zeros((4, 4))
aff[0, d0] = s0
aff[1, d1] = s1
aff[2, d2] = s2
aff[3, 3] = 1
aff = affine.concat(aff, img.voxToWorldMat)
img = fslimage.Image(data, xform=aff)
img.save(outfile)
return outfile
def roi(fname, roi):
base = fslimage.removeExt(op.basename(fname))
outfile = '{}_roi_{}_{}_{}_{}_{}_{}'.format(base, *roi)
img = fslimage.Image(fname)
xs, xe, ys, ye, zs, ze = roi
data = img[xs:xe, ys:ye, zs:ze, ...]
xform = img.voxToWorldMat
offset = [lo for lo in roi[::2]]
offset = affine.scaleOffsetXform([1, 1, 1], offset)
xform = affine.concat(xform, offset)
img = fslimage.Image(data, xform=xform, header=img.header)
img.save(outfile)
return outfile
def resampled(fname, fac):
base = fslimage.removeExt(op.basename(fname))
outfile = '{}_resampled_{}'.format(base, fac)
img = fslimage.Image(fname)
pix = np.array(img.pixdim[:3]) * fac
data, xform = resample.resampleToPixdims(img, pix)
fslimage.Image(data, header=img.header, xform=xform).save(outfile)
return outfile
def asrgb(infile):
basename = fslimage.removeExt(op.basename(infile))
outfile = '{}_asrgb.nii.gz'.format(basename)
img = fslimage.Image(infile)
data = img.data
shape = data.shape[:3]
rgbdtype = np.dtype([('R', 'uint8'), ('G', 'uint8'), ('B', 'uint8')])
newdata = np.zeros(shape, dtype=rgbdtype)
for c, ci in zip('RGB', range(3)):
cd = (0.5 * data[..., ci] + 0.5) * 255
newdata[c] = np.round(cd).astype(np.uint8)
fslimage.Image(newdata, xform=img.voxToWorldMat).save(outfile)
return outfile
def discretise(infile, stepsize, min=None, max=None):
basename = fslimage.removeExt(op.basename(infile))
img = fslimage.Image(infile)
data = img[:]
if min is None:
min = data.min()
if max is None:
max = data.max()
outfile = '{}_discretised_{}_{}_{}.nii.gz'.format(
basename, stepsize, min, max)
for i, li in enumerate(range(min, max, stepsize)):
data[(data >= li) & (data < (li + stepsize))] = i
img[:] = data
img.save(outfile)
return outfile
def translate(infile, x, y, z):
basename = fslimage.removeExt(op.basename(infile))
outfile = '{}_translated_{}_{}_{}.nii.gz'.format(basename, x, y, z)
img = fslimage.Image(infile)
xform = img.voxToWorldMat
shift = affine.scaleOffsetXform(1, (x, y, z))
xform = affine.concat(shift, xform)
img.voxToWorldMat = xform
img.save(outfile)
return outfile
def rotate(infile, rx, ry, rz):
basename = fslimage.removeExt(op.basename(infile))
outfile = '{}_rotated_{}_{}_{}.nii.gz'.format(basename, rx, ry, rz)
img = fslimage.Image(infile)
rx = rx * np.pi / 180
ry = ry * np.pi / 180
rz = rz * np.pi / 180
rot = affine.axisAnglesToRotMat(rx, ry, rz)
rot = affine.rotMatToAffine(rot)
img.voxToWorldMat = affine.concat(rot, img.voxToWorldMat)
img.save(outfile)
return outfile
def zero_centre(infile):
basename = fslimage.removeExt(op.basename(infile))
outfile = '{}_zero_centre.nii.gz'.format(basename)
img = fslimage.Image(infile)
data = img[:]
img[:] = data - data.mean()
img.save(outfile)
return outfile
def complex():
data = np.linspace(0, 1, 1000).reshape((10, 10, 10)) + \
1j * np.linspace(1, 0, 1000).reshape((10, 10, 10))
data = np.array(data, dtype=np.complex64)
img = fslimage.Image(data, xform=np.eye(4))
img.save('complex.nii.gz')
return 'complex.nii.gz'
def invert(infile):
if fslimage.looksLikeImage(infile):
basename = fslimage.removeExt(op.basename(infile))
img = fslimage.Image(infile)
data = img.data
dmin, dmax = data.min(), data.max()
data = dmin + (dmax - data)
outfile = '{}_inverted.nii.gz'.format(basename)
fslimage.Image(data, header=img.header).save(outfile)
# assume text file
else:
        basename, ext = op.splitext(infile)
data = | np.loadtxt(infile) | numpy.loadtxt |
import torch
import torch.nn as nn
import numpy as np
from lib.config import cfg
import lib.utils.kitti_utils as kitti_utils
import lib.utils.roipool3d.roipool3d_utils as roipool3d_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
class ProposalTargetLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_dict):
roi_boxes3d, gt_boxes3d = input_dict['roi_boxes3d'], input_dict['gt_boxes3d']
batch_rois, batch_gt_of_rois, batch_roi_iou = self.sample_rois_for_rcnn(roi_boxes3d, gt_boxes3d)
rpn_xyz, rpn_features = input_dict['rpn_xyz'], input_dict['rpn_features']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_dict['rpn_intensity'].unsqueeze(dim=2),
input_dict['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_dict['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_dict['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
# point cloud pooling
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
sampled_pts, sampled_features = pooled_features[:, :, :, 0:3], pooled_features[:, :, :, 3:]
# data augmentation
if cfg.AUG_DATA:
# data augmentation
sampled_pts, batch_rois, batch_gt_of_rois = \
self.data_augmentation(sampled_pts, batch_rois, batch_gt_of_rois)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_ry = batch_rois[:, :, 6] % (2 * np.pi)
roi_center = batch_rois[:, :, 0:3]
sampled_pts = sampled_pts - roi_center.unsqueeze(dim=2) # (B, M, 512, 3)
batch_gt_of_rois[:, :, 0:3] = batch_gt_of_rois[:, :, 0:3] - roi_center
batch_gt_of_rois[:, :, 6] = batch_gt_of_rois[:, :, 6] - roi_ry
for k in range(batch_size):
sampled_pts[k] = kitti_utils.rotate_pc_along_y_torch(sampled_pts[k], batch_rois[k, :, 6])
batch_gt_of_rois[k] = kitti_utils.rotate_pc_along_y_torch(batch_gt_of_rois[k].unsqueeze(dim=1),
roi_ry[k]).squeeze(dim=1)
# regression valid mask
valid_mask = (pooled_empty_flag == 0)
reg_valid_mask = ((batch_roi_iou > cfg.RCNN.REG_FG_THRESH) & valid_mask).long()
# classification label
batch_cls_label = (batch_roi_iou > cfg.RCNN.CLS_FG_THRESH).long()
invalid_mask = (batch_roi_iou > cfg.RCNN.CLS_BG_THRESH) & (batch_roi_iou < cfg.RCNN.CLS_FG_THRESH)
batch_cls_label[valid_mask == 0] = -1
batch_cls_label[invalid_mask > 0] = -1
output_dict = {'sampled_pts': sampled_pts.view(-1, cfg.RCNN.NUM_POINTS, 3),
'pts_feature': sampled_features.view(-1, cfg.RCNN.NUM_POINTS, sampled_features.shape[3]),
'cls_label': batch_cls_label.view(-1),
'reg_valid_mask': reg_valid_mask.view(-1),
'gt_of_rois': batch_gt_of_rois.view(-1, 7),
'gt_iou': batch_roi_iou.view(-1),
'roi_boxes3d': batch_rois.view(-1, 7)}
return output_dict
def sample_rois_for_rcnn(self, roi_boxes3d, gt_boxes3d):
"""
:param roi_boxes3d: (B, M, 7)
:param gt_boxes3d: (B, N, 8) [x, y, z, h, w, l, ry, cls]
:return
batch_rois: (B, N, 7)
batch_gt_of_rois: (B, N, 8)
batch_roi_iou: (B, N)
"""
batch_size = roi_boxes3d.size(0)
fg_rois_per_image = int(np.round(cfg.RCNN.FG_RATIO * cfg.RCNN.ROI_PER_IMAGE))
batch_rois = gt_boxes3d.new(batch_size, cfg.RCNN.ROI_PER_IMAGE, 7).zero_()
batch_gt_of_rois = gt_boxes3d.new(batch_size, cfg.RCNN.ROI_PER_IMAGE, 7).zero_()
batch_roi_iou = gt_boxes3d.new(batch_size, cfg.RCNN.ROI_PER_IMAGE).zero_()
for idx in range(batch_size):
cur_roi, cur_gt = roi_boxes3d[idx], gt_boxes3d[idx]
k = cur_gt.__len__() - 1
while cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
# include gt boxes in the candidate rois
iou3d = iou3d_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N)
max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
# sample fg, easy_bg, hard_bg
fg_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
fg_inds = torch.nonzero((max_overlaps >= fg_thresh)).view(-1)
# TODO: this will mix the fg and bg when CLS_BG_THRESH_LO < iou < CLS_BG_THRESH
# fg_inds = torch.cat((fg_inds, roi_assignment), dim=0) # consider the roi which has max_iou with gt as fg
easy_bg_inds = torch.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH_LO)).view(-1)
hard_bg_inds = torch.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH) &
(max_overlaps >= cfg.RCNN.CLS_BG_THRESH_LO)).view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy( | np.random.permutation(fg_num_rois) | numpy.random.permutation |
import pytest
import numpy as np
from PythonLinearNonlinearControl.models.two_wheeled import TwoWheeledModel
from PythonLinearNonlinearControl.configs.two_wheeled \
import TwoWheeledConfigModule
class TestTwoWheeledModel():
"""
"""
def test_step(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
curr_x = np.ones(config.STATE_SIZE)
curr_x[-1] = np.pi / 6.
u = np.ones((1, config.INPUT_SIZE))
next_x = two_wheeled_model.predict_traj(curr_x, u)
pos_x = np.cos(curr_x[-1]) * u[0, 0] * config.DT + curr_x[0]
pos_y = np.sin(curr_x[-1]) * u[0, 0] * config.DT + curr_x[1]
expected = np.array([[1., 1., np.pi / 6.],
[pos_x, pos_y, curr_x[-1] + u[0, 1] * config.DT]])
assert next_x == pytest.approx(expected)
def test_predict_traj(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
curr_x = np.ones(config.STATE_SIZE)
curr_x[-1] = np.pi / 6.
u = np.ones((1, config.INPUT_SIZE))
pred_xs = two_wheeled_model.predict_traj(curr_x, u)
u = np.tile(u, (1, 1, 1))
pred_xs_alltogether = two_wheeled_model.predict_traj(curr_x, u)[0]
assert pred_xs_alltogether == pytest.approx(pred_xs)
def test_gradient_state(self):
config = TwoWheeledConfigModule()
two_wheeled_model = TwoWheeledModel(config)
xs = | np.ones((1, config.STATE_SIZE)) | numpy.ones |
# Part of the psychopy.iohub library.
# Copyright (C) 2012-2016 iSolver Software Solutions
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import division
"""
ioHub Eye Tracker Online Sample Event Parser
WORK IN PROGRESS - VERY EXPERIMENTAL
Copyright (C) 2012-2014 iSolver Software Solutions
Distributed under the terms of the GNU General Public License
(GPL version 3 or any later version).
.. moduleauthor:: <NAME> <<EMAIL>>
.. fileauthor:: <NAME> <<EMAIL>>
NOTES:
* The parser is designed to work with monocular and binocular eye data,
but only binocular input samples have been tested so far.
* If binocular input samples are being used, they are converted to monocular
samples for parsing. If both left and right eye position data is available
for a sample, the positions are averaged together. If only one of the two eyes
has valid data, then that eye data is used for the sample. So the only case
where a sample will be tagged as missing data is when both eyes do not have
valid eye position / pupil size data.
POSITION_FILTER and VELOCITY_FILTER can be set to one of the following event
field filter types. Example values for any input arguments are given. The filter
is selected by giving the filter class name followed by a dictionary of values
to use for the filter. Valid filter options depend on the filter selected.
eventfilters.MovingWindowFilter
--------------------------------
This is a standard averaging filter. Any samples within the window buffer are
simply averaged together to give the filtered value for a given sample.
Parameters:
* length: The size of the moving window in samples. Minimum of 2 required.
* knot_pos: The index within the moving window that should be used to extract
a sample from and apply the current window filtered value.
Example:
POSITION_FILTER = eventfilters.MovingWindowFilter, {length: 3, knot_pos:'center'}
Applies the MovingWindowFilter to x and y gaze data fields of eye samples. The
window size is three, and each sample position is filtered using data from the
previous and next samples as well as itself.
eventfilters.PassThroughFilter
---------------------------------
A NULL filter. In other words, the filter does not do any filtering.
Parameters: None
Example:
VELOCITY_FILTER = eventfilters.PassThroughFilter, {}
Velocity data is calculated from (filtered) sample positions, but is not
filtered itself.
eventfilters.MedianFilter
-----------------------------
MedianFilter applies the median value of the filter window to the knot_pos
window sample.
Parameters:
* length: The size of the moving window in samples. Minimum of 3 is
required and the length must be odd.
* knot_pos: The index within the moving window that should be used to extract
a sample from and apply the current window filtered value.
Example:
POSITION_FILTER = eventfilters.MedianFilter, {length: 3, knot_pos: 0}
Sample position fields are filtered by the median value of three samples, those
being the current sample and the two following samples (so the current sample is
at index 0).
eventfilters.WeightedAverageFilter
-----------------------------------
WeightedAverageFilter is similar to the standard MovingWindowFilter field filter,
however each element in the window is assigned a weighting factor that is used
during averaging.
Parameters:
* weights: A list of weights to be applied to the window values. The window
length is == len(weights). The weight values are all normalized to sum to 1
before use in the filter. For example, a weight list of (25,50,25) will be
converted to (0.25,0.50,0.25) for use in the filter, with window value index
  i being multiplied by weight list index i.
* knot_pos: The index within the moving window that should be used to extract
a sample from and apply the current window filtered value.
Example:
VELOCITY_FILTER = eventfilters.WeightedAverageFilter, {weights: (25,50,25), knot_pos: 1}
A weighted average window filter will be applied to x and y velocity fields.
The length of the window is 3 samples, and the filtered sample index retrieved
is 1, the same as using 'center' in this case. The filtered sample index will
count toward 1/2 the weighted average, with the previous and next samples
contributing 1/4 of the weighted average each.
eventfilters.StampFilter
--------------------------
A variant of the filter proposed by Dr. <NAME> (1993 ???). A window of
length 3 is used, with the knot_pos centered, or at index 1. If the current
3 values in the window list are monotonic, then the sample is not filtered.
If the values are non-monotonic, then v[1] = (v[0]+v[2])/2.0
Parameters:
* levels: The number of iterations (recursive) that should be applied to the
windowed data. Minimum value is 1. The number of levels equals
  the number of samples by which the filtered sample will be delayed
  compared to the unfiltered sample time.
Example:
POSITION_FILTER = eventfilters.StampFilter, {levels: 1}
Data is filtered once, similar to what a 'normal' filter level would be in the
eyelink<tm> system. Level = 2 would be similar to the 'extra' filter level
setting of eyelink<tm>.
"""
import numpy as np
from ....constants import EventConstants
from ....errors import print2err
from ... import DeviceEvent, eventfilters
from collections import OrderedDict
from ....util.visualangle import VisualAngleCalc
MONOCULAR_EYE_SAMPLE = EventConstants.MONOCULAR_EYE_SAMPLE
BINOCULAR_EYE_SAMPLE = EventConstants.BINOCULAR_EYE_SAMPLE
FIXATION_START = EventConstants.FIXATION_START
FIXATION_END = EventConstants.FIXATION_END
SACCADE_START = EventConstants.SACCADE_START
SACCADE_END = EventConstants.SACCADE_END
BLINK_START = EventConstants.BLINK_START
BLINK_END = EventConstants.BLINK_END
NO_EYE = 0
LEFT_EYE = 1
RIGHT_EYE = 2
BOTH_EYE = 3
class EyeTrackerEventParser(eventfilters.DeviceEventFilter):
def __init__(self, **kwargs):
eventfilters.DeviceEventFilter.__init__(self, **kwargs)
self.sample_type = None
self.io_sample_class = None
self.io_event_ix = None
self.last_valid_sample = None
self.last_sample = None
self.invalid_samples_run = []
self._last_parser_sample = None
self.open_parser_events = OrderedDict()
self.convertEvent = None
self.isValidSample = None
self.vel_thresh_history_dur = kwargs.get(
'adaptive_vel_thresh_history', 3.0)
position_filter = kwargs.get('position_filter')
velocity_filter = kwargs.get('velocity_filter')
display_device = kwargs.get('display_device')
sampling_rate = kwargs.get('sampling_rate')
if position_filter:
pos_filter_class_name = position_filter.get(
'name', 'PassThroughFilter')
pos_filter_class = getattr(eventfilters, pos_filter_class_name)
del position_filter['name']
pos_filter_kwargs = position_filter
else:
pos_filter_class, pos_filter_kwargs = eventfilters.PassThroughFilter, {}
if velocity_filter:
            vel_filter_class_name = velocity_filter.get(
                'name', 'PassThroughFilter')
vel_filter_class = getattr(eventfilters, vel_filter_class_name)
del velocity_filter['name']
vel_filter_kwargs = velocity_filter
else:
vel_filter_class, vel_filter_kwargs = eventfilters.PassThroughFilter, {}
        self.adaptive_x_vthresh_buffer = np.zeros(
            int(self.vel_thresh_history_dur * sampling_rate))
        self.x_vthresh_buffer_index = 0
        self.adaptive_y_vthresh_buffer = np.zeros(
            int(self.vel_thresh_history_dur * sampling_rate))
self.y_vthresh_buffer_index = 0
pos_filter_kwargs['event_type'] = MONOCULAR_EYE_SAMPLE
pos_filter_kwargs['inplace'] = True
pos_filter_kwargs['event_field_name'] = 'angle_x'
self.x_position_filter = pos_filter_class(**pos_filter_kwargs)
pos_filter_kwargs['event_field_name'] = 'angle_y'
self.y_position_filter = pos_filter_class(**pos_filter_kwargs)
vel_filter_kwargs['event_type'] = MONOCULAR_EYE_SAMPLE
vel_filter_kwargs['inplace'] = True
vel_filter_kwargs['event_field_name'] = 'velocity_x'
self.x_velocity_filter = vel_filter_class(**vel_filter_kwargs)
vel_filter_kwargs['event_field_name'] = 'velocity_y'
self.y_velocity_filter = vel_filter_class(**vel_filter_kwargs)
vel_filter_kwargs['event_field_name'] = 'velocity_xy'
self.xy_velocity_filter = vel_filter_class(**vel_filter_kwargs)
###
mm_size = display_device.get('mm_size')
if mm_size:
mm_size = mm_size['width'], mm_size['height'],
pixel_res = display_device.get('pixel_res')
eye_distance = display_device.get('eye_distance')
self.visual_angle_calc = VisualAngleCalc(mm_size,
pixel_res,
eye_distance)
self.pix2deg = self.visual_angle_calc.pix2deg
@property
def filter_id(self):
return 23
@property
def input_event_types(self):
event_type_and_filter_ids = dict()
event_type_and_filter_ids[BINOCULAR_EYE_SAMPLE] = [0, ]
event_type_and_filter_ids[MONOCULAR_EYE_SAMPLE] = [0, ]
return event_type_and_filter_ids
def process(self):
""""""
samples_for_processing = []
for in_evt in self.getInputEvents():
if self.sample_type is None:
self.initializeForSampleType(in_evt)
# If event is binocular, convert to monocular.
# Regardless of type, convert pix to angle positions and calculate
# unfiltered velocity data.
current_mono_evt = self.convertEvent(self.last_sample, in_evt)
is_valid = self.isValidSample(current_mono_evt)
if is_valid:
# If sample is valid (no missing pos data), first
# check for a previous missing data run and handle.
if self.invalid_samples_run:
if self.last_valid_sample:
samples_for_processing.extend(
self.interpolateMissingData(current_mono_evt))
self._addVelocity(
samples_for_processing[-1], current_mono_evt)
# Discard all invalid samples that occurred prior
# to the first valid sample.
del self.invalid_samples_run[:]
# Then add current event to field filters. If a filtered event
# is returned, add it to the to be processed sample list.
filtered_event = self.addToFieldFilters(current_mono_evt)
if filtered_event:
filtered_event, _junk = filtered_event
x_vel_thresh, y_vel_thresh = self.addVelocityToAdaptiveThreshold(
filtered_event)
filtered_event[self.io_event_ix('raw_x')] = x_vel_thresh
filtered_event[self.io_event_ix('raw_y')] = y_vel_thresh
samples_for_processing.append(filtered_event)
self.last_valid_sample = current_mono_evt
else:
self.invalid_samples_run.append(current_mono_evt)
self.addOutputEvent(current_mono_evt)
self.last_sample = current_mono_evt
# Add any new filtered samples to be output.
# Also create parsed events with no heuristics being used
# at this point.
for s in samples_for_processing:
self.parseEvent(s)
if self.isValidSample(s):
self.addOutputEvent(s)
self.clearInputEvents()
def parseEvent(self, sample):
if self._last_parser_sample:
last_sec = self.getSampleEventCategory(self._last_parser_sample)
current_sec = self.getSampleEventCategory(sample)
if last_sec and last_sec != current_sec:
start_event, end_event = self.createEyeEvents(
last_sec, current_sec, self._last_parser_sample, sample)
if start_event:
self.addOutputEvent(start_event)
if end_event:
self.addOutputEvent(end_event)
else:
self.open_parser_events.setdefault(
current_sec + '_SAMPLES', []).append(sample)
self._last_parser_sample = sample
def getSampleEventCategory(self, sample):
if self.isValidSample(sample):
x_velocity_threshold = sample[self.io_event_ix('raw_x')]
y_velocity_threshold = sample[self.io_event_ix('raw_y')]
            if np.isnan(x_velocity_threshold):
                return None
sample_vx = sample[self.io_event_ix('velocity_x')]
sample_vy = sample[self.io_event_ix('velocity_y')]
if sample_vx >= x_velocity_threshold or sample_vy >= y_velocity_threshold:
return 'SAC'
return 'FIX'
return 'MIS'
def createEyeEvents(
self,
last_sample_category,
current_sample_category,
last_sample,
current_sample):
start_event = None
end_event = None
if last_sample_category == 'MIS':
# Create end blink event
existing_start_event = self.open_parser_events.get('MIS')
evt_samples = self.open_parser_events.get('MIS_SAMPLES')
if evt_samples:
del self.open_parser_events['MIS_SAMPLES']
if existing_start_event:
end_event = self.createBlinkEndEventArray(
last_sample, existing_start_event, evt_samples)
del self.open_parser_events['MIS']
else:
# print2err("PARSER Warning: Blink Start Event not found; Blink End event being dropped: ", end_event)
pass
elif last_sample_category == 'FIX':
# Create end fix event
existing_start_event = self.open_parser_events.get('FIX')
evt_samples = self.open_parser_events.get('FIX_SAMPLES')
if evt_samples:
del self.open_parser_events['FIX_SAMPLES']
if existing_start_event:
end_event = self.createFixationEndEventArray(
last_sample, existing_start_event, evt_samples)
del self.open_parser_events['FIX']
else:
# print2err("PARSER Warning: Fixation Start Event not found; Fixation End event being dropped: ", end_event)
pass
elif last_sample_category == 'SAC':
# Create end sac event
existing_start_event = self.open_parser_events.get('SAC')
evt_samples = self.open_parser_events.get('SAC_SAMPLES')
if evt_samples:
del self.open_parser_events['SAC_SAMPLES']
if existing_start_event:
end_event = self.createSaccadeEndEventArray(
last_sample, existing_start_event, evt_samples)
del self.open_parser_events['SAC']
else:
# print2err("PARSER Warning: Saccade Start Event not found; Saccade End event being dropped: ", end_event)
pass
if current_sample_category == 'MIS':
# Create start blink event
start_event = self.createBlinkStartEventArray(current_sample)
self.open_parser_events['MIS_SAMPLES'] = [current_sample, ]
existing_start_event = self.open_parser_events.get('MIS')
if existing_start_event:
print2err(
'PARSER ERROR: Blink Start Event already Open and is being dropped: ',
existing_start_event)
self.open_parser_events['MIS'] = current_sample
elif current_sample_category == 'FIX':
# Create start fix event
start_event = self.createFixationStartEventArray(current_sample)
self.open_parser_events['FIX_SAMPLES'] = [current_sample, ]
existing_start_event = self.open_parser_events.get('FIX')
if existing_start_event:
print2err(
'PARSER ERROR: Fixation Start Event already Open and is being dropped: ',
existing_start_event)
self.open_parser_events['FIX'] = current_sample
elif current_sample_category == 'SAC':
# Create start sac event
start_event = self.createSaccadeStartEventArray(current_sample)
self.open_parser_events['SAC_SAMPLES'] = [current_sample, ]
existing_start_event = self.open_parser_events.get('SAC')
if existing_start_event:
print2err(
'PARSER ERROR: Saccade Start Event already Open and is being dropped: ',
existing_start_event)
self.open_parser_events['SAC'] = current_sample
return end_event, start_event
def addVelocityToAdaptiveThreshold(self, sample):
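        """Feed this sample's x/y angular velocities into the per-axis ring
        buffers and return the current adaptive velocity thresholds.
        Once a buffer is full, the threshold PT starts at min + 3 * std of the
        buffered velocities and is then iteratively re-estimated as
        mean + 3 * std of the sub-threshold velocities until successive
        estimates differ by less than 1.0 deg/sec. An axis whose buffer is not
        yet full (or whose velocity was not positive) gets np.NaN instead.
        """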
velocity_x = sample[self.io_event_ix('velocity_x')]
velocity_y = sample[self.io_event_ix('velocity_y')]
velocity_buffers = [
self.adaptive_x_vthresh_buffer,
self.adaptive_y_vthresh_buffer]
velocity_buffer_indexs = [
self.x_vthresh_buffer_index,
self.y_vthresh_buffer_index]
vthresh_values = []
for v, velocity in enumerate([velocity_x, velocity_y]):
current_velocity_buffer = velocity_buffers[v]
current_vbuffer_index = velocity_buffer_indexs[v]
blen = len(current_velocity_buffer)
if velocity > 0.0:
i = current_vbuffer_index % blen
current_velocity_buffer[i] = velocity
full = current_vbuffer_index >= blen
if v == 0:
self.x_vthresh_buffer_index += 1
else:
self.y_vthresh_buffer_index += 1
if full:
PT = current_velocity_buffer.min() + current_velocity_buffer.std() * 3.0
velocity_below_thresh = current_velocity_buffer[
current_velocity_buffer < PT]
PTd = 2.0
pt_list = [PT, ]
while PTd >= 1.0:
if len(pt_list) > 0:
PT = velocity_below_thresh.mean() + 3.0 * velocity_below_thresh.std()
velocity_below_thresh = current_velocity_buffer[
current_velocity_buffer < PT]
PTd = np.abs(PT - pt_list[-1])
pt_list.append(PT)
vthresh_values.append(PT)
if len(vthresh_values) != v + 1:
vthresh_values.append(np.NaN)
return vthresh_values
def reset(self):
eventfilters.DeviceEventFilter.reset(self)
self._last_parser_sample = None
self.last_valid_sample = None
self.last_sample = None
self.invalid_samples_run = []
self.open_parser_events.clear()
self.x_position_filter.clear()
self.y_position_filter.clear()
self.x_velocity_filter.clear()
self.y_velocity_filter.clear()
self.xy_velocity_filter.clear()
self.x_vthresh_buffer_index = 0
self.y_vthresh_buffer_index = 0
def initializeForSampleType(self, in_evt):
# in_evt[DeviceEvent.EVENT_TYPE_ID_INDEX]
self.sample_type = MONOCULAR_EYE_SAMPLE
#print2err("self.sample_type: ",self.sample_type,", ",EventConstants.getName(self.sample_type))
self.io_sample_class = EventConstants.getClass(self.sample_type)
self.io_event_fields = self.io_sample_class.CLASS_ATTRIBUTE_NAMES
#print2err("self.io_sample_class: ",self.io_sample_class,", ",len(self.io_event_fields),"\n>>",self.io_event_fields)
self.io_event_ix = self.io_sample_class.CLASS_ATTRIBUTE_NAMES.index
if in_evt[DeviceEvent.EVENT_TYPE_ID_INDEX] == BINOCULAR_EYE_SAMPLE:
self.convertEvent = self._convertToMonoAveraged
self.isValidSample = lambda x: x[self.io_event_ix('status')] != 22
else:
self.convertEvent = self._convertMonoFields
self.isValidSample = lambda x: x[self.io_event_ix('status')] == 0
def interpolateMissingData(self, current_sample):
samples_for_processing = []
invalid_sample_count = len(self.invalid_samples_run)
gx_ix = self.io_event_ix('angle_x')
gy_ix = self.io_event_ix('angle_y')
ps_ix = self.io_event_ix('pupil_measure1')
starting_gx = self.last_valid_sample[gx_ix]
starting_gy = self.last_valid_sample[gy_ix]
starting_ps = self.last_valid_sample[ps_ix]
ending_gx = current_sample[gx_ix]
ending_gy = current_sample[gy_ix]
ending_ps = current_sample[ps_ix]
x_interp = np.linspace(starting_gx, ending_gx,
num=invalid_sample_count + 2)[1:-1]
y_interp = np.linspace(starting_gy, ending_gy,
num=invalid_sample_count + 2)[1:-1]
p_interp = np.linspace(starting_ps, ending_ps,
num=invalid_sample_count + 2)[1:-1]
# print2err('>>>>')
# print2err('invalid_sample_count: ', invalid_sample_count)
# print2err('starting_gx, ending_gx: ', starting_gx,', ',ending_gx)
# print2err('x_interp: ', x_interp)
# print2err('starting_gy, ending_gy: ', starting_gx,', ',ending_gx)
# print2err('y_interp: ', y_interp)
# print2err('<<<<')
prev_samp = self.last_valid_sample
# interpolate missing sample values, adding to pos and vel filters
for ix, curr_samp in enumerate(self.invalid_samples_run):
curr_samp[gx_ix] = x_interp[ix]
curr_samp[gy_ix] = y_interp[ix]
curr_samp[ps_ix] = p_interp[ix]
self._addVelocity(prev_samp, curr_samp)
filtered_event = self.addToFieldFilters(curr_samp)
if filtered_event:
filtered_event, _junk = filtered_event
samples_for_processing.append(filtered_event)
prev_samp = curr_samp
return samples_for_processing
def addToFieldFilters(self, sample):
self.x_position_filter.add(sample)
self.y_position_filter.add(sample)
self.x_velocity_filter.add(sample)
self.y_velocity_filter.add(sample)
return self.xy_velocity_filter.add(sample)
def _convertPosToAngles(self, mono_event):
gx_ix = self.io_event_ix('gaze_x')
gx_iy = self.io_event_ix('gaze_y')
mono_event[
self.io_event_ix('angle_x')], mono_event[
self.io_event_ix('angle_y')] = self.pix2deg(
mono_event[gx_ix], mono_event[gx_iy])
def _addVelocity(self, prev_event, current_event):
io_ix = self.io_event_ix
dx = np.abs(
current_event[
io_ix('angle_x')] -
prev_event[
io_ix('angle_x')])
dy = np.abs(
current_event[
io_ix('angle_y')] -
prev_event[
io_ix('angle_y')])
dt = current_event[io_ix('time')] - prev_event[io_ix('time')]
current_event[io_ix('velocity_x')] = dx / dt
current_event[io_ix('velocity_y')] = dy / dt
current_event[io_ix('velocity_xy')] = np.hypot(dx / dt, dy / dt)
def _convertMonoFields(self, prev_event, current_event):
if self.isValidSample(current_event):
            self._convertPosToAngles(current_event)
if prev_event:
self._addVelocity(prev_event, current_event)
def _convertToMonoAveraged(self, prev_event, current_event):
mono_evt = []
binoc_field_names = EventConstants.getClass(
EventConstants.BINOCULAR_EYE_SAMPLE).CLASS_ATTRIBUTE_NAMES
#print2err("binoc_field_names: ",len(binoc_field_names),"\n",binoc_field_names)
status = current_event[binoc_field_names.index('status')]
for field in self.io_event_fields:
if field in binoc_field_names:
mono_evt.append(current_event[binoc_field_names.index(field)])
elif field == 'eye':
mono_evt.append(LEFT_EYE)
elif field.endswith('_type'):
mono_evt.append(
int(current_event[binoc_field_names.index('left_%s' % (field))]))
else:
#print2err("binoc status: ",status)
if status == 0:
lfv = float(
current_event[
binoc_field_names.index(
'left_%s' %
(field))])
rfv = float(
current_event[
binoc_field_names.index(
'right_%s' %
(field))])
mono_evt.append((lfv + rfv) / 2.0)
elif status == 2:
mono_evt.append(
float(
current_event[
binoc_field_names.index(
'left_%s' %
(field))]))
elif status == 20:
mono_evt.append(
float(
current_event[
binoc_field_names.index(
'right_%s' %
(field))]))
elif status == 22:
# both eyes have missing data, so use data from left eye
# (does not really matter)
mono_evt.append(
float(
current_event[
binoc_field_names.index(
'left_%s' %
(field))]))
else:
                    raise ValueError('Unknown Sample Status: %d' % (status))
mono_evt[self.io_event_fields.index(
'type')] = EventConstants.MONOCULAR_EYE_SAMPLE
if self.isValidSample(mono_evt):
self._convertPosToAngles(mono_evt)
if prev_event:
self._addVelocity(prev_event, mono_evt)
return mono_evt
def _binocSampleValidEyeData(self, sample):
evt_status = sample[self.io_event_ix('status')]
if evt_status == 0:
# both eyes are valid
return BOTH_EYE
elif evt_status == 20: # right eye data only
return RIGHT_EYE
elif evt_status == 2: # left eye data only
return LEFT_EYE
elif evt_status == 22: # both eye data missing
return NO_EYE
def createFixationStartEventArray(self, sample):
return [sample[self.io_event_ix('experiment_id')],
sample[self.io_event_ix('session_id')],
sample[self.io_event_ix('device_id')],
sample[self.io_event_ix('event_id')],
EventConstants.FIXATION_START,
sample[self.io_event_ix('device_time')],
sample[self.io_event_ix('logged_time')],
sample[self.io_event_ix('time')],
0.0,
0.0,
0,
sample[self.io_event_ix('eye')],
sample[self.io_event_ix('gaze_x')],
sample[self.io_event_ix('gaze_y')],
0.0,
sample[self.io_event_ix('angle_x')],
sample[self.io_event_ix('angle_y')],
# used to hold online x velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_x')],
# used to hold online y velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_y')],
sample[self.io_event_ix('pupil_measure1')],
sample[self.io_event_ix('pupil_measure1_type')],
0.0,
0,
0.0,
0.0,
sample[self.io_event_ix('velocity_x')],
sample[self.io_event_ix('velocity_y')],
sample[self.io_event_ix('velocity_xy')],
sample[self.io_event_ix('status')]
]
def createFixationEndEventArray(
self,
sample,
existing_start_event,
event_samples):
evt_sample_array = np.asarray(event_samples)
vx = self.io_event_ix('velocity_x')
vy = self.io_event_ix('velocity_y')
vxy = self.io_event_ix('velocity_xy')
gx = self.io_event_ix('gaze_x')
gy = self.io_event_ix('gaze_y')
return [sample[self.io_event_ix('experiment_id')],
sample[self.io_event_ix('session_id')],
sample[self.io_event_ix('device_id')],
sample[self.io_event_ix('event_id')],
EventConstants.FIXATION_END,
sample[self.io_event_ix('device_time')],
sample[self.io_event_ix('logged_time')],
sample[self.io_event_ix('time')],
0.0,
0.0,
0,
sample[self.io_event_ix('eye')],
sample[self.io_event_ix(
'time')] - existing_start_event[self.io_event_ix('time')],
existing_start_event[gx],
existing_start_event[gy],
0.0,
existing_start_event[self.io_event_ix('angle_x')],
existing_start_event[self.io_event_ix('angle_y')],
# used to hold online x velocity threshold calculated for
# sample
existing_start_event[self.io_event_ix('raw_x')],
# used to hold online y velocity threshold calculated for
# sample
existing_start_event[self.io_event_ix('raw_y')],
existing_start_event[self.io_event_ix('pupil_measure1')],
existing_start_event[self.io_event_ix('pupil_measure1_type')],
0.0,
0,
0.0,
0.0,
existing_start_event[vx],
existing_start_event[vy],
existing_start_event[vxy],
sample[gx],
sample[gy],
0.0,
sample[self.io_event_ix('angle_x')],
sample[self.io_event_ix('angle_y')],
# used to hold online x velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_x')],
# used to hold online y velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_y')],
sample[self.io_event_ix('pupil_measure1')],
sample[self.io_event_ix('pupil_measure1_type')],
0.0,
0,
0.0,
0.0,
sample[vx],
sample[vy],
sample[vxy],
evt_sample_array[:, gx].mean(), # average_gaze_x,
evt_sample_array[:, gy].mean(), # average_gaze_y,
0.0,
0.0,
0.0,
0.0,
0.0,
evt_sample_array[
:,
self.io_event_ix('pupil_measure1')].mean(),
# average_pupil_measure1,
# average_pupil_measure1_type,
sample[self.io_event_ix('pupil_measure1_type')],
0.0,
0.0,
0.0,
0.0,
evt_sample_array[:, vx].mean(), # average_velocity_x,
evt_sample_array[:, vy].mean(), # average_velocity_y,
evt_sample_array[:, vxy].mean(), # average_velocity_xy,
evt_sample_array[:, vx].max(), # peak_velocity_x,
evt_sample_array[:, vy].max(), # peak_velocity_y,
evt_sample_array[:, vxy].max(), # peak_velocity_xy,
sample[self.io_event_ix('status')]
]
################### Saccade Event Types ##########################
def createSaccadeStartEventArray(self, sample):
return [sample[self.io_event_ix('experiment_id')],
sample[self.io_event_ix('session_id')],
sample[self.io_event_ix('device_id')],
sample[self.io_event_ix('event_id')],
EventConstants.SACCADE_START,
sample[self.io_event_ix('device_time')],
sample[self.io_event_ix('logged_time')],
sample[self.io_event_ix('time')],
0.0,
0.0,
0,
sample[self.io_event_ix('eye')],
sample[self.io_event_ix('gaze_x')],
sample[self.io_event_ix('gaze_y')],
0.0,
sample[self.io_event_ix('angle_x')],
sample[self.io_event_ix('angle_y')],
# used to hold online x velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_x')],
# used to hold online y velocity threshold calculated for
# sample
sample[self.io_event_ix('raw_y')],
sample[self.io_event_ix('pupil_measure1')],
sample[self.io_event_ix('pupil_measure1_type')],
0.0,
0,
0.0,
0.0,
sample[self.io_event_ix('velocity_x')],
sample[self.io_event_ix('velocity_y')],
sample[self.io_event_ix('velocity_xy')],
sample[self.io_event_ix('status')]
]
def createSaccadeEndEventArray(
self,
sample,
existing_start_event,
event_samples):
evt_sample_array = np.asarray(event_samples)
gx = self.io_event_ix('gaze_x')
gy = self.io_event_ix('gaze_y')
x1 = existing_start_event[gx]
y1 = existing_start_event[gy]
x2 = sample[gx]
y2 = sample[gy]
xDiff = x2 - x1
yDiff = y2 - y1
vx = self.io_event_ix('velocity_x')
vy = self.io_event_ix('velocity_y')
vxy = self.io_event_ix('velocity_xy')
return [sample[self.io_event_ix('experiment_id')],
sample[self.io_event_ix('session_id')],
sample[self.io_event_ix('device_id')],
sample[self.io_event_ix('event_id')],
EventConstants.SACCADE_END,
sample[self.io_event_ix('device_time')],
sample[self.io_event_ix('logged_time')],
sample[self.io_event_ix('time')],
0.0,
0.0,
0,
sample[self.io_event_ix('eye')],
sample[self.io_event_ix(
'time')] - existing_start_event[self.io_event_ix('time')],
xDiff,
yDiff,
np.rad2deg( | np.arctan(yDiff, xDiff) | numpy.arctan |
import numpy as np
import joblib
from .rbm import RBM
from .utils import sigmoid
# TODO(anna): add sparsity constraint
# TODO(anna): add entropy loss term
# TODO(anna): add monitoring of KL divergence (and reverse KL divergence)
# TODO(anna): run on the paper examples again
# TODO(anna): try unit test case? say in a 3x3 patch, only 1 pixel is on
class GaussianBernoulliRBM(RBM):
additional_losses = [
'sparsity',
'h_given_v_entropy',
]
def __init__(self,
nv, nh,
sigma,
sparsity_coef=0.,
h_given_v_entropy_coef=0.,
random_state=None):
super(GaussianBernoulliRBM, self).__init__(
nv, nh, random_state=random_state)
self.sparsity_coef = sparsity_coef
self.h_given_v_entropy_coef = h_given_v_entropy_coef
self.sigma = sigma
def p_h_given_v(self, v):
# v: (batch_size, nv)
# output: (batch_size, nh)
return sigmoid(self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma)
def p_h_given_v_logits(self, v):
return self.hb[np.newaxis] + np.matmul(v, self.W) / self.sigma
def mean_p_v_given_h(self, h):
# h: (batch_size, nh)
# output: (batch_size, nv)
return self.vb[np.newaxis] + np.matmul(h, self.W.T)
def sample_p_v_given_h(self, h):
# h: (batch_size, nh)
# output: (batch_size, nv)
center = self.vb[np.newaxis] + np.matmul(h, self.W.T)
return self.random_state.normal(loc=center, scale=self.sigma)
def par_nll_par_W(self, v, h):
batch_size = len(v)
return np.matmul(v.T, h) / batch_size / self.sigma
def par_nll_par_hb(self, h):
return np.mean(h, axis=0)
def par_nll_par_vb(self, v):
return np.mean(v - self.vb, axis=0) / (self.sigma ** 2)
def par_l1_par_W(self):
return | np.sign(self.W) | numpy.sign |
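# A minimal usage sketch for GaussianBernoulliRBM (illustrative only: the
# constructor signature is taken from __init__ above, and the shapes assume
# the RBM base class initialises self.W as (nv, nh) plus the bias vectors
# self.vb and self.hb, which is not shown in this excerpt):
#
#     rbm = GaussianBernoulliRBM(nv=784, nh=64, sigma=1.0)
#     v = np.random.randn(32, 784)         # batch of visible vectors
#     p_h = rbm.p_h_given_v(v)             # (32, 64) Bernoulli means
#     v_mean = rbm.mean_p_v_given_h(p_h)   # (32, 784) Gaussian means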
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE
"""
import os
import sys
import unittest
import numpy as np
from contextlib import contextmanager
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from rmsd import (get_coordinates_pdb, get_coordinates_xyz, get_coordinates,
rmsd, centroid, kabsch_rmsd, kabsch_rotate, kabsch,
quaternion_rmsd, quaternion_rotate, quaternion_transform,
makeQ, makeW, write_coordinates)
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestRMSD(unittest.TestCase):
"""Test the DSSP parser methods."""
def setUp(self):
"""Initialize the framework for testing."""
self.centroid = centroid
self.rmsd = rmsd
abs_path = os.path.abspath(os.path.dirname(__file__))
self.examples = abs_path + "/examples/"
self.get_coordinates = get_coordinates
self.get_coordinates_pdb = get_coordinates_pdb
self.get_coordinates_xyz = get_coordinates_xyz
self.kabsch_rmsd = kabsch_rmsd
self.kabsch_rotate = kabsch_rotate
self.kabsch_algo = kabsch
self.quaternion_rmsd = quaternion_rmsd
self.quaternion_rotate = quaternion_rotate
self.quaternion_transform = quaternion_transform
self.makeQ = makeQ
self.makeW = makeW
self.write_coordinates = write_coordinates
def tearDown(self):
"""Clear the testing framework."""
self.centroid = None
self.rmsd = None
self.examples = None
self.get_coordinates = None
self.get_coordinates_pdb = None
self.get_coordinates_xyz = None
self.kabsch_rmsd = None
self.kabsch_rotate = None
self.kabsch_algo = None
self.quaternion_rmsd = None
self.quaternion_rotate = None
self.quaternion_transform = None
self.makeQ = None
self.makeW = None
self.write_coordinates = None
def assertListAlmostEqual(self, list1, list2, places):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, places=places)
def test_get_coordinates_pdb(self):
infile = self.examples + 'ci2_1.pdb'
coords = self.get_coordinates_pdb(infile)
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
def test_get_coordinates_xyz(self):
infile = self.examples + 'ethane.xyz'
coords = self.get_coordinates_xyz(infile)
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_get_coordinates(self):
infile = self.examples + 'ci2_1.pdb'
coords = self.get_coordinates(infile, 'pdb')
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
infile = self.examples + 'ethane.xyz'
coords = self.get_coordinates(infile, 'xyz')
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_centroid(self):
a1 = np.array([-19.658, 17.18, 25.163], dtype=float)
a2 = np.array([-20.573, 18.059, 25.88], dtype=float)
a3 = | np.array([-22.018, 17.551, 26.0], dtype=float) | numpy.array |
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import warnings
import numpy as np
from numpy.testing import assert_allclose, \
assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \
assert_raises, verbose, assert_equal, assert_array_equal
from numpy import array, finfo, argsort, dot, round, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy.linalg import svd, hilbert
from scipy.lib._gcutils import assert_deallocated
# eigs() and eigsh() are called many times, so apply a filter for the warnings
# they generate here.
_eigs_warn_msg = "Single-precision types in `eigs` and `eighs`"
def setup_module():
warnings.filterwarnings("ignore", message=_eigs_warn_msg)
def teardown_module():
warnings.filterwarnings("default", message=_eigs_warn_msg)
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
    except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
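# Small illustrative example of argsort_which (a sketch, not part of the
# original test suite): pick the 2 largest-magnitude eigenvalues of a real
# ('d') problem with no shift-invert transform.
#
#     vals = np.array([0.1, -3.0, 2.0, 0.5])
#     argsort_which(vals, 'd', 2, 'LM')    # -> array([2, 1]), i.e. 2.0 and -3.0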
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval_a = exact_eval
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval_a = eval
eval = eval[ind]
evec = evec[:,ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
            RHS = eval * np.dot(b, evec)
import numpy as np
from ..tools import n_ball_volume, n_sphere_area, delay_coordinates, lstsqr
from ..tools.nd_utils import nd_function
from .math_utils import _lstsqr_design_matrix
from scipy.special import gamma
from nolds.measures import poly_fit
from tqdm import tqdm
from typing import Union
import plotly.graph_objs as go
def _fast_count_row_1d(x, traj, r, norm_p):
return np.count_nonzero(np.linalg.norm(traj - x[np.newaxis, :], ord=norm_p, axis=1) <= r)
def _fast_count_row(x, traj, r, norm_p):
"""
:param x: (dim, )
:param traj: (n_points, dim)
:param norm_p: int, float('inf')
:return:
"""
return np.count_nonzero(np.linalg.norm(traj - x[np.newaxis, :], ord=norm_p, axis=1) <= r)
def _fast_count_traj(x, r, norm_p):
"""
:param x: (n_points, dim)
:param norm_p: int, float('inf')
:return:
"""
if x.shape[1] > 1:
return np.sum(np.apply_along_axis(_fast_count_row, 1, x, x, r, norm_p))
elif x.shape[1] == 1:
return np.sum(np.apply_along_axis(_fast_count_row_1d, 1, x, x, r, norm_p))
def corr_sum(traj, r, norm_p=1, allow_equals=False):
if allow_equals:
return _fast_count_traj(traj, r, norm_p).astype(np.float64) / traj.shape[0] ** 2
else:
return (_fast_count_traj(traj, r, norm_p).astype(np.float64) - traj.shape[0]) /\
(traj.shape[0] * (traj.shape[0] - 1))
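# Illustrative sketch (not part of the original module): the correlation sum of
# points filling the unit square scales roughly like r**2, i.e. a fitted slope
# (and hence correlation dimension) close to 2 up to boundary effects.
def _demo_corr_sum():
    rng = np.random.RandomState(0)
    traj = rng.rand(500, 2)                                  # uniform points in [0, 1]^2
    rvals = np.array([0.05, 0.1, 0.2])
    cs = np.array([corr_sum(traj, r, norm_p=2) for r in rvals])
    slope = np.polyfit(np.log(rvals), np.log(cs), 1)[0]
    return slope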
def dsty_est(x, samples, r, norm_p=1):
if len(samples.shape) == 1:
samples = samples[:, np.newaxis]
dim = samples.shape[1]
# x is (k,)
if len(x.shape) == 1:
# if k = d, then x is a point in phase space
if x.shape[0] == dim:
x = x[np.newaxis, :]
# x is a collection of sample points in 1d phase space
elif dim == 1:
x = x[:, np.newaxis]
elif len(x.shape) == 2:
assert(x.shape[1] == samples.shape[1])
return np.apply_along_axis(_fast_count_row, 1, x, samples, r, norm_p).astype(np.float64) / (n_ball_volume(dim, norm_p) * r**dim * samples.shape[0])
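# Illustrative sketch (not part of the original module): dsty_est as a naive
# density estimate. For a standard normal sample, the estimate at 0 should sit
# near 1/sqrt(2*pi) ~ 0.40; this assumes n_ball_volume(1, 2) returns 2, the
# length of the unit interval ball.
def _demo_dsty_est():
    rng = np.random.RandomState(0)
    samples = rng.randn(5000)
    return dsty_est(np.array([0.0]), samples, r=0.2, norm_p=2)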
def reference_rule_alpha(p: Union[float, int], d: int):
if d == 1:
alpha = (12. * np.sqrt(np.pi)) ** (1./5)
else:
alpha = (
(4. * (2. * np.sqrt(np.pi))**d * (3. * gamma(1+(d+2.)/p) * gamma(1+1./p))**2)
/ ((d + 2.) * n_ball_volume(d, p) * (gamma(1+3./p) * gamma(1.+d/p)) ** 2)
) ** (1./(d+4))
return alpha
@nd_function
def reference_rule(x: np.ndarray, dim: Union[int, str] = 'auto', norm_p: Union[int, float, str] = 2) -> float:
n = x.shape[0]
if dim == 'auto':
d = 1
if len(x.shape) == 2:
d = x.shape[1]
elif isinstance(dim, int):
d = dim
else:
raise ValueError('dim must be "auto" or int')
# print(d)
std = np.sqrt(x.var(axis=0, ddof=1).mean())
from scipy import stats
iqr = stats.iqr(x)
scale = min(std, iqr/1.34)
gamma_n = n ** (-1/(d+4.))
if norm_p in ['manhattan', 'euclidean', 'supremum']:
norm_p = ["manhattan", "euclidean"].index(norm_p) + 1 if norm_p != "supremum" else float("inf")
alpha_p_d = reference_rule_alpha(norm_p, d)
return gamma_n * alpha_p_d * scale
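# Illustrative sketch (not part of the original module): the reference-rule
# bandwidth shrinks with sample size at rate n**(-1/(d+4)). This assumes the
# nd_function decorator keeps the plain ndarray call signature shown above.
def _demo_reference_rule():
    rng = np.random.RandomState(0)
    r_small_n = reference_rule(rng.randn(200), norm_p=2)
    r_large_n = reference_rule(rng.randn(20000), norm_p=2)
    return r_small_n, r_large_n    # the second value should be the smaller one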
# def rule_of_thumb(x: np.ndarray, norm_p=2, version: str = 'normal') -> float:
# n = x.shape[0]
# d = 1
# if len(x.shape) == 2:
# d = x.shape[1]
#
# if norm_p in ['manhattan', 'euclidean', 'supremum']:
# norm_p = ["manhattan", "euclidean"].index(norm_p) + 1 if norm_p != "supremum" else float("inf")
#
# std = np.sqrt(x.var(axis=0, ddof=1).mean())
#
#
# # version 1
# if version == 'normal':
# return std * ((9. * n_ball_volume(d, norm_p) * (2 * np.sqrt(np.pi)) ** d)/ ((d + 2) * n)) ** (1/(d+4))
# elif version == 'scott':
# return std * 3.5 * n ** (-1 / (d + 2))
# # version 2
# # return std * (((d + 2)**2 * (2*np.sqrt(np.pi))**d) / (n * n_ball_volume(d, norm_p) * (1/2. * d + 1/4. * d**2))) ** (1/(d+4))
def grassberger_proccacia(x: np.ndarray, rvals=None, rmin=None, rmax=None, omit_plateau=True, norm_p=2, method='lstsqr',
hack_filter_rvals=None, nr=20, plot=False, fig=None, show=True, full_output=False,
log_base=10, remove_tail=True, verbose=False):
"""
    Estimates the correlation dimension using the Grassberger-Procaccia algorithm. The code is greatly inspired by
    nolds: https://github.com/CSchoel/nolds/blob/master/nolds/measures.py and makes use of nolds' version of poly_fit
:param x: time-series (n_points, dim) or (n_points, )
:param rvals: the threshold values to use
:return: the correlation dimension
"""
if log_base == 10:
log = np.log10
elif log_base == np.exp(1):
log = np.log
else:
        log = lambda x: np.log(x) / np.log(log_base)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
# acting on behalf of its Max Planck Institute for Intelligent Systems and the
# Max Planck Institute for Biological Cybernetics. All rights reserved.
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
# on this computer program. You can only use this computer program if you have closed a license agreement
# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and liable to prosecution.
# Contact: <EMAIL>
#
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# <NAME> <https://nghorbani.github.io/>
#
# 2018.12.13
import numpy as np
import torch
import torch.nn as nn
# from smplx.lbs import lbs
from lbs import lbs
import pickle
class Struct(object):
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def to_tensor(array, dtype=torch.float32):
    if 'torch.tensor' not in str(type(array)):
        return torch.tensor(array, dtype=dtype)
    return array  # already a torch tensor; return it unchanged instead of None
def to_np(array, dtype=np.float32):
if 'scipy.sparse' in str(type(array)):
array = array.todense()
return np.array(array, dtype=dtype)
class BodyModel(nn.Module):
def __init__(self,
bm_path,
params=None,
num_betas=10,
batch_size=1, v_template = None,
num_dmpls=None, path_dmpl=None,
num_expressions=10,
use_posedirs=True,
dtype=torch.float32):
super(BodyModel, self).__init__()
'''
        :param bm_path: path to a SMPL model file (.pkl or .npz)
        :param num_betas: number of shape parameters to include.
                          if betas are provided in params, num_betas would be overloaded with the number of those betas
        :param batch_size: number of smpl vertices to get
        :param device: default on gpu
        :param dtype: float precision of the computations
        :return: verts, trans, pose, betas
        '''
        # Todo: if params is given, the batch size should be read from one of the params
self.dtype = dtype
if params is None: params = {}
# -- Load SMPL params --
if '.npz' in bm_path:
smpl_dict = np.load(bm_path, encoding='latin1')
elif '.pkl' in bm_path:
with open(bm_path, 'rb') as smpl_file:
smpl_dict = Struct(**pickle.load(smpl_file,encoding='latin1'))
else:
            raise ValueError('bm_path should be either a .pkl or a .npz file')
njoints = smpl_dict.posedirs.shape[2] // 3
self.model_type = {69: 'smpl', 153: 'smplh', 162: 'smplx', 45: 'mano'}[njoints]
        assert self.model_type in ['smpl', 'smplh', 'smplx', 'mano'], ValueError(
            'model_type should be in smpl/smplh/smplx/mano.')
self.use_dmpl = False
if num_dmpls is not None:
if path_dmpl is not None:
self.use_dmpl = True
else:
raise (ValueError('path_dmpl should be provided when using dmpls!'))
if self.use_dmpl and self.model_type in ['smplx', 'mano']: raise (
NotImplementedError('DMPLs only work with SMPL/SMPLH models for now.'))
# Mean template vertices
if v_template is None:
v_template = np.repeat(smpl_dict.v_template[np.newaxis], batch_size, axis=0)
else:
v_template = np.repeat(v_template[np.newaxis], batch_size, axis=0)
self.register_buffer('v_template', torch.tensor(v_template, dtype=dtype))
self.register_buffer('f', torch.tensor(smpl_dict.f.astype(np.int32), dtype=torch.int32))
if len(params):
if 'betas' in params.keys():
num_betas = params['betas'].shape[1]
if 'dmpls' in params.keys():
num_dmpls = params['dmpls'].shape[1]
num_total_betas = smpl_dict.shapedirs.shape[-1]
if num_betas < 1:
num_betas = num_total_betas
shapedirs = smpl_dict.shapedirs[:, :, :num_betas]
self.register_buffer('shapedirs', torch.tensor(to_np(shapedirs), dtype=dtype))
if self.model_type == 'smplx':
begin_shape_id = 300 if smpl_dict.shapedirs.shape[-1] > 300 else 10
exprdirs = smpl_dict.shapedirs[:, :, begin_shape_id:(begin_shape_id + num_expressions)]
self.register_buffer('exprdirs', torch.tensor(exprdirs, dtype=dtype))
expression = torch.tensor(np.zeros((batch_size, num_expressions)), dtype=dtype, requires_grad=True)
self.register_parameter('expression', nn.Parameter(expression, requires_grad=True))
if self.use_dmpl:
dmpldirs = np.load(path_dmpl)['eigvec']
dmpldirs = dmpldirs[:, :, :num_dmpls]
self.register_buffer('dmpldirs', torch.tensor(dmpldirs, dtype=dtype))
# Regressor for joint locations given shape - 6890 x 24
self.register_buffer('J_regressor', to_tensor(to_np(
smpl_dict.J_regressor), dtype=dtype))
        # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
if use_posedirs:
posedirs = smpl_dict.posedirs
posedirs = posedirs.reshape([posedirs.shape[0] * 3, -1]).T
self.register_buffer('posedirs', torch.tensor(posedirs, dtype=dtype))
else:
self.posedirs = None
# indices of parents for each joints
kintree_table = smpl_dict.kintree_table.astype(np.int32)
self.register_buffer('kintree_table', torch.tensor(kintree_table, dtype=torch.int32))
# LBS weights
# weights = np.repeat(smpl_dict.weights[np.newaxis], batch_size, axis=0)
weights = smpl_dict.weights
self.register_buffer('weights', torch.tensor(weights, dtype=dtype))
if 'trans' in params.keys():
trans = params['trans']
else:
trans = torch.tensor(np.zeros((batch_size, 3)), dtype=dtype, requires_grad=True)
self.register_parameter('trans', nn.Parameter(trans, requires_grad=True))
# root_orient
# if self.model_type in ['smpl', 'smplh']:
root_orient = torch.tensor(np.zeros((batch_size, 3)), dtype=dtype, requires_grad=True)
self.register_parameter('root_orient', nn.Parameter(root_orient, requires_grad=True))
# pose_body
if self.model_type in ['smpl', 'smplh', 'smplx']:
pose_body = torch.tensor(np.zeros((batch_size, 63)), dtype=dtype, requires_grad=True)
self.register_parameter('pose_body', nn.Parameter(pose_body, requires_grad=True))
# pose_hand
if 'pose_hand' in params.keys():
pose_hand = params['pose_hand']
else:
if self.model_type in ['smpl']:
pose_hand = torch.tensor(np.zeros((batch_size, 1 * 3 * 2)), dtype=dtype, requires_grad=True)
elif self.model_type in ['smplh', 'smplx']:
pose_hand = torch.tensor(np.zeros((batch_size, 15 * 3 * 2)), dtype=dtype, requires_grad=True)
elif self.model_type in ['mano']:
pose_hand = torch.tensor(np.zeros((batch_size, 15 * 3)), dtype=dtype, requires_grad=True)
self.register_parameter('pose_hand', nn.Parameter(pose_hand, requires_grad=True))
# face poses
if self.model_type == 'smplx':
            pose_jaw = torch.tensor(np.zeros((batch_size, 1 * 3)), dtype=dtype, requires_grad=True)
import numpy as np
import segyio as so
from scipy.signal import butter, sosfilt
from scipy import interpolate  # used by resample() below
import time
# Simple timer with message
def timer(start, message):
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
info('{}: {:d}:{:02d}:{:02d}'.format(message, int(hours), int(minutes), int(seconds)))
# Time resampling for shot records
def resample(rec, num):
if num == rec.time_range.num:
return np.asarray(rec.data), np.asarray(rec.coordinates.data)
start, stop = rec._time_range.start, rec._time_range.stop
dt0 = rec._time_range.step
new_time_range = TimeAxis(start=start, stop=stop, num=num)
dt = new_time_range.step
to_interp = np.asarray(rec.data)
data = np.zeros((num, to_interp.shape[1]))
for i in range(to_interp.shape[1]):
tck = interpolate.splrep(rec._time_range.time_values, to_interp[:, i], k=3)
data[:, i] = interpolate.splev(new_time_range.time_values, tck)
coords_loc = np.asarray(rec.coordinates.data)
# Return new object
return data, coords_loc
#######################################################################################
# Segy writer for shot records
def segy_write(data, sourceX, sourceZ, groupX, groupZ, dt, filename, sourceY=None,
groupY=None, elevScalar=-1000, coordScalar=-1000):
nt = data.shape[0]
nsrc = 1
nxrec = len(groupX)
if sourceY is None and groupY is None:
sourceY = np.zeros(1, dtype='int')
groupY = np.zeros(nxrec, dtype='int')
nyrec = len(groupY)
# Create spec object
    spec = so.spec()  # segyio is imported as "so" above
spec.ilines = np.arange(nxrec) # dummy trace count
    spec.xlines = np.zeros(1, dtype='int')
import numpy as np
import attr_dict
import cfg
import yaml
from contextlib import contextmanager
import tactics_utils
class Player:
def __init__(self, pos=None, angle=0, label='', role=''):
self.pos = np.array(pos)
self.angle = int(angle)
self.label = label
self.role = role
@property
def name(self):
return f'{self.role}{self.label}'
@staticmethod
def from_dict(d):
d = attr_dict.AttrDict(d)
return Player(pos=d.pos, angle=d.orientation, label=d.label, role=d.role)
@property
def manimpos(self):
        return np.array([self.pos[0], self.pos[1], 1])
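# Illustrative sketch (not part of the original module): constructing a Player
# from a plain dict, the shape a YAML-loaded tactic entry would have. The
# concrete role/label values are made up; AttrDict is assumed to expose dict
# keys as attributes, which is what Player.from_dict relies on.
def _demo_player():
    p = Player.from_dict({'pos': [3.5, -1.0], 'orientation': 90,
                          'label': '7', 'role': 'striker'})
    return p.name, p.manimpos      # ('striker7', array([ 3.5, -1. ,  1. ]))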
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plots
plotrange, Btau, Ctau, ellipse, SUE
plotool:
set_clib, set_fig, set_ax,
reset_handles, append_handles, get_handles, set_legend,
plot, eplot, save, show, close
pplot(plotool):
add_plot, add_legend
"""
import warnings
from astropy import units as u
import numpy as np
from scipy import optimize
# import matplotlib as mpl
from matplotlib.ticker import (
NullFormatter, ScalarFormatter, LogFormatter,
LogFormatterExponent, LogFormatterSciNotation,
PercentFormatter
)
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
## Local
from utilities import InputError, merge_aliases
import utilities as UT  # plotrange() below calls UT.strike for input errors
from arrays import arrayize, ramp
# cmap = mpl.cm.viridis
# norm = mpl.colors.Normalize(vmin=0, vmax=1)
##------------------------------
## Automatic plot range setting
##------------------------------
def plotrange(x,y,xran=None,yran=None,xlog=False,ylog=False,mask=None, \
errx=None,erry=None,xisln=False,yisln=False):
'''
Automatically sets the x and y ranges for (X,Y) plots, based on the entered
data set.
Copyright: <NAME>
'''
# Check the imput
N = np.size(x)
if (np.size(y) != N):
UT.strike('plotrange','x and y should have the same size.')
xran = arrayize(xran,N=2)
yran = arrayize(yran,N=2)
# X error bar settings
if errx is not None:
if (np.isscalar(errx)): errx = np.array([errx])
sex = np.shape(errx)
if (len(sex) == 2):
if (sex != (2,N) ): UT.strike('plotrange','wrong size for errx.')
elif (len(sex) == 1):
if (sex != (N,) ): UT.strike('plotrange','wrong size for errx.')
errx = np.array([errx,errx])
else:
errx = np.zeros((2,N))
# Y error bar settings
if erry is not None:
if (np.isscalar(erry)): erry = np.array([erry])
sey = np.shape(erry)
if (len(sey) == 2):
if (sey != (2,N) ): UT.strike('plotrange','wrong size for erry.')
elif (len(sey) == 1):
if (sey != (N,) ): UT.strike('plotrange','wrong size for erry.')
erry = np.array([erry,erry])
else:
erry = np.zeros((2,N))
# Homogenize the arrays and account for errors
xlow = np.array(x,dtype=float).flatten() - errx[0,:]
xhigh = xlow + errx[1,:]
ylow = np.array(y,dtype=float).flatten() - erry[0,:]
yhigh = ylow + erry[1,:]
# Lin/Log
if (xisln): xlow, xhigh = np.exp(xlow), np.exp(xhigh)
if (yisln): ylow, yhigh = np.exp(ylow), np.exp(yhigh)
# Mask
mask = arrayize(mask,default=True,N=N)
if (xlog): mask = ( mask & (xlow > 0) & (xhigh > 0) )
if (ylog): mask = ( mask & (ylow > 0) & (yhigh > 0) )
if (xran[0] != None): mask = ( mask & (xlow >= xran[0]) )
if (xran[1] != None): mask = ( mask & (xhigh <= xran[1]) )
if (yran[0] != None): mask = ( mask & (ylow >= yran[0]) )
if (yran[1] != None): mask = ( mask & (yhigh <= yran[1]) )
# Plain range
xran = np.array([ np.min(xlow[mask]), np.max(xhigh[mask]) ])
yran = np.array([ np.min(ylow[mask]), np.max(yhigh[mask]) ])
# Add aesthetical margins
fracmarg = 0.03
if (not xlog):
dxr = xran[1] - xran[0]
xran += ( dxr*(-fracmarg), dxr*fracmarg )
else:
dxr = xran[1] / xran[0]
xran *= ( dxr**(-fracmarg), dxr**fracmarg )
if (not ylog):
dyr = yran[1] - yran[0]
yran += ( dyr*(-fracmarg), dyr*fracmarg )
else:
dyr = yran[1] / yran[0]
yran *= ( dyr**(-fracmarg), dyr**fracmarg )
# Output
return(xran,yran)
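## Illustrative sketch (not part of the original module): automatic axis ranges
## for a noisy scatter with symmetric y error bars. This relies on arrayize()
## (imported above) filling unset bounds, as plotrange itself assumes.
def _demo_plotrange():
    rng = np.random.RandomState(0)
    x = np.linspace(1., 10., 50)
    y = x + rng.randn(50)
    xran, yran = plotrange(x, y, erry=0.5*np.ones(50))
    return xran, yran   # data range padded by a few percent on each side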
##-----------------------------------------------
##
## Plotting functions for ellipses and SUEs
##
## (Copyright: <NAME>)
##
##-----------------------------------------------
## Function for SUEs
def Btau(tau):
return( (1-2/np.pi)*(tau-1)**2 + tau )
## Function for SUEs
def Ctau(tau):
return( ( (4/np.pi-1)*tau**2 + (3-8/np.pi)*tau + 4/np.pi-1 ) * (tau-1) )
## Ellipse
def ellipse(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xmin=None,xmax=None,ymin=None,ymax=None,Npt=300, \
xisln=False,yisln=False):
"""
UNCERTAINTY ELLIPSES
Function to plot uncertainty ellipses (or 1 sigma contour of a bivariate
normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev) and the correlation coefficients (rho).
The optional bounds (xmin,xmax,ymin,ymax) have the effect of truncating the
ellipses in case there is a range of parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the ellipse, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
ellipse(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
x = ramp(x0=xmean-xstdev*(1-1.E-5),x1=xmean+xstdev*(1-1.E-5),N=Npt)
c1 = rho * (x-xmean)/xstdev
c2 = np.sqrt( (1-rho**2) * (1-(x-xmean)**2/xstdev**2) )
y1 = ystdev * ( c1 - c2 ) + ymean
y2 = ystdev * ( c1 + c2 ) + ymean
xplot = np.concatenate((x,x[::-1],[x[0]]))
yplot = np.concatenate((y1,y2[::-1],[y1[0]]))
if (xisln): xplot = np.exp(xplot)
if (yisln): yplot = np.exp(yplot)
if (xmin != None): xplot[xplot < xmin] = xmin
if (xmax != None): xplot[xplot > xmax] = xmax
if (ymin != None): yplot[yplot < ymin] = ymin
if (ymax != None): yplot[yplot > ymax] = ymax
return(xplot,yplot)
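## Illustrative sketch (not part of the original module): the 1 sigma contour of
## a correlated bivariate normal, ready to be drawn with any matplotlib axes,
## e.g. ax.plot(xell, yell).
def _demo_ellipse():
    xell, yell = ellipse(xmean=1.0, ymean=2.0, xstdev=0.5, ystdev=0.2,
                         rho=0.6, Npt=200)
    return xell, yell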
## SUEs (1 sigma contour of a bivariate split-normal distribution)
def SUE(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xskew=None,yskew=None,xmin=None,xmax=None,ymin=None,ymax=None, \
Npt=300,xisln=False,yisln=False):
"""
SKEWED UNCERTAINTY ELLIPSES (SUE)
Function to plot uncertainty SUEs (or 1 sigma contour of a bivariate
split-normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev), the skewnesses (xskew,yskew) and the
correlation coefficients (rho). The optional bounds (xmin,xmax,ymin,ymax)
have the effect of truncating the SUEs in case there is a range of
parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the SUE, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
SUE(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,xskew=skewness_of_lnx,yskew=skewness_of_y, \
rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
# Rotation angle
theta = 1./2 * np.arctan( 2*rho*xstdev*ystdev / (xstdev**2-ystdev**2) )
# Numerically solve for taux and tauy (tau=1.D2 ==> skew=0.99)
taugrid = ramp(N=10000,x0=1.E-2,x1=1.E2,log=True)
Ax = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*xskew*xstdev**3 \
+ (np.sin(theta))**3*yskew*ystdev**3 ) \
/ ( (np.sin(theta))**6 + (np.cos(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) )**1.5
Ay = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*yskew*ystdev**3 \
- (np.sin(theta))**3*xskew*xstdev**3 ) \
/ ( (np.cos(theta))**6 + (np.sin(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) )**1.5
taux = np.exp(np.interp(Ax,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
tauy = np.exp(np.interp(Ay,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
if (not np.isfinite(taux) or taux > 1.E2): taux = 1.E2
if (not np.isfinite(tauy) or tauy > 1.E2): tauy = 1.E2
# Rest of the parameters
lambdax = np.sqrt( ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(taux) )
lambday = np.sqrt( ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(tauy) )
x0 = xmean - np.sqrt(2/np.pi) * ( np.cos(theta)*lambdax*(taux-1) \
- np.sin(theta)*lambday*(tauy-1) )
y0 = ymean - np.sqrt(2/np.pi) * ( np.sin(theta)*lambdax*(taux-1) \
+ np.cos(theta)*lambday*(tauy-1) )
# Draw the SUE
matrot = np.array([ [ np.cos(theta), -np.sin(theta) ], \
[ np.sin(theta), np.cos(theta) ] ])
xell_ax1 = np.zeros(2)
yell_ax1 = np.zeros(2)
xell_ax2 = np.zeros(2)
yell_ax2 = np.zeros(2)
for k in np.arange(4):
if (k == 0):
xell_sub = ramp(N=Npt,x0=-lambdax,x1=0) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 1):
xell_sub = ramp(N=Npt,x0=0,x1=lambdax*taux) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 2):
xell_sub = (ramp(N=Npt,x0=0,x1=lambdax*taux))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 3):
xell_sub = (ramp(N=Npt,x0=-lambdax,x1=0))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
# Add the limit case (half ellipse)
mask = np.logical_and(np.isfinite(yell_sub),np.isfinite(xell_sub))
xell_sub = xell_sub[mask]
yell_sub = yell_sub[mask]
Nsub = np.count_nonzero(mask)
# Rotate the ellipse
for j in np.arange(Nsub):
vecell = np.matmul(matrot, \
np.array([xell_sub[j]-x0,yell_sub[j]-y0]))
xell_sub[j] = vecell[0] + x0
yell_sub[j] = vecell[1] + y0
if (k == 0):
xell = xell_sub
yell = yell_sub
else:
xell = np.concatenate((xell,xell_sub))
yell = np.concatenate((yell,yell_sub))
xplot = np.concatenate((xell,[xell[0]]))
yplot = np.concatenate((yell,[yell[0]]))
# Logs and limits
if (xisln):
xplot = np.exp(xplot)
x0 = np.exp(x0)
if (yisln):
yplot = np.exp(yplot)
y0 = np.exp(y0)
if (xmin != None):
xplot[xplot < xmin] = xmin
if (x0 < xmin): x0 = xmin
if (xmax != None):
xplot[xplot > xmax] = xmax
if (x0 > xmax): x0 = xmax
if (ymin != None):
yplot[yplot < ymin] = ymin
if (y0 < ymin): y0 = ymin
if (ymax != None):
yplot[yplot > ymax] = ymax
if (y0 > ymax): y0 = ymax
return(xplot,yplot,x0,y0)
##-----------------------------------------------
##
## <plotool> based tools
##
##-----------------------------------------------
class plotool:
'''
plot Tool
------ INPUT ------
nrows,ncols Default: 1,1
figint Interactive figure (Default: True)
plt.subplots(**kwargs)
'''
def __init__(self, nrows=1, ncols=1,
x=np.zeros(2), y=np.zeros(2), figint=True, **kwargs):
'''
------ self ------
figid ID number of current figure (Default: 0)
horder ID number of current handle in self.handles (Default: 0)
handles Current handle list (Default: [])
labels Current label list (Default: [])
legend Current legend (Default: None)
trans transformation of coordinate system
'Data'
'Axes' (default)
'Figure'
'Display'
'''
## INPUTS
self.figid = 0
self.horder = 0
self.handles = []
self.labels = []
self.legend = None
self.trans = 'Axes'
self.x = x
self.y = y
if figint:
plt.ion()
self.nrows = nrows
self.ncols = ncols
self.fig, self.axes = plt.subplots(nrows, ncols, squeeze=False,
num=self.figid, **kwargs)
## De-squeeze/Inflate (if squeeze=True)
# if nrows==1 and ncols==1:
# self.axes = np.array(self.axes)[np.newaxis,np.newaxis]
# elif nrows==1:
# self.axes = self.axes[np.newaxis,:]
# elif ncols==1:
# self.axes = self.axes[:,np.newaxis]
self.ax = self.axes[0,0]
def set_clib(self, clib):
if clib=='base':
self.clib = list(mplc.BASE_COLORS) # 8 colors
elif clib=='tableau':
self.clib = list(mplc.TABLEAU_COLORS) # 10 colors
elif clib=='ccs4' or clib=='x11':
self.clib = list(mplc.CSS4_COLORS)
elif clib=='xkcd':
self.clib = list(mplc.XKCD_COLORS)
else:
self.clib = clib
def set_fig(self, left=None, right=None, bottom=None, top=None,
wspace=None, hspace=None, title=None, tsize=None):
self.fig.subplots_adjust(left=left, right=right,
bottom=bottom, top=top, wspace=wspace, hspace=hspace)
if title is not None:
self.fig.suptitle(title, size=tsize)
def set_ax(self, subpos=None, # ax = axes[subpos[0],subpos[1]]
xlog=False, ylog=False, # ax.set_xscale
basex=10, basey=10, nonposx='clip', nonposy='clip', # ax.set_xscale
xlim=(None,None), ylim=(None,None), #ax.set_xlim
xtk=None, xtkmi=None, xtkform=None, xtksize=None, # ax.xaxis.set_tick_params
ytk=None, ytkmi=None, ytkform=None, ytksize=None,
xlabel=None, xsize=None, ylabel=None, ysize=None, # ax.set_xlabel
xtktoggle=False, ytktoggle=False,
title=None, tsize=None, # ax.set_title (subplot title)
**kwargs):
'''
nonposx, nonposy: 'sym', 'mask', 'clip'
------ INPUT ------
subpos identify subplot (Default: None - current self.ax)
xtk,ytk list of major tick labels
xtkmi,ytkmi list of minor tick labels
xtkform,ytkform formatter of (major) tick labels
xtksize,ytksize size of (major) tick labels
xsize,ysize size of axes labels
xtktoggle toggle x tick and label to top
ytktoggle toggle y tick and label to right
self.ax.tick_params(**kwargs):
axis {'x', 'y', 'both' (default)}
reset Default: False
            which {'major' (default), 'minor', 'both'}
direction {'in', 'out' (default), 'inout'}
length tick length
width tick width
'''
if self.nrows!=1 or self.ncols!=1:
if subpos is not None:
self.ax = self.axes[subpos[0],subpos[1]]
## else: keep current self.ax (Default: (0,0))
if xlog:
if nonposx=='sym':
self.ax.set_xscale('symlog',base=basex)
else:
self.ax.set_xscale('log',base=basex,nonpositive=nonposx)
if ylog:
            if nonposy=='sym':
self.ax.set_yscale('symlog',base=basey)
else:
self.ax.set_yscale('log',base=basey,nonpositive=nonposy)
if xlim[0]!=None or xlim[1]!=None:
self.ax.set_xlim(xlim[0], xlim[1])
if ylim[0]!=None or ylim[1]!=None:
self.ax.set_ylim(ylim[0], ylim[1])
## x ticks
if xtkform=='log':
xformat = LogFormatter()
elif xtkform=='log_exp':
xformat = LogFormatterExponent()
elif xtkform=='log_sci':
xformat = LogFormatterSciNotation()
elif xtkform=='pct':
xformat = PercentFormatter()
else:
xformat = ScalarFormatter()
if xtk is not None:
self.ax.set_xticks(xtk, minor=False) # major
if xtkmi is not None:
self.ax.set_xticks(xtkmi, minor=True) # minor
self.ax.xaxis.set_major_formatter(xformat) # major
self.ax.xaxis.set_minor_formatter(NullFormatter()) # minor
self.ax.xaxis.set_tick_params(labelsize=xtksize)
if xtktoggle:
self.ax.xaxis.tick_top()
self.ax.xaxis.set_label_position('top')
## y ticks
if ytkform=='log':
yformat = LogFormatter()
elif ytkform=='log_exp':
yformat = LogFormatterExponent()
elif ytkform=='log_sci':
yformat = LogFormatterSciNotation()
elif ytkform=='pct':
yformat = PercentFormatter()
else:
yformat = ScalarFormatter()
if ytk is not None:
self.ax.set_yticks(ytk, minor=False) # major
if ytkmi is not None:
self.ax.set_yticks(ytkmi, minor=True) # minor
self.ax.yaxis.set_major_formatter(yformat) # major
self.ax.yaxis.set_minor_formatter(NullFormatter()) # minor
self.ax.yaxis.set_tick_params(labelsize=ytksize)
if ytktoggle:
self.ax.yaxis.tick_right()
self.ax.yaxis.set_label_position('right')
## both ticks
self.ax.tick_params(**kwargs)
if xlabel is not None:
self.ax.set_xlabel(xlabel, size=xsize)
if ylabel is not None:
self.ax.set_ylabel(ylabel, size=ysize)
if title is not None:
self.ax.set_title(title, size=tsize)
def reset_handles(self):
'''
Reset handles (before plot with new legend)
'''
if self.trans=='Figure':
self.fig.add_artist(self.legend)
elif self.trans=='Axes' or self.trans=='Data':
self.ax.add_artist(self.legend)
self.handles = []
self.horder = 0
return self.handles
def append_handles(self):
'''
Append currently added handle (after plot)
'''
handles, labels = self.ax.get_legend_handles_labels()
for handle, label in zip(handles, labels):
if label not in self.labels:
self.handles.append(handle)
self.labels.append(label)
self.horder += 1
return self.handles
def get_handles(self):
'''
Get non-repeated handles (after plot)
'''
handles, labels = self.ax.get_legend_handles_labels()
handlist, labelist = [], []
for handle, label in zip(handles, labels):
if label not in labelist:
handlist.append(handle)
labelist.append(label)
self.horder = len(handlist)
self.handles = handlist
self.labels = labelist
return self.handles
def set_legend(self, subpos=None,
left=None, right=None, bottom=None, top=None, figtight=False,
handles=None, **kwargs):
'''
- bbox_to_anchor rules: (1,1) correspond to upper right of the axis
bbox_to_anchor = (1,1)
.--------.
| |
| Axes |
| (bbox) |
| |
.--------.
- lengend loc is relative to the reference point (bbox_to_anchor) as follows:
|
lower right (2) | lower left (1)
---------------bbox_to_anchor---------------
upper right (3) | upper left (4)
|
------ INPUT ------
subpos identify subplot (Default: None - current self.ax)
x0,y0 bottom left of current Axes (Default: None)
shrinkx,shrinky current Axes size (Default: 1)
figtight ignore Axes settings (Default: False)
handles Default: None - self.handles
self.fig/ax.legend(**kwargs):
title title of legend box
loc relative position of legend box
bbox_to_anchor reference point of legend box
bbox_transform reference frame of coordinates
self.fig.transFigure
self.ax.transAxes (default)
------ OUTPUT ------
self.legend registrated with self.fig/ax.add_artist(self.legend)
'''
if handles is None:
handles = self.handles
if self.trans=='Figure':
self.fig.subplots_adjust(left=left, right=right,
bottom=bottom, top=top)
self.legend = self.fig.legend(handles=handles, **kwargs)
elif self.trans=='Axes' or self.trans=='Data':
if self.nrows!=1 or self.ncols!=1:
if subpos is not None:
self.ax = self.axes[subpos[0],subpos[1]]
## else: keep current self.ax (Default: (0,0))
## Set current axes
bbox = self.ax.get_position()
if left is None:
x0 = bbox.x0
else:
x0 = bbox.x0 + left*(bbox.x1-bbox.x0)
if right is None:
x1 = bbox.x1
else:
x1 = bbox.x0 + right*(bbox.x1-bbox.x0)
if bottom is None:
y0 = bbox.y0
else:
y0 = bbox.y0 + bottom*(bbox.y1-bbox.y0)
if top is None:
y1 = bbox.y1
else:
y1 = bbox.y0 + top*(bbox.y1-bbox.y0)
self.ax.set_position([x0, y0, x1-x0, y1-y0])
self.legend = self.ax.legend(handles=handles, **kwargs)
else:
raise InputError('<plotool.set_legend>',
'Non-recognized transformation!')
if figtight:
self.fig.tight_layout()
return self.legend
def plot(self, x=None, y=None, yerr=None, xerr=None, xisln=False, yisln=False,
fmt='', capsize=None, barsabove=False, # errorbar kw
ecolor=None, ec=None, elinewidth=None, elw=None, # errorbar kw
subpos=None, mod='CA', **kwargs):
'''
Like set_ax(), this is a clump operation.
The idea is to all set in one command,
while each single operation should also be valid.
------ INPUT ------
subpos identify subplot (Default: None - current self.ax)
self.ax.errorbar(**kwargs)
'''
if self.nrows!=1 or self.ncols!=1:
if subpos is not None:
self.ax = self.axes[subpos[0],subpos[1]]
## else: keep current self.ax (Default: (0,0))
## Keyword aliases
ec = merge_aliases(None, ecolor=ecolor, ec=ec)
elw = merge_aliases(None, elinewidth=elinewidth, elw=elw)
if x is None:
x = self.x
else:
self.x = x
if y is None:
y = self.y
else:
self.y = y
## Log inputs
xp, xperr, yp, yperr = None, None, None, None
if (xisln):
if x is not None: xp = np.exp(x)
if xerr is not None: xperr = x * (1. - np.exp(-xerr)) ## suppose xmin <-> lnxmin
else:
if x is not None: xp = x
if xerr is not None: xperr = xerr
if (yisln):
if y is not None: yp = np.exp(y)
if yerr is not None: yperr = y * (1. - np.exp(-yerr))
else:
if y is not None: yp = y
if yerr is not None: yperr = yerr
## CA: Cartesian using matplotlib.pyplot.errorbar
if mod=='CA':
self.markers, self.caps, self.bars = self.ax.errorbar(
x=xp, y=yp, yerr=yperr, xerr=xperr,
fmt=fmt, ecolor=ec, elinewidth=elw,
capsize=capsize, barsabove=barsabove,
**kwargs)
else:
print('*******************')
            print('Coming soon...')
print('PL: polar')
print('CL: cylindrical')
print('SP: spherical')
print('*******************')
def eplot(self, x=None, y=None, mask=None,
xmin=None, xmax=None, ymin=None, ymax=None,
xisln=False, yisln=False,
## Uncertainty kw
sigmax=None, sigmay=None, rho=None,
gammax=None, gammay=None,
## Bar/ellipse keywords
edgecolor=None, ecolor=None, ec=None,
elinewidth=None, elw=None, elinestyle=None, els=None,
efillcolor=None, efc=None, efill=False, ehatch=None,
errinlegend=None, alpha=1,
## Other kw
subpos=None, **kwargs):
'''
DISPLAY ERROR BARS/ELLIPSES/SUES
x and y are (N,) shape arrays. sigmax and sigmay are either (N,) or (2,N)
shape arrays. If xisln is set, then it is assumed that all the x related
quantities are moments of lnx (x is mu(lnx), sigmax is stdev(lnx), etc.).
This is independent of the xlog setting.
If rho is an (N,) shape array, an ellipse
is drawn, instead. If gammax or gammay are not None, an "asymmetric
ellipse" or SUE (1 sigma contour of a bivariate split-normal
distribution) is drawn.
------ INPUT ------
subpos identify subplot (Default: None - current self.ax)
self.plot(**kwargs)
'''
if self.nrows!=1 or self.ncols!=1:
if subpos is not None:
self.ax = self.axes[subpos[0],subpos[1]]
## else: keep current self.ax (Default: (0,0))
## kw aliases
ec = merge_aliases(None, edgecolor=edgecolor, ecolor=ecolor, ec=ec)
elw = merge_aliases(None, elinewidth=elinewidth, elw=elw)
els = merge_aliases(None, elinestyle=elinestyle, els=els)
efc = merge_aliases(None, efillcolor=efillcolor, efc=efc)
## Central values
xp, N = arrayize(x)
yp = arrayize(y,N=N)
if (N != np.size(yp)):
raise InputError('<plotool.eplot>',
'x and y must have the same size.')
## Ellipse/SUE
ell = (rho is not None)
skewll = ( ( (gammax is not None) or (gammay is not None) ) \
and ell )
## X error bar settings
uncx = False
if sigmax is not None:
uncx = True
if (np.isscalar(sigmax)): sigmax = np.array([sigmax])
sex = np.shape(sigmax)
if (len(sex) == 2):
if (sex != (2,N) ):
raise InputError('<plotool.eplot>',
'wrong size for errx.')
elif (len(sex) == 1):
if (sex != (N,) ):
raise InputError('<plotool.eplot>',
'wrong size for errx.')
sigmax = np.array([sigmax,sigmax])
## Y error bar settings
uncy = False
if sigmay is not None:
uncy = True
            if (np.isscalar(sigmay)): sigmay = np.array([sigmay])
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
EPSILON = 2e-16  # tolerance used by group_delay below
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] <NAME>, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
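# Illustrative sketch (not part of the original module): round-tripping the
# analog transfer function H(s) = s / (s**2 + 8s + 25) (the same system used in
# the findfreqs example above) through tf2zpk and back with zpk2tf.
def _demo_tf2zpk():
    z, p, k = tf2zpk([1, 0], [1, 8, 25])   # one zero at 0, poles at -4 +/- 3j
    b, a = zpk2tf(z, p, k)
    assert np.allclose(b, [1, 0]) and np.allclose(a, [1, 8, 25])
    return z, p, k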
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
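# Illustrative sketch (not part of the original module): building a digital
# notch-like system directly from its roots. Because the zeros and poles come
# in conjugate pairs, zpk2tf returns real coefficient arrays.
def _demo_zpk2tf():
    w0 = np.pi / 4
    z = np.array([np.exp(1j * w0), np.exp(-1j * w0)])   # zeros on the unit circle
    p = 0.9 * z                                          # poles just inside it
    b, a = zpk2tf(z, p, 1.0)
    return b, a     # approximately [1, -1.414, 1] and [1, -1.273, 0.81]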
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
    filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = | np.delete(z, z1_idx) | numpy.delete |
import numpy as np
import HyperUtils as hu
check_eps = 0.3
check_sig = 2.0
check_alp = np.array([0.2, 0.18, 0.16, 0.14])
check_chi = np.array([0.9, 1.0, 1.1, 1.2])
file = "h1epmk_nest"
name = "1D Linear Elastic-Plastic with Multisurface Kinematic Hardening - Nested"
mode = 0
ndim = 1
const = [100.0, 4, 0.1, 100.0, 0.2, 33.333333, 0.3, 20.0, 0.4, 10.0]
eta = 0.04
tref = 1.0e6
def deriv():
global E, k, H, recip_k, name_const, n_int, n_inp, n_y, n_const
E = float(const[0])
n_int = int(const[1])
n_inp = int(const[1])
n_y = int(const[1])
n_const = 2 + 2*n_inp
k = np.array(const[2:2+2*n_inp:2])
H = np.array(const[3:3+2*n_inp:2])
recip_k = 1.0 / k
name_const = ["E", "N"]
for i in range(n_inp):
name_const.append("k"+str(i+1))
name_const.append("H"+str(i+1))
deriv()
def alpdiff(alp): return np.array([(alp[i]-alp[i+1]) for i in range(n_inp-1)])
def f(eps,alp): return ((E*(eps-alp[0])**2)/2.0 +
np.einsum("n,n,n->",H[:n_inp-1],alpdiff(alp),alpdiff(alp))/2.0 +
H[n_inp-1]*(alp[n_inp-1]**2)/2.0)
def dfde(eps,alp): return E*(eps-alp[0])
def dfda(eps,alp):
temp = np.zeros(n_int)
temp[0] = -E*(eps-alp[0])
temp[:n_inp-1] += np.einsum("n,n->n",H[:n_inp-1],alpdiff(alp))
temp[1:n_inp] -= np.einsum("n,n->n",H[:n_inp-1],alpdiff(alp))
temp[n_inp-1] += H[n_inp-1]*alp[n_inp-1]
return temp
def d2fdede(eps,alp): return E
def d2fdeda(eps,alp):
temp = | np.zeros(n_int) | numpy.zeros |
'''
Implementation of long-time intensity autocorrelation analysis according to
Houel et al. ACS Nano 2015, 9, 1, 886–893
Fitting Eq. 3 therein to long-time-scale (> milliseconds) autocorrelation
which for simple two-level dots gives a measure related to the power law exponent of switching
Autocorrelations are obtained using Wahl algorithm with logarithmic coarsening
'''
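# Hedged illustration (not part of the original analysis; names and values are
# assumptions for the sketch only): one way to build a logarithmically
# coarsened lag-time grid of the kind a Wahl-style multi-tau correlator uses,
# doubling the lag step after every `coarsening` points.
def _sketch_log_lags(t_min, t_max, coarsening=5):
    import numpy as np
    lags = []
    t, dt = t_min, t_min
    while t < t_max:
        for _ in range(coarsening):
            if t >= t_max:
                break
            lags.append(t)
            t += dt
        dt *= 2.0  # double the time step after each block of `coarsening` lags
    return np.array(lags)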
def Houelautocorrelationanalysis(MakeLongCorrs, PlotLongCorrs, Show_intermediateplots, CPA_insteadof_binned, Simulated_insteadof_MeasuredData):
import numpy as np
import os
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import acc_functions as acc
import loaddata_functions as load
import correlate_jit as corr
# fit range for the Houel et al analysis
shortcutoff = 1e-3 # seconds
longcutoff = 1 # seconds
coarsening = 5 # to calculate long time g(2) you exponentially coarsen the time scale
# every n points you double the time step. Good values are from 2 to 10.
# Bigger is slower
# =============================================================================
# import preamble
# =============================================================================
if Simulated_insteadof_MeasuredData:
import preamble_simulated as pre
else:
import preamble_measured as pre
# =============================================================================
# set outputfolder
# =============================================================================
if CPA_insteadof_binned:
outputfolder = pre.outputfolder_1 + pre.outputfolder_2_CPA
else:
outputfolder = pre.outputfolder_1 + pre.outputfolder_2_binned
# =============================================================================
# start processing the data
# =============================================================================
Dotlist = [i for i in os.listdir(pre.timetags_filepath) if i.startswith('Dot_') and pre.sig in i]
print('\n\nRunning routine to perform autocorrelation analysis [Houel et al. ]')
for dot_file in Dotlist:
dot_idx = int(dot_file[4:6])
print('##################################################')
print('Starting Dot', dot_idx)
# =============================================================================
# create the folder to save the data
# =============================================================================
savepath = outputfolder + 'Dot_%02d/' %dot_idx
if not os.path.exists(savepath):
os.makedirs(savepath)
# =============================================================================
# Load the timestamps
# =============================================================================
'''
timestamps_chX_bin : all events in channel X E (A, B, R)
timestamps_bin : all events in channels A and B, chronologically
'''
timestamps_chA_bin, timestamps_chB_bin, timestamps_chR_bin = load.LoadTimeStamps(pre.timetags_filepath+'Dot_%02d/' %dot_idx, pre.timetags_filenames, pre.timetags_headers)
timestamps_bin = np.sort( | np.concatenate((timestamps_chA_bin, timestamps_chB_bin)) | numpy.concatenate |
import matplotlib.pyplot as plt
import h5py, argparse
import numpy as np
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as mpatches
from scipy.optimize import minimize
'''
This script computes the pulse parameters for events falling in the laser-trigger coincidence window, as well as the TTS (transit-time spread).
'''
def fitGaus(tts,limits):
tts_select = tts[(tts<limits[1])&(tts>limits[0])]
result = minimize(likelihood,[1, np.mean(tts_select),np.std(tts_select)],args=(tts_select, tts_select.shape[0]), bounds=[(0,None),limits,(0,(limits[1]-limits[0])/2)])
return result, tts_select.shape[0]
def likelihood(x,*args):
A,mu,sigma = x
tts,N = args
return A*N-tts.shape[0]*np.log(A)+np.sum((tts-mu)**2)/2/sigma**2+tts.shape[0]*np.log(sigma)
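# Hedged usage sketch (synthetic data, not from the measurement): the two
# helpers above can be exercised on simulated TTS values; result.x holds the
# fitted [A, mu, sigma].
def _example_fit_tts():
    rng = np.random.default_rng(0)
    example_tts = rng.normal(300.0, 2.0, size=5000)  # assumed toy TTS sample, in ns
    result, n_used = fitGaus(example_tts, [290.0, 310.0])
    return result.x, n_used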
psr = argparse.ArgumentParser()
psr.add_argument('-i', dest='ipt', help='input h5 file')
psr.add_argument('-o', dest='opt', help='output png file')
psr.add_argument('-c', dest='channel', nargs='+', default=[0,1],help='channel used in DAQ')
psr.add_argument('-t', dest='trigger', help='trigger h5 file')
args = psr.parse_args()
#plt.style.use('fivethirtyeight')
info = []
results = np.zeros(len(args.channel), dtype=[('peakC','<f4'), ('vallyC','<f4'),('PV','<f4'),('chargeMu','<f4'),('chargeSigma','<f4')])
with h5py.File(args.ipt, 'r') as ipt:
for j in range(len(args.channel)):
info.append(ipt['ch{}'.format(args.channel[j])][:])
with h5py.File(args.trigger, 'r') as ipt:
rinterval = ipt['rinterval'][:]
rangemin =-100
rangemax = 500
bins = rangemax-rangemin
# set the figure appearance
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
jet = plt.cm.jet
newcolors = jet(np.linspace(0, 1, 32768))
white = np.array([1, 1, 1, 0.5])
newcolors[0, :] = white
cmap = ListedColormap(newcolors)
print('begin plot')
pdf = PdfPages(args.opt+'.pdf')
# The loop below draws the figures for each channel
nearMax = 10
for j in range(len(args.channel)):
    # charge distribution
fig, ax = plt.subplots()
ax.set_title('charge distribution')
rangemin = int(np.min(info[j]['minPeakCharge'])-1)
rangemax = int(np.max(info[j]['minPeakCharge'])+1)
bins = rangemax-rangemin
h = ax.hist(info[j]['minPeakCharge'], histtype='step', bins=bins, range=[rangemin, rangemax], label='charge')
ax.set_xlabel('charge/mVns')
ax.set_ylabel('entries')
ax.legend()
ax.set_yscale('log')
ax.xaxis.set_minor_locator(MultipleLocator(100))
# plt.savefig('{}/{}charge.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
ax.set_xlim([-5, 1000])
pdf.savefig(fig)
ax.set_yscale('linear')
if h[0].shape[0]>200:
ax.set_ylim([0, 2*np.max(h[0][70:150])])
pi = h[1][70:150][np.argmax(h[0][70:150])]
vi = h[1][15:70][np.argmin(h[0][15:70])]
pv = np.max(h[0][70:150])
vv = np.min(h[0][10:80])
plt.scatter([pi,vi],[pv,vv])
selectinfo = info[j]['minPeakCharge'][(info[j]['minPeak']>3)&(info[j]['minPeakCharge']<800)]
results[j] = (pi,vi, pv/vv,np.mean(selectinfo), np.std(selectinfo))
handles, labels = ax.get_legend_handles_labels()
handles.append(mpatches.Patch(color='none', label='Gain:{:.2f}'.format(pi/50/1.6)))
handles.append(mpatches.Patch(color='none', label='P/V:{:.2f}'.format(pv/vv)))
        handles.append(mpatches.Patch(color='none', label=r'$\mu_{p>3mV}$:'+'{:.2f}'.format(results[j]['chargeMu'])))
        handles.append(mpatches.Patch(color='none', label=r'$\sigma_{p>3mV}$'+':{:.2f}'.format(results[j]['chargeSigma'])))
ax.legend(handles=handles)
# plt.savefig('{}/{}chargeLinear.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
plt.close()
    # peak height distribution
fig, ax = plt.subplots()
ax.set_title('peak height distribution')
h = ax.hist(info[j]['minPeak'],histtype='step', bins=1000, range=[0,1000], label='baseline - peak')
print('peak height max:{};max index {}; part of peak {}'.format(np.max(h[0]), np.argmax(h[0]), h[0][:(np.argmax(h[0])+5)]))
ax.set_xlabel('peak height/mV')
ax.set_ylabel('entries')
ax.legend()
ax.xaxis.set_minor_locator(MultipleLocator(100))
# plt.savefig('{}/{}minpeakLinear.png'.format(args.opt,args.channel[j]))
# pdf.savefig(fig)
ax.set_yscale('log')
# plt.savefig('{}/{}minpeak.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
ax.xaxis.set_minor_locator(MultipleLocator(10))
ax.set_yscale('linear')
ax.set_xlim([0,100])
ax.set_ylim([0,2*np.max(h[0][5:30])])
pdf.savefig(fig)
    # min peak position distribution
fig, ax = plt.subplots()
ax.set_title('peak position distribution')
h = ax.hist(info[j]['minPeakPos'],histtype='step', bins=100, label='$t_{peak}-t_{trigger}$')
print('h shape:{};max index {}'.format(h[0].shape,np.argmax(h[0])))
ax.set_xlabel('$t_{peak}-t_{trigger}$/ns')
ax.set_ylabel('entries')
ax.legend()
# pdf.savefig(fig)
ax.set_yscale('log')
pdf.savefig(fig)
fig, ax = plt.subplots()
ax.set_title('peak($V_p>3$mV) position distribution')
h = ax.hist(info[j]['minPeakPos'][(info[j]['minPeak']>3)], histtype='step', bins=100, label='$t_{peak}-t_{trigger}$')
print('h shape:{};max index {}'.format(h[0].shape,np.argmax(h[0])))
ax.set_xlabel('$t_{peak}-t_{trigger}$/ns')
ax.set_ylabel('entries')
ax.legend()
pdf.savefig(fig)
ax.set_yscale('log')
    # risetime and downtime; the histogram range is hard-coded here and should be chosen dynamically
fig, ax = plt.subplots()
ax.set_title('$T_R$,$T_d$,FWHM ($V_p>3$mV) distribution')
ax.hist(info[j]['riseTime'][(info[j]['minPeak']>3)],histtype='step',bins=300, range=[0,30], label='risingtime:{:.2f}ns'.format(np.mean(info[j]['riseTime'][(info[j]['minPeak']>5)])))
ax.hist(info[j]['downTime'][(info[j]['minPeak']>3)],histtype='step',bins=300, range=[0,30], label='downtime:{:.2f}ns'.format(np.mean(info[j]['downTime'][(info[j]['minPeak']>5)])))
ax.hist(info[j]['FWHM'][(info[j]['minPeak']>3)],histtype='step',bins=300, range=[0,30], label='FWHM:{:.2f}ns'.format(np.mean(info[j]['FWHM'][(info[j]['minPeak']>5)])))
ax.set_xlabel('Time/ns')
ax.set_ylabel('entries')
ax.legend()
#ax.set_xlim([1,40])
pdf.savefig(fig)
ax.set_yscale('log')
# pdf.savefig(fig)
plt.close()
fig,ax = plt.subplots()
limits_mu, limits_sigma = np.mean(info[j]['begin10'][(info[j]['minPeak']>3)&(info[j]['isTrigger'])]),np.std(info[j]['begin10'][(info[j]['minPeak']>3)&(info[j]['isTrigger'])])
limits_sigma = min(limits_sigma, 15)
limits = [limits_mu-limits_sigma, limits_mu+limits_sigma]
result, N = fitGaus(info[j]['begin10'][(info[j]['minPeak']>3)&(info[j]['isTrigger'])], limits)
print(result)
ax.hist(info[j]['begin10'][(info[j]['minPeak']>3)&(info[j]['isTrigger'])],bins=int(100*limits_sigma),range=[limits_mu-3*limits_sigma, limits_mu+3*limits_sigma], histtype='step', label='$t_{0.1}-t_{trigger}$')
ax.plot( | np.arange(limits_mu-3*limits_sigma, limits_mu+3*limits_sigma, 0.1) | numpy.arange |
import sys
import tensorflow as tf
import numpy as np
import librosa
from python_speech_features import fbank,delta
import scipy.io.wavfile as wave
from tensorflow.python.client import device_lib
def _parse_function(example_proto):
''' Function to parse tfrecords file '''
feature = {'data': tf.VarLenFeature(tf.float32),
'label':tf.FixedLenFeature([],tf.int64)}
features = tf.parse_single_example(example_proto, features=feature)
image = tf.sparse_tensor_to_dense(features['data'], default_value=0)
label = tf.cast(features['label'], tf.int16)
return image, label
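def _example_dataset(tfrecord_paths):
    ''' Hedged usage sketch (paths are assumed): map the parser above over a
    TFRecordDataset to obtain (features, label) pairs. '''
    return tf.data.TFRecordDataset(tfrecord_paths).map(_parse_function)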
def get_filterbanks(filename_placeholder, duration=8):
''' Returns filterbanks, delta1 and delta2 of input file '''
def padding(audio,sr,duration=8):
''' Returns audio with padding '''
nmax = sr*duration
padlen = nmax - len(audio)
audio = np.concatenate((audio, [0.0]*padlen))
return audio
def normalize_frames(m,epsilon=1e-12):
''' Normalizes features '''
return np.array([(v - np.mean(v)) / max(np.std(v),epsilon) for v in m]).flatten()
assert filename_placeholder.endswith('wav')
window_fn= lambda x: | np.hanning(x) | numpy.hanning |
import numpy as np
from datayoink.coordconverter import get_axis_info, get_step, get_x_scale, pixel_to_coords, closest,\
unify_x, get_pixels_2d, create_pixel_dict, create_coordinate_dict, get_start_end
def test_get_axis_info():
"""
Tests the get_axis_info function
"""
# the output is a dictionary with the fields: pixel_origin, x_scale, y_scale, step, and units
axis_info_dict = get_axis_info([1], [5], [20], [250], [10], [25], [30, 280], 30, ['volts', 'amps'])
assert isinstance(axis_info_dict, dict), 'axis_info_dict is not a dictionary'
for field in ['step', 'pixel_origin', 'x_scale', 'y_scale', 'units', 'y_pixel_range', 'x_pixel_range']:
assert field in axis_info_dict.keys(), 'axis_info_dict is missing fields'
return
def test_get_step():
"""
Tests the get_step function
"""
step1 = get_step(19, 10, 200)
step2 = get_step(18, 10, 200)
step3 = get_step(16, 10, 200)
# the step size * the number of points should be close to the length of the axis
# step size is an integer
for step in [step1, step2, step3]:
assert isinstance(step, int), 'the step size is not an integer'
# the length of the axis/ step size should be close to but less than the max points
assert np.isclose(190 / step1, 19), 'length of axis/step size not ~< max points'
assert ((190 / step2) < 18) and ((190 / step2) > 17), 'length of axis/step size not ~< max points'
assert ((190 / step3) < 16) and ((190 / step3) > 15), 'length of axis/step size not ~< max points'
return
def test_get_x_scale():
"""
Tests the get_x_scale function
"""
x_scale = get_x_scale(1, 5, 20, 250)
# x_scale * coordinate range should equal pixel range
assert np.isclose(x_scale * (5 - 1), (250 - 20)), 'the x scaling is incorrect'
assert np.isclose(x_scale, 57.5), 'the x scaling is incorrect'
x_scale = get_x_scale(-1, -5, 20, 250)
assert np.isclose(x_scale * (-5 + 1), (250 - 20)), 'the x scaling is incorrect'
assert np.isclose(x_scale, -57.5), 'the x scaling is incorrect'
return
def test_pixel_to_coords():
"""
Tests the pixel_to_coords function (and by extension the x_pixel_to_coords function)
"""
axis_info_dict1 = {'pixel_origin': (20, 100), 'y_scale': 5.3, 'x_scale': 20.5}
axis_info_dict2 = {'pixel_origin': (20, 100), 'y_scale': -0.2, 'x_scale': 0.005}
# the output coordinates should be within the coordinate ranges for each axis
# given a scale and a location, test a few cases (+-0)
coords1 = pixel_to_coords((20, 100), axis_info_dict1) # (0,0)
coords2 = pixel_to_coords((20, 100), axis_info_dict2) # (0,0)
coords3 = pixel_to_coords((55, 33), axis_info_dict1) # (1.707317, 12.641509)
coords4 = pixel_to_coords((55, 33), axis_info_dict2) # (7000, -335)
    coords5 = pixel_to_coords((55, 105), axis_info_dict2) # (7000, 25)
assert np.isclose(coords1[0], 0), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords1[1], 0), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords2[0], 0), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords2[1], 0), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords3[0], 1.707317), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords3[1], 12.64150943), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords4[0], 7000), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords4[1], -335), 'pixel to coordinate conversion is incorrect'
assert np.isclose(coords5[1], 25), 'pixel to coordinate conversion is incorrect'
return
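def _sketch_pixel_to_coords(pixel, axis_info):
    """
    Hedged sketch of the mapping these tests exercise (the real implementation
    lives in datayoink.coordconverter): x is the offset from the pixel origin
    divided by x_scale; y is measured upward from the origin, so the pixel row
    is subtracted from the origin row before dividing by y_scale.
    """
    x0, y0 = axis_info['pixel_origin']
    x = (pixel[0] - x0) / axis_info['x_scale']
    y = (y0 - pixel[1]) / axis_info['y_scale']
    return x, y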
def test_closest():
"""
Tests the closest function
"""
lst = [0, 2, 1, 3, 4, 5, 6]
# val is equidistant to two values in list, first one in list is chosen
assert closest(lst, 1.5) == 2, 'closest value is incorrect'
assert closest(lst, 3.5) == 3, 'closest value is incorrect'
# val is equal to one value in list
assert closest(lst, 2) == 2, 'closest value is incorrect'
# val is closer to one in particular
assert closest(lst, 1.8) == 2, 'closest value is incorrect'
return
def test_unify_x():
"""
Tests the unify_x function
"""
axis_info_dict = {'step': 3}
pixel_lst = [(20, 100), (20, 90), (21, 91), (22, 85), (22, 83), (23, 80), (24, 81), (24, 83), (25, 80), (29, 50),
(29, 45), (30, 30), (30, 10)]
pixels_y = [i[1] for i in pixel_lst]
pixels_x = [i[0] for i in pixel_lst]
unified_pixel_lst = unify_x(pixel_lst, axis_info_dict)
unified_x = [i[0] for i in unified_pixel_lst]
unified_y = [i[1] for i in unified_pixel_lst]
x_spaces = | np.diff(unified_x) | numpy.diff |
"""
This module contains the implementation of block norms, i.e.
l1/l*, linf/l* norms. These are used in multiresponse LASSOs.
"""
from __future__ import print_function, division, absolute_import
import warnings
from copy import copy
import numpy as np
from . import seminorms
from ..identity_quadratic import identity_quadratic
from ..problems.composite import smooth_conjugate
from ..objdoctemplates import objective_doc_templater
from ..doctemplates import (doc_template_user, doc_template_provider)
from ..atoms import _work_out_conjugate
from .block_norms import l1_l2
from .sparse_group_lasso import _gauge_function_dual_strong, _inside_set_strong
# for the docstring, we need l1norm
l1norm = seminorms.l1norm
@objective_doc_templater()
class sparse_group_block(l1_l2):
objective_template = r"""w_1\|%(var)s\|_{1,1} + w_1\|%(var)s\|_{1,2}"""
objective_vars = l1_l2.objective_vars.copy()
objective_vars['var'] = 'B'
objective_vars['normklass'] = 'sparse_group_block'
objective_vars['dualnormklass'] = 'sparse_group_block_dual'
objective_vars['initargs'] = '(5, 4), 1, 2'
objective_vars['shape'] = r'n \times p'
def __init__(self,
shape,
l1_weight,
l2_weight,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
l1_l2.__init__(self,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
self.l1_weight = l1_weight
self.l2_weight = l2_weight
@doc_template_user
def lagrange_prox(self, arg, lipschitz=1, lagrange=None):
arg = arg.reshape(self.shape)
lagrange = seminorms.seminorm.lagrange_prox(self, arg, lipschitz, lagrange)
return _lagrange_prox(arg,
lagrange * self.l1_weight / lipschitz,
lagrange * self.l2_weight / lipschitz)
@doc_template_user
def bound_prox(self, arg, bound=None):
raise NotImplementedError('sparse_group_block bound form not implemented')
@doc_template_user
def constraint(self, x):
x = x.reshape(self.shape)
l1_norms = np.fabs(x).sum()
        l2_norms = np.sqrt(np.sum(x**2, 1)).sum()
norm_sum = self.l1_weight * l1_norms + self.l2_weight * l2_norms
if norm_sum <= self.bound * (1 + self.tol):
return 0
return np.inf
@doc_template_user
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
l1_norms = np.fabs(x).sum()
l2_norms = np.sqrt(np.sum(x**2, 1)).sum()
return lagrange * (self.l1_weight * l1_norms +
self.l2_weight * l2_norms)
@doc_template_user
def get_conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = sparse_group_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
self.l1_weight,
self.l2_weight,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
conjugate = property(get_conjugate)
def __copy__(self):
return self.__class__(self.shape,
self.l1_weight,
self.l2_weight,
quadratic=self.quadratic,
initial=self.coefs,
bound=copy(self.bound),
lagrange=copy(self.lagrange),
offset=copy(self.offset))
def terms(self, arg):
"""
Return the args that are summed
in computing the seminorm.
>>> import regreg.api as rr
>>> groups = [1,1,2,2,2]
>>> penalty = rr.group_lasso(groups, lagrange=1.)
>>> arg = [2,4,5,3,4]
>>> list(penalty.terms(arg)) # doctest: +ELLIPSIS
[6.3245..., 12.2474...]
>>> penalty.seminorm(arg) # doctest: +ELLIPSIS
18.5720...
>>> np.sqrt((2**2 + 4**2)*2), np.sqrt((5**2 + 3**2 + 4**2) * 3.) # doctest: +ELLIPSIS
(6.3245..., 12.2474...)
>>> np.sqrt((2**2 + 4**2)*2) + np.sqrt((5**2 + 3**2 + 4**2) * 3.) # doctest: +ELLIPSIS
18.5720...
"""
        terms = (np.fabs(arg).sum(1) * self.l1_weight +
                 np.sqrt((arg**2).sum(1)) * self.l2_weight)
return terms
class sparse_group_block_dual(sparse_group_block):
objective_template = r"""\|%(var)s\|_{w_1,w_2,\text{block}}"""
objective_vars = l1_l2.objective_vars.copy()
objective_vars['var'] = 'B'
objective_vars['normklass'] = 'sparse_group_block_dual'
objective_vars['dualnormklass'] = 'sparse_group_block'
objective_vars['initargs'] = '(5, 4), 1, 2'
objective_vars['shape'] = r'n \times p'
def __init__(self,
shape,
l1_weight,
l2_weight,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
l1_l2.__init__(self,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
self.l1_weight = l1_weight
self.l2_weight = l2_weight
@doc_template_user
def lagrange_prox(self, arg, lipschitz=1, lagrange=None):
raise NotImplementedError('sparse_group_block Lagrange form not implemented')
@doc_template_user
def bound_prox(self, arg, bound=None):
arg = arg.reshape(self.shape)
bound = seminorms.seminorm.bound_prox(self, arg, bound)
_prox = _lagrange_prox(arg,
bound * self.l1_weight,
bound * self.l2_weight)
return arg - _prox
@doc_template_user
def constraint(self, x):
x = x.reshape(self.shape)
dual_norm = _gauge_function_dual(x,
self.l1_weight,
self.l2_weight)
if dual_norm <= self.bound * (1 + self.tol):
return 0
return np.inf
@doc_template_user
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
return lagrange * _gauge_function_dual(x,
self.l1_weight,
self.l2_weight)
@doc_template_user
def get_conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = sparse_group_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
self.l1_weight,
self.l2_weight,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
conjugate = property(get_conjugate)
def terms(self, arg):
"""
Return the args that are maximized
in computing the seminorm.
>>> import regreg.api as rr
>>> groups = [1,1,2,2,2]
>>> penalty = rr.group_lasso_dual(groups, lagrange=1.)
>>> arg = [2,4,5,3,4]
>>> list(penalty.terms(arg)) # doctest: +ELLIPSIS
[3.1622..., 4.0824...]
>>> np.sqrt((2**2 + 4**2)/2), np.sqrt((5**2 + 3**2 + 4**2) / 3.) # doctest: +ELLIPSIS
(3.1622..., 4.0824...)
>>> penalty.seminorm(arg) # doctest: +ELLIPSIS
4.0824...
"""
return np.array([_gauge_function_dual_strong(arg[i],
self.l1_weight,
self.l2_weight)[0] for i in range(arg.shape[0])])
# fast Lagrange prox
def _lagrange_prox(arg, l1_weight, l2_weight):
soft_thresh = np.sign(arg) * np.maximum(np.fabs(arg) - l1_weight, 0)
norms = np.sqrt(np.sum(soft_thresh**2, 1))
norm_factors = | np.maximum(norms - l2_weight, 0) | numpy.maximum |
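# Hedged standalone sketch (the regreg implementation above is truncated here):
# the sparse-group prox first soft-thresholds entrywise with the l1 weight,
# then shrinks each row's l2 norm by the l2 weight.
def _sketch_sparse_group_prox(arg, l1_weight, l2_weight):
    soft = np.sign(arg) * np.maximum(np.fabs(arg) - l1_weight, 0)
    norms = np.sqrt(np.sum(soft**2, axis=1))
    scale = np.maximum(norms - l2_weight, 0) / np.where(norms > 0, norms, 1.0)
    return soft * scale[:, None]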
# <NAME>
import argparse, sys, os
import numpy as np
import pylab as plt
from glob import glob
from spectral.io import envi
from scipy.stats import norm
from scipy.linalg import solve, inv
from astropy import modeling
from sklearn.linear_model import RANSACRegressor
from scipy.optimize import minimize
from scipy.interpolate import BSpline,interp1d
from skimage.filters import threshold_otsu
from scipy.ndimage import gaussian_filter
from makelinearity import linearize
from fpa import FPA
import scipy.linalg as linalg
import json
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.split('.')[:-1])+'.hdr'):
return '.'.join(infile.split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def main():
description = "Calculate Linearity Correction"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('input',nargs='+')
parser.add_argument('basis')
parser.add_argument('--config')
parser.add_argument('--linearity_nbasis',default=2)
parser.add_argument('--width',default=37)
parser.add_argument('--margin',default=9)
parser.add_argument('--draft',default=None)
parser.add_argument('output')
args = parser.parse_args()
fpa = FPA(args.config)
margin = int(args.margin)
width = int(args.width)
xs,ys = [],[]
nfiles = len(args.input)
illums =[]
out = np.zeros((fpa.native_rows,fpa.native_columns,args.linearity_nbasis))
if args.draft is not None:
out = envi.open(args.draft+'.hdr').load()
basis = np.squeeze(envi.open(args.basis+'.hdr').load())
evec = np.squeeze(basis[1:,:].T)
if evec.shape[1] != args.linearity_nbasis:
raise IndexError('Linearity basis does not match file size')
evec[ | np.isnan(evec) | numpy.isnan |
# Python 3.5
# Script written by <NAME> (<EMAIL>), <NAME> (<EMAIL>), and <NAME> (<EMAIL>)
# VERSION 0.1 - JUNE 2020
#--------TURN OFF MAGMASAT WARNING--------#
import warnings
warnings.filterwarnings("ignore", message="rubicon.objc.ctypes_patch has only been tested ")
warnings.filterwarnings("ignore", message="The handle")
#-----------------IMPORTS-----------------#
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from cycler import cycler
from abc import ABC, abstractmethod
from scipy.optimize import root_scalar
from scipy.optimize import root
from scipy.optimize import minimize
import sys
import sympy
from copy import copy
# import anvil_server
#--------------MELTS preamble---------------#
from thermoengine import equilibrate
# instantiate thermoengine equilibrate MELTS instance
melts = equilibrate.MELTSmodel('1.2.0')
# Suppress phases not required in the melts simulation
phases = melts.get_phase_names()
for phase in phases:
melts.set_phase_inclusion_status({phase: False})
melts.set_phase_inclusion_status({'Fluid': True, 'Liquid': True})
#----------DEFINE SOME CONSTANTS-------------#
oxides = ['SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'Cr2O3', 'FeO', 'MnO', 'MgO', 'NiO', 'CoO', 'CaO', 'Na2O', 'K2O', 'P2O5',
'H2O', 'CO2']
anhydrous_oxides = ['SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'Cr2O3', 'FeO', 'MnO', 'MgO', 'NiO', 'CoO', 'CaO', 'Na2O', 'K2O', 'P2O5']
volatiles = ['H2O', 'CO2']
oxideMass = {'SiO2': 28.085+32, 'MgO': 24.305+16, 'FeO': 55.845+16, 'CaO': 40.078+16, 'Al2O3': 2*26.982+16*3, 'Na2O': 22.99*2+16,
'K2O': 39.098*2+16, 'MnO': 54.938+16, 'TiO2': 47.867+32, 'P2O5': 2*30.974+5*16, 'Cr2O3': 51.996*2+3*16,
'NiO': 58.693+16, 'CoO': 28.01+16, 'Fe2O3': 55.845*2+16*3,
'H2O': 18.02, 'CO2': 44.01}
CationNum = {'SiO2': 1, 'MgO': 1, 'FeO': 1, 'CaO': 1, 'Al2O3': 2, 'Na2O': 2,
'K2O': 2, 'MnO': 1, 'TiO2': 1, 'P2O5': 2, 'Cr2O3': 2,
'NiO': 1, 'CoO': 1, 'Fe2O3': 2, 'H2O': 2, 'CO2': 1}
OxygenNum = {'SiO2': 2, 'MgO': 1, 'FeO': 1, 'CaO': 1, 'Al2O3': 3, 'Na2O': 1,
'K2O': 1, 'MnO': 1, 'TiO2': 2, 'P2O5': 5, 'Cr2O3': 3,
'NiO': 1, 'CoO': 1, 'Fe2O3': 3, 'H2O': 1, 'CO2': 2}
CationCharge = {'SiO2': 4, 'MgO': 2, 'FeO': 2, 'CaO': 2, 'Al2O3': 3, 'Na2O': 1,
'K2O': 1, 'MnO': 2, 'TiO2': 4, 'P2O5': 5, 'Cr2O3': 3,
'NiO': 2, 'CoO': 2, 'Fe2O3': 3, 'H2O': 1, 'CO2': 4}
CationMass = {'SiO2': 28.085, 'MgO': 24.305, 'FeO': 55.845, 'CaO': 40.078, 'Al2O3': 26.982, 'Na2O': 22.990,
'K2O': 39.098, 'MnO': 54.938, 'TiO2': 47.867, 'P2O5': 30.974, 'Cr2O3': 51.996,
'NiO': 58.693, 'CoO': 28.01, 'Fe2O3': 55.845, 'H2O': 2, 'CO2': 12.01}
oxides_to_cations = {'SiO2': 'Si', 'MgO': 'Mg', 'FeO': 'Fe', 'CaO': 'Ca', 'Al2O3': 'Al', 'Na2O': 'Na',
'K2O': 'K', 'MnO': 'Mn', 'TiO2': 'Ti', 'P2O5': 'P', 'Cr2O3': 'Cr',
'NiO': 'Ni', 'CoO': 'Co', 'Fe2O3': 'Fe3', 'H2O': 'H', 'CO2': 'C'}
cations_to_oxides = {'Si': 'SiO2', 'Mg': 'MgO', 'Fe': 'FeO', 'Ca': 'CaO', 'Al': 'Al2O3', 'Na': 'Na2O',
'K': 'K2O', 'Mn': 'MnO', 'Ti': 'TiO2', 'P': 'P2O5', 'Cr': 'Cr2O3',
'Ni': 'NiO', 'Co': 'CoO', 'Fe3': 'Fe2O3', 'H': 'H2O', 'C': 'CO2'}
#----------DEFINE SOME EXCEPTIONS--------------#
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class SaturationError(Error):
"""Exception raised for errors thrown when a sample does not reach saturation.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
#----------DEFINE CUSTOM PLOTTING FORMATTING------------#
style = "seaborn-colorblind"
plt.style.use(style)
plt.rcParams["mathtext.default"] = "regular"
plt.rcParams["mathtext.fontset"] = "dejavusans"
mpl.rcParams['patch.linewidth'] = 1
mpl.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 14
#Define color cycler based on plot style set here
the_rc = plt.style.library[style] #get style formatting set by plt.style.use()
color_list = the_rc['axes.prop_cycle'].by_key()['color'] #list of colors by hex code
color_cyler = the_rc['axes.prop_cycle'] #get the cycler
def printTable(myDict):
""" Pretty print a dictionary (as pandas DataFrame)
Parameters
----------
myDict: dict
A dictionary
Returns
-------
pandas DataFrame
The input dictionary converted to a pandas DataFrame
"""
try:
oxidesum = sum(myDict[oxide] for oxide in oxides)
myDict.update({"Sum oxides": oxidesum})
except:
pass
table = pd.DataFrame([v for v in myDict.values()], columns = ['value'],
index = [k for k in myDict.keys()])
return table
#----------DEFINE SOME UNIVERSAL INFORMATIVE METHODS--------------#
def get_model_names():
"""
Returns all available model names as a list of strings.
"""
model_names = []
for key, value in default_models.items():
model_names.append(key)
return model_names
#----------DEFINE SOME BASIC DATA TRANSFORMATION METHODS-----------#
def mol_to_wtpercent(sample):
"""
Takes in a pandas DataFrame containing multi-sample input or a dictionary containing single-sample input
and returns a pandas DataFrame object with oxide values converted from mole percent to wt percent.
Parameters
----------
oxides: pandas DataFrame object or dictionary
Variable name referring to the pandas DataFrame object that contains user-imported data or a dictionary
for single-sample input.
"""
data = sample
if isinstance(sample, pd.DataFrame):
for key, value in oxideMass.items():
data.loc[:, key] *= value
data["MPOSum"] = sum([data[oxide] for oxide in oxides])
for oxide in oxides:
data.loc[:, oxide] /= data['MPOSum']
data.loc[:, oxide] *= 100
del data['MPOSum']
elif isinstance(sample, dict):
for oxide in oxides:
if oxide in data.keys():
pass
else:
data[oxide] = 0.0
data = {oxide: data[oxide] for oxide in oxides}
for key, value in oxideMass.items():
data.update({key: (data[key] * value)})
MPOSum = sum(data.values())
for key, value in data.items():
data.update({key: 100 * value / MPOSum})
return data
def wtpercentOxides_to_molCations(oxides):
"""Takes in a pandas Series containing major element oxides in wt%, and converts it
to molar proportions of cations (normalised to 1).
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%.
Returns
-------
dict or pandas Series
Molar proportions of cations, normalised to 1.
"""
molCations = {}
_oxides = oxides.copy()
if type(oxides) == dict:
oxideslist = list(_oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(_oxides.index)
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
for ox in oxideslist:
cation = oxides_to_cations[ox]
molCations[cation] = CationNum[ox]*_oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molCations = pd.Series(molCations)
molCations = molCations/molCations.sum()
else:
total = np.sum(list(molCations.values()))
for ox in oxideslist:
cation = oxides_to_cations[ox]
molCations[cation] = molCations[cation]/total
return molCations
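# Hedged usage sketch (made-up, roughly basaltic wt% values, for illustration
# only): convert a wt% oxide dictionary to normalised molar cation fractions.
def _example_molCations():
    example_wt = {'SiO2': 50.0, 'TiO2': 1.0, 'Al2O3': 15.0, 'FeO': 10.0,
                  'MgO': 8.0, 'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5,
                  'H2O': 1.5, 'CO2': 0.5}
    return wtpercentOxides_to_molCations(example_wt)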
def wtpercentOxides_to_molOxides(oxides):
""" Takes in a pandas Series or dict containing major element oxides in wt%, and converts it
to molar proportions (normalised to 1).
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%
Returns
-------
dict or pandas Series
Molar proportions of major element oxides, normalised to 1.
"""
molOxides = {}
_oxides = oxides.copy()
if type(oxides) == dict or type(oxides) == pd.core.series.Series:
if type(oxides) == dict:
oxideslist = list(oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(oxides.index)
for ox in oxideslist:
molOxides[ox] = _oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molOxides = pd.Series(molOxides)
molOxides = molOxides/molOxides.sum()
else:
total = np.sum(list(molOxides.values()))
for ox in oxideslist:
molOxides[ox] = molOxides[ox]/total
return molOxides
    elif isinstance(oxides, pd.DataFrame):
        # DataFrame input: convert every oxide column to moles, then renormalise row-wise
        data = _oxides
        for key, value in oxideMass.items():
            data.loc[:, key] /= value
        data["MPOSum"] = sum([data[ox] for ox in oxideMass.keys()])
        for ox in oxideMass.keys():
            data.loc[:, ox] /= data['MPOSum']
        del data['MPOSum']
        return data
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
def wtpercentOxides_to_molSingleO(oxides,exclude_volatiles=False):
""" Takes in a pandas Series containing major element oxides in wt%, and constructs
the chemical formula, on a single oxygen basis.
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%
Returns
-------
dict or pandas Series
The chemical formula of the composition, on a single oxygen basis. Each element is
a separate entry in the Series.
"""
molCations = {}
_oxides = oxides.copy()
if type(oxides) == dict:
oxideslist = list(oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(oxides.index)
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
total_O = 0.0
for ox in oxideslist:
if exclude_volatiles == False or (ox != 'H2O' and ox != 'CO2'):
cation = oxides_to_cations[ox]
molCations[cation] = CationNum[ox]*oxides[ox]/oxideMass[ox]
total_O += OxygenNum[ox]*oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molCations = pd.Series(molCations)
molCations = molCations/total_O
else:
# total = np.sum(list(molCations.values()))
for ox in oxideslist:
if exclude_volatiles == False or (ox != 'H2O' and ox != 'CO2'):
cation = oxides_to_cations[ox]
molCations[cation] = molCations[cation]/total_O
return molCations
def wtpercentOxides_to_formulaWeight(sample,exclude_volatiles=False):
""" Converts major element oxides in wt% to the formula weight (on a 1 oxygen basis).
Parameters
----------
sample dict or pandas Series
Major element oxides in wt%.
exclude_volatiles bool
If True H2O and CO2 will be excluded from the formula weight calculation.
Returns
-------
float
The formula weight of the composition, on a one oxygen basis.
"""
if type(sample) == dict:
_sample = pd.Series(sample.copy())
elif type(sample) != pd.core.series.Series:
raise InputError("The composition input must be a pandas Series or dictionary.")
else:
_sample = sample.copy()
cations = wtpercentOxides_to_molSingleO(_sample,exclude_volatiles=exclude_volatiles)
if type(cations) != dict:
cations = dict(cations)
# if exclude_volatiles == True:
# if 'C' in cations:
# cations.pop('C')
# if 'H' in cations:
# cations.pop('H')
# newsum = 0
# for cation in cations:
# newsum += OxygenNum[cations_to_oxides[cation]]
# for cation in cations:
# cations[cation] = cations[cation]/newsum
FW = 15.999
for cation in list(cations.keys()):
FW += cations[cation]*CationMass[cations_to_oxides[cation]]
return FW
#----------DATA TRANSFORMATION FOR PANDAS DATAFRAMES---------#
def fluid_molfrac_to_wt(data, H2O_colname='XH2O_fl_VESIcal', CO2_colname='XCO2_fl_VESIcal'):
"""
Takes in a pandas dataframe object and converts only the fluid composition from mole fraction to wt%, leaving the melt composition
in tact. The user must specify the names of the XH2O_fl and XCO2_fl columns.
Parameters
----------
data: pandas DataFrame
Sample composition(s) containing columns for H2O and CO2 concentrations in the fluid.
H2O_colname: str
OPTIONAL. The default value is 'XH2O_fl', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the H2O concentration in the fluid, in mol fraction.
CO2_colname: str
OPTIONAL. The default value is 'XCO2_fl', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the CO2 concentration in the fluid, in mol fraction.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
convData = data.copy()
MPO_H2O_list = []
MPO_CO2_list = []
for index, row in convData.iterrows():
MPO_H2O_list.append(row[H2O_colname] * oxideMass["H2O"])
MPO_CO2_list.append(row[CO2_colname] * oxideMass["CO2"])
convData["MPO_H2O"] = MPO_H2O_list
convData["MPO_CO2"] = MPO_CO2_list
convData["H2O_fl_wt"] = 100 * convData["MPO_H2O"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
convData["CO2_fl_wt"] = 100 * convData["MPO_CO2"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
del convData["MPO_H2O"]
del convData["MPO_CO2"]
return convData
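# Hedged usage sketch (column names follow the defaults above; the mole
# fractions are illustrative): convert a fluid composition from mol fraction
# to wt% while leaving the rest of the DataFrame untouched.
def _example_fluid_molfrac_to_wt():
    df = pd.DataFrame({'XH2O_fl_VESIcal': [0.8, 0.5],
                       'XCO2_fl_VESIcal': [0.2, 0.5]})
    return fluid_molfrac_to_wt(df)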
def fluid_wt_to_molfrac(data, H2O_colname='H2O_fl_wt', CO2_colname='CO2_fl_wt'):
"""
Takes in a pandas dataframe object and converts only the fluid composition from wt% to mole fraction, leaving the melt composition
in tact. The user must specify the names of the H2O_fl_wt and CO2_fl_wt columns.
Parameters
----------
data: pandas DataFrame
DataFrame containing columns for H2O and CO2 concentrations in the fluid.
H2O_colname: str
OPTIONAL. The default value is 'H2O_fl_wt', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the H2O concentration in the fluid, in wt%.
CO2_colname: str
OPTIONAL. The default value is 'CO2_fl_wt', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the CO2 concentration in the fluid, in wt%.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
convData = data.copy()
MPO_H2O_list = []
MPO_CO2_list = []
for index, row in convData.iterrows():
MPO_H2O_list.append(row[H2O_colname] / oxideMass["H2O"])
MPO_CO2_list.append(row[CO2_colname] / oxideMass["CO2"])
convData["MPO_H2O"] = MPO_H2O_list
convData["MPO_CO2"] = MPO_CO2_list
convData["XH2O_fl"] = convData["MPO_H2O"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
convData["XCO2_fl"] = convData["MPO_CO2"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
del convData["MPO_H2O"]
del convData["MPO_CO2"]
return convData
#----------DEFINE SOME NORMALIZATION METHODS-----------#
def normalize(sample):
"""Normalizes an input composition to 100%. This is the 'standard' normalization routine.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
A single composition can be passed as a dictionary. Multiple compositions can be passed either as
a pandas DataFrame or an ExcelFile object. Compositional information as oxides must be present.
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_normalize(sample):
single_sample = sample
return {k: 100.0 * v / sum(single_sample.values()) for k, v in single_sample.items()}
def multi_normalize(sample):
multi_sample = sample.copy()
multi_sample["Sum"] = sum([multi_sample[oxide] for oxide in oxides])
for column in multi_sample:
if column in oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum"]
del multi_sample["Sum"]
return multi_sample
if isinstance(sample, dict):
_sample = sample.copy()
return single_normalize(_sample)
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
sample_dict = sample.to_dict()
return pd.Series(single_normalize(sample_dict))
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_normalize(data)
elif isinstance(sample, pd.DataFrame):
return multi_normalize(sample)
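# Hedged usage sketch (illustrative values): renormalise a single-sample
# dictionary so that the listed oxides sum to 100 wt%.
def _example_normalize():
    sample = {'SiO2': 47.0, 'Al2O3': 15.5, 'MgO': 7.5, 'CaO': 10.5,
              'FeO': 9.0, 'Na2O': 3.0, 'H2O': 4.0}
    return normalize(sample)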
def normalize_FixedVolatiles(sample):
""" Normalizes major element oxides to 100 wt%, including volatiles. The volatile
wt% will remain fixed, whilst the other major element oxides are reduced proportionally
so that the total is 100 wt%.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
Major element oxides in wt%
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_FixedVolatiles(sample):
normalized = pd.Series({},dtype=float)
volatiles = 0
if 'CO2' in list(_sample.index):
volatiles += _sample['CO2']
if 'H2O' in list(_sample.index):
volatiles += _sample['H2O']
for ox in list(_sample.index):
if ox != 'H2O' and ox != 'CO2':
normalized[ox] = _sample[ox]
normalized = normalized/np.sum(normalized)*(100-volatiles)
if 'CO2' in list(_sample.index):
normalized['CO2'] = _sample['CO2']
if 'H2O' in list(_sample.index):
normalized['H2O'] = _sample['H2O']
return normalized
def multi_FixedVolatiles(sample):
multi_sample = sample.copy()
multi_sample["Sum_anhy"] = sum([multi_sample[oxide] for oxide in anhydrous_oxides])
multi_sample["Sum_vols"] = sum([multi_sample[vol] for vol in volatiles])
for column in multi_sample:
if column in anhydrous_oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum_anhy"]
multi_sample[column] = multi_sample[column] / (100.0/(100.0-multi_sample["Sum_vols"]))
del multi_sample["Sum_anhy"]
del multi_sample["Sum_vols"]
return multi_sample
if isinstance(sample, dict):
_sample = pd.Series(sample.copy())
return single_FixedVolatiles(_sample).to_dict()
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
return single_FixedVolatiles(_sample)
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_FixedVolatiles(data)
elif isinstance(sample, pd.DataFrame):
return multi_FixedVolatiles(sample)
else:
raise InputError("The composition input must be a pandas Series or dictionary for single sample \
or a pandas DataFrame or ExcelFile object for multi-sample.")
def normalize_AdditionalVolatiles(sample):
"""Normalises major element oxide wt% to 100%, assuming it is volatile-free. If
H2O or CO2 are passed to the function, their un-normalized values will be retained
in addition to the normalized non-volatile oxides, summing to >100%.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
Major element oxides in wt%
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_AdditionalVolatiles(sample):
normalized = pd.Series({})
for ox in list(_sample.index):
if ox != 'H2O' and ox != 'CO2':
normalized[ox] = _sample[ox]
normalized = normalized/np.sum(normalized)*100
if 'H2O' in _sample.index:
normalized['H2O'] = _sample['H2O']
if 'CO2' in _sample.index:
normalized['CO2'] = _sample['CO2']
return normalized
def multi_AdditionalVolatiles(sample):
multi_sample = sample.copy()
multi_sample["Sum"] = sum([multi_sample[oxide] for oxide in anhydrous_oxides])
for column in multi_sample:
if column in anhydrous_oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum"]
del multi_sample["Sum"]
return multi_sample
if isinstance(sample, dict):
_sample = pd.Series(sample.copy())
return single_AdditionalVolatiles(_sample).to_dict()
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
return single_AdditionalVolatiles(sample)
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_AdditionalVolatiles(data)
elif isinstance(sample, pd.DataFrame):
return multi_AdditionalVolatiles(sample)
else:
raise InputError("The composition input must be a pandas Series or dictionary for single sample \
or a pandas DataFrame or ExcelFile object for multi-sample.")
#------------DEFINE MAJOR CLASSES-------------------#
class ExcelFile(object):
"""An excel file with sample names and oxide compositions
Attributes
----------
filename: str
Path to the excel file, e.g., "my_file.xlsx"
sheet_name: str
OPTIONAL. Default value is 0 which gets the first sheet in the excel spreadsheet file. This implements the pandas.
read_excel() sheet_name parameter. But functionality to read in more than one sheet at a time (e.g., pandas.read_excel(sheet_name=None))
is not yet imlpemented in VESIcal. From the pandas 1.0.4 documentation:
Available cases:
- Defaults to 0: 1st sheet as a DataFrame
- 1: 2nd sheet as a DataFrame
- "Sheet1": Load sheet with name “Sheet1”
input_type: str or int
OPTIONAL. Default is 'wtpercent'. String defining whether the oxide composition is given in wt percent
("wtpercent", which is the default), mole percent ("molpercent"), or mole fraction ("molfrac").
label: str
OPTIONAL. Default is 'Label'. Name of the column within the passed Excel file referring to sample names.
"""
def __init__(self, filename, sheet_name=0, input_type='wtpercent', label='Label', **kwargs):
"""Return an ExcelFile object whoes parameters are defined here."""
if isinstance(sheet_name, str) or isinstance(sheet_name, int):
pass
else:
raise InputError("If sheet_name is passed, it must be of type str or int. Currently, VESIcal cannot import more than one sheet at a time.")
self.input_type = input_type
data = pd.read_excel(filename, sheet_name=sheet_name)
data = data.fillna(0)
try:
data = data.set_index(label)
except:
raise InputError(
"Imported file must contain a column of sample names. If this column is not titled 'Label' (the default value), you must pass the column name to arg label. For example: ExcelFile('myfile.xslx', label='SampleNames')") #TODO test
if 'model' in kwargs:
warnings.warn("You don't need to pass a model here, so it will be ignored. You can specify a model when performing calculations on your dataset (e.g., calculate_dissolved_volatiles())",RuntimeWarning)
total_iron_columns = ["FeOt", "FeOT", "FeOtot", "FeOtotal", "FeOstar", "FeO*"]
for name in total_iron_columns:
if name in data.columns:
if 'FeO' in data.columns:
warnings.warn("Both " + str(name) + " and FeO columns were passed. " + str(name) + " column will be ignored.",RuntimeWarning)
else:
warnings.warn("Total iron column " + str(name) + " detected. This column will be treated as FeO. If Fe2O3 data are not given, Fe2O3 will be 0.0.",RuntimeWarning)
data['FeO'] = data[name]
for oxide in oxides:
if oxide in data.columns:
pass
else:
data[oxide] = 0.0
# TODO test all input types produce correct values
if input_type == "wtpercent":
pass
if input_type == "molpercent":
data = mol_to_wtpercent(data)
if input_type == "molfrac":
data = mol_to_wtpercent(data)
self.data = data
def preprocess_sample(self,sample):
"""
Adds 0.0 values to any oxide data not passed.
Parameters
----------
sample: pandas DataFrame
self.data composition of samples in wt% oxides
Returns
-------
pandas DataFrame
"""
for oxide in oxides:
if oxide in self.data.columns:
pass
else:
self.data[oxide] = 0.0
return sample
def get_sample_oxide_comp(self, sample, norm='none'):
"""
Returns oxide composition of a single sample from a user-imported excel file as a dictionary
Parameters
----------
sample: string
Name of the desired sample
norm_style: string
OPTIONAL. Default value is 'standard'. This specifies the style of normalization applied to the sample.
'standard' normalizes the entire input composition (including any volatiles) to 100%.
'fixedvolatiles' normalizes oxides to 100%, including volatiles. The volatile
wt% will remain fixed, whilst the other major element oxides are reduced proportionally
so that the total is 100 wt%.
'additionalvolatiles' normalizes oxides to 100%, assuming it is volatile-free. If
H2O or CO2 are passed to the function, their un-normalized values will be retained
in addition to the normalized non-volatile oxides, summing to >100%.
'none' returns the value-for-value un-normalized composition.
Returns
-------
dictionary
Composition of the sample as oxides
"""
if norm == 'none' or norm == 'standard' or norm == 'fixedvolatiles' or norm == 'additionalvolatiles':
pass
else:
raise InputError('norm must be either none, standard, fixedvolatiles, or additionalvolatiles.')
data = self.data
my_sample = pd.DataFrame(data.loc[sample])
sample_dict = (my_sample.to_dict()[sample])
sample_oxides = {}
for item, value in sample_dict.items():
if item in oxides:
sample_oxides.update({item: value})
if norm == 'standard':
return normalize(sample_oxides)
if norm == 'fixedvolatiles':
return normalize_FixedVolatiles(sample_oxides)
if norm == 'additionalvolatiles':
return normalize_AdditionalVolatiles(sample_oxides)
if norm == 'none':
return sample_oxides
def get_XH2O_fluid(self, sample, temperature, pressure, H2O, CO2):
"""An internally used function to calculate fluid composition.
Parameters
----------
sample: dictionary
Sample composition in wt% oxides
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars
H2O: float
wt% H2O in the system
CO2: float
wt% CO2 in the system
Returns
-------
float
Mole fraction of H2O in the H2O-CO2 fluid
"""
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
bulk_comp["H2O"] = H2O
bulk_comp["CO2"] = CO2
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
#NOTE mode='component' returns endmember component keys with values in mol fraction.
if "Water" in fluid_comp:
H2O_fl = fluid_comp["Water"]
else:
H2O_fl = 0.0
# if H2O_fl == 0:
# raise SaturationError("Composition not fluid saturated.")
return H2O_fl
def save_excelfile(self, filename, calculations, sheet_name=None): #TODO how to handle if user just wants to normalize data?
"""
Saves data calculated by the user in batch processing mode (using the ExcelFile class methods) to an organized
excel file, with the original user data plus any calculated data.
Parameters
----------
filename: string
Name of the file. Extension (.xlsx) should be passed along with the name itself, all in quotes (e.g., 'myfile.xlsx').
calculations: list
List of variables containing calculated outputs from any of the core ExcelFile functions: calculate_dissolved_volatiles,
calculate_equilibrium_fluid_comp, and calculate_saturation_pressure.
sheet_name: None or list
OPTIONAL. Default value is None. Allows user to set the name of the sheet or sheets written to the Excel file.
Returns
-------
Excel File
Creates and saves an Excel file with data from each calculation saved to its own sheet.
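Examples
--------
A hypothetical call, assuming satPs and dissolved are DataFrames returned by the
calculate_saturation_pressure and calculate_dissolved_volatiles methods (names are illustrative):
    myfile.save_excelfile('myfile.xlsx', calculations=[satPs, dissolved], sheet_name=['SatP', 'Dissolved'])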
"""
if isinstance(calculations, list):
if isinstance(sheet_name, list) or sheet_name is None:
pass
else:
raise InputError("calculations and sheet_name must be type list. If you only have one calculation or sheet_name to pass, make sure they are passed in square brackets []")
with pd.ExcelWriter(filename) as writer:
self.data.to_excel(writer, 'Original_User_Data')
if sheet_name is None:
for n, df in enumerate(calculations):
df.to_excel(writer, 'Calc%s' % n)
elif isinstance(sheet_name, list):
if len(sheet_name) == len(calculations):
pass
else:
raise InputError("calculations and sheet_name must have the same length")
for i in range(len(calculations)):
if isinstance(sheet_name[i], str):
calculations[i].to_excel(writer, sheet_name[i])
else:
raise InputError("if sheet_name is passed, it must be list of strings")
else:
raise InputError("sheet_name must be type list")
return print("Saved " + str(filename))
def calculate_dissolved_volatiles(self, temperature, pressure, X_fluid=1, print_status=True, model='MagmaSat', record_errors=False, **kwargs):
"""
Calculates the amount of H2O and CO2 dissolved in a magma at the given P/T conditions and fluid composition. Fluid composition
will be matched to within 0.0001 mole fraction.
Parameters
----------
temperature: float, int, or str
Temperature, in degrees C. Can be passed as float, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
pressure: float, int, or str
Pressure, in bars. Can be passed as float or int, in which case the
passed value is used as the pressure for all samples. Alternatively, pressure information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
X_fluid: float, int, or str
OPTIONAL: Default value is 1. The mole fraction of H2O in the H2O-CO2 fluid. X_fluid=1 is a pure H2O fluid. X_fluid=0 is a
pure CO2 fluid. Can be passed as a float or int, in which case the passed value is used as the X_fluid for all samples.
Alternatively, X_fluid information for each individual sample may already be present in the ExcelFile object. If so, pass
the str value corresponding to the column title in the ExcelFile object.
print_status: bool
OPTIONAL: The default value is True, in which case the progress of the calculation will be printed to the terminal.
If set to False, nothing will be printed. MagmaSat calculations tend to be slow, and so a value of True is recommended
for most use cases.
model: string
The default value is 'MagmaSat'. Any other model name can be passed here as a string (in single quotes).
record_errors: bool
OPTIONAL: If True, any errors arising during the calculation will be recorded as a column.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
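Examples
--------
A hypothetical batch calculation, assuming an ExcelFile object named myfile whose spreadsheet
contains a temperature column titled 'Temp' (names are illustrative):
    dissolved = myfile.calculate_dissolved_volatiles(temperature='Temp', pressure=2000.0, X_fluid=1)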
"""
data = self.preprocess_sample(self.data)
dissolved_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temp must be type str or float or int")
if isinstance(pressure, str):
file_has_press = True
press_name = pressure
elif isinstance(pressure, float) or isinstance(pressure, int):
file_has_press = False
else:
raise InputError("pressure must be type str or float or int")
if isinstance(X_fluid, str):
file_has_X = True
X_name = X_fluid
elif isinstance(X_fluid, float) or isinstance(X_fluid, int):
file_has_X = False
if X_fluid != 0 and X_fluid !=1:
if X_fluid < 0.001 or X_fluid > 0.999:
raise InputError("X_fluid is calculated to a precision of 0.0001 mole fraction. \
Value for X_fluid must be between 0.0001 and 0.9999.")
else:
raise InputError("X_fluid must be type str or float or int")
H2Ovals = []
CO2vals = []
warnings = []
errors = []
if model in get_models(models='mixed'):
for index, row in dissolved_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=(X_fluid, 1-X_fluid), model=model,
silence_warnings=True, **kwargs)
H2Ovals.append(calc.result['H2O_liq'])
CO2vals.append(calc.result['CO2_liq'])
warnings.append(calc.calib_check)
errors.append('')
except Exception as inst:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append('Calculation Failed.')
errors.append(sys.exc_info()[0])
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
if record_errors == True:
dissolved_data["Errors"] = errors
return dissolved_data
elif model == 'MagmaSat':
XH2Ovals = []
XCO2vals = []
FluidProportionvals = []
for index, row in dissolved_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True,
verbose=True)
H2Ovals.append(calc.result['H2O_liq'])
CO2vals.append(calc.result['CO2_liq'])
XH2Ovals.append(calc.result['XH2O_fl'])
XCO2vals.append(calc.result['XCO2_fl'])
FluidProportionvals.append(calc.result['FluidProportion_wt'])
warnings.append(calc.calib_check)
errors.append('')
except Exception as inst:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
XH2Ovals.append(np.nan)
XCO2vals.append(np.nan)
FluidProportionvals.append(np.nan)
warnings.append('Calculation Failed.')
errors.append(sys.exc_info()[0])
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
if record_errors == True:
dissolved_data["Errors"] = errors
return dissolved_data
else:
XH2Ovals = []
XCO2vals = []
FluidProportionvals = []
for index, row in dissolved_data.iterrows():
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
if 'Water' in model:
try:
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True)
H2Ovals.append(calc.result)
warnings.append(calc.calib_check)
except:
H2Ovals.append(0)
warnings.append('Calculation Failed #001')
if 'Carbon' in model:
try:
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True)
CO2vals.append(calc.result)
warnings.append(calc.calib_check)
except:
CO2vals.append(0)
warnings.append('Calculation Failed #002')
if 'Water' in model:
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
if 'Carbon' in model:
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
return dissolved_data
def calculate_equilibrium_fluid_comp(self, temperature, pressure, print_status=False, model='MagmaSat', **kwargs):
#TODO make molfrac the default
"""
Returns H2O and CO2 concentrations in wt% or mole fraction in a fluid in equilibrium with the given sample(s) at the given P/T condition.
Parameters
----------
temperature: float, int, or str
Temperature, in degrees C. Can be passed as float, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
pressure: float, int, or str
Pressure, in bars. Can be passed as float or int, in which case the
passed value is used as the pressure for all samples. Alternatively, pressure information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
model: string
OPTIONAL: Default is 'MagmaSat'. Any other model name can be passed here.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
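Examples
--------
A hypothetical call, assuming an ExcelFile object named myfile (illustrative only):
    fluid = myfile.calculate_equilibrium_fluid_comp(temperature=1000.0, pressure=1000.0)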
"""
data = self.preprocess_sample(self.data)
fluid_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temp must be type str or float or int")
if isinstance(pressure, str):
file_has_press = True
press_name = pressure
elif isinstance(pressure, float) or isinstance(pressure, int):
file_has_press = False
else:
raise InputError("pressure must be type str or float or int")
H2Ovals = []
CO2vals = []
warnings = []
if model in get_models(models='mixed') or model == "MooreWater":
for index, row in fluid_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature,
model=model, silence_warnings=True, **kwargs)
H2Ovals.append(calc.result['H2O'])
CO2vals.append(calc.result['CO2'])
warnings.append(calc.calib_check)
except:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["XH2O_fl_VESIcal"] = H2Ovals
fluid_data["XCO2_fl_VESIcal"] = CO2vals
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
elif model == 'MagmaSat':
for index, row in fluid_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature, model=model, silence_warnings=True)
H2Ovals.append(calc.result['H2O'])
CO2vals.append(calc.result['CO2'])
warnings.append(calc.calib_check)
except:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["XH2O_fl_VESIcal"] = H2Ovals
fluid_data["XCO2_fl_VESIcal"] = CO2vals
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
else:
saturated = []
for index, row in fluid_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature, model=model, silence_warnings=True)
saturated.append(calc.result)
warnings.append(calc.calib_check)
except:
saturated.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["Saturated_VESIcal"] = saturated
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
def calculate_saturation_pressure(self, temperature, print_status=True, model='MagmaSat', **kwargs): #TODO fix weird printing
"""
Calculates the saturation pressure of multiple sample compositions in the ExcelFile.
Parameters
----------
temperature: float, int, or str
Temperature at which to calculate saturation pressures, in degrees C. Can be passed as float or int, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the passed ExcelFile object. If so, pass the str value corresponding to the column
title in the passed ExcelFile object.
print_status: bool
OPTIONAL: The default value is True, in which case the progress of the calculation will be printed to the terminal.
If set to False, nothing will be printed. MagmaSat calculations tend to be slow, and so a value of True is recommended
for most use cases.
model: string
OPTIONAL: Default is 'MagmaSat'. Any other model name can be passed here.
Returns
-------
pandas DataFrame object
Values returned are saturation pressure in bars, the mass of fluid present, and the composition of the
fluid present.
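Examples
--------
A hypothetical call, assuming an ExcelFile object named myfile (illustrative only):
    satPs = myfile.calculate_saturation_pressure(temperature=925.0)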
"""
data = self.preprocess_sample(self.data)
satp_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temperature must be type str or float or int")
if model != 'MagmaSat':
satP = []
warnings = []
for index, row in satp_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_saturation_pressure(sample=bulk_comp, temperature=temperature,
model=model, silence_warnings=True, **kwargs)
satP.append(calc.result)
warnings.append(calc.calib_check)
except:
satP.append(np.nan)
warnings.append("Calculation Failed")
satp_data["SaturationP_bars_VESIcal"] = satP
if file_has_temp == False:
satp_data["Temperature_C_VESIcal"] = temperature
satp_data["Model"] = model
satp_data["Warnings"] = warnings
return satp_data
else:
satP = []
flmass = []
flH2O = []
flCO2 = []
flsystem_wtper = []
warnings = []
for index, row in satp_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_saturation_pressure(sample=bulk_comp, temperature=temperature, model=model, verbose=True, silence_warnings=True)
satP.append(calc.result["SaturationP_bars"])
flmass.append(calc.result["FluidMass_grams"])
flsystem_wtper.append(calc.result["FluidProportion_wt"])
flH2O.append(calc.result["XH2O_fl"])
flCO2.append(calc.result["XCO2_fl"])
warnings.append(calc.calib_check)
except:
satP.append(np.nan)
flmass.append(np.nan)
flsystem_wtper.append(np.nan)
flH2O.append(np.nan)
flCO2.append(np.nan)
warnings.append("Calculation Failed")
satp_data["SaturationP_bars_VESIcal"] = satP
if file_has_temp == False:
satp_data["Temperature_C_VESIcal"] = temperature
satp_data["XH2O_fl_VESIcal"] = flH2O
satp_data["XCO2_fl_VESIcal"] = flCO2
satp_data["FluidMass_grams_VESIcal"] = flmass
satp_data["FluidSystem_wt_VESIcal"] = flsystem_wtper
satp_data["Model"] = model
satp_data["Warnings"] = warnings
if print_status == True:
print("Done!")
return satp_data
class CalibrationRange(object):
""" The CalibrationRange object allows the range of allowable parameters to be specified and
used in checking and reporting of the results.
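Example (illustrative, using the crf_* check functions and crmsg_* message templates defined
later in this module):
    cr = CalibrationRange('pressure', 20000.0, crf_LessThan, 'bar', 'some model',
                          fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass,
                          description_msg=crmsg_LessThan_description)
    cr.check({'pressure': 1000.0})  # True, since 1000 bar is below the 20000 bar limit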
"""
def __init__(self, parameter_name, value, checkfunction=None, units='', model_name='',
fail_msg='',fail_dict={}, pass_msg='', pass_dict={}, description_msg='', description_dict={}):
self.parameter_name = parameter_name
self.value = value
self.checkfunction = checkfunction
self.units = units
self.model_name = model_name
self.fail_msg = (copy(fail_msg), copy(fail_dict))
self.pass_msg = (copy(pass_msg), copy(pass_dict))
self.description_msg = (copy(description_msg), copy(description_dict))
def check(self,parameters):
"""Method for checking whether parameters satisfy the calibration range."""
if self.parameter_name in parameters:
return self.checkfunction(self.value,parameters[self.parameter_name])
else:
return None
def string(self,parameters,report_nonexistance=True):
"""Returns a string statement of the calibration check"""
if parameters is None:
msgdict = self.description_msg[1]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.description_msg[0].format(**msgdict)
else:
check = self.check(parameters)
if check == True:
msgdict = self.pass_msg[1]
msgdict['param_val'] = parameters[self.parameter_name]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.pass_msg[0].format(**msgdict)
elif check == False:
msgdict = self.fail_msg[1]
msgdict['param_val'] = parameters[self.parameter_name]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.fail_msg[0].format(**msgdict)
else:
if report_nonexistance == True:
return "A value for {} was not provided.".format(self.parameter_name)
else:
return ''
# class old_CalibrationRange(object):
# """ The CalibrationRange object allows the range of allowable parameters to be specified and
# used in checking and reporting of the results.
# """
# def __init__(self,parameter_name,value,unit='',modelname='',explanation_string=None,
# parameter_string=None,value_fmt="{:.1f}"):
# self.parameter_name = parameter_name
# self.value = value
# self.value_fmt = value_fmt
# self.model_name = modelname
# self.unit = unit
# self.explanation_string = explanation_string
# if parameter_string is not None:
# self.parameter_string = parameter_string
# else:
# self.parameter_string = parameter_name
#
# @abstractmethod
# def check(self,parameters):
# """Method for checking whether parameters satisfy the calibration range."""
# return True
#
# @abstractmethod
# def string(self,parameters):
# """Returns a string statement of the calibration check"""
# return 'No string return defined. '
class Model(object):
"""The model object implements a volatile solubility model. It is composed
of the methods needed to evaluate :func:`VESIcal.calculate_dissolved_volatiles`,
:func:`VESIcal.calculate_equilibrium_fluid_comp`, and :func:`calculate_saturation_pressure`. The
fugacity and activity models for the volatiles species must be specified,
defaulting to ideal.
"""
def __init__(self):
self.set_volatile_species(None)
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def set_volatile_species(self,volatile_species):
if type(volatile_species) == str:
volatile_species = [volatile_species]
elif type(volatile_species) != list:
raise InputError("volatile_species must be a str or list.")
self.volatile_species = volatile_species
def set_fugacity_model(self,fugacity_model):
self.fugacity_model = fugacity_model
def set_activity_model(self,activity_model):
self.activity_model = activity_model
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
def set_solubility_dependence(self,solubility_dependence):
self.solubility_dependence = solubility_dependence
@abstractmethod
def calculate_dissolved_volatiles(self,**kwargs):
pass
@abstractmethod
def calculate_equilibrium_fluid_comp(self,**kwargs):
pass
@abstractmethod
def calculate_saturation_pressure(self,**kwargs):
pass
@abstractmethod
def preprocess_sample(self,**kwargs):
pass
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
""" Checks whether the given parameters are within the ranges defined by the
CalibrationRange objects for the model and its fugacity and activity models. An empty
string will be returned if all parameters are within the calibration range. If a
parameter is not within the calibration range, a description of the problem will be
returned in the string.
Parameters
----------
parameters dict
Dictionary keys are the names of the parameters to be checked, e.g., pressure,
temperature, SiO2, etc. Values are the values of each parameter. A complete set
need not be given.
Returns
-------
str
String description of any parameters falling outside of the calibration range.
"""
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in self.fugacity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in self.activity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
def get_calibration_range(self):
""" Returns a string describing the calibration ranges defined by the CalibrationRange
objects for each model, and its associated fugacity and activity models.
Returns
-------
str
String description of the calibration range objects."""
s = ''
for cr in self.calibration_ranges:
s += cr.string(None)
for cr in self.fugacity_model.calibration_ranges:
s += cr.string(None)
for cr in self.activity_model.calibration_ranges:
s += cr.string(None)
return s
class FugacityModel(object):
""" The fugacity model object is for implementations of fugacity models
for individual volatile species, though it may depend on the mole
fraction of other volatile species. It contains all the methods required
to calculate the fugacity at a given pressure and mole fraction.
"""
def __init__(self):
self.set_calibration_ranges([])
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
@abstractmethod
def fugacity(self,pressure,**kwargs):
"""
"""
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
class activity_model(object):
""" The activity model object is for implementing activity models
for volatile species in melts. It contains all the methods required to
evaluate the activity.
"""
def __init__(self):
self.set_calibration_ranges([])
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
@abstractmethod
def activity(self,X,**kwargs):
"""
"""
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
class Calculate(object):
""" The Calculate object is a template for implementing user-friendly methods for
running calculations using the volatile solubility models. All Calculate methods
have a common workflow: the sample is read in, preprocessed, the calculation is performed,
the calibration range is checked, and the results stored.
"""
def __init__(self,sample,model='MagmaSat',silence_warnings=False,preprocess_sample=False,**kwargs):
if model == 'MagmaSat':
self.model = MagmaSat()
elif type(model) == str:
self.model = default_models[model]
else:
self.model = model
self.sample = sample.copy()
if preprocess_sample == True:
self.sample = self.model.preprocess_sample(self.sample)
self.result = self.calculate(sample=self.sample,**kwargs)
self.calib_check = self.check_calibration_range(sample=self.sample,**kwargs)
if self.calib_check is not None and silence_warnings == False:
if self.calib_check != '':
warnings.warn(self.calib_check,RuntimeWarning)
@abstractmethod
def calculate(self):
""" """
@abstractmethod
def check_calibration_range(self):
""" """
#-------------DEFAULT CALIBRATIONRANGE OBJECTS---------------#
def crf_EqualTo(calibval,paramval):
return calibval == paramval
crmsg_EqualTo_pass = "The {param_name} ({param_val:.1f} {units}) is equal to {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_EqualTo_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not equal to {calib_val:.1f} {units}. "
crmsg_EqualTo_description = "The {model_name} model is calibrated for {param_name} equal to {calib_val:.1f} {units}. "
def crf_GreaterThan(calibval,paramval):
return paramval > calibval
crmsg_GreaterThan_pass = "The {param_name} ({param_val:.1f} {units}) is greater than {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_GreaterThan_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not greater than {calib_val:.1f} {units}. "
crmsg_GreaterThan_description = "The {model_name} model is calibrated for {param_name} greater than {calib_val:.1f} {units}. "
def crf_LessThan(calibval,paramval):
return paramval < calibval
crmsg_LessThan_pass = "The {param_name} ({param_val:.1f} {units}) is less than {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_LessThan_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not less than {calib_val:.1f} {units}. "
crmsg_LessThan_description = "The {model_name} model is calibrated for {param_name} less than {calib_val:.1f} {units}. "
def crf_Between(calibval,paramval):
return paramval > calibval[0] and paramval < calibval[1]
crmsg_Between_pass = "The {param_name} ({param_val:.1f} {units}) is between {calib_val0:.1f} and {calib_val1:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_Between_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not between {calib_val0:.1f} and {calib_val1:.1f} {units}. "
crmsg_Between_description = "The {model_name} model is calibrated for {param_name} between {calib_val0:.1f} and {calib_val1:.1f} {units}. "
def crf_LiuComp(calibval=None,sample={}):
SiTest = sample['SiO2'] >= 75.0 and sample['SiO2'] <= 77.0
NaTest = sample['Na2O'] >= 3.4 and sample['Na2O'] <= 4.7
KTest = sample['K2O'] >= 3.6 and sample['K2O'] <= 5.7
AlTest = sample['Al2O3'] >= 12.1 and sample['Al2O3'] <= 13.5
return all([SiTest, NaTest, KTest, AlTest])
crmsg_LiuComp_pass = "The sample appears to be similar in composition to the rhyolites and haplogranites used to calibrate the Liu et al. model."
crmsg_LiuComp_fail = "As the Liu et al. model incorporates no term for compositional dependence, users must take extreme care when extrapolating this model to compositions which differ significantly from the haplogranites and rhyolites in the calibration dataset. These warnings are simply a guide; we suggest that users carefully compare their major element data to the calibration dataset to check for suitability."
crmsg_LiuComp_description = "The Liu et al. model is suitable for haplogranites and rhyolites."
#-------------FUGACITY MODELS--------------------------------#
class fugacity_idealgas(FugacityModel):
""" An instance of FugacityModel for an ideal gas.
"""
def fugacity(self,pressure,X_fluid=1.0,**kwargs):
""" Returns the fugacity of an ideal gas, i.e., the partial pressure.
Parameters
----------
pressure float
Total pressure of the system, in bars.
X_fluid float
The mole fraction of the species in the vapour phase.
Returns
-------
float
Fugacity (partial pressure) in bars
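Examples
--------
>>> fugacity_idealgas().fugacity(pressure=2000.0, X_fluid=0.5)
1000.0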
"""
return pressure*X_fluid
class fugacity_KJ81_co2(FugacityModel):
""" Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
will return the properties of the CO2 component of the mixed fluid.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',20000.0,crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description),
CalibrationRange('temperature',1050,crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description)])
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
""" Calculates the fugacity of CO2 in a mixed CO2-H2O fluid. Above 1050C,
it assumes H2O and CO2 do not interact, as the equations are not defined
beyond this point.
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
if X_fluid == 0:
return 0
elif temperature >= 1050.0:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
else:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid
def volume(self,P,T,X_fluid):
""" Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
Jacobs (1981) using scipy.root_scalar.
Parameters
----------
P float
Total pressure of the system, in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid
Returns
-------
float
Volume of the mixed fluid.
"""
if X_fluid != 1.0:
# x0 = self.volume(P,T,1.0)*X_fluid + self.volume_h(P,T)*(1-X_fluid)
# print(x0)
if P >= 20000 and T<800-273.15:
x0 = (X_fluid*25+(1-X_fluid)*15)
else:
x0 = (X_fluid*35+(1-X_fluid)*15)
else:
if P >= 20000 and T<800-273.15:
x0 = 25
else:
x0=35
return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root
def root_volume(self,v,P,T,X_fluid):
""" Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
For use with a root finder to obtain the volume of the mixed fluid.
Parameters
----------
v float
Guess for the volume
P float
Total system pressure in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
"""
T = T + 273.15
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = c['b']
cm = c['c']
c12= c['c']
dm = c['d']
d12= c['d']
em = c['e']
e12 =c['e']
else:
bm = X_fluid*c['b'] + (1-X_fluid)*h['b']
c12 = (c['c']*h['c'])**0.5
cm = c['c']*X_fluid**2 + h['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = c['d']*X_fluid**2 + h['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = c['e']*X_fluid**2 + h['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - am / (T**0.5 * v * (v+bm))
return -(P - pt1 - pt2)
def volume_h(self,P,T):
""" Calculates the volume of a pure H2O fluid, by solving Eq (14) of
Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
Volume of the pure H2O fluid.
"""
return root_scalar(self.root_volume_h,x0=15,x1=35,args=(P,T)).root
def root_volume_h(self,v,P,T):
""" Returns the difference between the lhs and rhs of Eq (14) of
Kerrick and Jacobs (1981). For use with a root solver to identify the
volume of a pure H2O fluid.
Parameters
----------
v float
Guess for the volume
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
in bars.
"""
T = T + 273.15
h = {}
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
h['a'] = h['c'] + h['d']/v + h['e']/v**2
y = h['b']/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - h['a'] / (T**0.5 * v * (v+h['b']))
return -(P - pt1 - pt2)
def lnPhi_mix(self,P,T,X_fluid):
""" Calculates the natural log of the fugacity coefficient for CO2 in a
mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC
X_fluid float
The mole fraction of CO2 in the fluid.
Returns
-------
float
The natural log of the fugacity coefficient for CO2 in a mixed fluid.
"""
T = T + 273.15
v = self.volume(P,T-273.15,X_fluid)
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = c['b']
cm = c['c']
c12= c['c']
dm = c['d']
d12= c['d']
em = c['e']
e12 =c['e']
else:
bm = X_fluid*c['b'] + (1-X_fluid)*h['b']
c12 = (c['c']*h['c'])**0.5
cm = c['c']*X_fluid**2 + h['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = c['d']*X_fluid**2 + h['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = c['e']*X_fluid**2 + h['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
# Z = (1+y+y**2-y**3)/(1-y)**2 - am/(83.14*T**1.5*(v+bm))
Z = v*P/(83.14*T)
lnPhi = 0
lnPhi += (4*y-3*y**2)/(1-y)**2 + (c['b']/bm * (4*y-2*y**2)/(1-y)**3)
lnPhi += - (2*c['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
lnPhi += - cm*c['b']/(83.14*T**1.5*bm*(v+bm))
lnPhi += cm*c['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += - (2*c['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
lnPhi += (2*c['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += c['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*c['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
lnPhi += - 2*c['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += - (2*c['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
lnPhi += (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
lnPhi += - (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += em*c['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*c['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
lnPhi += 3*em*c['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*c['b']/(83.14*T**1.5*bm**3*(v+bm))
lnPhi += - np.log(Z)
return lnPhi
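# Example (illustrative): the CO2 fugacity in an equimolar CO2-H2O fluid at 2000 bars and 1000 oC
# can be obtained with fugacity_KJ81_co2().fugacity(pressure=2000.0, temperature=1000.0, X_fluid=0.5).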
class fugacity_KJ81_h2o(FugacityModel):
"""Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
will return the properties of the H2O component of the mixed fluid.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',20000.0,crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description),
CalibrationRange('temperature',1050,crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description)])
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
""" Calculates the fugacity of H2O in a mixed CO2-H2O fluid. Above 1050C,
it assumes H2O and CO2 do not interact, as the equations are not defined
beyond this point.
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
fugacity of H2O in bars
"""
if X_fluid == 0:
return 0
elif temperature >= 1050:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
else:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid
def volume(self,P,T,X_fluid):
""" Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
Jacobs (1981) using scipy.root_scalar.
Parameters
----------
P float
Total pressure of the system, in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid
Returns
-------
float
Volume of the mixed fluid.
"""
if X_fluid != 1.0:
# x0 = self.volume(P,T,1.0)*X_fluid + self.volume_h(P,T)*(1-X_fluid)
# print(x0)
if P >= 20000 and T<800-273.15:
x0 = ((1-X_fluid)*25+X_fluid*15)
else:
x0 = ((1-X_fluid)*35+X_fluid*15)
else:
if P >= 20000 and T<800-273.15:
x0 = 10
else:
x0=15
return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root
def root_volume(self,v,P,T,X_fluid):
""" Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
For use with a root finder to obtain the volume of the mixed fluid.
Parameters
----------
v float
Guess for the volume
P float
Total system pressure in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
"""
T = T + 273.15
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = h['b']
cm = h['c']
dm = h['d']
em = h['e']
c12= h['c']
d12= h['d']
e12= h['e']
else:
bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
c12 = (c['c']*h['c'])**0.5
cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - am / (T**0.5 * v * (v+bm))
return -(P - pt1 - pt2)
def volume_c(self,P,T):
""" Calculates the volume of a pure CO2 fluid, by solving Eq (14) of
Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
Volume of the pure CO2 fluid.
"""
return root_scalar(self.root_volume_c,x0=15,x1=35,args=(P,T)).root
def root_volume_c(self,v,P,T):
""" Returns the difference between the lhs and rhs of Eq (14) of
Kerrick and Jacobs (1981). For use with a root solver to identify the
volume of a pure CO2 fluid.
Parameters
----------
v float
Guess for the volume
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
in bars.
"""
T = T + 273.15
c = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
c['a'] = c['c'] + c['d']/v + c['e']/v**2
y = c['b']/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - c['a'] / (T**0.5 * v * (v+c['b']))
return -(P - pt1 - pt2)
def lnPhi_mix(self,P,T,X_fluid):
""" Calculates the natural log of the fugacity coefficient for H2O in a
mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC
X_fluid float
The mole fraction of H2O in the fluid.
Returns
-------
float
The natural log of the fugacity coefficient for H2O in a mixed fluid.
"""
T = T + 273.15
v = self.volume(P,T-273.15,X_fluid)
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = h['b']
cm = h['c']
dm = h['d']
em = h['e']
c12= h['c']
d12= h['d']
e12= h['e']
else:
bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
c12 = (c['c']*h['c'])**0.5
cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
# Z = (1+y+y**2-y**3)/(1-y)**2 - am/(83.14*T**1.5*(v+bm))
Z = v*P/(83.14*T)
lnPhi = 0
lnPhi += (4*y-3*y**2)/(1-y)**2 + (h['b']/bm * (4*y-2*y**2)/(1-y)**3)
lnPhi += - (2*h['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
lnPhi += - cm*h['b']/(83.14*T**1.5*bm*(v+bm))
lnPhi += cm*h['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += - (2*h['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
lnPhi += (2*h['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += h['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*h['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
lnPhi += - 2*h['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += - (2*h['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
lnPhi += (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
lnPhi += - (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += em*h['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*h['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
lnPhi += 3*em*h['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*h['b']/(83.14*T**1.5*bm**3*(v+bm))
lnPhi += - np.log(Z)
return lnPhi
class fugacity_ZD09_co2(FugacityModel):
""" Implementation of the Zhang and Duan (2009) fugacity model for pure CO2
fluids."""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Zhang and Duan (2009) EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[200,2300],crf_Between,'oC','Zhang and Duan (2009) EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of a pure CO2 fluid, or a mixed fluid assuming
ideal mixing. Implements eqn (14) of Zhang and Duan (2009).
Parameters
----------
pressure float
Pressure in bars
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Fugacity of CO2, standard state 1 bar.
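Examples
--------
An illustrative call for a pure CO2 fluid at 2000 bars and 1000 oC:
    fCO2 = fugacity_ZD09_co2().fugacity(pressure=2000.0, temperature=1000.0)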
"""
P = pressure/10
T = temperature + 273.15
a = np.array([0.0,
2.95177298930e-2,
-6.33756452413e3,
-2.75265428882e5,
1.29128089283e-3,
-1.45797416153e2,
7.65938947237e4,
2.58661493537e-6,
0.52126532146,
-1.39839523753e2,
-2.36335007175e-8,
5.35026383543e-3,
-0.27110649951,
2.50387836486e4,
0.73226726041,
1.5483335997e-2])
e = 235.0
s = 3.79
Pm = 3.0636*P*s**3/e
Tm = 154*T/e
Vm = root_scalar(self.Vm,x0=200,x1=100,args=(P,T)).root
S1 = ((a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
(a[4]+a[5]/Tm**2+a[6]/Tm**3)/(2*Vm**2)+
(a[7]+a[8]/Tm**2+a[9]/Tm**3)/(4*Vm**4)+
(a[10]+a[11]/Tm**2+a[12]/Tm**3)/(5*Vm**5)+
(a[13]/(2*a[15]*Tm**3)*(a[14]+1-(a[14]+1+a[15]/Vm**2)*
np.exp(-a[15]/Vm**2)))
)
Z = Pm*Vm/(8.314*Tm)
lnfc = Z - 1 - np.log(Z) + S1
return P*np.exp(lnfc)*10
def Vm(self,Vm,P,T):
""" Function to use for solving for the parameter Vm, defined by eqn (8) of
Zhang and Duan (2009). Called by scipy.fsolve in the fugacity method.
Parameters
----------
Vm float
Guessed value of Vm
P float
Pressure in MPa
T float
Temperature in K
Returns
-------
float
Difference between (rearranged) LHS and RHS of eqn (8) of Zhang and Duan (2009).
"""
Pm = 3.0636*P*3.79**3/235.0
Tm = 154*T/235.0
a = np.array([0.0,
2.95177298930e-2,
-6.33756452413e3,
-2.75265428882e5,
1.29128089283e-3,
-1.45797416153e2,
7.65938947237e4,
2.58661493537e-6,
0.52126532146,
-1.39839523753e2,
-2.36335007175e-8,
5.35026383543e-3,
-0.27110649951,
2.50387836486e4,
0.73226726041,
1.5483335997e-2])
return ((1+(a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
(a[4]+a[5]/Tm**2+a[6]/Tm**3)/Vm**2+
(a[7]+a[8]/Tm**2+a[9]/Tm**3)/Vm**4)*0.08314*Tm/Pm - Vm
)
class fugacity_MRK_co2(FugacityModel):
""" Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
<NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
Lowenstern.
"""
def __init__(self):
self.set_calibration_ranges([])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of CO2 in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
fug = self.MRK(pressure,temperature+273.15)
return fug*X_fluid
def FNA(self,TK):
return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325
def FNB(self,TK):
return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)
def FNC(self,TK):
R = 83.14321
return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)
def FNF(self,V,TK,A,B,P):
R = 83.14321
return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P
def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
R = 83.14321
B_1 = 14.6
B_2 = 29.7
for X_1 in [0,1]:
B = X_1 * B_1 + (1 - X_1) * B_2
A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
Temp2 = B + 5
Q = 1
Temp1 = 0
while abs(Temp2 - Temp1) >= 0.00001:
Temp1 = Temp2
F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
if F_2 * F_1 <= 0:
Q = Q / 2.
if abs(Temp2 - Temp1) > 0.00001:
F_1 = F_2
V = Temp2
G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_1 = np.exp(G_1)
G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_2 = np.exp(G_2)
if X_1 == 0:
fCO2o = G_2 * P #The fugacity of CO2
# return fCO2o
if X_1 == 1:
fH2Oo = G_1 * P #The fugacity of H2O
# return fH2Oo
return fCO2o
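# Example (illustrative): fugacity_MRK_co2().fugacity(pressure=2000.0, temperature=1000.0, X_fluid=0.5)
# returns the CO2 fugacity in bars for an equimolar fluid, with the pure-CO2 MRK fugacity scaled by
# X_fluid (ideal mixing).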
class fugacity_MRK_h2o(FugacityModel):
""" Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
<NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
Lowenstern.
"""
def __init__(self):
self.set_calibration_ranges([])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of H2O in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
fug = self.MRK(pressure,temperature+273.15)
return fug*X_fluid
def FNA(self,TK):
return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325
def FNB(self,TK):
return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)
def FNC(self,TK):
R = 83.14321
return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)
def FNF(self,V,TK,A,B,P):
R = 83.14321
return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P
def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
R = 83.14321
B_1 = 14.6
B_2 = 29.7
# X_1 = 1
for X_1 in [0,1]:
B = X_1 * B_1 + (1 - X_1) * B_2
A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
Temp2 = B + 5
Q = 1
Temp1 = 0
while abs(Temp2 - Temp1) >= 0.00001:
Temp1 = Temp2
F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
if F_2 * F_1 <= 0:
Q = Q / 2.
if abs(Temp2 - Temp1) > 0.00001:
F_1 = F_2
V = Temp2
G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_1 = np.exp(G_1)
G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_2 = np.exp(G_2)
if X_1 == 0:
fCO2o = G_2 * P #The fugacity of CO2
# return fCO2o
if X_1 == 1:
fH2Oo = G_1 * P #The fugacity of H2O
# return fH2Oo
return fH2Oo
class fugacity_HB_co2(FugacityModel):
"""
Implementation of the Holloway and Blank (1994) Modified Redlich Kwong EoS for CO2.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500.0,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.HBmodel = fugacity_HollowayBlank()
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
return self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='CO2')*X_fluid
class fugacity_HB_h2o(FugacityModel):
"""
Implementation of the Holloway and Blank (1994) Modified Redlich Kwong EoS for H2O.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500.0,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.HBmodel = fugacity_HollowayBlank()
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
return self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='H2O')*X_fluid
class fugacity_HollowayBlank(FugacityModel):
"""
Implementation of the Modified Redlich Kwong presented in Holloway and Blank (1994) Reviews
in Mineralogy and Geochemistry vol. 30. Originally written in Quickbasic. CO2 calculations
translated to Matlab by <NAME> and translated to python by <NAME> for VESIcal.
H2O calculations translated to VisualBasic by <NAME> and translated to python by
<NAME> for VESIcal.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','MRK EOS (Holloway and Blank, 1994)',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','MRK EOS (Holloway and Blank, 1994)',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
def REDKW(self, BP, A2B):
"""
The RK routine. A routine to calculate compressibility factor and fugacity coefficient
with the Redlich-Kwong equation following Edmister (1968). This solution is for a supercritical
fluid.
Parameters
----------
BP: float
B parameter sum from RKCALC
A2B: float
A parameter sum from RKCALC
Returns
-------
float
XLNFP, the natural log of the fugacity coefficient
"""
if A2B < 1*10**(-10):
A2B = 0.001
#Define constants
TH = 0.333333
RR = -A2B*BP**2
QQ = BP*(A2B-BP-1)
XN = QQ*TH+RR-0.074074
XM = QQ-TH
XNN = XN*XN*0.25
XMM = XM**3 / 27.0
ARG = XNN+XMM
if ARG > 0:
X = np.sqrt(ARG)
F = 1
XN2 = -XN*0.5
iXMM = XN2+X
if iXMM < 0:
F = -1
XMM = F*((F*iXMM)**TH)
F = 1
iXNN = XN2 - X
if iXNN < 0:
F = -1
XNN = F*((F*iXNN)**TH)
Z = XMM+XNN+TH
ZBP = Z-BP
if ZBP < 0.000001:
ZBP = 0.000001
BPZ = 1+BP/Z
FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
if FP < -37 or FP > 37:
FP = 0.000001
elif ARG <0:
COSPHI = np.sqrt(-XNN/XMM)
if XN > 0:
COSPHI = -COSPHI
TANPHI = np.sqrt(1-COSPHI**2)/COSPHI
PHI = np.arctan(TANPHI)*TH
FAC = 2*np.sqrt(-XM*TH)
#sort for largest root
R1 = np.cos(PHI)
R2 = np.cos(PHI+2.0944)
R3 = np.cos(PHI+4.18879)
RH = R2
if R1 > R2:
RH = R1
if R3 > RH:
RH = R3
Z = RH*FAC+TH
ZBP = Z-BP
if ZBP < 0.000001:
ZBP = 0.000001
BPZ = 1+BP/Z
FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
if FP < -37 or FP > 37:
FP = 0.000001
else:
FP = 1
Z = 1
XLNFP = FP
return XLNFP
def Saxena(self, TK, pb):
"""
High pressure corresponding states routines from Saxena and Fei (1987) GCA
vol. 51, 783-791.
Parameters
----------
TK: float
Temperature in K.
pb: float
Pressure in bars.
Returns
-------
float
XLNF, Natural log of the ratio F(P)/F(4000 bar)
"""
#Define integration limit
PO = 4000
#Critical temperatures and pressures for CO2
TR = TK/304.2
PR = pb/73.9
PC = 73.9
#Virial coefficients
A = 2.0614-2.2351/TR**2 - 0.39411*np.log(TR)
B = 0.055125/TR + 0.039344/TR**2
C = -1.8935*10**(-6)/TR - 1.1092*10**(-5)/TR**2 - 2.1892*10**(-5)/TR**3
D = 5.0527*10**(-11)/TR - 6.3033*10**(-21)/TR**3
#Calculate molar volume
Z = A+B*PR+C*PR**2+D*PR**3
V = Z*83.0117*TK/pb
#integrate from PO (4000 bars) to P to calculate ln fugacity
LNF = A*np.log(pb/PO)+(B/PC)*(pb-PO)+(C/(2*PC**2))*(pb**2-PO**2)
LNF = LNF+(D/(3*PC**3))*(pb**3-PO**3)
XLNF = LNF
return XLNF
def RKCALC(self, temperature, pressure, species):
"""
Calculation of pure gas MRK properties following Holloway 1981, 1987
Parameters
----------
temperature: float
Temperature in degrees K.
pressure: float
Pressure in atmospheres.
Returns
-------
float
Natural log of the fugacity of a pure gas.
"""
#Define constants
R = 82.05736
RR = 6732.2
pb = 1.013*pressure
PBLN = np.log(pb)
TCEL = temperature-273.15
RXT = R*temperature
RT = R*temperature**1.5 * 10**(-6)
if species == 'CO2':
#Calculate T-dependent MRK A parameter CO2
ACO2M = 73.03 - 0.0714*TCEL + 2.157*10**(-5)*TCEL**2
#Define MRK B parameter for CO2
BSUM = 29.7
ASUM = ACO2M / (BSUM*RT)
elif species == 'H2O':
#Calculate T-dependent MRK A parameter H2O
AH2OM = 115.98 - np.double(0.0016295)*temperature - 1.4984*10**(-5)*temperature**2
#Define MRK B parameter for H2O
BSUM = 14.5
ASUM = AH2OM / (BSUM*RT)
BSUM = pressure*BSUM/RXT
XLNFP = self.REDKW(BSUM, ASUM)
#Convert to ln(fugacity)
PUREG = XLNFP + PBLN
return PUREG
def fugacity(self, pressure, temperature, species, **kwargs):
"""
Calculates fugacity.
Parameters
----------
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars.
species: str
Choose which species to calculate. Options are 'H2O' and 'CO2'.
Returns
-------
float
Fugacity coefficient for passed species
"""
#convert temp and press to atmospheres and Kelvin
pressureAtmo = pressure/1.013
temperatureK = temperature + 273.15
PO = 4000/1.013
#Use the MRK below 4,000 bars, Saxena above 4,000 bars
if pressure > 4000 and species=='CO2':
iPUREG = self.RKCALC(temperatureK, PO, species)
XLNF = self.Saxena(temperatureK, pressure)
PUREG = iPUREG + XLNF
else:
PUREG = self.RKCALC(temperatureK, pressureAtmo, species)
#Convert from ln(fugacity) to fugacity
stdf = np.exp(PUREG)
return stdf
class fugacity_RK_co2(FugacityModel):
"""
Implementation of the Redlich Kwong EoS for CO2.
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,1e5],'bar','Redlich Kwong EOS'),
# cr_GreaterThan('temperature',500,'oC','Redlich Kwong EOS')])
self.RKmodel = fugacity_RedlichKwong()
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'CO2')
class fugacity_RK_h2o(FugacityModel):
"""
Implementation of the Redlich Kwong EoS for H2O.
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.RKmodel = fugacity_RedlichKwong()
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'H2O')
class fugacity_RedlichKwong(FugacityModel):
"""
Implementation of the Redlich Kwong EoS
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
def gamma(self, pressure, temperature, species):
"""
Calculates fugacity coefficients.
Parameters
----------
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars.
species: str
Choose which species to calculate. Options are 'H2O' and 'CO2'.
Returns
-------
float
Fugacity coefficient for passed species.
"""
temperatureK = temperature + 273.15
R = 8.3145
fluid_species_names = ['CO2', 'H2O']
critical_params = {'CO2':{ "cT": 304.15,
"cP": 73.8659,
"o": 0.225
},
'H2O':{ "cT": 647.25,
"cP": 221.1925,
"o": 0.334
}
}
#Calculate a and b parameters (depend only on critical parameters)...
a = 0.42748 * R**2.0 * critical_params[species]["cT"]**(2.5) / (critical_params[species]["cP"] * 10.0**5)
b = 0.08664 * R * critical_params[species]["cT"] / (critical_params[species]["cP"] * 10.0**5)
kappa = 0.0
#Calculate coefficients in the cubic equation of state...
#coeffs: (C0, C1, C2, A, B)
A = a * pressure * 10.0**5 / (np.sqrt(temperatureK) * (R * temperatureK)**2.0)
B = b * pressure * 10.0**5 / (R * temperatureK)
C2 = -1.0
C1 = A - B - B * B
C0 = -A * B
#Solve the cubic equation for Z0 - Z2, D...
Q1 = C2 * C1 / 6.0 - C0 / 2.0 - C2**3.0 / 27.0
P1 = C2**2.0 / 9.0 - C1 / 3.0
D = Q1**2.0 - P1**3.0
if D >= 0:
kOneThird = 1.0 / 3.0
absQ1PSqrtD = np.fabs(Q1 + np.sqrt(D))
temp1 = absQ1PSqrtD**kOneThird
temp1 *= (Q1 + np.sqrt(D)) / absQ1PSqrtD
absQ1MSqrtD = np.fabs(Q1 - np.sqrt(D))
temp2 = absQ1MSqrtD**kOneThird
temp2 *= (Q1 - np.sqrt(D)) / absQ1MSqrtD
Z0 = temp1 + temp2 - C2 / 3.0
else:
temp1 = Q1**2.0 / (P1**3.0)
temp2 = np.sqrt(1.0 - temp1) / np.sqrt(temp1)
temp2 *= Q1 / np.fabs(Q1)
gamma = np.arctan(temp2)
if gamma < 0:
gamma = gamma + np.pi
Z0 = 2.0 * np.sqrt(P1) * np.cos(gamma/3.0) - C2 / 3.0
Z1 = 2.0 * np.sqrt(P1) * np.cos((gamma + 2.0 * np.pi) / 3.0) - C2/3.0
Z2 = 2.0 * np.sqrt(P1) * np.cos((gamma + 4.0 * np.pi) / 3.0) - C2/3.0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
if Z1 < Z2:
temp0 = Z1
Z1 = Z2
Z2 = temp0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
#Calculate Departure Functions
gamma = np.exp(Z0 - 1.0 - np.log(Z0-B) - A * np.log(1.0+B/Z0)/B)
Hdep = R * temperatureK * (Z0 - 1.0 - 1.5*A*np.log(1.0+B/Z0)/B)
Sdep = R * (np.log(Z0-B) - 0.5*A*np.log(1.0+B/Z0)/B)
return gamma
def fugacity(self, pressure, temperature, X_fluid=1.0, species='H2O', **kwargs):
"""
Calculates the fugacity of H2O in a mixed H2O-CO2 fluid using the universal relationships:
P_i = f_i/gamma_i = (fpure_i * Xfluid_i) / gamma_i
See Iacovino (2015) EPSL for further explanation.
"""
gammaH2O = self.gamma(pressure, temperature, 'H2O')
gammaCO2 = self.gamma(pressure, temperature, 'CO2')
fugacityH2Opure = pressure * gammaH2O
fugacityCO2pure = pressure * gammaCO2
if species == 'H2O':
return fugacityH2Opure * X_fluid
elif species == 'CO2':
return fugacityCO2pure * X_fluid
else:
raise InputError("Species must be H2O or CO2.")
#---------------ACTIVITY MODELS-------------------------------#
class activity_idealsolution(activity_model):
""" Implements an ideal solution activity model, i.e. it
will always return the mole fraction.
"""
def activity(self,X):
""" The activity of the component in an ideal solution, i.e., it
will return the mole fraction.
Parameters
----------
X float
The mole fraction of the species in the solution.
Returns
-------
float
The activity of the species in the solution, i.e., the mole fraction.
"""
return X
#------------PURE FLUID MODELS-------------------------------#
class ShishkinaCarbon(Model):
""" Implementation of the Shishkina et al. (2014) carbon solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,5000.0],crf_Between,'bar','Shishkina et al. carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[1200.0,1250.0],crf_Between,'oC','Shishkina et al. carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample):
""" Returns sample, unmodified. The Pi* compositional parameter is a ratio of cations,
therefore the value is not affected by the normalization of the sample. Shishkina et al.
imply that the accuracy of the calculations is little affected by whether Fe(tot) or Fe2+ is
used.
Parameters
----------
sample: dict or pandas Series
The major element oxides in wt%.
Returns
-------
dict or pandas Series
The major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
return sample
def PiStar(self,sample):
"""Shishkina et al. (2014) Eq (11)
Calculates the Pi* parameter for use in calculating CO2 solubility.
Parameters
----------
sample: pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
The value of the Pi* compositional parameter.
"""
_mols = wtpercentOxides_to_molCations(sample)
if all(cation in _mols for cation in ['Ca','K','Na','Mg','Fe','Si','Al']) == False:
raise InputError("To calculate PiStar, values for CaO, K2O, Na2O, MgO, FeO, SiO2, and Al2O3\
must be provided in sample.")
_pi = (_mols['Ca'] + 0.8*_mols['K'] + 0.7*_mols['Na'] + 0.4*_mols['Mg'] + 0.4*_mols['Fe'])/\
(_mols['Si']+_mols['Al'])
return _pi
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1,**kwargs):
""" Calculates the dissolved CO2 concentration in wt%, using equation (13) of Shishkina et al. (2014).
Parameters
----------
pressure: float
(Total) pressure in bars.
sample: dict or pandas Series
Major element concentrations in wt%. Normalization does not matter.
X_fluid: float
The mol-fraction of the fluid that is CO2. Default is 1, i.e. a pure CO2 fluid.
Returns
-------
float
The dissolved CO2 concentration in wt%.
"""
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("pressure must be a positive value.")
PiStar = self.PiStar(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
A = 1.150
B = 6.71
C= -1.345
if fugacity == 0:
return 0
else:
return np.exp(A*np.log(fugacity/10)+B*PiStar+C)/1e4
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample dict or pandas Series
Major element oxides in wt%
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,**kwargs):
""" Calculates the pressure at which a pure CO2 fluid is saturated, for the given
sample composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict or pandas Series
Major elements in wt%, including CO2 (also in wt%).
Returns
-------
float
Saturation pressure in bar
"""
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0:
raise InputError("CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,bracket=[1e-15,1e5],args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample dict or pandas Series
Major element oxides in wt%, including CO2 (also in wt%).
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure, and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs)-sample['CO2']
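# Minimal usage sketch for ShishkinaCarbon. The helper name and the basaltic composition
# below (major-element oxides and CO2 in wt%) are hypothetical, chosen only for illustration.
def _example_shishkina_carbon():
    model = ShishkinaCarbon()
    sample = {'SiO2': 49.0, 'TiO2': 1.0, 'Al2O3': 16.0, 'FeO': 9.0, 'MgO': 8.5,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'CO2': 0.10}
    sample = model.preprocess_sample(sample)
    co2_wt = model.calculate_dissolved_volatiles(pressure=2000.0, sample=sample)
    satP = model.calculate_saturation_pressure(sample=sample)
    return co2_wt, satP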
class ShishkinaWater(Model):
""" Implementation of the Shishkina et al. (2014) H2O solubility model as a Model class.
"""
def __init__(self):
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,5000.0],crf_Between,'bar','Shishkina et al. water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[1200.0,1250.0],crf_Between,'oC','Shishkina et al. water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample):
""" Returns sample, renormlized so that the major element oxides (excluding volatiles) sum to 100%.
Normalization must be done this way as the compositional dependence of the solubility takes the
mole fractions of Na2O and K2O as inputs, presumably assuming no volatiles in the bulk composition.
Volatile concentrations are left unchanged.
Parameters
----------
sample: dict or pandas Series
The major element oxides in wt%.
Returns
-------
dict or pandas Series
The major element oxides in wt%.
"""
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved H2O concentration using Eqn (9) of Shishkina et al. (2014).
Parameters
----------
pressure float
Total pressure in bars
sample pandas Series or dict
Major element oxides in wt%. Normalized to zero-volatiles so that the total-alkalis
mol fraction can be determined accurately.
X_fluid float
The mol fraction of H2O in the fluid
Returns
-------
float
The H2O concentration in wt%
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series.")
if all(ox in sample for ox in ['Na2O','K2O']) == False:
raise InputError("Na2O and K2O must be present in sample.")
if pressure < 0:
raise InputError("Pressure must be positive.")
_mols = wtpercentOxides_to_molCations(sample)
_mol_volatiles = 0
if 'H' in _mols:
_mol_volatiles += _mols['H']
if 'C' in _mols:
_mol_volatiles += _mols['C']
total_alkalis = (_mols['Na'] + _mols['K'])/(1-_mol_volatiles)
fugacity = self.fugacity_model.fugacity(pressure,X_fluid=X_fluid,**kwargs)
a = 3.36e-7 * (fugacity/10)**3 - 2.33e-4*(fugacity/10)**2 + 0.0711*(fugacity/10) - 1.1309
b = -1.2e-5*(fugacity/10)**2 + 0.0196*(fugacity/10)+1.1297
return a*total_alkalis + b
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt%, normalized on the basis of
no volatiles.
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,**kwargs):
""" Calculates the pressure at which a pure H2O fluid is saturated, for the given
sample composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O (also in wt%, not included
in normalization).
Returns
-------
float
Saturation pressure in bar
"""
if 'H2O' not in sample:
raise InputError("sample must contain H2O")
if sample['H2O'] < 0:
raise InputError("H2O concentration must be greater than 0 wt%.")
if sample['H2O'] < self.calculate_dissolved_volatiles(sample=sample,pressure=0,**kwargs):
return np.nan
try:
satP = root_scalar(self.root_saturation_pressure,bracket=[1e-15,1e5],args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O (also in wt%, not included
in normalization).
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs)-sample['H2O']
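# Minimal usage sketch for ShishkinaWater, analogous to the carbon example above. The helper
# name and the composition (major-element oxides and H2O in wt%) are hypothetical and
# illustrative only.
def _example_shishkina_water():
    model = ShishkinaWater()
    sample = {'SiO2': 49.0, 'TiO2': 1.0, 'Al2O3': 16.0, 'FeO': 9.0, 'MgO': 8.5,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 3.0}
    sample = model.preprocess_sample(sample)
    h2o_wt = model.calculate_dissolved_volatiles(pressure=2000.0, sample=sample)
    satP = model.calculate_saturation_pressure(sample=sample)
    return h2o_wt, satP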
class DixonCarbon(Model):
"""
Implementation of the Dixon (1997) carbon solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_MRK_co2())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
""" Returns sample, normalized, keep volatiles unchanged.
Parameters
----------
sample: pandas Series or dict
The major element oxides in wt%.
Returns
-------
pandas Series or dict
The major element oxides in wt%.
"""
return normalize_FixedVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved CO2 concentration using Eqn (3) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mol fraction of CO2 in the fluid.
Returns
-------
float
The CO2 concentration in wt%.
"""
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series")
if 'SiO2' not in sample:
raise InputError("sample must contain SiO2.")
if pressure == 0:
return 0
Mr = wtpercentOxides_to_formulaWeight(sample)
XCO3 = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
# return (4400 * XCO3) / (36.6 - 44*XCO3)
return (4400 * XCO3) / (Mr - 44*XCO3)
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=100.0,x1=1000.0,args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def molfrac_molecular(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the mole fraction of CO3(-2) dissolved when in equilibrium with
a pure CO2 fluid at 1200C, using Eqn (1) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
Mole fraction of CO3(2-) dissolved."""
DeltaVr = 23.14 #cm3 mole-1
P0 = 1
R = 83.15
T0 = 1473.15
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
XCO3Std = self.XCO3_Std(sample)
return XCO3Std * fugacity * np.exp(-DeltaVr * (pressure-P0)/(R*T0))
def XCO3_Std(self,sample):
""" Calculates the mole fraction of CO3(2-) dissolved when in equilibrium with pure
CO2 vapour at 1200C and 1 bar, using Eq (8) of Dixon (1997).
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
Mole fraction of CO3(2-) dissolved at 1 bar and 1200C.
"""
if sample['SiO2'] > 48.9:
return 3.817e-7
else:
return 8.697e-6 - 1.697e-7*sample['SiO2']
def root_saturation_pressure(self,pressure,sample,kwargs):
""" The function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure, and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs) - sample['CO2']
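# Minimal usage sketch for DixonCarbon. The model is calibrated at 1200 oC; the temperature
# keyword is simply forwarded through **kwargs to the MRK CO2 fugacity model. The helper
# name and the composition values are hypothetical and illustrative only.
def _example_dixon_carbon():
    model = DixonCarbon()
    sample = {'SiO2': 49.0, 'TiO2': 1.5, 'Al2O3': 14.0, 'FeO': 11.0, 'MgO': 7.0,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'CO2': 0.05}
    sample = model.preprocess_sample(sample)
    co2_wt = model.calculate_dissolved_volatiles(pressure=1000.0, sample=sample,
                                                 temperature=1200.0)
    satP = model.calculate_saturation_pressure(sample=sample, temperature=1200.0)
    return co2_wt, satP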
class DixonWater(Model):
"""
Implementation of the Dixon (1997) water solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_MRK_h2o())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
""" Returns sample, normalized, holding volatile concentrations constant.
Parameters
----------
sample: pandas Series or dict
The major element oxides in wt%.
Returns
-------
pandas Series or dict
The major element oxides in wt%.
"""
return normalize_FixedVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved H2O concentration using Eqns (5) and (6) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mol fraction of H2O in the fluid.
Returns
-------
float
The H2O concentration in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'SiO2' not in sample:
raise InputError("sample must contain SiO2.")
if pressure < 0:
raise InputError("Pressure must be positive")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
XH2O = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
XOH = self.XOH(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
Mr = wtpercentOxides_to_formulaWeight(sample)
XB = XH2O + 0.5*XOH
# return 1801.5*XB/(36.6-18.6*XB)
return 1801.5*XB/(Mr-18.6*XB)
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure H2O fluid is saturated, for the given sample
composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if 'H2O' not in sample:
raise InputError("sample must contain H2O")
if sample['H2O'] < 0:
raise InputError("H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=100.0,x1=1000.0,args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def molfrac_molecular(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the mole fraction of molecular H2O dissolved when in equilibrium with
a pure H2O fluid at 1200C, using Eqn (2) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Mole fraction of molecular H2O dissolved.
"""
VH2O = 12 #cm3 mole-1
P0 = 1
R = 83.15
T0 = 1473.15
XH2OStd = self.XH2O_Std(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
return XH2OStd * fugacity * np.exp(-VH2O * (pressure-P0)/(R*T0))
def XH2O_Std(self,sample):
""" Calculates the mole fraction of molecular H2O dissolved when in equilibrium with pure
H2O vapour at 1200C and 1 bar, using Eq (9) of Dixon (1997).
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
Mole fraction of molecular water dissolved at 1 bar and 1200C.
"""
if sample['SiO2'] > 48.9:
return 3.28e-5
else:
return -3.04e-5 + 1.29e-6*sample['SiO2']
def XOH(self,pressure,sample,X_fluid=1.0,**kwargs):
"""
Calculates the mole fraction of hydroxyl groups dissolved by solving Eq (4) of
Dixon (1997). Calls scipy.root_scalar to find the root of the XOH_root method.
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Mole fraction of hydroxyl groups dissolved.
"""
XH2O = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
if XH2O < 1e-14:
return 0
return np.exp(root_scalar(self.XOH_root,x0=np.log(0.5),x1=np.log(0.1),args=(XH2O)).root)
def XOH_root(self,XOH,XH2O):
"""
Method called by scipy.root_scalar when finding the saturation pressure using the
calculate_saturation_pressure method. Implements Eq (4) of Dixon (1997).
Parameters
----------
XOH float
Guess for the mole fraction of hydroxyl groups dissolved in melt.
XH2O float
Mole fraction of molecular water dissolved in melt.
Returns
-------
float
The difference between the RHS and LHS of Eq (4) of Dixon (1997) for the
guessed value of XOH.
"""
A = 0.403
B = 15.333
C = 10.894
XOH = np.exp(XOH)
term = (XOH)**2.0/(XH2O*(1.0-XOH-XH2O))
lhs = - np.log(term)
rhs = A + B*XOH + C*XH2O
return rhs - lhs
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The differece between the dissolved H2O at the pressure guessed, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs) - sample['H2O']
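# Minimal usage sketch for DixonWater, mirroring the CO2 example above. The temperature
# keyword (oC) is forwarded to the MRK H2O fugacity model. The helper name and the
# composition values are hypothetical and illustrative only.
def _example_dixon_water():
    model = DixonWater()
    sample = {'SiO2': 49.0, 'TiO2': 1.5, 'Al2O3': 14.0, 'FeO': 11.0, 'MgO': 7.0,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 2.0}
    sample = model.preprocess_sample(sample)
    h2o_wt = model.calculate_dissolved_volatiles(pressure=1000.0, sample=sample,
                                                 temperature=1200.0)
    satP = model.calculate_saturation_pressure(sample=sample, temperature=1200.0)
    return h2o_wt, satP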
class IaconoMarzianoWater(Model):
"""
Implementation of the Iacono-Marziano et al. (2012) water solubility model, as a Model class. Two
calibrations are provided- the one incorporating the H2O content as a parameter (hydrous), and the
one that does not (anhydrous). Specify which should be used when initialising the model, with the
bool variable hydrous.
"""
def __init__(self,hydrous=True):
"""
Initialise the model.
Parameters
----------
hydrous bool
Whether to use the hydrous parameterization, or not.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.hydrous = hydrous
self.set_calibration_ranges([])
self.set_solubility_dependence(False) #Not dependent on CO2 conc, H2O dependence dealt with within model.
def preprocess_sample(self,sample):
"""
Returns sample, normalized to 100 wt%, without changing the wt% of H2O and CO2 if the
hydrous parameterization is being used (default). If the anhydrous parameterization is
used, it will normalize without including H2O and CO2.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides normalized to wt%.
"""
if self.hydrous == True:
return normalize_FixedVolatiles(sample)
else:
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1.0,
hydrous_coeffs=True,webapp_coeffs=False,**kwargs):
"""
Calculates the dissolved H2O concentration, using Eq (13) of Iacono-Marziano et al. (2012).
If using the hydrous parameterization, it will use the scipy.root_scalar routine to find the
root of the root_dissolved_volatiles method.
Parameters
----------
pressure float
Total pressure in bars.
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid. Default is 1.0.
hydrous_coeffs bool
Use the hydrous or anhydrous NBO/O parameterisation (True for hydrous). Default is True.
webapp_coeffs bool
If True, use the pre-review hydrous coefficients, as implemented in the IM webapp.
Default is False.
Returns
-------
float
Dissolved H2O concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
if hydrous_coeffs == True:
if X_fluid==0:
return 0
H2O = root_scalar(self.root_dissolved_volatiles,args=(pressure,temperature,sample,X_fluid,webapp_coeffs,kwargs),
x0=1.0,x1=2.0).root
return H2O
else:
a = 0.54
b = 1.24
B = -2.95
C = 0.02
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
if fugacity == 0:
return 0
NBO_O = self.NBO_O(sample=sample,hydrous_coeffs=False)
H2O = np.exp(a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
return H2O
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if pressure > self.calculate_saturation_pressure(temperature=temperature,sample=sample,**kwargs):
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,temperature,sample,**kwargs):
"""
Calculates the pressure at which a pure H2O fluid is saturated, for the given sample
composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,kwargs),
bracket=[1e-15,1e5]).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure, and the H2O concentration
passed in the sample variable.
"""
return sample['H2O'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,**kwargs)
def root_dissolved_volatiles(self,h2o,pressure,temperature,sample,X_fluid,webapp_coeffs,kwargs):
""" Function called by calculate_dissolved_volatiles method when the hydrous parameterization is
being used.
Parameters
----------
h2o float
Guess for the H2O concentration in wt%.
pressure float
Total pressure in bars.
temperature float
Temperature in K.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
webapp_coeffs bool
If True, use the pre-review hydrous coefficients rather than the published values.
kwargs dictionary
Keyword arguments
Returns
-------
float
Difference between H2O guessed and the H2O calculated.
"""
if webapp_coeffs == False:
a = 0.53
b = 2.35
B = -3.37
C = -0.02
else:
a = 0.52096846
b = 2.11575907
B = -3.24443335
C = -0.02238884
sample_copy = sample.copy()
sample_copy['H2O'] = h2o
NBO_O = self.NBO_O(sample=sample_copy,hydrous_coeffs=True)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
return h2o - np.exp(a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
def NBO_O(self,sample,hydrous_coeffs=True):
"""
Calculates NBO/O according to Appendix A.1. of Iacono-Marziano et al. (2012). NBO/O
is calculated on either a hydrous or anhydrous basis, as set when initialising the
Model class.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O if using the hydrous parameterization).
Returns
-------
float
NBO/O.
"""
if all(ox in sample for ox in ['K2O','Na2O','CaO','MgO','FeO','Al2O3','SiO2','TiO2']) == False:
raise InputError("sample must contain K2O, Na2O, CaO, MgO, FeO, Al2O3, SiO2, and TiO2.")
X = wtpercentOxides_to_molOxides(sample)
NBO = 2*(X['K2O']+X['Na2O']+X['CaO']+X['MgO']+X['FeO']-X['Al2O3'])
O = 2*X['SiO2']+2*X['TiO2']+3*X['Al2O3']+X['MgO']+X['FeO']+X['CaO']+X['Na2O']+X['K2O']
if hydrous_coeffs == True:
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
NBO = NBO + 2*X['H2O']
O = O + X['H2O']
return NBO/O
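# Minimal usage sketch for IaconoMarzianoWater using the hydrous (default) coefficients.
# Temperature is in oC. The helper name and the composition values are hypothetical and
# illustrative only.
def _example_iacono_marziano_water():
    model = IaconoMarzianoWater()
    sample = {'SiO2': 49.0, 'TiO2': 1.0, 'Al2O3': 16.0, 'FeO': 9.0, 'MgO': 8.5,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 2.0}
    sample = model.preprocess_sample(sample)
    h2o_wt = model.calculate_dissolved_volatiles(pressure=2000.0, temperature=1200.0,
                                                 sample=sample)
    satP = model.calculate_saturation_pressure(temperature=1200.0, sample=sample)
    return h2o_wt, satP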
class IaconoMarzianoCarbon(Model):
"""
Implementation of the Iacono-Marziano et al. (2012) carbon solubility model, as a Model class. Two
calibrations are provided- the one incorporating the H2O content as a parameter (hydrous), and the
one that does not (anhydrous). Specify which should be used when initialising the model, with the
bool variable hydrous.
"""
def __init__(self,hydrous=True):
"""
Initialise the model.
Parameters
----------
hydrous bool
Whether to use the hydrous parameterization, or not.
"""
self.hydrous = hydrous
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(True)
def preprocess_sample(self,sample):
"""
Returns sample, normalized to 100 wt%, without changing the wt% of H2O and CO2 if the
hydrous parameterization is being used (default). If the anhydrous parameterization is
used, it will normalize without including H2O and CO2.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides normalized to wt%.
"""
if self.hydrous == True:
return normalize_FixedVolatiles(sample)
else:
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1,
hydrous_coeffs=True, **kwargs):
"""
Calculates the dissolved CO2 concentration, using Eq (12) of Iacono-Marziano et al. (2012).
If using the hydrous parameterization, it will use the scipy.root_scalar routine to find the
root of the root_dissolved_volatiles method.
Parameters
----------
pressure float
Total pressure in bars.
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of CO2 in the fluid. Default is 1.0.
hydrous_coeffs bool
Use the hydrous or anhydrous NBO/O parameterisation (True for hydrous). Default is True.
Returns
-------
float
Dissolved CO2 concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
if hydrous_coeffs == True:
if 'H2O' not in sample:
raise InputError("sample must contain H2O if using the hydrous parameterization.")
if sample['H2O'] < 0:
raise InputError("Dissolved H2O must be positive.")
im_h2o_model = IaconoMarzianoWater()
h2o = im_h2o_model.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature-273.15,
sample=sample,X_fluid=1-X_fluid,**kwargs)
sample_h2o = sample.copy()
sample_h2o['H2O'] = h2o
d = np.array([-16.4,4.4,-17.1,22.8])
a = 1.0
b = 17.3
B = -6.0
C = 0.12
NBO_O = self.NBO_O(sample=sample_h2o,hydrous_coeffs=True)
molarProps = wtpercentOxides_to_molOxides(sample_h2o)
else:
d = np.array([2.3,3.8,-16.3,20.1])
a = 1.0
b = 15.8
B = -5.3
C = 0.14
NBO_O = self.NBO_O(sample=sample,hydrous_coeffs=False)
molarProps = wtpercentOxides_to_molOxides(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
if fugacity == 0:
return 0
if all(ox in molarProps for ox in ['Al2O3','CaO','K2O','Na2O','FeO','MgO']) == False:
raise InputError("sample must contain Al2O3, CaO, K2O, Na2O, FeO, and MgO.")
x = list()
if 'H2O' in molarProps:
x.append(molarProps['H2O'])
else:
x.append(0.0)
x.append(molarProps['Al2O3']/(molarProps['CaO']+molarProps['K2O']+molarProps['Na2O']))
x.append((molarProps['FeO']+molarProps['MgO']))
x.append((molarProps['Na2O']+molarProps['K2O']))
x = np.array(x)
CO3 = np.exp(np.sum(x*d) + a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
CO2 = CO3/1e4#/(12+16*3)*(12+16*2)/1e4
return CO2
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if pressure > self.calculate_saturation_pressure(temperature=temperature,sample=sample,**kwargs):
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,temperature,sample,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
Calculated saturation pressure in bars.
"""
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2")
if sample['CO2'] < 0:
raise InputError("Dissolved CO2 must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,kwargs),
bracket=[1e-15,1e5]).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure, and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,**kwargs)
def NBO_O(self,sample,hydrous_coeffs=True):
"""
Calculates NBO/O according to Appendix A.1. of Iacono-Marziano et al. (2012). NBO/O
is calculated on either a hydrous or anhydrous basis, as set when initialising the
Model class.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O if using the hydrous parameterization).
Returns
-------
float
NBO/O.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series,")
if all(ox in sample for ox in ['K2O','Na2O','CaO','MgO','FeO','Al2O3','SiO2','TiO2']) == False:
raise InputError("sample must contain K2O, Na2O, CaO, MgO, FeO, Al2O3, SiO2, and TiO2.")
X = wtpercentOxides_to_molOxides(sample)
NBO = 2*(X['K2O']+X['Na2O']+X['CaO']+X['MgO']+X['FeO']-X['Al2O3'])
O = 2*X['SiO2']+2*X['TiO2']+3*X['Al2O3']+X['MgO']+X['FeO']+X['CaO']+X['Na2O']+X['K2O']
if hydrous_coeffs == True:
if 'H2O' not in X:
raise InputError("sample must contain H2O if using the hydrous parameterization.")
NBO = NBO + 2*X['H2O']
O = O + X['H2O']
return NBO/O
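# Minimal usage sketch for IaconoMarzianoCarbon. The hydrous (default) coefficients require
# H2O to be present in the sample, so it is included below. Temperature is in oC. The helper
# name and the composition values are hypothetical and illustrative only.
def _example_iacono_marziano_carbon():
    model = IaconoMarzianoCarbon()
    sample = {'SiO2': 49.0, 'TiO2': 1.0, 'Al2O3': 16.0, 'FeO': 9.0, 'MgO': 8.5,
              'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 2.0, 'CO2': 0.10}
    sample = model.preprocess_sample(sample)
    co2_wt = model.calculate_dissolved_volatiles(pressure=2000.0, temperature=1200.0,
                                                 sample=sample)
    satP = model.calculate_saturation_pressure(temperature=1200.0, sample=sample)
    return co2_wt, satP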
class EguchiCarbon(Model):
"""
Implementation of the Eguchi and Dasgupta (2018) CO2 solubility model for andesitic melts.
Uses the Zhang and Duan (2009) CO2 EOS for fugacity calculations, assuming a pure CO2 fluid,
or ideal mixing for mixed fluids.
"""
def __init__(self):
warnings.warn("Eguchi model is not working correctly. Do not use any results calculated.")
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_ZD09_co2())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,50000.0],crf_Between,'bar','Eguchi & Dasgupta (2018) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[950.0,1600],crf_Between,'oC','Eguchi & Dasgupta (2018) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample,ferric_total=0.15):
""" Returns normalized sample composition, with ferric iron. Where a sample
already contains ferric iron, the composition will be normalized to 100 wt%
(excluding H2O and CO2). Where a sample contains only FeO, ferric iron will
be calculated using the ferric/total iron ratio provided.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
ferric_total float
Mole ratio of ferric to total iron to be used
for calculating Fe2O3 and FeO when only FeO is
provided. Default is 0.15.
Returns
-------
pandas Series or dict
Normalized major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'FeO' not in sample:
raise InputError("sample must contain FeO.")
_sample = sample.copy()
for ox in ['TiO2','P2O5']:
if ox not in _sample:
_sample[ox] = 0.0
if 'Fe2O3' not in _sample:
Fe_t = _sample['FeO']/oxideMass['FeO']
Fe3 = ferric_total*Fe_t
Fe2 = Fe_t - Fe3
_sample['FeO'] = Fe2*oxideMass['FeO']
_sample['Fe2O3'] = Fe3*oxideMass['Fe2O3']/2
return normalize_AdditionalVolatiles(_sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the dissolved (total) CO2 using eqs (9) and (10) of Eguchi and Dasgupta (2018).
Parameters
----------
pressure float
Pressure in bars
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mole fraction of CO2 in the fluid.
Returns
-------
float
Dissolved CO2 concentration.
"""
if pressure < 0:
raise InputError("Pressure must be greater than 0 bar.")
if pressure == 0:
return 0
XCO3 = self.Xi_melt(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,species='CO3')
XCO2 = self.Xi_melt(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,species='CO2')
FW_one = wtpercentOxides_to_formulaWeight(sample)
CO2_CO2 = ((44.01*XCO2)/(44.01*XCO2+(1-(XCO2+XCO3))*FW_one))*100
CO2_CO3 = ((44.01*XCO3)/(44.01*XCO3+(1-(XCO2+XCO3))*FW_one))*100
return CO2_CO2 + CO2_CO3
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
satP = self.calculate_saturation_pressure(temperature=temperature,sample=sample,X_fluid=1.0,**kwargs)
if pressure < satP:
return 1.0
else:
return 0.0
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Concentration of CO2 must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=1000.0,x1=2000.0,
args=(temperature,sample,X_fluid,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure, and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs)
def Xi_melt(self,pressure,temperature,sample,species,X_fluid=1.0,**kwargs):
"""
Calculates the mole fraction of dissolved molecular CO2 or carbonate CO3(2-), using
eqn (9) of Eguchi and Dasgupta (2018).
Parameters
----------
pressure float
Pressure in bars.
temperature float
Temperature in C.
sample pandas Series or dict
Major element oxides in wt%.
species str
Which species to calculate, molecular CO2 'CO2' or carbonate ion 'CO3'.
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Mole fraction of selected species in the melt
"""
temperature = temperature + 273.15 #translate T from C to K
if all(ox in sample for ox in ['MgO','CaO','FeO','Na2O','K2O','MnO','Al2O3','Fe2O3','SiO2','TiO2','P2O5']) == False:
raise InputError("sample must contain MgO, CaO, FeO, Na2O, K2O, MnO, Al2O3, Fe2O3, SiO3, TiO2, and P2O5.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if species == 'CO3':
DH = -1.65e5
DV = 2.38e-5
DS = -43.64
B = 1.47e3
yNBO = 3.29
A_CaO = 1.68e5
A_Na2O = 1.76e5
A_K2O = 2.11e5
elif species == 'CO2':
DH = -9.02e4
DV = 1.92e-5
DS = -43.08
B = 1.12e3
yNBO = -7.09
A_CaO = 0
A_Na2O = 0
A_K2O = 0
else:
raise InputError("species variable must be either 'CO2' or 'CO3'.")
R = 8.314
# Calculate NBO term
cations = wtpercentOxides_to_molSingleO(sample)
oxides = wtpercentOxides_to_molOxides(sample)
NM = (cations['Mg'] + cations['Ca'] + cations['Fe'] + cations['Na'] +
cations['K'] + cations['Mn'])
Al = cations['Al'] - NM
if Al > 0:
Al = NM
else:
Al = cations['Al']
Fe = cations['Fe3'] + Al
if Al > 0:
Fe = 0
if Al < 0 and Fe > 0:
Fe = - Al
if Al < 0 and Fe < 0:
Fe = cations['Fe3']
Tet = cations['Si'] + cations['Ti'] + cations['P'] + Al + Fe
NBO = 2 - 4*Tet
lnfCO2 = np.log(self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid))
lnXi = ((DH/(R*temperature)-(pressure*1e5*DV)/(R*temperature)+DS/R) +
(A_CaO*oxides['CaO']+A_Na2O*oxides['Na2O']+A_K2O*oxides['K2O'])/(R*temperature) +
(B*lnfCO2/temperature) + yNBO*NBO
)
return np.exp(lnXi)
class MooreWater(Model):
"""
Implementation of the Moore et al. (1998) H2O solubility model for magmas up to 3,000 bars.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_HB_h2o())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,3000.0],crf_Between,'bar','Moore et al. (1998) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Moore et al. (1998) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,3000.0],'bar','Moore et al. (1998) water'),
# cr_Between('temperature',[700.0+273.15,1200+273.15],'oC','Moore et al. (1998) water')])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1.0, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved H2O concentration in wt%.
"""
_sample = sample.copy()
_sample['H2O'] = 0.0
_sample['CO2'] = 0.0
_sample = normalize(_sample)
fH2O = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature,X_fluid=X_fluid,**kwargs)
aParam = 2565.0
bParam_Al2O3 = -1.997
bParam_FeOt = -0.9275
bParam_Na2O = 2.736
cParam = 1.171
dParam = -14.21
temperatureK = temperature + 273.15
sample_molfrac = wtpercentOxides_to_molOxides(_sample)
FeOtot = sample_molfrac['FeO'] + sample_molfrac['Fe2O3']*0.8998
b_x_sum = (bParam_Al2O3 * sample_molfrac['Al2O3']) + (bParam_FeOt * FeOtot) + (bParam_Na2O * sample_molfrac['Na2O'])
two_ln_XH2Omelt = (aParam / temperatureK) + b_x_sum * (pressure/temperatureK) + cParam * np.log(fH2O) + dParam
ln_XH2Omelt = two_ln_XH2Omelt / 2.0
XH2Omelt = np.exp(ln_XH2Omelt)
sample_molfrac['H2O'] = XH2Omelt
#Normalize mol fractions to sum to 1, while preserving XH2O
for key, value in sample_molfrac.items():
if key != 'H2O':
sample_molfrac.update({key: value/((1/(1-sample_molfrac['H2O'])))})
sample_wtper = mol_to_wtpercent(sample_molfrac)
return sample_wtper['H2O']
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XH2Ofluid mole fraction.
"""
_sample = sample.copy()
sample_anhy = sample.copy()
sample_anhy["H2O"] = 0.0
sample_anhy["CO2"] = 0.0
aParam = 2565.0
bParam_Al2O3 = -1.997
bParam_FeOt = -0.9275
bParam_Na2O = 2.736
cParam = 1.171
dParam = -14.21
temperatureK = temperature + 273.15
sample_molfrac_anhy = wtpercentOxides_to_molOxides(sample_anhy)
sample_molfrac_hy = wtpercentOxides_to_molOxides(_sample)
FeOtot = sample_molfrac_anhy['FeO'] + sample_molfrac_anhy['Fe2O3']*0.8998
b_x_sum = (bParam_Al2O3 * sample_molfrac_anhy['Al2O3']) + (bParam_FeOt * FeOtot) + (bParam_Na2O * sample_molfrac_anhy['Na2O'])
ln_fH2O = (2 * np.log(sample_molfrac_hy['H2O']) - (aParam/temperatureK) - b_x_sum * (pressure/temperatureK) - dParam) / cParam
fH2O = np.exp(ln_fH2O)
XH2O_fl = fH2O / pressure
# SM: I've changed this to return X_H2O only, as otherwise it doesn't conform to other single-volatile
# models. I'm not sure this is the best solution though.
# return (XCO2_fl, XH2O_fl)
return XH2O_fl
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which an H2O-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=100.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['H2O']
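# Minimal usage sketch for MooreWater. Note that calculate_dissolved_volatiles takes the sample
# as its first argument, and preprocess_sample fills any missing oxides with zeros. The helper
# name and the dacitic composition below are hypothetical and illustrative only.
def _example_moore_water():
    model = MooreWater()
    sample = {'SiO2': 60.0, 'TiO2': 0.8, 'Al2O3': 17.0, 'Fe2O3': 1.0, 'FeO': 4.5,
              'MgO': 3.0, 'CaO': 6.0, 'Na2O': 4.0, 'K2O': 2.0, 'H2O': 3.0}
    sample = model.preprocess_sample(sample)
    h2o_wt = model.calculate_dissolved_volatiles(sample=sample, pressure=1000.0,
                                                 temperature=1000.0)
    satP = model.calculate_saturation_pressure(temperature=1000.0, sample=sample)
    return h2o_wt, satP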
class LiuWater(Model):
"""
Implementation of the Liu et al. (2005) H2O solubility model for metaluminous high-silica rhyolitic melts.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,5000.0],crf_Between,'bar','Liu et al. (2005) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Liu et al. (2005) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('sample',None,crf_LiuComp,None,None,
fail_msg=crmsg_LiuComp_fail, pass_msg=crmsg_LiuComp_pass, description_msg=crmsg_LiuComp_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,5000.0],'bar','Liu et al. (2005) water'),
# cr_Between('temperature',[700.0,1200],'oC','Liu et al. (2005) water')])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1.0, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved H2O concentration in wt%.
"""
pressureMPa = pressure / 10.0
Pw = pressureMPa * X_fluid
PCO2 = pressureMPa * (1 - X_fluid)
temperatureK = temperature + 273.15
H2Ot = ((354.94*Pw**(0.5) + 9.623*Pw - 1.5223*Pw**(1.5)) / temperatureK +
0.0012439*Pw**(1.5) + PCO2*(-1.084*10**(-4)*Pw**(0.5) - 1.362*10**(-5)*Pw))
return H2Ot
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XH2Ofluid mole fraction.
"""
temperatureK = temperature + 273.15
pressureMPa = pressure / 10.0
_sample = sample.copy()
H2Ot = _sample["H2O"]
#calculate saturation pressure and assert that input P <= SatP
satP = self.calculate_saturation_pressure(temperature,sample)
is_saturated = satP - pressure
if is_saturated >= 0:
pass
else:
warnings.warn("{:.1f} bars is above the saturation pressure ({:.1f} bars) for this sample. Results from this calculation may be nonsensical.".format(pressure,satP))
#Use sympy to solve solubility equation for XH2Ofluid
XH2Ofluid = sympy.symbols('XH2Ofluid') #XH2Ofluid is the variable to solve for
equation = ((354.94*(XH2Ofluid*pressureMPa)**(0.5) + 9.623*(XH2Ofluid*pressureMPa)
- 1.5223*(XH2Ofluid*pressureMPa)**(1.5)) / temperatureK
+ 0.0012439*(XH2Ofluid*pressureMPa)**(1.5)
+ pressureMPa*(1-XH2Ofluid)*(-1.084*10**(-4)*(XH2Ofluid*pressureMPa)**(0.5)
- 1.362*10**(-5)*(XH2Ofluid*pressureMPa)) - H2Ot)
XH2Ofluid = sympy.solve(equation, XH2Ofluid)[0]
if XH2Ofluid > 1:
XH2Ofluid = 1
if XH2Ofluid < 0:
XH2Ofluid = 0
return XH2Ofluid
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which an H2O-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=10.0,x1=200.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the pressure guessed, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['H2O']
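# Example usage of LiuWater (a hedged, commented sketch; the oxide values below are hypothetical
# and only illustrate the call signatures, they are not taken from the original code):
#   liu_h2o = LiuWater()
#   rhyolite = liu_h2o.preprocess_sample({'SiO2': 77.0, 'Al2O3': 12.5, 'Na2O': 4.0, 'K2O': 4.5, 'H2O': 5.0})
#   wt_h2o = liu_h2o.calculate_dissolved_volatiles(sample=rhyolite, pressure=2000.0, temperature=800.0)
#   p_sat = liu_h2o.calculate_saturation_pressure(temperature=800.0, sample=rhyolite)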
class LiuCarbon(Model):
"""
Implementation of the CO2 portion of the Liu et al. (2005) H2O-CO2 solubility model for metaluminous high-silica rhyolitic melts.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,5000.0],crf_Between,'bar','Liu et al. (2005) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Liu et al. (2005) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('sample',None,crf_LiuComp,None,None,
fail_msg=crmsg_LiuComp_fail, pass_msg=crmsg_LiuComp_pass, description_msg=crmsg_LiuComp_description)])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1. Mole fraction of CO2 in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved CO2 concentration in wt%.
"""
pressureMPa = pressure / 10.0
Pw = pressureMPa * (1 - X_fluid)
PCO2 = pressureMPa * X_fluid #(1 - X_fluid)
temperatureK = temperature + 273.15
CO2melt_ppm = (PCO2*(5668 - 55.99*Pw)/temperatureK
+ PCO2*(0.4133*Pw**(0.5) + 2.041*10**(-3)*Pw**(1.5)))
CO2melt = CO2melt_ppm / 10000
return CO2melt
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XCO2fluid mole fraction.
"""
temperatureK = temperature + 273.15
pressureMPa = pressure / 10.0
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
_sample = sample.copy()
CO2melt_wt = _sample["CO2"]
CO2melt_ppm = CO2melt_wt * 10000
#calculate saturation pressure and assert that input P <= SatP
satP = self.calculate_saturation_pressure(temperature,sample)
is_saturated = satP - pressure
if is_saturated >= 0:
pass
else:
warnings.warn(str(pressure) + " bars is above the saturation pressure (" + str(satP) + " bars) for this sample. Results from this calculation may be nonsensical.")
#Use sympy to solve solubility equation for XH2Ofluid
XCO2fluid = sympy.symbols('XCO2fluid') #XCO2fluid is the variable to solve for
equation = (((XCO2fluid*pressureMPa)*(5668 - 55.99*(pressureMPa*(1-XCO2fluid)))/temperatureK
+ (XCO2fluid*pressureMPa)*(0.4133*(pressureMPa*(1-XCO2fluid))**(0.5)
+ 2.041*10**(-3)*(pressureMPa*(1-XCO2fluid))**(1.5))) - CO2melt_ppm)
XCO2fluid = sympy.solve(equation, XCO2fluid)[0]
if XCO2fluid > 1:
XCO2fluid = 1
if XCO2fluid < 0:
XCO2fluid = 0
return XCO2fluid #1 - XCO2fluid
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a CO2-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of CO2 in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=10.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the pressure guessed, and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['CO2']
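# Example usage of LiuCarbon (a hedged, commented sketch with hypothetical values; CO2 is in wt%
# and X_fluid is the mole fraction of CO2 in the coexisting fluid):
#   liu_co2 = LiuCarbon()
#   rhyolite = liu_co2.preprocess_sample({'SiO2': 77.0, 'Al2O3': 12.5, 'Na2O': 4.0, 'K2O': 4.5, 'CO2': 0.01})
#   wt_co2 = liu_co2.calculate_dissolved_volatiles(sample=rhyolite, pressure=2000.0, temperature=800.0, X_fluid=0.5)
#   p_sat = liu_co2.calculate_saturation_pressure(temperature=800.0, sample=rhyolite)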
class AllisonCarbon(Model):
"""
Implementation of the Allison et al. (2019) CO2 solubility model. The type of fit and the
composition are selected via the model_fit and model_loc arguments of calculate_dissolved_volatiles.
The fit may be either thermodynamic or power-law. The composition may be chosen from sunset,
sfvf, erebus, vesuvius, etna, or stromboli. Default is the thermodynamic fit to sunset.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_HB_co2())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([CalibrationRange('pressure',[0.0,6000.0],crf_Between,'bar','Allison et al. (2019) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',1200,crf_EqualTo,'oC','Allison et al. (2019) carbon',
fail_msg=crmsg_EqualTo_fail, pass_msg=crmsg_EqualTo_pass, description_msg=crmsg_EqualTo_description)])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
"""
Returns sample normalized to 100wt%, keeping the concentrations of H2O and CO2 constant.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series
Normalized major element oxides in wt%.
"""
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample=None,X_fluid=1.0,
model_loc='sunset',model_fit='thermodynamic',**kwargs):
"""
Calculates the dissolved CO2 concentration using Eqns 2-7 or 10-11 from Allison et al. (2019).
Parameters
----------
pressure float
Pressure in bars.
temperature float
Temperature in C.
sample pandas Series, dict or None
Major element oxides in wt%. Required if using the thermodynamic fits, need not be
provided if using the power law fits. Default is None.
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
model_fit str
Either 'power' for the power-law fits, or 'thermodynamic' for the
thermodynamic fits.
model_loc str
One of 'sunset', 'sfvf', 'erebus', 'vesuvius', 'etna', 'stromboli'.
Returns
-------
float
Dissolved CO2 concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if temperature <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if pressure < 0.0:
raise InputError("Pressure must be positive.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if model_fit not in ['power','thermodynamic']:
raise InputError("model_fit must be one of 'power', or 'thermodynamic'.")
if model_loc not in ['sunset','sfvf','erebus','vesuvius','etna','stromboli']:
raise InputError("model_loc must be one of 'sunset', 'sfvf', 'erebus', 'vesuvius', 'etna', or 'stromboli'.")
if pressure == 0:
return 0
if model_fit == 'thermodynamic':
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("Thermodynamic fit requires sample to be a dict or a pandas Series.")
P0 = 1000 # bar
params = dict({'sunset':[16.4,-14.67],
'sfvf':[15.02,-14.87],
'erebus':[15.83,-14.65],
'vesuvius':[24.42,-14.04],
'etna':[21.59,-14.28],
'stromboli':[14.93,-14.68]})
DV = params[model_loc][0]
lnK0 = params[model_loc][1]
lnK = lnK0 - (pressure-P0)*DV/(10*8.3141*temperature)
fCO2 = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid,**kwargs)
Kf = np.exp(lnK)*fCO2
XCO3 = Kf/(1-Kf)
# FWone = wtpercentOxides_to_formulaWeight(sample)#,exclude_volatiles=True)
FWone = 36.594
wtCO2 = (44.01*XCO3)/((44.01*XCO3)+(1-XCO3)*FWone)*100
return wtCO2
if model_fit == 'power':
params = dict({'stromboli':[1.05,0.883],
'etna':[2.831,0.797],
'vesuvius':[4.796,0.754],
'sfvf':[3.273,0.74],
'sunset':[4.32,0.728],
'erebus':[5.145,0.713]})
fCO2 = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid,**kwargs)
return params[model_loc][0]*fCO2**params[model_loc][1]/1e4
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
satP = self.calculate_saturation_pressure(temperature=temperature,sample=sample,X_fluid=1.0,**kwargs)
if pressure < satP:
return 1.0
else:
return 0.0
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated called to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if temperature <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,X_fluid,kwargs),
x0=1000.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the pressure guessed, and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs)
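# Example usage of AllisonCarbon (a hedged, commented sketch; 'basalt' is a hypothetical oxide
# dict, only required for the thermodynamic fit since the power-law fit ignores the composition):
#   allison = AllisonCarbon()
#   wt_co2_thermo = allison.calculate_dissolved_volatiles(pressure=2000.0, temperature=1200.0,
#                                                         sample=basalt, model_loc='etna',
#                                                         model_fit='thermodynamic')
#   wt_co2_power = allison.calculate_dissolved_volatiles(pressure=2000.0, temperature=1200.0,
#                                                        model_loc='etna', model_fit='power')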
#------------MIXED FLUID MODELS-------------------------------#
class MixedFluid(Model):
"""
Implements the generic framework for mixed fluid solubility. Any set of pure fluid solubility
models may be specified.
"""
def __init__(self,models):
"""
Initializes the mixed fluid model.
Parameters
----------
models dictionary
Dictionary with names of volatile species as keys, and the model objects as values.
"""
self.models = tuple(model for model in models.values())
self.set_volatile_species(list(models.keys()))
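# Example construction (a hedged sketch, not part of the original module): an H2O-CO2 mixed
# fluid can be assembled from any two pure-fluid models defined above, e.g.
#   mixed = MixedFluid({'H2O': LiuWater(), 'CO2': LiuCarbon()})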
def preprocess_sample(self,sample):
""" Returns sample, unmodified.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
_sample = sample.copy()
_sample = self.models[0].preprocess_sample(_sample)
return _sample
def calculate_dissolved_volatiles(self,pressure,X_fluid,returndict=False,**kwargs):
"""
Calculates the dissolved volatile concentrations in wt%, using each model's
calculate_dissolved_volatiles method. At present the volatile concentrations are
not propagated through.
Parameters
----------
pressure float
The total pressure in bars.
X_fluid float, numpy.ndarry, dict, pandas Series
The mole fraction of each species in the fluid. If the mixed fluid model
contains only two species (e.g. CO2 and H2O), the value of the first species in
self.volatile_species may be passed on its own as a float.
returndict bool
If True, the results will be returned in a dict, otherwise they will be returned
as a tuple.
Returns
-------
tuple
Dissolved volatile concentrations of each species in the model, in the order set
by self.volatile_species.
"""
if (type(X_fluid) == float or type(X_fluid) == int) and len(self.volatile_species) == 2:
X_fluid = (X_fluid,1-X_fluid)
elif len(X_fluid) != len(self.volatile_species):
raise InputError("X_fluid must have the same length as the number of volatile species\
in the MixedFluids Model class, or it may have length 1 if two species are present\
in the MixedFluids Model class.")
if np.sum(X_fluid) != 1.0:
raise InputError("X_fluid must sum to 1.0")
if any(val<0 for val in X_fluid) or any(val>1 for val in X_fluid):
raise InputError("Each mole fraction in X_fluid must have a value between 0 and 1.")
if type(X_fluid) == dict or type(X_fluid) == pd.core.series.Series:
X_fluid = tuple(X_fluid[species] for species in self.volatile_species)
# If the models don't depend on the concentration of volatiles, themselves.
if all(model.solubility_dependence == False for model in self.models):
result = tuple(model.calculate_dissolved_volatiles(pressure=pressure,X_fluid=Xi,**kwargs) for model, Xi in zip(self.models,X_fluid))
# If one of the models depends on the other volatile concentration
elif len(self.models) == 2 and self.models[0].solubility_dependence == False and 'sample' in kwargs:
result0 = self.models[0].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[0],**kwargs)
samplecopy = kwargs['sample'].copy()
samplecopy[self.volatile_species[0]] = result0
kwargs['sample'] = samplecopy
result1 = self.models[1].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[1],**kwargs)
result = (result0,result1)
elif len(self.models) == 2 and self.models[1].solubility_dependence == False and 'sample' in kwargs:
result1 = self.models[1].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[1],**kwargs)
samplecopy = kwargs['sample'].copy()
samplecopy[self.volatile_species[1]] = result1
kwargs['sample'] = samplecopy
result0 = self.models[0].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[0],**kwargs)
result = (result0,result1)
else:
raise InputError("The solubility dependence of the models is not currently supported by the MixedFluid model.")
if returndict == True:
resultsdict = {}
for i,v in zip(range(len(self.volatile_species)),self.volatile_species):
resultsdict.update({v+'_liq':result[i]})
return resultsdict
else:
return result
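# Example call (a hedged sketch; 'mixed' and 'sample' are the hypothetical objects from the
# sketches above, and keyword arguments such as sample and temperature are forwarded to the
# pure-fluid models). With two volatile species, X_fluid may be passed as a single float:
#   h2o_liq, co2_liq = mixed.calculate_dissolved_volatiles(pressure=2000.0, X_fluid=0.5,
#                                                          sample=sample, temperature=1000.0)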
def calculate_equilibrium_fluid_comp(self,pressure,sample,return_dict=True,**kwargs):
""" Calculates the composition of the fluid in equilibrium with the dissolved volatile
concentrations passed. If a fluid phase is undersaturated at the chosen pressure (0,0) will
be returned. Note, this currently assumes the given H2O and CO2 concentrations are
the system total, not the total dissolved. If one of the volatile species has a zero or
negative concentration, the pure fluid model for the other volatile species will be used.
Parameters
----------
pressure float
The total pressure in bars.
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
return_dict bool
Set the return type, if true a dict will be returned, if False two floats will be
returned. Default is True.
Returns
-------
dict or floats
Mole fractions of the volatile species in the fluid, in the order given by
self.volatile_species if floats.
"""
if len(self.volatile_species) != 2:
raise InputError("Currently equilibrium fluid compositions can only be calculated when\
two volatile species are present.")
dissolved_at_0bar = [self.models[0].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs),
self.models[1].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs)]
if sample[self.volatile_species[0]] <= 0.0 or sample[self.volatile_species[0]] <= dissolved_at_0bar[0]:
Xv0 = 0.0
Xv1 = self.models[1].calculate_equilibrium_fluid_comp(pressure=pressure,sample=sample,**kwargs)
elif sample[self.volatile_species[1]] <= 0.0 or sample[self.volatile_species[1]] <= dissolved_at_0bar[1]:
Xv1 = 0.0
Xv0 = self.models[0].calculate_equilibrium_fluid_comp(pressure=pressure,sample=sample,**kwargs)
else:
satP = self.calculate_saturation_pressure(sample,**kwargs)
if satP < pressure:
if return_dict == True:
return {self.volatile_species[0]:0,self.volatile_species[1]:0}
else:
return (0,0)
molfracs = wtpercentOxides_to_molOxides(sample)
(Xt0, Xt1) = (molfracs[self.volatile_species[0]],molfracs[self.volatile_species[1]])
try:
Xv0 = root_scalar(self.root_for_fluid_comp,bracket=[1e-15,1-1e-15],args=(pressure,Xt0,Xt1,sample,kwargs)).root
Xv1 = 1 - Xv0
except:
try:
Xv0 = root_scalar(self.root_for_fluid_comp,x0=0.5,x1=0.1,args=(pressure,Xt0,Xt1,sample,kwargs)).root
Xv1 = 1 - Xv0
except:
raise SaturationError("Equilibrium fluid not found. Likely an issue with the numerical solver.")
if return_dict == True:
return {self.volatile_species[0]:Xv0,self.volatile_species[1]:Xv1}
else:
return Xv0, Xv1
def calculate_saturation_pressure(self,sample,**kwargs):
"""
Calculates the pressure at which a fluid will be saturated, given the dissolved volatile
concentrations. If one of the volatile species has a zero or negative concentration the
pure fluid model for the other species will be used. If one of the volatile species has a
concentration lower than the concentration dissolved at 0 bar, the pure fluid model for the
other species will be used.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
Returns
-------
float
The saturation pressure in bars.
"""
dissolved_at_0bar = [self.models[0].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs),
self.models[1].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs)]
if sample[self.volatile_species[0]] <= 0.0 or sample[self.volatile_species[0]] <= dissolved_at_0bar[0]:
satP = self.models[1].calculate_saturation_pressure(sample=sample,**kwargs)
elif sample[self.volatile_species[1]] <= 0.0 or sample[self.volatile_species[1]] <= dissolved_at_0bar[1]:
satP = self.models[0].calculate_saturation_pressure(sample=sample,**kwargs)
else:
volatile_concs = np.array(tuple(sample[species] for species in self.volatile_species))
x0 = 0
for model in self.models:
xx0 = model.calculate_saturation_pressure(sample=sample,**kwargs)
if np.isnan(xx0) == False:
x0 += xx0
try:
satP = root(self.root_saturation_pressure,x0=[x0,0.5],args=(volatile_concs,sample,kwargs)).x[0]
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def calculate_isobars_and_isopleths(self,pressure_list,isopleth_list=[0,1],points=51,
return_dfs=True,extend_to_zero=True,**kwargs):
"""
Calculates isobars and isopleths. Isobars can be calculated for any number of pressures. Variables
required by each of the pure fluid models must be passed, e.g. sample, temperature, etc.
Parameters
----------
pressure_list list
List of all pressure values at which to calculate isobars, in bars.
isopleth_list list
List of all fluid compositions in mole fraction (of the first species in self.volatile_species)
at which to calculate isopleths. Values can range from 0 to 1. If None is passed, only isobars
will be calculated. Default value is [0, 1].
points int
The number of points in each isobar and isopleth. Default value is 51.
return_dfs bool
If True, the results will be returned as two pandas DataFrames, as produced by the MagmaSat
method. If False the results will be returned as lists of numpy arrays.
Returns
-------
pandas DataFrame object(s) or list(s)
If isopleth_list is not None, two objects will be returned, one with the isobars and the second with
the isopleths. If return_dfs is True, two pandas DataFrames will be returned with column names
'Pressure' or 'XH2O_fl', 'H2O_liq', and 'CO2_liq'. If return_dfs is False, two lists of numpy arrays
will be returned. Each array is an individual isobar or isopleth, in the order passed via pressure_list
or isopleth_list. The arrays are the concentrations of H2O and CO2 in the liquid, in the order of the
species in self.volatile_species.
"""
if len(self.volatile_species) != 2 or 'H2O' not in self.volatile_species or 'CO2' not in self.volatile_species:
raise InputError("calculate_isobars_and_isopleths may only be used with a H2O-CO2 fluid.")
H2O_id = self.volatile_species.index('H2O')
CO2_id = self.volatile_species.index('CO2')
has_isopleths = True
if isopleth_list is None:
has_isopleths = False
isobars_df = pd.DataFrame(columns=['Pressure','H2O_liq','CO2_liq'])
isobars = []
for pressure in pressure_list:
dissolved = np.zeros([2,points])
Xv0 = np.linspace(0.0,1.0,points)
for i in range(points):
dissolved[:,i] = self.calculate_dissolved_volatiles(pressure=pressure,X_fluid=(Xv0[i],1-Xv0[i]),**kwargs)
isobars_df = isobars_df.append({'Pressure':pressure,'H2O_liq':dissolved[H2O_id,i],'CO2_liq':dissolved[CO2_id,i]},ignore_index=True)
isobars.append(dissolved)
if has_isopleths == True:
isopleths_df = pd.DataFrame(columns=['XH2O_fl','H2O_liq','CO2_liq'])
isopleths = []
for isopleth in isopleth_list:
dissolved = np.zeros([2,points])
pmin = np.nanmin(pressure_list)
pmax = np.nanmax(pressure_list)
if pmin == pmax:
pmin = 0.0
pressure = np.linspace(pmin,pmax,points)
for i in range(points):
dissolved[:,i] = self.calculate_dissolved_volatiles(pressure=pressure[i],X_fluid=(isopleth,1-isopleth),**kwargs)
isopleths_df = isopleths_df.append({'XH2O_fl':[isopleth,1-isopleth][H2O_id],'H2O_liq':dissolved[H2O_id,i],'CO2_liq':dissolved[CO2_id,i]},ignore_index=True)
isopleths.append(dissolved)
if return_dfs == True:
if has_isopleths == True:
return (isobars_df, isopleths_df)
else:
return isobars_df
else:
if has_isopleths == True:
return (isobars, isopleths)
else:
return isobars
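# Example call (a hedged sketch with hypothetical inputs): when isopleth_list is given, two
# DataFrames are returned; with isopleth_list=None only the isobar DataFrame is returned:
#   isobars, isopleths = mixed.calculate_isobars_and_isopleths(pressure_list=[1000.0, 2000.0],
#                                                              isopleth_list=[0.25, 0.5, 0.75],
#                                                              sample=sample, temperature=1000.0)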
def calculate_degassing_path(self,sample,pressure='saturation',fractionate_vapor=0.0,final_pressure=100.0,
steps=101,return_dfs=True,round_to_zero=True,**kwargs):
"""
Calculates the dissolved volatiles in a progressively degassing sample.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
pressure string, float, int, list, or numpy array
Defaults to 'saturation', the calculation will begin at the saturation pressure. If a number is passed
as either a float or int, this will be the starting pressure. If a list or numpy array is passed, the
pressure values in the list or array will define the degassing path, i.e. final_pressure and steps
variables will be ignored. Units are bars.
fractionate_vapor float
What proportion of vapor should be removed at each step. If 0.0 (default), the degassing path will
correspond to closed-system degassing. If 1.0, the degassing path will correspond to open-system
degassing.
final_pressure float
The final pressure on the degassing path, in bars. Ignored if a list or numpy array is passed as the
pressure variable. Default is 100 bars.
steps int
The number of steps in the degassing path. Ignored if a list or numpy array are passed as the pressure
variable.
return_dfs bool
If True, the results will be returned in a pandas DataFrame, if False, two numpy arrays will be returned.
round_to_zero bool
If True, the first entry of FluidProportion_wt will be rounded to zero, rather than being a value
within numerical error of zero. Default is True.
Returns
-------
pandas DataFrame or numpy arrays
If return_dfs is True (default), a DataFrame with columns 'Pressure_bars', 'H2O_liq', 'CO2_liq',
'H2O_fl', 'CO2_fl', and 'FluidProportion_wt', is returned. Dissolved volatiles are in wt%,
the proportions of volatiles in the fluid are in mole fraction. Otherwise a numpy array containing
the dissolved volatile concentrations, and a numpy array containing the mole fractions of
volatiles in the fluid is returned. The columns are in the order of the volatiles in
self.volatile_species.
"""
# if 'model' in kwargs and model=='Liu':
# final_pressure = 1.0
wtptoxides = sample.copy()
wtptoxides = normalize_FixedVolatiles(wtptoxides)
wtm0s, wtm1s = (wtptoxides[self.volatile_species[0]],wtptoxides[self.volatile_species[1]])
if pressure == 'saturation':
p0 = self.calculate_saturation_pressure(wtptoxides,**kwargs)
pressures = np.linspace(p0,final_pressure,steps)
elif type(pressure) == float or type(pressure) == int:
pressures = np.linspace(pressure,final_pressure,steps)
elif type(pressure) == list or type(pressure) == np.ndarray:
pressures = pressure
Xv = np.zeros([2,len(pressures)])
wtm = np.zeros([2,len(pressures)])
for i in range(len(pressures)):
try:
X_fluid = self.calculate_equilibrium_fluid_comp(pressure=pressures[i],sample=wtptoxides,return_dict=False,**kwargs)
Xv[:,i] = X_fluid
if X_fluid == (0,0):
wtm[:,i] = (wtptoxides[self.volatile_species[0]],wtptoxides[self.volatile_species[1]])
else:
if X_fluid[0] == 0:
wtm[0,i] = wtptoxides[self.volatile_species[0]]
wtm[1,i] = self.calculate_dissolved_volatiles(pressure=pressures[i],sample=wtptoxides,X_fluid=X_fluid,**kwargs)[1]
elif X_fluid[1] == 0:
wtm[1,i] = wtptoxides[self.volatile_species[1]]
wtm[0,i] = self.calculate_dissolved_volatiles(pressure=pressures[i],sample=wtptoxides,X_fluid=X_fluid,**kwargs)[0]
else:
wtm[:,i] = self.calculate_dissolved_volatiles(pressure=pressures[i],sample=wtptoxides,X_fluid=X_fluid,**kwargs)
wtptoxides[self.volatile_species[0]] = wtm[0,i] + (1-fractionate_vapor)*(wtm0s-wtm[0,i])
wtptoxides[self.volatile_species[1]] = wtm[1,i] + (1-fractionate_vapor)*(wtm1s-wtm[1,i])
# wtptoxides = normalize_FixedVolatiles(wtptoxides)
except:
Xv[:,i] = [np.nan]*np.shape(Xv)[0]
wtm[:,i] = wtm[:,i-1]
if return_dfs == True:
exsolved_degassing_df = pd.DataFrame()
exsolved_degassing_df['Pressure_bars'] = pressures
exsolved_degassing_df['H2O_liq'] = wtm[self.volatile_species.index('H2O'),:]
exsolved_degassing_df['CO2_liq'] = wtm[self.volatile_species.index('CO2'),:]
exsolved_degassing_df['H2O_fl'] = Xv[self.volatile_species.index('H2O'),:]
exsolved_degassing_df['CO2_fl'] = Xv[self.volatile_species.index('CO2'),:]
exsolved_degassing_df['FluidProportion_wt'] = (wtm0s+wtm1s)-exsolved_degassing_df['H2O_liq']-exsolved_degassing_df['CO2_liq']
if round_to_zero == True and np.round(exsolved_degassing_df.loc[0,'FluidProportion_wt'],2)==0:
exsolved_degassing_df.loc[0,'FluidProportion_wt'] = 0.0
return exsolved_degassing_df
else:
return (wtm, Xv)
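# Example call (a hedged sketch with hypothetical inputs): a closed-system degassing path from
# the saturation pressure down to final_pressure, and a fully open-system (fractionating) path:
#   closed_df = mixed.calculate_degassing_path(sample=sample, temperature=1000.0)
#   open_df = mixed.calculate_degassing_path(sample=sample, temperature=1000.0, fractionate_vapor=1.0)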
def root_saturation_pressure(self,x,volatile_concs,sample,kwargs):
""" Function called by scipy.root when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
x numpy array
The guessed value for the root. x[0] is the pressure (in bars) and x[1] is the
mole fraction of the first volatile in self.volatile_species.
volatile_concs numpy array
The dissolved volatile concentrations, in the same order as self.volatile_species.
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
kwargs dictionary
Dictionary of keyword arguments, which may be required by the pure-fluid models.
Returns
-------
numpy array
The difference in the dissolved volatile concentrations, and those predicted with the
pressure and fluid composition specified by x.
"""
if x[1] < 0:
x[1] = 0
elif x[1] > 1:
x[1] = 1
if x[0] <= 0:
x[0] = 1e-15
misfit = np.array(self.calculate_dissolved_volatiles(pressure=x[0],X_fluid=(x[1],1-x[1]),sample=sample,**kwargs)) - volatile_concs
return misfit
def root_for_fluid_comp(self,Xv0,pressure,Xt0,Xt1,sample,kwargs):
""" Function called by scipy.root_scalar when calculating the composition of equilibrium fluid
in the calculate_equilibrium_fluid_comp method.
Parameters
----------
Xv0 float
The guessed mole fraction of the first volatile species in self.volatile_species.
pressure float
The total pressure in bars.
Xt0 float
The total mole fraction of the first volatile species in self.volatile_species.
Xt1 float
The total mole fraction of the second volatile species in self.volatile_species.
sample pandas Series
Major element oxides in wt%
kwargs dictionary
A dictionary of keyword arguments that may be required by the pure fluid models.
Returns
-------
float
The difference in the LHS and RHS of the mass balance equation. Eq X in manuscript.
"""
wtt0 = sample[self.volatile_species[0]]
wtt1 = sample[self.volatile_species[1]]
wtm0, wtm1 = self.calculate_dissolved_volatiles(pressure=pressure,X_fluid=(Xv0,1-Xv0),sample=sample,**kwargs)
Xm0 = Xt0/wtt0*wtm0
Xm1 = Xt1/wtt1*wtm1
if self.volatile_species[0] == 'CO2' and Xv0 != Xm0:
f = (Xt0-Xm0)/(Xv0-Xm0)
return (1-f)*Xm1 + f*(1-Xv0) - Xt1
else:
f = (Xt1-Xm1)/((1-Xv0)-Xm1)
return (1-f)*Xm0 + f*Xv0 - Xt0
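# Note (added sketch, restating the relation coded above): root_for_fluid_comp encodes a
# lever-rule mass balance. With f the fraction of the volatile budget residing in the fluid,
# computed from one species as f = (Xt - Xm)/(Xv - Xm), the function returns
# (1-f)*Xm + f*Xv - Xt for the other species, which is zero when the guessed fluid
# composition Xv0 satisfies the bulk constraint.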
def check_calibration_range(self,parameters,report_nonexistance=True):
""" Checks whether the given parameters are within the ranges defined by the
CalibrationRange objects for each model and its fugacity and activity models. An empty
string will be returned if all parameters are within the calibration range. If a
parameter is not within the calibration range, a description of the problem will be
returned in the string.
Parameters
----------
parameters dict
Dictionary keys are the names of the parameters to be checked, e.g., pressure,
temperature, SiO2, etc. Values are the values of each parameter. A complete set
need not be given.
Returns
-------
str
String description of any parameters falling outside of the calibration range.
"""
s = ''
for model in self.models:
for cr in model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in model.fugacity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in model.activity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
def get_calibration_range(self):
""" Returns a string describing the calibration ranges defined by the CalibrationRange
objects for each model, and its associated fugacity and activity models.
Returns
-------
str
String description of the calibration range objects."""
s = ''
for model in self.models:
for cr in model.calibration_ranges:
s += cr.string(None)
for cr in model.fugacity_model.calibration_ranges:
s += cr.string(None)
for cr in model.activity_model.calibration_ranges:
s += cr.string(None)
return s
class MagmaSat(Model):
"""
An object to instantiate a thermoengine equilibrate class
"""
def __init__(self):
self.melts_version = '1.2.0' #just here so users can see which version is being used
self.set_volatile_species(['H2O', 'CO2'])
self.set_calibration_ranges([CalibrationRange('pressure',[0.0,30000.0],crf_Between,'bar','MagmaSat',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[550,1730],crf_Between,'oC','MagmaSat',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample): #TODO test this by passing unexpected input types to sample
"""
Returns sample with 0.0 values for any oxides not passed.
Parameters
----------
sample: dictionary
Sample composition in wt% oxides
Returns
-------
dictionary
Sample composition in wt% oxides
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
self.bulk_comp_orig = sample
return sample
def check_calibration_range(self,parameters,**kwargs):
""" Checks whether supplied parameters and calculated results are within the calibration range
of the model, defined by the CalibrationRange objects. An empty string will be returned if all
parameters are within the calibration range. If a parameter is not within the calibration range,
a description of the problem will be returned in the string.
Parameters
----------
parameters dict
Dictionary keys are the names of the parameters to be checked, e.g., pressure,
temperature, SiO2, etc. Values are the values of each parameter. A complete set
need not be given.
Returns
-------
str
String description of any parameters falling outside of the calibration range.
"""
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance=False)
return s
def get_calibration_range(self):
""" Returns a string describing the calibration ranges defined by the CalibrationRange
objects for the model.
Returns
-------
str
String description of the calibration range objects."""
s = ''
for cr in self.calibration_ranges:
s += cr.string(None)
return s
def get_fluid_mass(self, sample, temperature, pressure, H2O, CO2):
"""An internally used function to calculate fluid mass.
Parameters
----------
sample: dictionary
Sample composition in wt% oxides
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars
H2O: float
wt% H2O in the system
CO2: float
wt% CO2 in the system
Returns
-------
float
mass of the fluid in grams
"""
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
bulk_comp["H2O"] = H2O
bulk_comp["CO2"] = CO2
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
return fluid_mass
def get_XH2O_fluid(self, sample, temperature, pressure, H2O, CO2):
"""An internally used function to calculate fluid composition.
Parameters
----------
sample: dictionary
Sample composition in wt% oxides
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars
H2O: float
wt% H2O in the system
CO2: float
wt% CO2 in the system
Returns
-------
float
Mole fraction of H2O in the H2O-CO2 fluid
"""
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
bulk_comp["H2O"] = H2O
bulk_comp["CO2"] = CO2
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
#NOTE mode='component' returns endmember component keys with values in mol fraction.
if "Water" in fluid_comp:
H2O_fl = fluid_comp["Water"]
else:
H2O_fl = 0.0
# if H2O_fl == 0:
# raise SaturationError("Composition not fluid saturated.")
return H2O_fl
def calculate_dissolved_volatiles(self, sample, temperature, pressure, X_fluid=1, H2O_guess=0.0, verbose=False, **kwargs):
#TODO make better initial guess at higher XH2Ofl
#TODO make refinements faster
"""
Calculates the amount of H2O and CO2 dissolved in a magma at saturation at the given P/T conditions and fluid composition.
Fluid composition will be matched to within 0.0001 mole fraction.
Parameters
----------
sample: dict or pandas Series
Compositional information on one sample in oxides.
temperature: float or int
Temperature, in degrees C.
pressure: float or int
Pressure, in bars.
X_fluid: float or int
The default value is 1. The mole fraction of H2O in the H2O-CO2 fluid. X_fluid=1 is a pure H2O fluid. X_fluid=0 is a pure CO2 fluid.
verbose: bool
OPTIONAL: Default is False. If set to True, returns H2O and CO2 concentration in the melt, H2O and CO2 concentration in
the fluid, mass of the fluid in grams, and proportion of fluid in the system in wt%.
Returns
-------
dict
A dictionary of dissolved volatile concentrations in wt% with keys H2O and CO2.
"""
sample = self.preprocess_sample(sample)
if isinstance(X_fluid, int) or isinstance(X_fluid, float):
pass
else:
raise InputError("X_fluid must be type int or float")
if isinstance(H2O_guess, int) or isinstance(H2O_guess, float):
pass
else:
raise InputError("H2O_guess must be type int or float")
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
if X_fluid != 0 and X_fluid !=1:
if X_fluid < 0.001 or X_fluid > 0.999:
raise InputError("X_fluid is calculated to a precision of 0.0001 mole fraction. \
Value for X_fluid must be between 0.0001 and 0.9999.")
H2O_val = H2O_guess
CO2_val = 0.0
fluid_mass = 0.0
while fluid_mass <= 0:
if X_fluid == 0:
CO2_val += 0.1
elif X_fluid >= 0.5:
H2O_val += 0.2
CO2_val = (H2O_val / X_fluid) - H2O_val #NOTE this is setting XH2Owt of the system (not of the fluid) to X_fluid
#TODO this is what needs to be higher for higher XH2O. Slows down computation by a second or two
else:
H2O_val += 0.1
CO2_val = (H2O_val / X_fluid) - H2O_val #NOTE this is setting XH2Owt of the system (not of the fluid) to X_fluid
#TODO this is what needs to be higher for higher XH2O. Slows down computation by a second or two
fluid_mass = self.get_fluid_mass(sample, temperature, pressure, H2O_val, CO2_val)
bulk_comp["H2O"] = H2O_val
bulk_comp["CO2"] = CO2_val
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
liquid_comp = melts.get_composition_of_phase(xmlout, phase_name='Liquid', mode='oxide_wt')
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
if "Water" in fluid_comp:
H2O_fl = fluid_comp["Water"]
else:
H2O_fl = 0.0
XH2O_fluid = H2O_fl
#------Coarse Check------#
while XH2O_fluid < X_fluid - 0.1: #too low coarse check
H2O_val += 0.2
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
while XH2O_fluid > X_fluid + 0.1: #too high coarse check
CO2_val += 0.1
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
#------Refinement 1------#
while XH2O_fluid < X_fluid - 0.01: #too low refinement 1
H2O_val += 0.05
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
while XH2O_fluid > X_fluid + 0.01: #too high refinement 1
CO2_val += 0.01
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
#------Refinement 2------#
while XH2O_fluid < X_fluid - 0.001: #too low refinement 2
H2O_val += 0.005
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
while XH2O_fluid > X_fluid + 0.001: #too high refinement 2
CO2_val += 0.001
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
#------Final refinement------#
while XH2O_fluid < X_fluid - 0.0001: #too low final refinement
H2O_val += 0.001
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
while XH2O_fluid > X_fluid + 0.0001: #too high final refinement
CO2_val += 0.0001
XH2O_fluid = self.get_XH2O_fluid(sample, temperature, pressure, H2O_val, CO2_val)
#------Get calculated values------#
bulk_comp["H2O"] = H2O_val
bulk_comp["CO2"] = CO2_val
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
system_mass = melts.get_mass_of_phase(xmlout, phase_name='System')
liquid_comp = melts.get_composition_of_phase(xmlout, phase_name='Liquid', mode='oxide_wt')
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
if "H2O" in liquid_comp:
H2O_liq = liquid_comp["H2O"]
else:
H2O_liq = 0
if "CO2" in liquid_comp:
CO2_liq = liquid_comp["CO2"]
else:
CO2_liq = 0
if "Water" in fluid_comp:
H2O_fl = fluid_comp["Water"]
else:
H2O_fl = 0.0
if "Carbon Dioxide" in fluid_comp:
CO2_fl = fluid_comp["Carbon Dioxide"]
else:
CO2_fl = 0.0
XH2O_fluid = H2O_fl
if verbose == True:
return {"temperature": temperature, "pressure": pressure,
"H2O_liq": H2O_liq, "CO2_liq": CO2_liq,
"XH2O_fl": H2O_fl, "XCO2_fl": CO2_fl,
"FluidProportion_wt": 100*fluid_mass/system_mass}
if verbose == False:
return {"CO2": CO2_liq, "H2O": H2O_liq}
def calculate_equilibrium_fluid_comp(self, sample, temperature, pressure, verbose=False, **kwargs): #TODO fix weird printing
"""
Returns H2O and CO2 concentrations (in mole fraction) of a fluid in equilibrium with the given sample at the given P/T condition.
Parameters
----------
sample: dict or pandas Series
Compositional information on one sample in oxides.
temperature: float or int
Temperature, in degrees C.
pressure: float or int
Pressure, in bars. #TODO check units
verbose: bool
OPTIONAL: Default is False. If set to True, returns H2O and CO2 concentration in the fluid, mass of the fluid in grams,
and proportion of fluid in the system in wt%.
Returns
-------
dict
A dictionary of fluid composition in mole fraction with keys 'H2O' and 'CO2' is returned. #TODO make list?
"""
sample = self.preprocess_sample(sample)
if isinstance(temperature, float) or isinstance(temperature, int):
pass
else:
raise InputError("temp must be type float or int")
if isinstance(pressure, float) or isinstance(pressure, int):
pass
else:
raise InputError("presure must be type float or int")
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
flsystem_wtper = 100 * fluid_mass / (fluid_mass + melts.get_mass_of_phase(xmlout, phase_name='Liquid'))
if fluid_mass > 0.0:
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
fluid_comp_H2O = fluid_comp['Water']
fluid_comp_CO2 = fluid_comp['Carbon Dioxide']
else:
fluid_comp_H2O = 0
fluid_comp_CO2 = 0
feasible = melts.set_bulk_composition(bulk_comp) #reset
if verbose == False:
return {'CO2': fluid_comp_CO2, 'H2O': fluid_comp_H2O}
if verbose == True:
return {'CO2': fluid_comp_CO2, 'H2O': fluid_comp_H2O, 'FluidMass_grams': fluid_mass, 'FluidProportion_wt': flsystem_wtper}
def calculate_saturation_pressure(self, sample, temperature, verbose=False, **kwargs):
"""
Calculates the saturation pressure of a sample composition.
Parameters
----------
sample: dict, pandas Series
Compositional information on one sample. A single sample can be passed as a dict or pandas Series.
temperature: float or int
Temperature of the sample in degrees C.
verbose: bool
OPTIONAL: Default is False. If set to False, only the saturation pressure is returned. If set to True,
the saturation pressure, mass of fluid in grams, proportion of fluid in wt%, and H2O and CO2 concentrations
in the fluid in mole fraction are all returned in a dict.
Returns
-------
float or dict
If verbose is set to False: Saturation pressure in bars.
If verbose is set to True: dict of all calculated values.
"""
sample = self.preprocess_sample(sample)
bulk_comp_orig = sample
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
feasible = melts.set_bulk_composition(bulk_comp)
#Coarse search
fluid_mass = 0
pressureMPa = 2000 #NOTE that pressure is in MPa for MagmaSat calculations but reported in bars.
while fluid_mass <= 0:
pressureMPa -= 100
if pressureMPa <= 0:
break
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
pressureMPa+=100
#Refined search 1
feasible = melts.set_bulk_composition(bulk_comp)
fluid_mass = 0
while fluid_mass <= 0:
pressureMPa -= 10
if pressureMPa <= 0:
break
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
pressureMPa += 10
#Refined search 2
feasible = melts.set_bulk_composition(bulk_comp)
fluid_mass = 0
while fluid_mass <= 0:
pressureMPa -= 1
if pressureMPa <= 0:
break
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
if not np.isnan(pressureMPa):
satP = pressureMPa*10 #convert pressure to bars
flmass = fluid_mass
flsystem_wtper = 100 * fluid_mass / (fluid_mass + melts.get_mass_of_phase(xmlout, phase_name='Liquid'))
flcomp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
try:
flH2O = flcomp['Water']
except:
flH2O = 0.0
try:
flCO2 = flcomp['Carbon Dioxide']
except:
flCO2 = 0.0
else:
flmass = np.nan
flsystem_wtper = np.nan
flH2O = np.nan
flCO2 = np.nan
warnmessage = 'Calculation failed.'
feasible = melts.set_bulk_composition(bulk_comp_orig) #this needs to be reset always!
if verbose == False:
try:
warnings.warn(warnmessage)
except:
pass
return satP
elif verbose == True:
try:
warnings.warn(warnmessage)
except:
pass
return {"SaturationP_bars": satP, "FluidMass_grams": flmass, "FluidProportion_wt": flsystem_wtper,
"XH2O_fl": flH2O, "XCO2_fl": flCO2}
def calculate_isobars_and_isopleths(self, sample, temperature, pressure_list, isopleth_list=None, print_status=False, **kwargs):
"""
Calculates isobars and isopleths at a constant temperature for a given sample. Isobars can be calculated
for any number of pressures.
Parameters
----------
sample: dict
Dictionary with values for sample composition as oxides in wt%.
temperature: float
Temperature in degrees C.
pressure_list: list
List of all pressure values at which to calculate isobars, in bars.
isopleth_list: list
OPTIONAL: Default value is None in which case only isobars will be calculated.
List of all fluid compositions in mole fraction H2O (XH2Ofluid) at which to calculate isopleths. Values can range from 0 to 1.
print_status: bool
OPTIONAL: Default is False. If set to True, progress of the calculations will be printed to the terminal.
Returns
-------
pandas DataFrame objects
Two pandas DataFrames are returned; the first has isobar data, and the second has isopleth data. Columns in the
isobar dataframe are 'Pressure', 'H2O_liq', and 'CO2_liq', corresponding to pressure in bars and dissolved H2O
and CO2 in the liquid in wt%. Columns in the isopleth dataframe are 'XH2O_fl', 'H2O_liq', and 'CO2_liq',
corresponding to the mole fraction of H2O in the fluid and the dissolved H2O and CO2 in the liquid in wt%.
"""
sample = self.preprocess_sample(sample)
bulk_comp = sample
if isinstance(pressure_list, list):
P_vals = pressure_list
else:
raise InputError("pressure_list must be of type list")
if isopleth_list is None:
has_isopleths = False
iso_vals = [0, 0.25, 0.5, 0.75, 1]
elif isinstance(isopleth_list, list):
iso_vals = isopleth_list
has_isopleths = True
if 0 not in iso_vals:
iso_vals[0:0] = [0]
if 1 not in iso_vals:
iso_vals.append(1)
else:
raise InputError("isopleth_list must be of type list")
isobar_data = []
isopleth_data = []
for X in iso_vals:
isopleth_data.append([X, 0.0, 0.0])
H2O_val = 0.0
CO2_val = 0.0
fluid_mass = 0.0
# Calculate equilibrium phase assemblage for all P/T conditions, check if saturated in fluid...
for i in P_vals:
guess = 0.0
if print_status == True:
print("Calculating isobar at " + str(i) + " bars")
for X in iso_vals:
if print_status == True and has_isopleths == True:
print("Calculating isopleth at " + str(X))
saturated_vols = self.calculate_dissolved_volatiles(sample=sample, temperature=temperature, pressure=i, H2O_guess=guess, X_fluid=X)
isobar_data.append([i, saturated_vols['H2O'], saturated_vols['CO2']])
isopleth_data.append([X, saturated_vols['H2O'], saturated_vols['CO2']])
guess = saturated_vols['H2O']
if print_status == True:
print("Done!")
isobars_df = pd.DataFrame(isobar_data, columns=['Pressure', 'H2O_liq', 'CO2_liq'])
isopleths_df = pd.DataFrame(isopleth_data, columns=['XH2O_fl', 'H2O_liq', 'CO2_liq'])
feasible = melts.set_bulk_composition(self.bulk_comp_orig) #reset
if has_isopleths == True:
return isobars_df, isopleths_df
if has_isopleths == False:
return isobars_df, None #TODO should this just return isobars_df? Currently this requires two items to unpack, I think?
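# Example call (a hedged sketch with hypothetical inputs; note that two objects are always
# returned, the second being None when no isopleth_list is passed):
#   isobars, isopleths = ms.calculate_isobars_and_isopleths(sample=sample, temperature=1000.0,
#                                                           pressure_list=[1000.0, 2000.0, 3000.0],
#                                                           isopleth_list=[0.25, 0.5, 0.75])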
def calculate_degassing_path(self, sample, temperature, pressure='saturation', fractionate_vapor=0.0, init_vapor=0.0, **kwargs):
"""
Calculates degassing path for one sample
Parameters
----------
sample: dict
Dictionary with values for sample composition as oxides in wt%. If pulling from an uploaded file
with data for many samples, first call get_sample_oxide_comp() to get the sample desired. Then pass
the result into this function.
temperature: float
Temperature at which to calculate degassing paths, in degrees C.
pressure: float
OPTIONAL. The pressure at which to begin the degassing calculations. Default value is 'saturation', which runs the
calculation with the initial pressure at the saturation pressure. If a pressure greater than the saturation pressure
is input, the calculation will start at saturation, since this is the first pressure at which any degassing will
occur.
fractionate_vapor: float
OPTIONAL. Proportion of vapor removed at each pressure step.
Default value is 0.0 (completely closed-system degassing). Specifies the type of calculation performed, either
closed system (0.0) or open system (1.0) degassing. If any value between 0.0 and 1.0 is chosen, the user can also specify the
'init_vapor' argument (see below). A value in between 0 and 1 will remove that proportion of vapor at each step.
For example, for a value of 0.2, the calculation will remove 20% of the vapor and retain 80% of the vapor at each
pressure step.
init_vapor: float
OPTIONAL. Default value is 0.0. Specifies the amount of vapor (in wt%) coexisting with the melt before
degassing.
Returns
-------
pandas DataFrame object
"""
sample = self.preprocess_sample(sample)
sample = normalize(sample)
bulk_comp_orig = sample
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
feasible = melts.set_bulk_composition(bulk_comp)
# Get saturation pressure
data = self.calculate_saturation_pressure(sample=sample, temperature=temperature, verbose=True)
if pressure == 'saturation' or pressure >= data["SaturationP_bars"]:
SatP_MPa = data["SaturationP_bars"] / 10.0
else:
SatP_MPa = pressure / 10.0
#If pressure is low, use smaller P steps
if SatP_MPa >= 50:
MPa_step = 10
elif SatP_MPa < 50:
MPa_step = 1
P_array = np.arange(1.0, SatP_MPa, MPa_step)
P_array = -np.sort(-P_array)
fl_wtper = data["FluidProportion_wt"]
if fractionate_vapor == 0 or fractionate_vapor == 0.0: #closed-system
while fl_wtper <= init_vapor:
output = melts.equilibrate_tp(temperature, SatP_MPa)
(status, temperature, p, xmlout) = output[0]
fl_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
liq_mass = melts.get_mass_of_phase(xmlout, phase_name='Liquid')
fl_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid')
fl_wtper = 100 * fl_mass / (fl_mass+liq_mass)
try:
bulk_comp["H2O"] += fl_comp["H2O"]*0.0005
except:
bulk_comp["H2O"] = bulk_comp["H2O"] * 1.1
try:
bulk_comp["CO2"] += fl_comp["CO2"]*0.0005
except:
bulk_comp["CO2"] = bulk_comp["CO2"] * 1.1
bulk_comp = normalize(bulk_comp)
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, P_array)
pressure_list = []
H2Oliq = []
CO2liq = []
H2Ofl = []
CO2fl = []
fluid_wtper = []
for i in range(len(output)):
(status, temperature, p, xmlout) = output[i]
liq_comp = melts.get_composition_of_phase(xmlout, phase_name='Liquid')
fl_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid')
liq_mass = melts.get_mass_of_phase(xmlout, phase_name='Liquid')
fl_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
fl_wtper = 100 * fl_mass / (fl_mass+liq_mass)
pressure_list.append(p * 10.0)
try:
H2Oliq.append(liq_comp["H2O"])
except:
H2Oliq.append(0)
try:
CO2liq.append(liq_comp["CO2"])
except:
CO2liq.append(0)
try:
H2Ofl.append(fl_comp["H2O"])
except:
H2Ofl.append(0)
try:
CO2fl.append(fl_comp["CO2"])
except:
CO2fl.append(0)
fluid_wtper.append(fl_wtper)
try:
bulk_comp["H2O"] = liq_comp["H2O"]
except:
bulk_comp["H2O"] = 0
try:
bulk_comp["CO2"] = liq_comp["CO2"]
except:
bulk_comp["CO2"] = 0
fluid_wtper.append(fl_wtper)
feasible = melts.set_bulk_composition(bulk_comp_orig)
fl_wtper = data["FluidProportion_wt"]
exsolved_degassing_df = pd.DataFrame(list(zip(pressure_list, H2Oliq, CO2liq, H2Ofl, CO2fl, fluid_wtper)),
columns =['Pressure_bars', 'H2O_liq', 'CO2_liq', 'H2O_fl', 'CO2_fl', 'FluidProportion_wt'])
return exsolved_degassing_df
else:
pressure = []
H2Oliq = []
CO2liq = []
H2Ofl = []
CO2fl = []
fluid_wtper = []
for i in P_array:
fl_mass = 0.0
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, i)
(status, temperature, p, xmlout) = output[0]
liq_comp = melts.get_composition_of_phase(xmlout, phase_name='Liquid')
fl_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
liq_mass = melts.get_mass_of_phase(xmlout, phase_name='Liquid')
fl_mass = melts.get_mass_of_phase(xmlout, phase_name='Fluid')
fl_wtper = 100 * fl_mass / (fl_mass+liq_mass)
if fl_mass > 0:
pressure.append(p * 10.0)
try:
H2Oliq.append(liq_comp["H2O"])
except:
H2Oliq.append(0)
try:
CO2liq.append(liq_comp["CO2"])
except:
CO2liq.append(0)
try:
H2Ofl.append(fl_comp["Water"])
except:
H2Ofl.append(0)
try:
CO2fl.append(fl_comp["Carbon Dioxide"])
except:
CO2fl.append(0)
fluid_wtper.append(fl_wtper)
try:
bulk_comp["H2O"] = liq_comp["H2O"] + (bulk_comp["H2O"] - liq_comp["H2O"]) * (1-fractionate_vapor)
except:
bulk_comp["H2O"] = 0
try:
bulk_comp["CO2"] = liq_comp["CO2"] + (bulk_comp["CO2"] - liq_comp["CO2"]) * (1-fractionate_vapor)
except:
bulk_comp["CO2"] = 0
bulk_comp = normalize(bulk_comp)
feasible = melts.set_bulk_composition(bulk_comp_orig) #this needs to be reset always!
open_degassing_df = pd.DataFrame(list(zip(pressure, H2Oliq, CO2liq, H2Ofl, CO2fl, fluid_wtper)),
columns =['Pressure_bars', 'H2O_liq', 'CO2_liq', 'XH2O_fl', 'XCO2_fl', 'FluidProportion_wt'])
return open_degassing_df
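# Hedged usage sketch (the object and sample names below are assumptions, not from this file):
#   closed_df  = model.calculate_degassing_path(sample=my_sample, temperature=1100.0)
#   open_df    = model.calculate_degassing_path(sample=my_sample, temperature=1100.0, fractionate_vapor=1.0)
#   partial_df = model.calculate_degassing_path(sample=my_sample, temperature=1100.0, fractionate_vapor=0.2)
# fractionate_vapor=0.2 removes 20% of the vapor at each pressure step, as described in the docstring above.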
#-----------MAGMASAT PLOTTING FUNCTIONS-----------#
def smooth_isobars_and_isopleths(isobars=None, isopleths=None):
"""
Takes in a dataframe with calculated isobar and isopleth information (e.g., output from calculate_isobars_and_isopleths)
and smooths the data for plotting.
Parameters
----------
isobars: pandas DataFrame
OPTIONAL. DataFrame object containing isobar information as calculated by calculate_isobars_and_isopleths.
isopleths: pandas DataFrame
OPTIONAL. DataFrame object containing isopleth information as calculated by calculate_isobars_and_isopleths.
Returns
-------
pandas DataFrame
DataFrame with x and y values for all isobars and all isopleths. Useful if a user wishes to do custom plotting
with isobar and isopleth data rather than using the built-in `plot_isobars_and_isopleths()` function.
"""
if isobars is not None:
P_vals = isobars.Pressure.unique()
isobars_lists = isobars.values.tolist()
# add zero values to volatiles list
isobars_lists.append([0.0, 0.0, 0.0, 0.0])
isobars = {}
# do some data smoothing
for pressure in P_vals:
Pxs = [item[1] for item in isobars_lists if item[0] == pressure]
Pys = [item[2] for item in isobars_lists if item[0] == pressure]
try:
np.seterr(divide='ignore', invalid='ignore') #turn off numpy warning
## calculate polynomial
Pz = np.polyfit(Pxs, Pys, 3)
Pf = np.poly1d(Pz)
## calculate new x's and y's
Px_new = np.linspace(Pxs[0], Pxs[-1], 50)
Py_new = Pf(Px_new)
# Save x's and y's
isobars.update({str(pressure)+"xvals": Px_new})
isobars.update({str(pressure)+"yvals": Py_new})
except:
isobars.update({str(pressure)+"xvals": Pxs})
isobars.update({str(pressure)+"yvals": Pys})
if isopleths is not None:
XH2O_vals = isopleths.XH2O_fl.unique()
isopleths_lists = isopleths.values.tolist()
isopleths = {}
for Xfl in XH2O_vals:
Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl]
Xys = [item[2] for item in isopleths_lists if item[0] == Xfl]
try:
## calculate polynomial
Xz = np.polyfit(Xxs, Xys, 2)
Xf = np.poly1d(Xz)
## calculate new x's and y's
Xx_new = np.linspace(Xxs[0], Xxs[-1], 50)
Xy_new = Xf(Xx_new)
# Save x's and y's
isopleths.update({str(Xfl)+"xvals":Xx_new})
isopleths.update({str(Xfl)+"yvals":Xy_new})
except:
isopleths.update({str(Xfl)+"xvals":Xxs})
isopleths.update({str(Xfl)+"yvals":Xys})
np.seterr(divide='warn', invalid='warn') #turn numpy warning back on
if isobars is not None:
if isopleths is not None:
return pd.DataFrame(isobars), pd.DataFrame(isopleths)
else:
return pd.DataFrame(isobars)
else:
if isopleths is not None:
isopleth_frame = pd.DataFrame.from_dict(isopleths, orient='index')
isopleth_frame = isopleth_frame.transpose()
print(isopleth_frame)
return pd.DataFrame(isopleth_frame)
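# Hedged usage sketch: feed calculate_isobars_and_isopleths output into the smoother above and
# plot the smoothed curves manually. The argument and variable names are illustrative assumptions.
def _example_smooth_and_plot(isobars, isopleths):
    smooth_isobars, smooth_isopleths = smooth_isobars_and_isopleths(isobars, isopleths)
    for P in isobars.Pressure.unique():
        plt.plot(smooth_isobars[str(P) + "xvals"], smooth_isobars[str(P) + "yvals"])
    plt.xlabel('H$_2$O wt%')
    plt.ylabel('CO$_2$ wt%')
    return smooth_isobars, smooth_isopleths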
def plot(isobars=None, isopleths=None, degassing_paths=None, custom_H2O=None, custom_CO2=None,
isobar_labels=None, isopleth_labels=None, degassing_path_labels=None, custom_labels=None,
extend_isobars_to_zero=True, **kwargs):
"""
Custom automatic plotting of model calculations in VESIcal.
Isobars, isopleths, and degassing paths can be plotted. Labels can be specified for each.
Any combination of isobars, isopleths, and degassing paths can be plotted.
Parameters
----------
isobars: pandas DataFrame or list
OPTIONAL. DataFrame object containing isobar information as calculated by calculate_isobars_and_isopleths. Or a list
of DataFrame objects.
isopleths: pandas DataFrame or list
OPTIONAL. DataFrame object containing isopleth information as calculated by calculate_isobars_and_isopleths. Or a list
of DataFrame objects.
degassing_paths: list
OPTIONAL. List of DataFrames with degassing information as generated by calculate_degassing_path().
custom_H2O: list
OPTIONAL. List of groups of H2O values to plot as points. For example myfile.data['H2O'] is one group of H2O values.
Must be passed with custom_CO2 and must be same length as custom_CO2.
custom_CO2: list
OPTIONAL. List of groups of CO2 values to plot as points. For example myfile.data['CO2'] is one group of CO2 values.
Must be passed with custom_H2O and must be same length as custom_H2O.
isobar_labels: list
OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted line will be given the generic
legend name of "Isobars n", with n referring to the nth isobars passed. Isobar pressure is given in parentheses.
The user can pass their own labels as a list of strings. If more than one set of isobars is passed, the labels should
refer to each set of isobars, not each pressure.
isopleth_labels: list
OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted isopleth will be given the generic
legend name of "Isopleth n", with n referring to the nth isopleths passed. Isopleth XH2O values are given in
parentheses. The user can pass their own labels as a list of strings. If more than one set of isopleths is passed,
the labels should refer to each set of isopleths, not each XH2O value.
degassing_path_labels: list
OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted line will be given the generic
legend name of "Pathn", with n referring to the nth degassing path passed. The user can pass their own labels
as a list of strings.
custom_labels: list
OPTIONAL. Labels for the plot legend. Default is None, in which case each group of custom points will be given the
generic legend name of "Customn", with n referring to the nth degassing path passed. The user can pass their own labels
as a list of strings.
extend_isobars_to_zero: bool
If True (default), isobars will be extended to zero, even if there is a finite solubility at zero partial pressure.
Returns
-------
matplotlib object
Plot with x-axis as H2O wt% in the melt and y-axis as CO2 wt% in the melt. Isobars, or lines of
constant pressure at which the sample magma composition is saturated, and isopleths, or lines of constant
fluid composition at which the sample magma composition is saturated, are plotted if passed. Degassing
paths, or the concentration of dissolved H2O and CO2 in a melt equilibrated along a path of decreasing
pressure, is plotted if passed.
"""
if custom_H2O is not None:
if custom_CO2 is None:
raise InputError("If x data is passed, y data must also be passed.")
else:
if len(custom_H2O) == len(custom_CO2):
pass
else:
raise InputError("x and y data must be same length")
if custom_CO2 is not None:
if custom_H2O is None:
raise InputError("If y data is passed, x data must also be passed.")
plt.figure(figsize=(12,8))
plt.xlabel('H$_2$O wt%')
plt.ylabel('CO$_2$ wt%')
labels = []
if isobars is not None:
if isinstance(isobars, pd.DataFrame):
isobars = [isobars]
for i in range(len(isobars)):
P_vals = isobars[i].Pressure.unique()
isobars_lists = isobars[i].values.tolist()
# add zero values to volatiles list
isobars_lists.append([0.0, 0.0, 0.0, 0.0])
np.seterr(divide='ignore', invalid='ignore') #turn off numpy warning
warnings.filterwarnings("ignore", message="Polyfit may be poorly conditioned")
# do some data smoothing
P_iter = 0
for pressure in P_vals:
P_iter += 1
Pxs = [item[1] for item in isobars_lists if item[0] == pressure]
Pys = [item[2] for item in isobars_lists if item[0] == pressure]
if len(isobars) > 1:
if P_iter == 1:
P_list = [int(i) for i in P_vals]
if isinstance(isobar_labels, list):
labels.append(str(isobar_labels[i]) + ' (' + ', '.join(map(str, P_list)) + " bars)")
else:
labels.append('Isobars ' + str(i+1) + ' (' + ', '.join(map(str, P_list)) + " bars)")
else:
labels.append('_nolegend_')
try:
np.seterr(divide='ignore', invalid='ignore') #turn off numpy warning
## calculate polynomial
Pz = np.polyfit(Pxs, Pys, 3)
Pf = np.poly1d(Pz)
## calculate new x's and y's
Px_new = np.linspace(Pxs[0], Pxs[-1], 50)
Py_new = Pf(Px_new)
if extend_isobars_to_zero == True and Px_new[0]*Py_new[0] != 0.0:
if Px_new[0] > Py_new[0]:
Px_newer = np.zeros(np.shape(Px_new))
import pytest
import numpy as np
from functools import reduce
from myml.dl import Tensor
def test_add():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a + b).array == a.array + b.array).all()
assert ((b + 2).array == b.array + 2).all()
assert ((2 + b).array == b.array + 2).all()
def test_sub():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a - b).array == a.array - b.array).all()
assert ((b - 2).array == b.array - 2).all()
assert ((2 - b).array == 2 - b.array).all()
def test_mul():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a * b).array == a.array * b.array).all()
assert ((b * 2).array == b.array * 2).all()
assert ((2 * b).array == b.array * 2).all()
def test_div():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a / b).array == a.array / b.array).all()
assert ((b / 2).array == b.array / 2).all()
assert ((2 / b).array == 2 / b.array).all()
def test_pow():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a ** b).array == a.array ** b.array).all()
assert ((b ** 2).array == b.array ** 2).all()
assert ((2 ** b).array == 2 ** b.array).all()
def test_matmul():
a = Tensor([[1, 2], [3, 4]])
b = Tensor([[0, -1], [-1, 0]])
assert ((a @ b).array == a.array @ b.array).all()
def test_unary():
a = Tensor([[1, 2], [3, 4]])
assert ((-a).array == -a.array).all()
assert ((+a).array == +a.array).all()
def test_get_array():
a = Tensor([[1, 2], [3, 4]])
assert id(a.array) == id(a._array)
with pytest.raises(AttributeError):
a.array = np.array([0, 1])
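# Hedged extra check (assumption: Tensor arithmetic composes exactly as the tests above show),
# which also exercises the functools.reduce import at the top of this file.
def test_reduce_add():
    tensors = [Tensor([[1, 2], [3, 4]]) for _ in range(3)]
    total = reduce(lambda x, y: x + y, tensors)
    assert (total.array == 3 * np.array([[1, 2], [3, 4]])).all()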
import math as mt
import numpy as np
import byxtal.find_csl_dsc as fcd
import byxtal.integer_manipulations as iman
import byxtal.bp_basis as bpb
import byxtal.pick_fz_bpl as pfb
import numpy.linalg as nla
import ovito.data as ovd
from ovito.pipeline import StaticSource, Pipeline
import ovito.modifiers as ovm
from ovito.data import CutoffNeighborFinder
def find_int_solns(a_vec, b_vec):
"""
Given two basis vectors (a_vec and b_vec) in the primitive basis,
find the third basis vector (c_vec) such that the matrix
[a_vec, b_vec, c_vec] is a valid basis.
All the components of the vectors are integers and
the determinant of the matrix must be equal to **1**.
Parameters
-----------------
a_vec: numpy.array
The first basis vector. Must be an integer array.
b_vec: numpy.array
The second basis vector. Must be an integer array.
Returns
------------
l_p2_p1: numpy.array, (3X3, must be an integer array)
A 3x3 numpy array of integers that forms the new basis for the lattice.
"""
a1 = a_vec[0]
a2 = a_vec[1]
a3 = a_vec[2]
b1 = b_vec[0]
b2 = b_vec[1]
b3 = b_vec[2]
a = a2*b3 - a3*b2
b = -(a1*b3 - a3*b1)
c = a1*b2 - a2*b1
d = 1
a = int(a)
b = int(b)
c = int(c)
d = int(d)
p = mt.gcd(a, b)
if p == 0:
if c == 1:
y1 = 0
y2 = 0
y3 = 1
# l_p2_p1 = (np.hstack((a_vec, b_vec, np.array([[y1],[y2],[y3]]))))
l_p2_p1 = np.dstack((a_vec, b_vec, np.array([y1, y2, y3]))).squeeze()
det1 = nla.det(l_p2_p1)
if ((np.abs(det1)-1) > 1e-10):
raise Exception('Error with Diophantine solution')
else:
if det1 == -1:
l_p2_p1[:, 2] = -l_p2_p1[:, 2]
else:
raise Exception('Error with boundary-plane indices')
else:
a1 = int(a/p)
b1 = int(b/p)
# Let u0 and v0 any solution of a'u + b'v = c
int_soln1 = bpb.lbi_dioph_soln(a1, b1, c)
u0 = int(int_soln1[0])
v0 = int(int_soln1[1])
# z0, t0 any solution of cz + pt = d
int_soln2 = bpb.lbi_dioph_soln(c, p, d)
z0 = int(int_soln2[0])
t0 = int(int_soln2[1])
# x0, y0 any solution of a'x + b'y = t0
int_soln3 = bpb.lbi_dioph_soln(a1, b1, t0)
x0 = int(int_soln3[0])
y0 = int(int_soln3[1])
# The general solution of ax + by + cz = d is :
# x = x0 + b'k - u0m
# y = y0 - a'k - v0m
# z = z0 + pm with k and m any integer in \mathbb{Z}
tn1 = 10
ival = np.arange(-(tn1), tn1+1)
k1, m1 = np.meshgrid(ival, ival)
k1 = k1.flatten()
m1 = m1.flatten()
x = x0 + b1*k1 - u0*m1
y = y0 - a1*k1 - v0*m1
z = z0 + p*m1
l2_val = x**2 + y**2 + z**2
ind1 = np.where(l2_val == np.min(l2_val))[0][0]
y1 = x[ind1]
y2 = y[ind1]
y3 = z[ind1]
l_p2_p1 = (np.vstack((a_vec, b_vec, np.array([y1, y2, y3])))).transpose()
det1 = nla.det(l_p2_p1)
if (np.abs(det1-1) > (1e-10*np.max(np.abs(l_p2_p1)))):
raise Exception('Error with Diophantine solution')
else:
if det1 == -1:
l_p2_p1[:, 2] = -l_p2_p1[:, 2]
return (l_p2_p1).astype(int)
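# Hedged usage sketch: complete a right-handed integer basis from two lattice vectors and
# confirm the determinant is +1. The input vectors are illustrative assumptions.
def _example_find_int_solns():
    a_vec = np.array([1, 0, 0])
    b_vec = np.array([0, 1, 0])
    l_p2_p1 = find_int_solns(a_vec, b_vec)
    assert abs(nla.det(l_p2_p1) - 1) < 1e-10
    return l_p2_p1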
def compute_rCut(l2d_bp_po):
"""
Given two vectors in the interface plane, compute the
maximum of the norms of the two vectors.
Parameters
-----------------
l2d_bp_po: numpy.array
The two vectors, expressed in the **po** reference frame,
that define the two-dimensional box vectors of the interface.
Returns
------------
rCut: float
The cut-off radius for replicating the lattice basis.
"""
bv1 = l2d_bp_po[:, 0]
bv2 = l2d_bp_po[:, 1]
l1 = nla.norm(bv1)
l2 = nla.norm(bv2)
l3 = nla.norm((bv1+bv2))
rCut = np.max([l1, l2, l3])
return rCut
def compute_orientation(l2d_bp_po):
"""
Find the orientation of the lattice **l_po1_go**, such that the
vectors in the **p1** lattice given by **l2d_bp_po** line up
with the x-axis in the xy-plane.
Parameters
-----------------
l2d_bp_po: numpy.array
The two vectors, expressed in the **po** reference frame,
that define the two-dimensional box vectors of the interface.
Returns
------------
nla.inv(l_po1_go): numpy.array
Orientation of the p1 lattice that lines up with the x-axis in the xy-plane.
"""
bv1 = l2d_bp_po[:, 0]
bv2 = l2d_bp_po[:, 1]
l1 = nla.norm(bv1)
l2 = nla.norm(bv2)
# Orientation
l1_uvec = bv1/l1
l2_uvec = bv2/l2
x_vec = np.copy(l1_uvec)
y1_vec = np.copy(l2_uvec)
z_vec = np.cross(x_vec, y1_vec)
z_vec = z_vec/nla.norm(z_vec)
y_vec = np.cross(z_vec, x_vec)
# l_po1_go = (x_vec.row_join(y_vec)).row_join(z_vec)
l_po1_go = (np.vstack((x_vec, y_vec, z_vec))).transpose()
return nla.inv(l_po1_go)
def compute_hkl_p(l2d_bp_po, l_p_po):
"""
Find the **(hkl)** indices of the plane defined by the vectors
in the matrix **l2d_bp_po**.
Parameters
-----------------
l2d_bp_po: numpy.array
The two vectors, expressed in the **po** reference frame,
that define the two-dimensional box vectors of the interface.
l_p_po: numpy array
The primitive basis vectors of the underlying lattice in the orthogonal
reference frame.
Returns
------------
nuI_vec_rp: numpy.array
The (hkl) indices of the plane defined by the vectors
in the matrix **l2d_bp_po**.
"""
avec_po = l2d_bp_po[:, 0]
bvec_po = l2d_bp_po[:, 1]
nvec = np.cross(avec_po, bvec_po)
nu_vec = nvec/nla.norm(nvec)
l_rp_po = fcd.reciprocal_mat(l_p_po)
l_po_rp = nla.inv(l_rp_po)
nu_vec_rp = l_po_rp.dot(nu_vec)
nuI_vec_rp, tm1 = iman.int_approx(nu_vec_rp)
nuI_vec_rp = nuI_vec_rp.astype(int)
return nuI_vec_rp
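# Hedged usage sketch: Miller indices of the plane spanned by two po-frame vectors for a
# simple-cubic lattice (l_p_po = identity). The inputs are illustrative assumptions.
def _example_compute_hkl_p():
    l_p_po = np.eye(3)
    l2d_bp_po = np.array([[1.0, 0.0],
                          [0.0, 1.0],
                          [0.0, 0.0]])  # two vectors spanning the basal plane
    return compute_hkl_p(l2d_bp_po, l_p_po)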
def num_rep_2d(xvec, yvec, rCut):
"""
Find the number of replications necessary such that the
2D-circle of radius r_cut at the center of the primitive-cell
lies completely inside the super-cell.
Parameters
-----------------
xvec : numpy array
The basis vector in x direction in x-z plane
yvec : numpy array
The basis vector in z direction in x-z plane
rCut : float
Cut-off radius for computing Delaunay triangulations
Returns
------------
[int(m_x), int(m_y)] : list
int(m_x) is the number of replications in x direction, int(m_y)
is the number of replication in z direction.
"""
b1 = np.array([0])
# xvec1 = xvec.col_join(Matrix([0]))
xvec1 = np.hstack((xvec, b1))
# yvec1 = yvec.col_join(Matrix([0]))
yvec1 = np.hstack((yvec, b1))
# c_vec_norm = (xvec1.cross(yvec1)).norm()
c_vec_norm = nla.norm(np.cross(xvec1, yvec1))
# d_y = c_vec_norm/(yvec.norm())
d_y = c_vec_norm/(nla.norm(yvec))
# d_x = c_vec_norm/(xvec.norm())
d_x = c_vec_norm/(nla.norm(xvec))
# m_x = (rCut/d_y).ceiling()
m_x = np.ceil(rCut/d_y)
# m_y = (rCut/d_x).ceiling()
m_y = np.ceil(rCut/d_x)
return [int(m_x), int(m_y)]
def replicate_pts(l_bpb_po, rCut):
"""
Replicate the basis, enough times, such that the 2D-circle
of radius r_cut is completely inside the replicated set.
Parameters
-----------------
l_bpb_po: numpy.array
2D basis vector in po
rCut : float
Cut-off radius for computing Delaunay triangulations
Returns
------------
twoD_pts: numpy.ndarray
The coordinates of the replicated points
"""
bx = l_bpb_po[:, 0]
by = l_bpb_po[:, 1]
mx, my = num_rep_2d(bx, by, rCut)
mx1 = np.arange(-mx, mx+1)
my1 = np.arange(-my, my+1)
mx2, my2 = np.meshgrid(mx1, my1)
mx3 = mx2.flatten()
my3 = my2.flatten()
num1 = np.size(mx3)
twoD_pts = np.zeros((num1, 2))
bx1 = bx.reshape(1, 2)
by1 = by.reshape(1, 2)
for ct1 in range(num1):
mx_val = mx3[ct1]
my_val = my3[ct1]
twoD_pts[ct1, :] = (mx_val*bx1 + my_val*by1)
return twoD_pts
def change_basis(twoD_pts, l_bpb_po):
"""
Express the points in **twoD_pts** array in the reference frame
of **l_bpb_po**.
Parameters
-----------------
twoD_pts: numpy.ndarray
The coordinates of the replicated points
l_bpb_po: numpy.array
2D basis vector in po
Returns
------------
(mat1.dot(twoD_pts.transpose())).transpose(): numpy.ndarray
The coordinates of replicated points in the reference frame of l_bpb_po
"""
# mat1 = (l_bpb_po).inv()
mat1 = nla.inv(l_bpb_po)
# return np.array((mat1*(Matrix(twoD_pts.transpose()))).transpose(), dtype='double')
return (mat1.dot(twoD_pts.transpose())).transpose()
def cut_box_pts(twoD_pts, tol=1e-8):
"""
Remove all the points, in 2D, that lie outside the 2D box.
Parameters
-----------------
twoD_pts: numpy.ndarray
The coordinates of the replicated points
tol: float
User defined tolerance
Returns
------------
twoD_pts[tind1,:]: numpy.ndarray
The coordinates of replicated points that lie inside the 2D box
"""
tx1 = twoD_pts[:, 0]
ty1 = twoD_pts[:, 1]
cond1 = (tx1 >= 0-tol)
cond2 = (tx1 <= 1+tol)
cond3 = (ty1 >= 0-tol)
cond4 = (ty1 <= 1+tol)
tind1 = np.where(cond1 & cond2 & cond3 & cond4)[0]
return twoD_pts[tind1, :]
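# Hedged sketch of the replicate -> change-basis -> cut pipeline defined above.
# The 2D box basis below is an illustrative assumption, not data from byxtal.
def _example_replicate_and_cut():
    l_bpb_po = np.array([[1.0, 0.2],
                         [0.0, 1.0]])          # columns are the 2D box vectors
    rCut = compute_rCut(l_bpb_po)              # the norm-based cut-off also works for 2D columns
    pts_po = replicate_pts(l_bpb_po, rCut)     # replicate lattice points around the origin
    pts_p = change_basis(pts_po, l_bpb_po)     # express them in the box basis
    return cut_box_pts(pts_p)                  # keep only points inside the unit box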
def knnsearch_v1(X, Y):
"""
Given two set of points **X** and **Y**, find the nearest neighbor in
**X** for each query point in **Y** and return the indices of
the nearest neighbors in Idx, a column vector. Idx has the same
number of rows as Y. Additionally returns the column vector **D** that
contains the nearest-neighbor distances.
Parameters
-----------------
X: numpy.ndarray
set of points
Y: numpy.ndarray
set of points
Returns
------------
Idx: numpy.ndarray
the indices of the nearest neighbors
dval: numpy.ndarray
nearest neighbor distances
"""
num_x = np.shape(X)[0]
num_y = np.shape(Y)[0]
import numpy as np
from torch.utils.data import Dataset
class GridSampler(Dataset):
"""
Adapted from NiftyNet
"""
def __init__(self, data, window_size, border):
self.array = data
self.locations = self.grid_spatial_coordinates(
self.array,
window_size,
border,
)
def __len__(self):
return len(self.locations)
def __getitem__(self, index):
# Assume 3D
location = self.locations[index]
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
window = self.array[:,i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]
#window = window[np.newaxis, ...] # add channels dimension
sample = dict(
image=window,
location=location,
)
return sample
@staticmethod
def _enumerate_step_points(starting, ending, win_size, step_size):
starting = max(int(starting), 0)
ending = max(int(ending), 0)
win_size = max(int(win_size), 1)
step_size = max(int(step_size), 1)
if starting > ending:
starting, ending = ending, starting
sampling_point_set = []
while (starting + win_size) <= ending:
sampling_point_set.append(starting)
starting = starting + step_size
additional_last_point = ending - win_size
sampling_point_set.append(max(additional_last_point, 0))
sampling_point_set = np.unique(sampling_point_set).flatten()
if len(sampling_point_set) == 2:
sampling_point_set = np.append(
sampling_point_set, np.round(np.mean(sampling_point_set)))
_, uniq_idx = np.unique(sampling_point_set, return_index=True)
return sampling_point_set[np.sort(uniq_idx)]
@staticmethod
def grid_spatial_coordinates(array, window_shape, border):
shape = array.shape[1:]
num_dims = len(shape)
grid_size = [
max(win_size - 2 * border, 0)
for (win_size, border)
in zip(window_shape, border)
]
steps_along_each_dim = [
GridSampler._enumerate_step_points(
starting=0,
ending=shape[i],
win_size=window_shape[i],
step_size=grid_size[i],
)
for i in range(num_dims)
]
starting_coords = np.asanyarray(np.meshgrid(*steps_along_each_dim))
starting_coords = starting_coords.reshape((num_dims, -1)).T
n_locations = starting_coords.shape[0]
# prepare the output coordinates matrix
spatial_coords = np.zeros((n_locations, num_dims * 2), dtype=np.int32)
spatial_coords[:, :num_dims] = starting_coords
for idx in range(num_dims):
spatial_coords[:, num_dims + idx] = (
starting_coords[:, idx]
+ window_shape[idx]
)
max_coordinates = np.max(spatial_coords, axis=0)[num_dims:]
assert np.all(max_coordinates <= shape[:num_dims]), \
"window size greater than the spatial coordinates {} : {}".format(
max_coordinates, shape)
return spatial_coords
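# Hedged usage sketch: sliding-window sampling of a single-channel 3D volume.
# The array shape, window size and border below are illustrative assumptions.
def _example_grid_sampler():
    volume = np.random.rand(1, 64, 64, 64).astype(np.float32)
    sampler = GridSampler(volume, window_size=(32, 32, 32), border=(4, 4, 4))
    first = sampler[0]
    return first['image'].shape, first['location']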
class GridAggregator:
"""
Adapted from NiftyNet
"""
def __init__(self, data, window_border):
self.window_border = window_border
self.output_array = np.full(
data.shape[1:],
fill_value=0.0,
)
@staticmethod
def crop_batch(windows, location, border=None):
if not border:
return windows, location
location = location.astype(int)
batch_shape = windows.shape
spatial_shape = batch_shape[2:] # ignore batch and channels dim
num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
if np.any(location < 0):
import os
import numpy as np
import pandas as pd
import yaml
from . import model as model_lib
from . import training, tensorize, io_local
def main():
#Turn off warnings:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
###Load training data - Put the path to your own data here
training_data_path = "/root/training/training_preprocessed.csv"
training_df = pd.read_csv(training_data_path)
###Dump all Peptides containing selenocystein
training_df = training_df.loc[~training_df.modified_sequence.str.contains("U")]
print("CSV Loaded, shape is {}.".format(training_df.shape))
###Load Untrained Retention Time Model and prepare its training data
iRT_model_dir = "/root/training/iRT/"
iRT_model, iRT_config = model_lib.load(iRT_model_dir, trained=False)
iRT_callbacks = training.get_callbacks(iRT_model_dir)
iRT_raw_mean = training_df.uRT.mean()
iRT_raw_var = training_df.uRT.var()
iRT_config['iRT_rescaling_mean'] = float(iRT_raw_mean)
iRT_config['iRT_rescaling_var'] = float(iRT_raw_var)
with open(iRT_model_dir + "config_new.yml", "w") as config_outfile:
yaml.dump(iRT_config, config_outfile)
###Load Untrained Fragmentation Model and prepare its training data
msms_model_dir = "/root/training/msms/"
msms_model, msms_config = model_lib.load(msms_model_dir, trained=False)
msms_callbacks = training.get_callbacks(msms_model_dir)
#The intensity lists are already in proper order, but might have some missing values and need to be padded to the correct length
#(Only a peptide of the maximal length 29 will have 522 values, but all lists need to be of this length)
intensities_length = 522
print("iRT and Fragmentation Intensity Models Loaded.")
#Compile the models once, and then call fit separately - useful if you lack memory or space and have to partition your training data
training.compile_model(iRT_model, iRT_config)
training.compile_model(msms_model, msms_config)
training_tensorized = tensorize.csv(training_df[['modified_sequence', 'collision_energy', 'precursor_charge']], nlosses=3)
print("CSV Tensorized.")
training_tensorized['prediction'] = np.reshape(
np.asarray((training_df.uRT - iRT_raw_mean) / np.sqrt(iRT_raw_var))
# -*- coding: utf-8 -*-
"""
This module is used for calculations of the orthonormalization matrix for
the boundary wavelets.
The boundary_wavelets.py package is licensed under the MIT "Expat" license.
Copyright (c) 2019: <NAME> and <NAME>.
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
from scipy.integrate import simps
import boundwave.boundary_wavelets as BW
# =============================================================================
# Functions
# =============================================================================
def integral(J, k, l, wavelet_coef, phi):
r'''
This function calculates the integral (16) numerically.
INPUT:
J : int
The scale.
k : int
The translation for the first function.
l : int
The translation for the second function.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`.
For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
phi : numpy.float64
The phi function, can be made with
`pywt.Wavelet(wavelet).wavefun(level=15)`.
OUTPUT:
out : int
The value of the integral.
'''
a = int(len(wavelet_coef) / 2)
OneStep = len(phi) // (2 * a - 1)
phiNorm = np.linalg.norm(BW.downsample(phi, 0, OneStep, J))
phi1 = BW.downsample(phi, k, OneStep, J) / phiNorm
phi2 = BW.downsample(phi, l, OneStep, J) / phiNorm
phiProd = phi1 * phi2
Integ = simps(phiProd)
return Integ
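# Hedged usage sketch: build phi for Daubechies-2 with PyWavelets, as the docstring above
# suggests, and evaluate one overlap integral. The scale J=3 is an illustrative assumption.
def _example_integral(J=3):
    import pywt
    wavelet_coef = np.flipud(pywt.Wavelet('db2').dec_lo)
    phi = pywt.Wavelet('db2').wavefun(level=15)[0]
    return integral(J, k=0, l=0, wavelet_coef=wavelet_coef, phi=phi)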
def m_alpha_beta(alpha, beta, J, wavelet_coef, inte_matrix, Side):
r'''
This function calculates an entry in the martix :math:`M` (15).
INPUT:
alpha : int
alpha
beta : int
beta
J : int
The scale.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`. For
Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo`).
inte_matrix : numpy.float64
A matrix with the values for the integrals calculated with
the function :py:func:`integral` for k and l in the
interval [-2*a+2,0] or [2**J-2*a+1,2**J-1].
Side : str
`'L'` for left interval boundary and `'R'` for right
interval boundary.
OUTPUT:
M : numpy.float64
Entry (alpha,beta) of the matrix M
'''
a = int(len(wavelet_coef) / 2)
Moment = BW.moments(wavelet_coef, a - 1)
M = 0
if Side == 'L':
interval = range(-2 * a + 2, 1)
i = 0
for k in interval:
j = 0
for m in interval:
M += (BW.inner_product_phi_x(alpha, 0, k, Moment) *
BW.inner_product_phi_x(beta, 0, m, Moment) *
inte_matrix[i, j])
j += 1
i += 1
elif Side == 'R':
interval = range(2**J - 2 * a + 1, 2**J)
i = 0
for k in interval:
j = 0
for m in interval:
M += (BW.inner_product_phi_x(alpha, 0, k, Moment) *
BW.inner_product_phi_x(beta, 0, m, Moment) *
inte_matrix[i, j] * 2**(-J * (alpha + beta)))
j += 1
i += 1
else:
print('You must choose a side')
return M
def ortho_matrix(J, wavelet_coef, phi):
r'''
This function finds the orthogonality matrix :math:`A`. First
uses the functions :py:func:`m_alpha_beta` and :py:func:`integral`
to make the matrix M. Then computes a Cholesky decomposition,
which is then inverted.
INPUT:
J : int
The scale.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to
:math:`\sqrt{2}`. For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
phi : numpy.float64
The phi function, can be made with
`pywt.Wavelet(wavelet).wavefun(level=15)`.
OUTPUT:
AL : numpy.float64
Left orthonormalisation matrix; to be used in
:py:func:`boundwave.boundary_wavelets.boundary_wavelets` or
:py:func:`boundwave.fourier_boundary_wavelets.fourier_boundary_wavelets`.
AR : numpy.float64
Right orthonormalisation matrix; to be used in
:py:func:`boundwave.boundary_wavelets.boundary_wavelets` or
:py:func:`boundwave.fourier_boundary_wavelets.fourier_boundary_wavelets`.
'''
a = int(len(wavelet_coef) / 2)
ML = np.zeros((a, a))
MR = np.zeros((a, a))
InteL = np.zeros((2 * a - 1, 2 * a - 1))
k = 0
for i in range(-2 * a + 2, 1):
m = 0
for j in range(-2 * a + 2, i + 1):
InteL[k, m] = integral(J, i, j, wavelet_coef, phi)
InteL[m, k] = InteL[k, m]
m += 1
k += 1
InteR = np.zeros((2 * a - 1, 2 * a - 1))
k = 0
for i in range(2**J - 2 * a + 1, 2**J):
m = 0
for j in range(2**J - 2 * a + 1, i + 1):
InteR[k, m] = integral(J, i, j, wavelet_coef, phi)
InteR[m, k] = InteR[k, m]
m += 1
k += 1
for i in range(a):
for j in range(i + 1):
ML[i, j] = m_alpha_beta(i, j, J, wavelet_coef, InteL, 'L')
ML[j, i] = ML[i, j]
for i in range(a):
for j in range(i + 1):
MR[i, j] = m_alpha_beta(i, j, J, wavelet_coef, InteR, 'R')
MR[j, i] = MR[i, j]
h = 2**(J * np.arange(a))
CL = np.linalg.cholesky(ML)
import cvxpy as cp
import matplotlib.pyplot as matplt
from utils import *
from test_ddpg import *
from ddpg_alg_spinup import ddpg
import tensorflow as tf
from env_mra import ResourceEnv
import numpy as np
import time
import pickle
import scipy.io
from parameters import *
from functions import *
import multiprocessing
def admm_static_algorithm(SliceNum, UENum, RESNum, alpha, weight,):
################################## static allocation ####################################################
real_utility_static = np.zeros(SliceNum)
for i in range(SliceNum):
tmp_utility, tmp_real_utility, x = np.zeros(RESNum), np.zeros(RESNum), np.zeros([SliceNum, RESNum, UENum], dtype=np.float32)
for j in range(RESNum):
tmp_utility[j], x[i, j], tmp_real_utility[j] = \
simple_static_alogrithm(z_minus_u=Rmax/SliceNum,
alpha=alpha[i, j],
weight=weight[i],
UENum=UENum,
minReward=minReward/maxTime)
real_utility_static[i] = np.mean(tmp_real_utility) * maxTime
real_utility_static = np.sum(real_utility_static)
return real_utility_static, 0
################################## static allocation ####################################################
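# Hedged usage sketch: run the static baseline on a small toy problem.
# The alpha and weight values are illustrative assumptions.
def _example_static_baseline():
    SliceNum, UENum, RESNum = 2, 3, 2
    alpha = 0.5 * np.ones((SliceNum, RESNum, UENum))
    weight = np.ones(SliceNum)
    utility, _ = admm_static_algorithm(SliceNum, UENum, RESNum, alpha, weight)
    return utility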
def admm_ddpg_algorithm(SliceNum, UENum, RESNum, alpha, weight, INDEX):
################################## ddpg allocation ####################################################
z = np.zeros([SliceNum, RESNum], dtype=np.float32)
u = np.zeros([SliceNum, RESNum], dtype=np.float32)
x = np.zeros([SliceNum, RESNum, UENum], dtype=np.float32)
z_minus_u = z - u
sum_utility, sum_real_utility, sum_gap, sum_x = [], [], [], []
for ite in range(ADMM_iter):
aug_utility = np.zeros(SliceNum)
real_utility = np.zeros(SliceNum)
# x-update in each slice #####################################
for i in range(SliceNum):
aug_utility[i], tmpx, real_utility[i] = load_and_run_policy(agent_id=INDEX[i],
alpha=alpha[i],
weight=weight[i],
UENum=UENum,
RESNum=RESNum,
aug_penalty=z_minus_u[i])
x[i] = Rmax * np.mean(tmpx, axis=0) # mean for all maxTime
sumx = np.sum(x, axis=2) # the sum resource of all users in each slice
for j in range(RESNum):
# z-update #####################################
z[:, j] = optimize_z_function(sumx=sumx[:, j], u=u[:, j],SliceNum=SliceNum)
# u-update #####################################
u = u + (sumx - z)
z_minus_u = np.clip(z - u, Rmin, Rmax)
import joblib
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from utils.bayesian_optimization import Bayesian_Optimization, UtilityFunction
from utils.utils import plot_gp, posterior
from sklearn.preprocessing import normalize
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = "Arial"
file_names = ['results/P_01/optimizer.joblib',
'results/P_02/optimizer.joblib',
'results/P_03/optimizer_no_habituation.joblib',
'results/P_04/optimizer.joblib',
'results/P_05/optimizer.joblib',
]
optimizer_ls = []
for file_name in file_names:
optimizer_ls.append(joblib.load(file_name))
optimizer_habituation = joblib.load('results/P_03/optimizer.joblib')
utility = UtilityFunction(function="ei", hyperparam=1e-1) #0 to 0.1
x = np.linspace(-1.1, 1.3, 50).reshape(-1, 1)
pbounds = {'x': (-1, 1.2)}
kernel = Matern(length_scale=0.2, length_scale_bounds=(1e-1, 1e1), nu=1.5) + WhiteKernel(noise_level=0.5)
# optimizer._gp.set_params(kernel=kernel, normalize_y=True)
LOG_PATH = 'results/'
pos = [0]
neg = [1,2,3,4]
axis= plt.subplot(111)
# Habituation on P_03
x_obs = np.array([[res["params"]["x"]] for res in optimizer_habituation.res])
y_obs = np.array([res["target"] for res in optimizer_habituation.res])
axis.scatter(x_obs, -y_obs, c='black')
axis.set_title('Individual statistical models for participant P_03',
pad=50,
fontdict={'size': 15, 'fontname':'Arial'},
weight="bold")
color = ['#001fcc', '#4d67ff','#8093ff']
for i in range(3):
x_obs = np.array([[res["params"]["x"]] for res in optimizer_habituation.res[:7+i]])
y_obs = np.array([res["target"] for res in optimizer_habituation.res[:7+i]])
axis.scatter(x_obs[-1], -y_obs[-1], c=color[i])
# Normalize data
y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)
y_obs = y_obs.flatten()
mu, sigma = posterior(optimizer_habituation, x_obs, y_obs, x)
# Rescale data
mu = mu*norm
mu = -mu
sigma = -sigma
sigma = sigma*norm
y_obs = y_obs*norm
axis.plot(x, mu, c=color[i])
plt.legend(['After 7 iterations', '8th iteration', '9th iteration'])
right_side = axis.spines["right"]
right_side.set_visible(False)
right_side = axis.spines["top"]
right_side.set_visible(False)
plt.xticks([-1,0.1,1.2],['stranger','50% mother', 'mother'])
plt.show()
plt.savefig('results/image_paper2.png', transparent=True)
# Code to generate Figure 4
mu_ls = []
std_ls = []
y_obs_ls = []
x_obs_ls = []
for it in range(len(optimizer_ls)):
x_obs = np.array([[res["params"]["x"]] for res in optimizer_ls[it].res])
y_obs = np.array([res["target"] for res in optimizer_ls[it].res])
# Normalize data
y_obs, norm = normalize(y_obs.reshape(1, -1), return_norm=True)
y_obs = y_obs.flatten()
mu, sigma = posterior(optimizer_ls[it], x_obs, y_obs, x)
# Rescale data
mu = mu*norm
if it in neg:
mu = -mu
sigma = -sigma
sigma = sigma*norm
y_obs = y_obs*norm
mu_ls.append(mu)
std_ls.append(sigma)
y_obs_ls.append(y_obs)
x_obs_ls.append(x_obs)
# plotting all data distinguishing by participant and polarity
participant = ['--', '-.', '-', '.', ':']
# legend = ['BP_15', 'BP_16', 'BP_18', 'BP_18', 'BP_20', 'BP_20', 'BP_21', 'BP_21']
legend = ['P_01', 'P_02', 'P_03', 'P_04', 'P_05']
polarity = ['red', 'blue', 'blue', 'blue', 'blue']
axis = plt.subplot(111)
new_legend = []
for it in range(len(file_names)):
axis.plot(x, mu_ls[it], participant[it], c=polarity[it])
new_legend.append(legend[it])
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([mu_ls[it] - std_ls[it]/y_obs_ls[it].shape[0], (mu_ls[it] + std_ls[it]/y_obs_ls[it].shape[0])[::-1]]),
alpha=.1, c=polarity[it], fc='k', ec='None', label='95% confidence interval')
plt.legend(new_legend)
plt.xticks([-1,0.1,1.2],['stranger','50% mother', 'mother'])
axis.set_title('Individual statistical models to the participants target response',
pad=30,
fontdict={'size': 15, 'fontname':'Arial'},
weight="bold")
plt.ylabel('Nc Amplitude ($\\mu$V)', fontdict={'fontname':'Arial'})
right_side = axis.spines["right"]
right_side.set_visible(False)
right_side = axis.spines["top"]
right_side.set_visible(False)
plt.show()
# Creating Figure 4
from matplotlib import gridspec
y=None
hps = [1e-1, 5e-1, 5e0]
x_obs = np.array([[res["params"]["x"]] for res in optimizer_ls[2].res[:4]])
y_obs = np.array([res["target"] for res in optimizer_ls[2].res[:4]])
#List of functions :
# colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1)
# text_normalizer(s)
# compute_bag_of_words(text)
# print_most_frequent(bow, vocab, gender, n=20)
# model_test(model,X_train,y_train,X_test,y_test, full_voc, displayResults = True, displayColors = False)
# predictors(df, feature, model, modelname, displayResults = True, displayColors = False)
# test_external_data(text, full_voc, model)
# combine_features(model_text, model_pic, model_color, data, voc_text, voc_pic, voc_color, acc_text, acc_pic, acc_color)
import pandas as pd
import numpy as np
from IPython.display import display
import re
#graph
from bokeh.plotting import output_notebook, figure, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimage
from matplotlib import pyplot as plt
# 3D visualization
import pylab
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from IPython.display import display
from sklearn import linear_model, metrics
from sklearn import naive_bayes
from sklearn import neural_network
#Definition of function for data exploration for the colors
#feature : 'sidebar_color', 'link_color'
# The colorsGraphs function plots the most used colors by gender in 3 bar graphs
def colorsGraphs(df, feature, genderConfidence = 1, nbToRemove = 1):
dfCol = df.loc[:,['gender:confidence', 'gender', feature]] #Remove weird values : E+17...
dfColFiltered = dfCol[(dfCol['gender:confidence'] >= genderConfidence)&((dfCol[feature]).str.contains('E\+') != True)]
dfColFilteredMale = dfColFiltered[dfColFiltered['gender'] == 'male']
dfColFilteredFemale = dfColFiltered[dfColFiltered['gender'] == 'female']
dfColFilteredBrand = dfColFiltered[dfColFiltered['gender'] == 'brand']
colorMale = dfColFilteredMale[feature]
colorFemale = dfColFilteredFemale[feature]
colorBrand = dfColFilteredBrand[feature]
listMale = list(colorMale.values.flatten())
listFemale = list(colorFemale.values.flatten())
listBrand = list(colorBrand.values.flatten())
nCommon = 30
commonFemale = Counter(listFemale).most_common(nCommon)
commonMale = Counter(listMale).most_common(nCommon)
commonBrand = Counter(listBrand).most_common(nCommon)
#print(commonBrand[0])
del commonFemale[0:nbToRemove]
del commonMale[0:nbToRemove]
del commonBrand[0:nbToRemove]
colorsFemale = [x[0] for x in commonFemale]
colorsMale = [x[0] for x in commonMale]
colorsBrand = [x[0] for x in commonBrand]
colorsNumbFemale = [x[1] for x in commonFemale]
colorsNumbMale = [x[1] for x in commonMale]
colorsNumbBrand = [x[1] for x in commonBrand]
colorsHexFemale = ['#' + x + '000000' for x in colorsFemale]
colorsHexFemale = [x[0:7] for x in colorsHexFemale]
colorsHexMale = ['#' + x + '000000' for x in colorsMale]
colorsHexMale = [x[0:7] for x in colorsHexMale]
colorsHexBrand = ['#' + x + '000000' for x in colorsBrand]
colorsHexBrand = [x[0:7] for x in colorsHexBrand]
rangeColFemale = list(range(len(colorsFemale)))
rangeColMale = list(range(len(colorsMale)))
rangeColBrand = list(range(len(colorsBrand)))
fig1, ax1 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColFemale, colorsNumbFemale, bar_width, label = 'Female', color = colorsHexFemale)
plt.yticks(rangeColFemale, colorsHexFemale)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Females for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
fig2, ax2 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColMale, colorsNumbMale, bar_width, label = 'Male', color = colorsHexMale)
plt.yticks(rangeColMale, colorsHexMale)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Males for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
fig3, ax3 = plt.subplots()
bar_width = 0.5
rects1 = plt.barh(rangeColBrand, colorsNumbBrand, bar_width, label = 'Brand', color = colorsHexBrand)
plt.yticks(rangeColBrand, colorsHexBrand)
plt.xlabel('Color')
plt.ylabel(feature)
plt.title('Most used colors by Brands for ' + feature + '\n' + str(nbToRemove) + ' most common occurences removed')
plt.tight_layout()
plt.show()
def text_normalizer(s):
#we will normalize the text by using strings, lowercases and removing all the punctuations
s = str(s)
s = s.lower()
s = re.sub('\W\s',' ',s)
s = re.sub('\s\W',' ',s)
#s = re.sub('\s[^[@\w]]',' ',s) #to keep the @ symbols used for "addressing"
#s = re.sub('@',' search_arobass_sign ',s) #The CountVectorizer cant handle the @
s = re.sub('\s+',' ',s) #replace double spaces with single spaces
return s
# The compute_bag_of_words function returns a table with the # of occurence of a word in the text
# and a vocabulary of all the different words
def compute_bag_of_words(text):
vectorizer = CountVectorizer()
vectors = vectorizer.fit_transform(text)
vocabulary = vectorizer.get_feature_names()
return vectors, vocabulary
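# Hedged usage sketch: normalize a couple of illustrative strings and build their bag-of-words.
def _example_bag_of_words():
    tweets = ["Hello, world!", "Another tweet... with PUNCTUATION"]
    normalized = [text_normalizer(t) for t in tweets]
    bow, vocab = compute_bag_of_words(normalized)
    return bow.toarray(), vocab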
#Exploration of which words are most used by which gender
def print_most_frequent(bow, vocab, gender, n=20, feature = 'text'):
switcher = {
'all_text' : "text",
'pic_text' : "profile picture features",
}
featureText = switcher.get(feature, 'text')
color_idx = ['brand', 'female', 'male']
color_table = ['#4a913c', '#f5abb5', '#0084b4']
label_table = ['Most used words by brands for ' + featureText, 'Most used words by females for ' + featureText, 'Most used words by males for ' + featureText]
idx = np.argsort(bow.sum(axis=0))
idx_most_used = np.zeros(n)
occurence_number = np.zeros(n)
words_most_used = ["" for x in range(n)]
for i in range(0,n):
idx_most_used[i] = idx[0, -1-i]
words_most_used[i] = vocab[np.int64(idx_most_used[i])]
occurence_number[i] = (bow.sum(axis=0))[0,idx_most_used[i]]
#print(vocab[j])
fig, ax = plt.subplots()
bar_width = 0.5
word_number = np.arange(n)+1
rects1 = plt.barh(word_number,occurence_number, bar_width, label = label_table[color_idx.index(gender)], color = color_table[color_idx.index(gender)])
plt.yticks(word_number,words_most_used)
plt.ylabel('Most used words')
plt.xlabel('Number of occurences')
plt.title(label_table[color_idx.index(gender)])
plt.tight_layout()
plt.show()
# Definition of functions for data analysis and classification
# The model_test function is used to extract the best word predictors and
# anti-predictors for each gender. The model used must have a coef_ attribute
# representing the weight of each word
def model_test(model,X_train,y_train,X_test,y_test, full_voc, displayResults = True, displayColors = False, featureIntent = 'text'):
switcher = {
'all_text' : "text",
'pic_text' : "profile picture features",
'link_color' : "theme color",
}
featureText = switcher.get(featureIntent, '')
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
# compute MSE
mse = metrics.mean_squared_error(y_test,y_pred)
print('mse: {:.4f}'.format(mse))
# Prints the accuracy of the gender prediction
acc = model.score(X_test,y_test)
print('score: ', acc)
if(displayResults&hasattr(model,'coef_')):
# W contain the weight for each predictor, for each gender
W = model.coef_
# Male Predictors
print('Best 20 male predictors:')
idx_male = np.argsort((W[2,:]))
weight_male_pred = np.zeros(20)
male_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_male[-1-i]
weight_male_pred[i] = W[2,j]
male_pred_label[i] = full_voc[j]
fig1, ax1 = plt.subplots()
bar_width = 0.5
pred_number = np.arange(20)+1
if(displayColors):
colorsHexMale = ['#' + x + '000000' for x in male_pred_label]
colorsHexMale = [x[0:7] for x in colorsHexMale]
rects1 = plt.barh(pred_number,weight_male_pred, bar_width, label = 'Male Predictors', color = colorsHexMale)
plt.yticks(pred_number,colorsHexMale)
else:
rects1 = plt.barh(pred_number,weight_male_pred, bar_width, label = 'Male Predictors', color = '#0084b4')
plt.yticks(pred_number,male_pred_label)
plt.xlabel('Predictor')
plt.ylabel('Weight')
plt.title('Best 20 male predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Male Anti-Predictors
print('Best 20 male anti-predictors for ' + featureText + ':')
idx_male = np.argsort(-(W[2,:]))
weight_male_antipred = np.zeros(20)
male_antipred_label = ["" for x in range(20)]
for i in range(20):
j = idx_male[-1-i]
weight_male_antipred[i] = W[2,j]
male_antipred_label[i] = full_voc[j]
fig2, ax2 = plt.subplots()
bar_width = 0.5
pred_number = np.arange(20)+1
if(displayColors):
colorsHexMaleAnti = ['#' + x + '000000' for x in male_antipred_label]
colorsHexMaleAnti = [x[0:7] for x in colorsHexMaleAnti]
rects1 = plt.barh(pred_number,weight_male_antipred, bar_width, label = 'Male Anti-Predictors', color = colorsHexMaleAnti)
plt.yticks(pred_number,colorsHexMaleAnti)
else:
rects1 = plt.barh(pred_number,weight_male_antipred, bar_width, label = 'Male Anti-Predictors', color = '#0084b4')
plt.yticks(pred_number,male_antipred_label)
plt.xlabel('Anti-Predictor')
plt.ylabel('Weight')
plt.title('Best 20 male anti-predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Female Predictors
print('Best 20 female predictors for ' + featureText + ':')
idx_female = np.argsort((W[1,:]))
weight_female_pred = np.zeros(20)
female_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_female[-1-i]
weight_female_pred[i] = W[1,j]
female_pred_label[i] = full_voc[j]
fig3, ax3 = plt.subplots()
bar_width = 0.5
pred_number = np.arange(20)+1
if(displayColors):
colorsHexFemale = ['#' + x + '000000' for x in female_pred_label]
colorsHexFemale = [x[0:7] for x in colorsHexFemale]
rects1 = plt.barh(pred_number,weight_female_pred, bar_width, label = 'Female Predictors', color = colorsHexFemale)
plt.yticks(pred_number,colorsHexFemale)
else:
rects1 = plt.barh(pred_number,weight_female_pred, bar_width, label = 'Female Predictors', color = '#f5abb5')
plt.yticks(pred_number,female_pred_label)
plt.xlabel('Predictor')
plt.ylabel('Weight')
plt.title('Best 20 Female predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Female Anti-Predictors
print('Best 20 Female anti-predictors for ' + featureText + ':')
idx_female = np.argsort(-(W[1,:]))
weight_female_antipred = np.zeros(20)
female_antipred_label = ["" for x in range(20)]
for i in range(20):
j = idx_female[-1-i]
weight_female_antipred[i] = W[1,j]
female_antipred_label[i] = full_voc[j]
fig4, ax4 = plt.subplots()
bar_width = 0.5
pred_number = np.arange(20)+1
if(displayColors):
colorsHexFemaleAnti = ['#' + x + '000000' for x in female_antipred_label]
colorsHexFemaleAnti = [x[0:7] for x in colorsHexFemaleAnti]
rects1 = plt.barh(pred_number,weight_female_antipred, bar_width, label = 'Female Anti-Predictors', color = colorsHexFemaleAnti)
plt.yticks(pred_number,colorsHexFemaleAnti)
else:
rects1 = plt.barh(pred_number,weight_female_antipred, bar_width, label = 'Female Anti-Predictors', color = '#f5abb5')
plt.yticks(pred_number,female_antipred_label)
plt.xlabel('Anti-Predictor')
plt.ylabel('Weight')
plt.title('Best 20 Female anti-predictors for ' + featureText)
plt.tight_layout()
plt.show()
# Brand Predictors
print('Best 20 brand predictors for ' + featureText + ':')
idx_brand = np.argsort((W[0,:]))
weight_brand_pred = np.zeros(20)
brand_pred_label = ["" for x in range(20)]
for i in range(20):
j = idx_brand[-1-i]
weight_brand_pred[i] = W[0,j]
brand_pred_label[i] = full_voc[j]
fig5, ax5 = plt.subplots()
bar_width = 0.5
pred_number = np.arange(20)+1
"""
Created on Mon Jun 24 10:52:25 2019
Reads a wav file with SDR IQ capture of FM stations located in :
https://mega.nz/#F!3UUUnSiD!WLhWZ3ff4f4Pi7Ko_zcodQ
Also: https://drive.google.com/open?id=1itb_ePcPeDRXrVBIVL-1Y3wrt8yvpW28
Also generates an IQ stream of sinusoids sampled at 2.4 Msps to simulate a similar spectrum;
this might be useful in an early stage, to work with a known signal.
@author: f.divruno
"""
#!/usr/bin/env python3
import wave
import numpy as np
import matplotlib.pyplot as plt
# ------------ PARAMETERS
N = 5000 #number of samples to read
nAverages = 10 # number of averages
#folder = "C:\\Users\\F.Divruno\\Downloads\\" # change this to your folder.
#filename = "17-22-08_89100kHz.wav"
folder = "FM_station_data/"
filename = "17-22-08_89100kHz.wav"
CenterFrequency = 89100e3 # Centre freq of the recording is the number at the end of the filename.
# ------------
#Read an IQ recording of FM stations:
wav_in = wave.open(folder+ filename, "r")
sampleFreq = 2.4e6 # sample freq of the SDR to acquire this signals
timeMax = N/sampleFreq # duration of the loaded signals
t = np.linspace(0,timeMax,N)
# Read the file
I = np.zeros(N)
Q = np.zeros(N)
for n in range(N):
aux = wav_in.readframes(1)
I[n] = aux[0]
Q[n] = aux[1]
# Plot the spectrum of the recording
I_fft = np.fft.fftshift(np.fft.fft(I))
Q_fft = np.fft.fftshift(np.fft.fft(Q))
V = abs(I_fft-1j*Q_fft)
freq = np.fft.fftshift(np.fft.fftfreq(N,d=1/sampleFreq) + CenterFrequency)
plt.figure()
plt.subplot(2,1,1)
plt.plot(freq/1e6,20*np.log10(V))
plt.xlabel('MHz')
plt.ylabel('dB')
plt.title('Recording')
#test signal generated with tone signals
I = np.zeros(N)
Q = np.zeros(N)
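# Hedged sketch (assumption, not the original script's continuation): fill I/Q with a few
# complex tones at illustrative offsets from the centre frequency to mimic FM carriers.
tone_offsets = [-400e3, -100e3, 200e3]  # Hz, illustrative
for f0 in tone_offsets:
    I += np.cos(2*np.pi*f0*t)
    Q += np.sin(2*np.pi*f0*t)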
# pylint: disable=invalid-name,too-many-lines
"""Density estimation functions for ArviZ."""
import warnings
import numpy as np
from scipy.fftpack import fft
from scipy.optimize import brentq
from scipy.signal import convolve, convolve2d, gaussian # pylint: disable=no-name-in-module
from scipy.sparse import coo_matrix
from scipy.special import ive # pylint: disable=no-name-in-module
from ..utils import _cov, _dot, _stack, conditional_jit
__all__ = ["kde"]
def _bw_scott(x, x_std=None, **kwargs): # pylint: disable=unused-argument
"""Scott's Rule."""
if x_std is None:
x_std = np.std(x)
bw = 1.06 * x_std * len(x) ** (-0.2)
return bw
def _bw_silverman(x, x_std=None, **kwargs): # pylint: disable=unused-argument
"""Silverman's Rule."""
if x_std is None:
x_std = np.std(x)
q75, q25 = np.percentile(x, [75, 25])
x_iqr = q75 - q25
a = min(x_std, x_iqr / 1.34)
bw = 0.9 * a * len(x) ** (-0.2)
return bw
def _bw_isj(x, grid_counts=None, x_std=None, x_range=None):
"""Improved Sheather-Jones bandwidth estimation.
Improved Sheather and Jones method as explained in [1]_.
This is an internal version intended to be used by the KDE estimator.
When used internally computation time is saved because things like minimums,
maximums and the grid are pre-computed.
References
----------
.. [1] Kernel density estimation via diffusion.
<NAME>, <NAME>, and <NAME>.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
x_len = len(x)
if x_range is None:
x_min = np.min(x)
x_max = np.max(x)
x_range = x_max - x_min
# Relative frequency per bin
if grid_counts is None:
x_std = np.std(x)
grid_len = 256
grid_min = x_min - 0.5 * x_std
grid_max = x_max + 0.5 * x_std
grid_counts, _, _ = histogram(x, grid_len, (grid_min, grid_max))
else:
grid_len = len(grid_counts) - 1
grid_relfreq = grid_counts / x_len
# Discrete cosine transform of the data
a_k = _dct1d(grid_relfreq)
k_sq = np.arange(1, grid_len) ** 2
a_sq = a_k[range(1, grid_len)] ** 2
t = _root(_fixed_point, x_len, args=(x_len, k_sq, a_sq), x=x)
h = t ** 0.5 * x_range
return h
def _bw_experimental(x, grid_counts=None, x_std=None, x_range=None):
"""Experimental bandwidth estimator."""
bw_silverman = _bw_silverman(x, x_std=x_std)
bw_isj = _bw_isj(x, grid_counts=grid_counts, x_range=x_range)
return 0.5 * (bw_silverman + bw_isj)
def _bw_taylor(x):
"""Taylor's rule for circular bandwidth estimation.
This function implements a rule-of-thumb for choosing the bandwidth of
a von Mises kernel density estimator that assumes the underlying
distribution is von Mises as introduced in [1]_.
It is analogous to Scott's rule for the Gaussian KDE.
Circular bandwidth has a different scale from linear bandwidth.
Unlike linear scale, low bandwidths are associated with oversmoothing
while high values are associated with undersmoothing.
References
----------
.. [1] <NAME> (2008). Automatic bandwidth selection for circular
density estimation.
Computational Statistics and Data Analysis, 52, 7, 3493–3500.
"""
x_len = len(x)
kappa = _kappa_mle(x)
num = 3 * x_len * kappa ** 2 * ive(2, 2 * kappa)
den = 4 * np.pi ** 0.5 * ive(0, kappa) ** 2
return (num / den) ** 0.4
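# Hedged usage sketch: Taylor's circular bandwidth on angles drawn from a von Mises
# distribution. The data below are illustrative, not part of the module.
def _example_bw_taylor(seed=0):
    rng = np.random.default_rng(seed)
    theta = rng.vonmises(mu=0.0, kappa=2.0, size=500)
    return _bw_taylor(theta)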
_BW_METHODS_LINEAR = {
"scott": _bw_scott,
"silverman": _bw_silverman,
"isj": _bw_isj,
"experimental": _bw_experimental,
}
def _get_bw(x, bw, grid_counts=None, x_std=None, x_range=None):
"""Compute bandwidth for a given data `x` and `bw`.
Also checks `bw` is correctly specified.
Parameters
----------
x : 1-D numpy array
1 dimensional array of sample data from the
variable for which a density estimate is desired.
bw: int, float or str
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth.
Returns
-------
bw: float
Bandwidth
"""
if isinstance(bw, bool):
raise ValueError(
(
"`bw` must not be of type `bool`.\n"
"Expected a positive numeric or one of the following strings:\n"
"{}."
).format(list(_BW_METHODS_LINEAR.keys()))
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError("Numeric `bw` must be positive.\nInput: {:.4f}.".format(bw))
elif isinstance(bw, str):
bw_lower = bw.lower()
if bw_lower not in _BW_METHODS_LINEAR.keys():
raise ValueError(
(
"Unrecognized bandwidth method.\n" "Input is: {}.\n" "Expected one of: {}."
).format(bw_lower, list(_BW_METHODS_LINEAR.keys()))
)
bw_fun = _BW_METHODS_LINEAR[bw_lower]
bw = bw_fun(x, grid_counts=grid_counts, x_std=x_std, x_range=x_range)
else:
raise ValueError(
(
"Unrecognized `bw` argument.\n"
"Expected a positive numeric or one of the following strings:\n"
"{}."
).format(list(_BW_METHODS_LINEAR.keys()))
)
return bw
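# Hedged usage sketch: compare the linear bandwidth rules on an illustrative Gaussian sample.
def _example_bandwidths(seed=0):
    rng = np.random.default_rng(seed)
    x = rng.normal(size=1000)
    return {name: _get_bw(x, name) for name in _BW_METHODS_LINEAR}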
def _vonmises_pdf(x, mu, kappa):
"""Calculate vonmises_pdf."""
if kappa <= 0:
raise ValueError("Argument 'kappa' must be positive.")
pdf = 1 / (2 * np.pi * ive(0, kappa)) * np.exp(np.cos(x - mu) - 1) ** kappa
return pdf
def _a1inv(x):
"""Compute inverse function.
Inverse function of the ratio of the first and
zeroth order Bessel functions of the first kind.
Returns the value k, such that a1inv(x) = k, i.e. a1(k) = x.
"""
if 0 <= x < 0.53:
return 2 * x + x ** 3 + (5 * x ** 5) / 6
elif x < 0.85:
return -0.4 + 1.39 * x + 0.43 / (1 - x)
else:
return 1 / (x ** 3 - 4 * x ** 2 + 3 * x)
def _kappa_mle(x):
mean = _circular_mean(x)
kappa = _a1inv(np.mean(np.cos(x - mean)))
return kappa
def _dct1d(x):
"""Discrete Cosine Transform in 1 Dimension.
Parameters
----------
x : numpy array
1 dimensional array of values for which the
DCT is desired
Returns
-------
output : DTC transformed values
"""
x_len = len(x)
even_increasing = np.arange(0, x_len, 2)
odd_decreasing = np.arange(x_len - 1, 0, -2)
x = np.concatenate((x[even_increasing], x[odd_decreasing]))
w_1k = np.r_[1, (2 * np.exp(-(0 + 1j) * (np.arange(1, x_len)) * np.pi / (2 * x_len)))]
output = np.real(w_1k * fft(x))
return output
def _fixed_point(t, N, k_sq, a_sq):
"""Calculate t-zeta*gamma^[l](t).
Implementation of the function t-zeta*gamma^[l](t) derived from equation (30) in [1].
References
----------
.. [1] Kernel density estimation via diffusion.
       Botev, Z. I., Grotowski, J. F., and Kroese, D. P.
Ann. Statist. 38 (2010), no. 5, 2916--2957.
"""
k_sq = np.asfarray(k_sq, dtype=np.float64)
a_sq = np.asfarray(a_sq, dtype=np.float64)
l = 7
f = np.sum(np.power(k_sq, l) * a_sq * np.exp(-k_sq * np.pi ** 2 * t))
f *= 0.5 * np.pi ** (2.0 * l)
for j in np.arange(l - 1, 2 - 1, -1):
c1 = (1 + 0.5 ** (j + 0.5)) / 3
c2 = np.product(np.arange(1.0, 2 * j + 1, 2, dtype=np.float64))
c2 /= (np.pi / 2) ** 0.5
t_j = np.power((c1 * (c2 / (N * f))), (2.0 / (3.0 + 2.0 * j)))
f = np.sum(k_sq ** j * a_sq * np.exp(-k_sq * np.pi ** 2.0 * t_j))
f *= 0.5 * np.pi ** (2 * j)
out = t - (2 * N * np.pi ** 0.5 * f) ** (-0.4)
return out
def _root(function, N, args, x):
# The right bound is at most 0.01
found = False
N = max(min(1050, N), 50)
tol = 10e-12 + 0.01 * (N - 50) / 1000
while not found:
try:
bw, res = brentq(function, 0, 0.01, args=args, full_output=True, disp=False)
found = res.converged
except ValueError:
bw = 0
tol *= 2.0
found = False
if bw <= 0 or tol >= 1:
# warnings.warn(
# "Improved Sheather-Jones did not converge as expected. "
# "Using Silverman's rule instead.",
# Warning
# )
bw = (_bw_silverman(x) / np.ptp(x)) ** 2
return bw
return bw
def _check_type(x):
"""Check the input is of the correct type.
It only accepts numeric lists/numpy arrays of 1 dimension or something that
can be flattened to 1 dimension.
Parameters
----------
x : Object whose type is checked before computing the KDE.
Returns
-------
x : 1-D numpy array
If no error is thrown, a 1 dimensional array of
sample data from the variable for which a density estimate is desired.
"""
# Will raise an error if `x` can't be casted to numeric or flattened to one dimension.
try:
x = np.asfarray(x).flatten()
except Exception as e:
warnings.warn(
"The following exception occurred while trying to convert `x`"
"to a 1 dimensional float array."
)
raise e
x = x[np.isfinite(x)]
if x.size == 0:
raise ValueError("`x` does not contain any finite number.")
if x.size == 1:
raise ValueError("`x` is of length 1. Can't produce a KDE with only one data point.")
return x
def _check_custom_lims(custom_lims, x_min, x_max):
"""Check if `custom_lims` are of the correct type.
It accepts numeric lists/tuples of length 2.
Parameters
----------
custom_lims : Object whose type is checked.
Returns
-------
    custom_lims : list
        Validated custom limits, with `None` entries replaced by the data bounds.
"""
if not isinstance(custom_lims, (list, tuple)):
raise TypeError(
(
"`custom_lims` must be a numeric list or tuple of length 2.\n"
"Not an object of {}."
).format(type(custom_lims))
)
if len(custom_lims) != 2:
raise AttributeError("`len(custom_lims)` must be 2, not {}.".format(len(custom_lims)))
any_bool = any(isinstance(i, bool) for i in custom_lims)
if any_bool:
raise TypeError("Elements of `custom_lims` must be numeric or None, not bool.")
custom_lims = list(custom_lims) # convert to a mutable object
if custom_lims[0] is None:
custom_lims[0] = x_min
if custom_lims[1] is None:
custom_lims[1] = x_max
    all_numeric = all(isinstance(i, (int, float, np.integer, np.floating)) for i in custom_lims)
if not all_numeric:
raise TypeError(
("Elements of `custom_lims` must be numeric or None.\n" "At least one of them is not.")
)
if not custom_lims[0] < custom_lims[1]:
raise AttributeError("`custom_lims[0]` must be smaller than `custom_lims[1]`.")
return custom_lims
def _get_grid(
x_min, x_max, x_std, extend_fct, grid_len, custom_lims, extend=True, bound_correction=False
):
"""Compute the grid that bins the data used to estimate the density function.
Parameters
----------
x_min : float
Minimum value of the data
x_max: float
Maximum value of the data.
x_std: float
Standard deviation of the data.
extend_fct: bool
Indicates the factor by which `x_std` is multiplied
to extend the range of the data.
grid_len: int
Number of bins
custom_lims: tuple or list
Custom limits for the domain of the density estimation.
Must be numeric of length 2. Overrides `extend`.
extend: bool, optional
Whether to extend the range of the data or not.
Default is True.
bound_correction: bool, optional
        Whether the density estimation performs boundary correction or not.
        This does not directly affect the output, but it is used to
        override `extend`.
Default is False.
Returns
-------
grid_len: int
Number of bins
grid_min: float
Minimum value of the grid
grid_max: float
Maximum value of the grid
"""
# Set up number of bins.
if grid_len < 100:
grid_len = 100
grid_len = int(grid_len)
# Set up domain
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x_min, x_max)
grid_min = custom_lims[0]
grid_max = custom_lims[1]
elif extend and not bound_correction:
grid_extend = extend_fct * x_std
grid_min = x_min - grid_extend
grid_max = x_max + grid_extend
else:
grid_min = x_min
grid_max = x_max
return grid_min, grid_max, grid_len
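# Illustrative sketch of the grid logic above (hypothetical values):
#
#     _get_grid(x_min=0.0, x_max=10.0, x_std=2.0, extend_fct=0.5, grid_len=256,
#               custom_lims=None, extend=True, bound_correction=False)
#     # -> (-1.0, 11.0, 256): each bound extended by extend_fct * x_std
#
#     _get_grid(0.0, 10.0, 2.0, 0.5, 256, custom_lims=(0, 12))
#     # -> (0, 12, 256): custom limits take precedence over the extension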
def kde(x, circular=False, **kwargs):
"""One dimensional density estimation.
It is a wrapper around `kde_linear()` and `kde_circular()`.
Parameters
----------
x : 1D numpy array
Data used to calculate the density estimation.
        Theoretically it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
circular: bool, optional
Whether `x` is a circular variable or not. Defaults to False.
**kwargs: Arguments passed to `kde_linear()` and `kde_circular()`.
See their documentation for more info.
Returns
-------
grid : Gridded numpy array for the x values.
pdf : Numpy array for the density estimates.
bw: optional, the estimated bandwidth.
Examples
--------
Default density estimation for linear data
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from arviz import kde
>>>
>>> rvs = np.random.gamma(shape=1.8, size=1000)
>>> grid, pdf = kde(rvs)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with Silverman's rule bandwidth
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bw="silverman")
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for linear data with scaled bandwidth
.. plot::
:context: close-figs
>>> # bw_fct > 1 means more smoothness.
>>> grid, pdf = kde(rvs, bw_fct=2.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with extended limits
.. plot::
:context: close-figs
>>> grid, pdf = kde(rvs, bound_correction=False, extend=True, extend_fct=0.5)
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for linear data with custom limits
.. plot::
:context: close-figs
        >>> # It accepts tuples and lists of length 2.
>>> grid, pdf = kde(rvs, bound_correction=False, custom_lims=(0, 10))
>>> plt.plot(grid, pdf)
>>> plt.show()
Default density estimation for circular data
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=1, size=500)
>>> grid, pdf = kde(rvs, circular=True)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with scaled bandwidth
.. plot::
:context: close-figs
>>> rvs = np.random.vonmises(mu=np.pi, kappa=1, size=500)
>>> # bw_fct > 1 means less smoothness.
>>> grid, pdf = kde(rvs, circular=True, bw_fct=3)
>>> plt.plot(grid, pdf)
>>> plt.show()
Density estimation for circular data with custom limits
.. plot::
:context: close-figs
>>> # This is still experimental, does not always work.
>>> rvs = np.random.vonmises(mu=0, kappa=30, size=500)
>>> grid, pdf = kde(rvs, circular=True, custom_lims=(-1, 1))
>>> plt.plot(grid, pdf)
>>> plt.show()
See Also
--------
plot_kde : Compute and plot a kernel density estimate.
arviz.stats.density_utils.kde: Arviz KDE estimator
"""
if circular:
kde_fun = _kde_circular
else:
kde_fun = _kde_linear
return kde_fun(x, **kwargs)
def _kde_linear(
x,
bw="experimental",
adaptive=False,
extend=False,
bound_correction=True,
extend_fct=0,
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for linear data.
Given an array of data points `x` it returns an estimate of
the probability density function that generated the samples in `x`.
Parameters
----------
x : 1D numpy array
Data used to calculate the density estimation.
        Theoretically it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental".
Defaults to "experimental".
adaptive: boolean, optional
Indicates if the bandwidth is adaptative or not.
It is the recommended approach when there are multiple modalities
with different spread.
It is not compatible with convolution. Defaults to False.
extend: boolean, optional
Whether to extend the observed range for `x` in the estimation.
It extends each bound by a multiple of the standard deviation of `x`
given by `extend_fct`. Defaults to False.
bound_correction: boolean, optional
Whether to perform boundary correction on the bounds of `x` or not.
Defaults to True.
extend_fct: float, optional
Number of standard deviations used to widen the
        lower and upper bounds of `x`. Defaults to 0.
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand.
Must be positive. Values below 1 decrease smoothness while values
        above 1 increase it. Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in addition to the
other objects. Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds
for the range of `x`. Defaults to None which disables custom bounds.
cumulative: bool, optional
Whether return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
The number of intervals used to bin the data points
(a.k.a. the length of the grid used in the estimation)
Defaults to 512.
Returns
-------
grid : Gridded numpy array for the x values.
pdf : Numpy array for the density estimates.
bw: optional, the estimated bandwidth.
"""
    # Check `x` is of the appropriate type
try:
x = _check_type(x)
except ValueError as e:
warnings.warn("Something failed: " + str(e))
return np.array([np.nan]), np.array([np.nan])
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, np.integer, np.floating)):
raise TypeError(
"`bw_fct` must be a positive number, not an object of {}.".format(type(bw_fct))
)
if bw_fct <= 0:
raise ValueError("`bw_fct` must be a positive number, not {}.".format(bw_fct))
# Preliminary calculations
x_len = len(x)
x_min = x.min()
x_max = x.max()
x_std = (((x ** 2).sum() / x_len) - (x.sum() / x_len) ** 2) ** 0.5
x_range = x_max - x_min
# Determine grid
grid_min, grid_max, grid_len = _get_grid(
x_min, x_max, x_std, extend_fct, grid_len, custom_lims, extend, bound_correction
)
grid_counts, _, grid_edges = histogram(x, grid_len, (grid_min, grid_max))
# Bandwidth estimation
bw = bw_fct * _get_bw(x, bw, grid_counts, x_std, x_range)
# Density estimation
if adaptive:
grid, pdf = _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
else:
grid, pdf = _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction)
if cumulative:
pdf = pdf.cumsum() / pdf.sum()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
def _kde_circular(
x,
bw="taylor",
bw_fct=1,
bw_return=False,
custom_lims=None,
cumulative=False,
grid_len=512,
**kwargs, # pylint: disable=unused-argument
):
"""One dimensional density estimation for circular data.
Given an array of data points `x` measured in radians,
it returns an estimate of the probability density function that generated
the samples in `x`.
Parameters
----------
x : 1D numpy array
Data used to calculate the density estimation.
        Theoretically it is a random sample obtained from $f$,
the true probability density function we aim to estimate.
bw: int, float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
"taylor" since it is the only option supported so far. Defaults to "taylor".
bw_fct: float, optional
A value that multiplies `bw` which enables tuning smoothness by hand.
Must be positive. Values above 1 decrease smoothness while values
        below 1 increase it. Defaults to 1 (no modification).
bw_return: bool, optional
Whether to return the estimated bandwidth in addition to the
other objects. Defaults to False.
custom_lims: list or tuple, optional
A list or tuple of length 2 indicating custom bounds
for the range of `x`. Defaults to None which means the estimation
limits are [-pi, pi].
cumulative: bool, optional
Whether return the PDF or the cumulative PDF. Defaults to False.
grid_len: int, optional
The number of intervals used to bin the data points
(a.k.a. the length of the grid used in the estimation)
Defaults to 512.
"""
try:
x = _check_type(x)
except ValueError as e:
warnings.warn("Something failed: " + str(e))
return np.array([np.nan]), np.array([np.nan])
# All values between -pi and pi
x = _normalize_angle(x)
# Check `bw_fct` is numeric and positive
if not isinstance(bw_fct, (int, float, np.integer, np.floating)):
raise TypeError(
"`bw_fct` must be a positive number, not an object of {}.".format(type(bw_fct))
)
if bw_fct <= 0:
raise ValueError("`bw_fct` must be a positive number, not {}.".format(bw_fct))
# Determine bandwidth
if isinstance(bw, bool):
raise ValueError(
("`bw` can't be of type `bool`.\n" "Expected a positive numeric or 'taylor'")
)
if isinstance(bw, (int, float)):
if bw < 0:
raise ValueError("Numeric `bw` must be positive.\nInput: {:.4f}.".format(bw))
if isinstance(bw, str):
if bw == "taylor":
bw = _bw_taylor(x)
else:
raise ValueError(("`bw` must be a positive numeric or `taylor`, not {}".format(bw)))
bw *= bw_fct
# Determine grid
if custom_lims is not None:
custom_lims = _check_custom_lims(custom_lims, x.min(), x.max())
grid_min = custom_lims[0]
grid_max = custom_lims[1]
assert grid_min >= -np.pi, "Lower limit can't be smaller than -pi"
assert grid_max <= np.pi, "Upper limit can't be larger than pi"
else:
grid_min = -np.pi
grid_max = np.pi
bins = np.linspace(grid_min, grid_max, grid_len + 1)
bin_counts, _, bin_edges = histogram(x, bins=bins)
grid = 0.5 * (bin_edges[1:] + bin_edges[:-1])
kern = _vonmises_pdf(x=grid, mu=0, kappa=bw)
pdf = np.fft.fftshift(np.fft.irfft(np.fft.rfft(kern) * np.fft.rfft(bin_counts)))
pdf /= len(x)
if cumulative:
pdf = pdf.cumsum() / pdf.sum()
if bw_return:
return grid, pdf, bw
else:
return grid, pdf
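# Note on the circular estimator above: binning the angles and convolving with a
# von Mises kernel via the FFT is equivalent to summing a kernel centered at each
# observation, but its cost scales with `grid_len` instead of `len(x)`.
# A hedged usage sketch (hypothetical angles, in radians):
#
#     angles = np.random.default_rng(0).vonmises(mu=0.0, kappa=2.0, size=300)
#     grid, pdf = _kde_circular(angles, bw="taylor")
#     # `grid` covers (-pi, pi); `pdf` integrates to ~1 over the circle.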
# pylint: disable=unused-argument
def _kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Kernel density with convolution.
One dimensional Gaussian kernel density estimation via
convolution of the binned relative frequencies and a Gaussian filter.
This is an internal function used by `kde()`.
"""
# Calculate relative frequencies per bin
bin_width = grid_edges[1] - grid_edges[0]
f = grid_counts / bin_width / len(x)
# Bandwidth must consider the bin width
bw /= bin_width
# See: https://stackoverflow.com/questions/2773606/gaussian-filter-in-matlab
kernel_n = int(bw * 2 * np.pi)
    # Temporary fix: make sure the kernel has at least one point
if kernel_n == 0:
kernel_n = 1
kernel = gaussian(kernel_n, bw)
if bound_correction:
npad = int(grid_len / 5)
f = np.concatenate([f[npad - 1 :: -1], f, f[grid_len : grid_len - npad - 1 : -1]])
pdf = convolve(f, kernel, mode="same", method="direct")[npad : npad + grid_len]
pdf /= bw * (2 * np.pi) ** 0.5
else:
pdf = convolve(f, kernel, mode="same", method="direct")
pdf /= bw * (2 * np.pi) ** 0.5
grid = (grid_edges[1:] + grid_edges[:-1]) / 2
return grid, pdf
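# Note on the boundary correction used above: the binned frequencies are reflected
# around both edges before convolving, so kernel mass that would leak outside the
# support is folded back in. A minimal sketch of the padding step on a toy array
# (hypothetical values):
#
#     f = np.array([4., 3., 2., 1., 0.])
#     npad = 2
#     padded = np.concatenate([f[npad - 1::-1], f, f[len(f):len(f) - npad - 1:-1]])
#     # padded -> [3., 4., 4., 3., 2., 1., 0., 0., 1.]
#
# Only the central len(f) entries of the convolved result are kept.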
def _kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction, **kwargs):
"""Compute Adaptive Kernel Density Estimation.
One dimensional adaptive Gaussian kernel density estimation.
The implementation uses the binning technique.
Since there is not an unique `bw`, the convolution is not possible.
The alternative implemented in this function is known as Abramson's method.
This is an internal function used by `kde()`.
"""
# Pilot computations used for bandwidth adjustment
pilot_grid, pilot_pdf = _kde_convolution(
x, bw, grid_edges, grid_counts, grid_len, bound_correction
)
    # Add a small constant to avoid np.log(0) and division by zero
pilot_pdf += 1e-9
# Determine the modification factors
pdf_interp = np.interp(x, pilot_grid, pilot_pdf)
geom_mean = np.exp(np.mean(np.log(pdf_interp)))
# Power of c = 0.5 -> Abramson's method
adj_factor = (geom_mean / pilot_pdf) ** 0.5
bw_adj = bw * adj_factor
# Estimation of Gaussian KDE via binned method (convolution not possible)
grid = pilot_grid
if bound_correction:
grid_npad = int(grid_len / 5)
grid_width = grid_edges[1] - grid_edges[0]
grid_pad = grid_npad * grid_width
grid_padded = np.linspace(
grid_edges[0] - grid_pad,
grid_edges[grid_len - 1] + grid_pad,
num=grid_len + 2 * grid_npad,
)
grid_counts = np.concatenate(
[
grid_counts[grid_npad - 1 :: -1],
grid_counts,
grid_counts[grid_len : grid_len - grid_npad - 1 : -1],
]
)
bw_adj = np.concatenate(
[bw_adj[grid_npad - 1 :: -1], bw_adj, bw_adj[grid_len : grid_len - grid_npad - 1 : -1]]
)
pdf_mat = (grid_padded - grid_padded[:, None]) / bw_adj[:, None]
pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
pdf = np.sum(pdf_mat[:, grid_npad : grid_npad + grid_len], axis=0) / len(x)
else:
pdf_mat = (grid - grid[:, None]) / bw_adj[:, None]
pdf_mat = np.exp(-0.5 * pdf_mat ** 2) * grid_counts[:, None]
pdf_mat /= (2 * np.pi) ** 0.5 * bw_adj[:, None]
pdf = np.sum(pdf_mat, axis=0) / len(x)
return grid, pdf
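# The Abramson adjustment above rescales the global bandwidth point by point:
# where the pilot density is above its geometric mean the kernel narrows, and
# where it is below the kernel widens. A hedged numeric sketch (hypothetical
# pilot values):
#
#     pilot = np.array([0.1, 0.4, 0.9])
#     geom_mean = np.exp(np.mean(np.log(pilot)))  # ~0.33
#     adj = (geom_mean / pilot) ** 0.5            # ~[1.82, 0.91, 0.61]
#     # bw * adj is wider in sparse regions and narrower in dense ones.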
def _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None): # pylint: disable=unused-argument
"""Kernel Density Estimate, Deprecated."""
if not (xmin is None and xmax is None):
custom_lims = (xmin, xmax)
else:
custom_lims = None
grid, pdf = kde(x, cumulative=cumulative, bw=bw, custom_lims=custom_lims)
warnings.warn("_fast_kde() has been replaced by kde() in stats.density_utils.py", FutureWarning)
return grid, pdf
def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
"""
2D fft-based Gaussian kernel density estimate (KDE).
The code was adapted from https://github.com/mfouesneau/faststats
Parameters
----------
x : Numpy array or list
y : Numpy array or list
gridsize : tuple
Number of points used to discretize data. Use powers of 2 for fft optimization
circular: bool
If True, use circular boundaries. Defaults to False
Returns
-------
grid: A gridded 2D KDE of the input points (x, y)
xmin: minimum value of x
xmax: maximum value of x
ymin: minimum value of y
ymax: maximum value of y
"""
x = np.asarray(x, dtype=float)
x = x[np.isfinite(x)]
y = np.asarray(y, dtype=float)
y = y[np.isfinite(y)]
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
len_x = len(x)
weights = np.ones(len_x)
n_x, n_y = gridsize
d_x = (xmax - xmin) / (n_x - 1)
d_y = (ymax - ymin) / (n_y - 1)
xyi = _stack(x, y).T
xyi -= [xmin, ymin]
xyi /= [d_x, d_y]
xyi = np.floor(xyi, xyi).T
scotts_factor = len_x ** (-1 / 6)
cov = _cov(xyi)
std_devs = np.diag(cov) ** 0.5
kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
x_x = np.arange(kern_nx) - kern_nx / 2
y_y = np.arange(kern_ny) - kern_ny / 2
x_x, y_y = np.meshgrid(x_x, y_y)
kernel = _stack(x_x.flatten(), y_y.flatten())
kernel = _dot(inv_cov, kernel) * kernel
kernel = np.exp(-kernel.sum(axis=0) / 2)
kernel = kernel.reshape((int(kern_ny), int(kern_nx)))
boundary = "wrap" if circular else "symm"
grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()
grid = convolve2d(grid, kernel, mode="same", boundary=boundary)
norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)
norm_factor = len_x * d_x * d_y * norm_factor ** 0.5
grid /= norm_factor
return grid, xmin, xmax, ymin, ymax
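# Minimal usage sketch for the 2D estimator above (hypothetical correlated data;
# assumes numpy has been imported as `np`):
#
#     rng = np.random.default_rng(0)
#     xs = rng.normal(size=1000)
#     ys = 0.5 * xs + rng.normal(scale=0.5, size=1000)
#     density, xmin, xmax, ymin, ymax = _fast_kde_2d(xs, ys, gridsize=(128, 128))
#     # `density` is a (128, 128) array suitable for plt.imshow or plt.contourf.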
def get_bins(values):
"""
Automatically compute the number of bins for discrete variables.
Parameters
----------
    values : numpy array
        Values for which to compute the bins.
Returns
-------
array with the bins
Notes
-----
    Computes the width of the bins by taking the maximum of the Sturges and the Freedman-Diaconis
    estimators. According to numpy `np.histogram` this provides good all-around performance.
The Sturges is a very simplistic estimator based on the assumption of normality of the data.
This estimator has poor performance for non-normal data, which becomes especially obvious for
large data sets. The estimate depends only on size of the data.
The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
    It is considered a robust version of the Scott rule as the IQR is less affected by outliers
than the standard deviation. However, the IQR depends on fewer points than the standard
deviation, so it is less accurate, especially for long tailed distributions.
"""
x_min = values.min().astype(int)
x_max = values.max().astype(int)
# Sturges histogram bin estimator
bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
# The Freedman-Diaconis histogram bin estimator.
iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)
return | np.arange(x_min, x_max + width + 1, width) | numpy.arange |
# -*- coding: utf-8 -*-
"""
pmutt.empirical.nasa
Operations related to Nasa polynomials
"""
import inspect
from copy import copy
from warnings import warn
import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize, minimize_scalar
from pmutt import (_apply_numpy_operation, _get_R_adj, _is_iterable,
_pass_expected_arguments)
from pmutt import constants as c
from pmutt.empirical import EmpiricalBase
from pmutt.io.cantera import obj_to_cti
from pmutt.io.json import json_to_pmutt, remove_class
from pmutt.mixture import _get_mix_quantity
class Nasa(EmpiricalBase):
"""Stores the NASA polynomial coefficients for species. Inherits from
:class:`~pmutt.empirical.EmpiricalBase`
The thermodynamic properties are calculated using the following form:
:math:`\\frac {Cp} {R} = a_{1} + a_{2} T + a_{3} T^{2} + a_{4} T^{3}
+ a_{5} T^{4}`
:math:`\\frac {H} {RT} = a_{1} + a_{2} \\frac {T} {2} + a_{3}
\\frac {T^{2}} {3} + a_{4} \\frac {T^{3}} {4} + a_{5}
\\frac {T^{4}} {5} + a_{6} \\frac {1} {T}`
:math:`\\frac {S} {R} = a_{1} \\ln {T} + a_{2} T + a_{3}
\\frac {T^{2}} {2} + a_{4} \\frac {T^{3}} {3} + a_{5}
\\frac {T^{4}} {4} + a_{7}`
Attributes
----------
T_low : float
Lower temperature bound (in K)
T_mid : float
Middle temperature bound (in K)
T_high : float
High temperature bound (in K)
a_low : (7,) `numpy.ndarray`_
NASA polynomial to use between T_low and T_mid
a_high : (7,) `numpy.ndarray`_
NASA polynomial to use between T_mid and T_high
cat_site : :class:`~pmutt.chemkin.CatSite` object, optional
Catalyst site for adsorption. Used only for Chemkin input/output.
Default is None
n_sites : int, optional
Number of catalyst sites occupied by species. If cat_site is not
assigned, then n_sites is None. If cat_site is specified, the
default is 1
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
def __init__(self,
name,
T_low,
T_mid,
T_high,
a_low,
a_high,
cat_site=None,
n_sites=None,
**kwargs):
super().__init__(name=name, **kwargs)
self.T_low = T_low
self.T_mid = T_mid
self.T_high = T_high
self.a_low = np.array(a_low)
self.a_high = np.array(a_high)
if inspect.isclass(cat_site):
self.cat_site = _pass_expected_arguments(cat_site, **kwargs)
else:
self.cat_site = cat_site
if self.cat_site is not None and n_sites is None:
n_sites = 1
self.n_sites = n_sites
def get_a(self, T):
"""Returns the correct polynomial range based on T_low, T_mid and
T_high
Parameters
----------
T : float
Temperature in K
Returns
-------
a : (7,) `numpy.ndarray`_
NASA polynomial coefficients
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if type(self.T_mid) is list:
self.T_mid = self.T_mid[0]
if T < self.T_mid:
if T < self.T_low:
                warn_msg = ('Requested temperature ({} K), below T_low ({} K) '
'for Nasa object, {}'
''.format(T, self.T_low, self.name))
warn(warn_msg, RuntimeWarning)
return self.a_low
else:
if T > self.T_high:
                warn_msg = ('Requested temperature ({} K), above T_high ({} K) '
'for Nasa object, {}'
''.format(T, self.T_high, self.name))
warn(warn_msg, RuntimeWarning)
return self.a_high
def get_CpoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
CpoR = np.zeros(len(T))
for i, T_i in enumerate(T):
a = self.get_a(T_i)
CpoR[i] = get_nasa_CpoR(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
CpoR = get_nasa_CpoR(a=a, T=T) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return CpoR
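    # Hedged sketch of a single NASA-7 heat-capacity evaluation; the coefficients
    # below are placeholders, not real species data:
    #
    #     a = np.array([3.5, 1e-3, -1e-7, 0., 0., -1000., 5.])
    #     T = 500.
    #     CpoR = a[0] + a[1]*T + a[2]*T**2 + a[3]*T**3 + a[4]*T**4
    #
    # get_nasa_CpoR(a=a, T=T) evaluates the same polynomial; get_CpoR above also
    # selects a_low or a_high via get_a and adds any mixture-model contributions.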
def get_Cp(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
Cp : float or (N,) `numpy.ndarray`_
Heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_CpoR(T=T) * R_adj
def get_HoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
HoRT = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
a = self.get_a(T=T_i)
HoRT[i] = get_nasa_HoRT(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
HoRT = get_nasa_HoRT(a=a, T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return HoRT
def get_H(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
H : float or (N,) `numpy.ndarray`_
Enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_HoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
def get_SoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
SoR : float or (N,) `numpy.ndarray`_
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
SoR = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
a = self.get_a(T=T_i)
SoR[i] = get_nasa_SoR(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
SoR = get_nasa_SoR(a=a, T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return SoR
def get_S(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
S : float or (N,) `numpy.ndarray`_
Entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_SoR(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * R_adj
def get_GoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless Gibbs free energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
GoRT : float or (N,) `numpy.ndarray`_
Dimensionless Gibbs free energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
GoRT = self.get_HoRT(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs) \
- self.get_SoR(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs)
return GoRT
def get_G(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the Gibbs energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
G : float or (N,) `numpy.ndarray`_
Gibbs energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_GoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
@classmethod
def from_data(cls,
name,
T,
CpoR,
T_ref,
HoRT_ref,
SoR_ref,
elements=None,
T_mid=None,
**kwargs):
"""Calculates the NASA polynomials using thermodynamic data
Parameters
----------
name : str
Name of the species
T : (N,) `numpy.ndarray`_
Temperatures in K used for fitting CpoR.
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity corresponding to T.
T_ref : float
            Reference temperature in K used for fitting empirical coefficients.
HoRT_ref : float
Dimensionless reference enthalpy that corresponds to T_ref.
SoR_ref : float
Dimensionless entropy that corresponds to T_ref.
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_low = min(T)
T_high = max(T)
# Find midpoint temperature, and a[0] through a[4] parameters
a_low, a_high, T_mid_out = _fit_CpoR(T=T, CpoR=CpoR, T_mid=T_mid)
# Fit a[5] parameter using reference enthalpy
a_low[5], a_high[5] = _fit_HoRT(T_ref=T_ref,
HoRT_ref=HoRT_ref,
a_low=a_low,
a_high=a_high,
T_mid=T_mid_out)
# Fit a[6] parameter using reference entropy
a_low[6], a_high[6] = _fit_SoR(T_ref=T_ref,
SoR_ref=SoR_ref,
a_low=a_low,
a_high=a_high,
T_mid=T_mid_out)
return cls(name=name,
T_low=T_low,
T_high=T_high,
T_mid=T_mid_out,
a_low=a_low,
a_high=a_high,
elements=elements,
**kwargs)
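    # Hedged usage sketch of the fitting pipeline above: the heat-capacity data fix
    # a[0]-a[4], the reference enthalpy fixes a[5], and the reference entropy fixes
    # a[6]. The numbers below are placeholders, not a real species:
    #
    #     T = np.linspace(300., 1500., 50)
    #     CpoR = 3.5 + 1e-3 * T
    #     nasa = Nasa.from_data(name='dummy', T=T, CpoR=CpoR, T_ref=900.,
    #                           HoRT_ref=10., SoR_ref=25., elements={'H': 2, 'O': 1})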
@classmethod
def from_statmech(cls,
name,
statmech_model,
T_low,
T_high,
T_mid=None,
references=None,
elements=None,
**kwargs):
"""Calculates the NASA polynomials using statistical mechanic models.
Deprecated as of Version 1.2.13. Please use ``from_model`` instead.
Parameters
----------
name : str
Name of the species
statmech_model : `pmutt.statmech.StatMech` object or class
Statistical Mechanics model to generate data
T_low : float
            Lower limit temperature in K
T_high : float
Higher limit temperature in K
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
references : `pmutt.empirical.references.References` object
Reference to adjust enthalpy
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
kwargs : keyword arguments
            Used to initialize ``statmech_model`` or ``EmpiricalBase``
attributes to be stored.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
"""
err_msg = ('Nasa.from_statmech is deprecated as of Version 1.2.13. '
'Please use the more generic function, Nasa.from_model.')
raise RuntimeError(err_msg)
@classmethod
def from_model(cls,
model,
name=None,
T_low=None,
T_high=None,
T_mid=None,
elements=None,
n_T=50,
**kwargs):
"""Calculates the NASA polynomials using the model passed
Parameters
----------
model : Model object or class
Model to generate data. Must contain the methods `get_CpoR`,
`get_HoRT` and `get_SoR`
name : str, optional
Name of the species. If not passed, `model.name` will be used.
T_low : float, optional
            Lower limit temperature in K. If not passed, `model.T_low` will
be used.
T_high : float, optional
Higher limit temperature in K. If not passed, `model.T_high`
will be used.
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
elements : dict, optional
Composition of the species. If not passed, `model.elements`
will be used. Keys of dictionary are elements, values are
stoichiometric values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
n_T : int, optional
Number of data points between `T_low` and `T_high` for fitting
heat capacity. Default is 50.
kwargs : keyword arguments
            Used to initialize model if a class is passed.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
"""
# Initialize the model object
if inspect.isclass(model):
model = model(name=name, elements=elements, **kwargs)
if name is None:
try:
name = model.name
except AttributeError:
err_msg = ('Name must either be passed to from_model directly '
'or be an attribute of model.')
raise AttributeError(err_msg)
if T_low is None:
try:
T_low = model.T_low
except AttributeError:
err_msg = ('T_low must either be passed to from_model '
'directly or be an attribute of model.')
raise AttributeError(err_msg)
if T_high is None:
try:
T_high = model.T_high
except AttributeError:
err_msg = ('T_high must either be passed to from_model '
'directly or be an attribute of model.')
raise AttributeError(err_msg)
if elements is None:
try:
elements = model.elements
except AttributeError:
pass
# Check if inputted T_low and T_high are outside model's T_low and
# T_high range
try:
if T_low < model.T_low:
warn_msg = ('Inputted T_low is lower than model T_low. Fitted '
'empirical object may not be valid.')
warn(warn_msg, UserWarning)
except AttributeError:
pass
try:
if T_high > model.T_high:
warn_msg = ('Inputted T_high is higher than model T_high. '
'Fitted empirical object may not be valid.')
warn(warn_msg, UserWarning)
except AttributeError:
pass
# Generate heat capacity data
T = np.linspace(T_low, T_high, n_T)
try:
CpoR = model.get_CpoR(T=T)
except ValueError:
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
else:
if not _is_iterable(CpoR) or len(CpoR) != len(T):
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
# Generate enthalpy and entropy data
T_mean = (T_low + T_high) / 2.
HoRT_ref = model.get_HoRT(T=T_mean)
SoR_ref = model.get_SoR(T=T_mean)
return cls.from_data(name=name,
T=T,
CpoR=CpoR,
T_ref=T_mean,
HoRT_ref=HoRT_ref,
SoR_ref=SoR_ref,
T_mid=T_mid,
model=model,
elements=elements,
**kwargs)
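    # Hedged usage sketch: any object exposing get_CpoR, get_HoRT and get_SoR can be
    # converted. ``h2o_model`` below is a placeholder for such a model object:
    #
    #     nasa = Nasa.from_model(model=h2o_model, name='H2O',
    #                            T_low=300., T_high=1500., n_T=50)
    #     cti_block = nasa.to_cti()  # Cantera CTI entry fitted from the model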
def to_cti(self):
"""Writes the object in Cantera's CTI format.
Returns
-------
CTI_str : str
Object represented as a CTI string.
"""
elements = {key: int(val) for key, val in self.elements.items()}
if self.n_sites is None:
size_str = ''
else:
size_str = ' size={},'.format(self.n_sites)
cti_str = ('species(name="{}", atoms={},{}\n'
' thermo=(NASA([{}, {}],\n'
' [{: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}]),\n'
' NASA([{}, {}], \n'
' [{: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}])))\n').format(
self.name, obj_to_cti(elements), size_str, self.T_low,
self.T_mid, self.a_low[0], self.a_low[1], self.a_low[2],
self.a_low[3], self.a_low[4], self.a_low[5],
self.a_low[6], self.T_mid, self.T_high, self.a_high[0],
self.a_high[1], self.a_high[2], self.a_high[3],
self.a_high[4], self.a_high[5], self.a_high[6])
return cti_str
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
obj_dict = super().to_dict()
obj_dict['class'] = str(self.__class__)
obj_dict['type'] = 'nasa'
obj_dict['a_low'] = self.a_low.tolist()
obj_dict['a_high'] = self.a_high.tolist()
obj_dict['T_low'] = self.T_low
obj_dict['T_mid'] = self.T_mid
obj_dict['T_high'] = self.T_high
try:
obj_dict['cat_site'] = self.cat_site.to_dict()
except AttributeError:
obj_dict['cat_site'] = None
obj_dict['n_sites'] = self.n_sites
return obj_dict
def to_omkm_yaml(self):
"""Returns a dictionary compatible with Cantera's YAML format
Returns
-------
yaml_dict : dict
Dictionary compatible with Cantera's YAML format
"""
yaml_dict = {
'name': self.name,
'composition': self.elements,
'thermo': {'model': 'NASA7',
'temperature-ranges': [float(self.T_low),
float(self.T_mid),
float(self.T_high)],
'data': [self.a_low.tolist(),
self.a_high.tolist()]}
}
if self.n_sites is not None:
yaml_dict['sites'] = self.n_sites
return yaml_dict
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
Nasa : Nasa object
"""
json_obj = remove_class(json_obj)
# Reconstruct statmech model
json_obj['model'] = json_to_pmutt(json_obj['model'])
json_obj['cat_site'] = json_to_pmutt(json_obj['cat_site'])
json_obj['misc_models'] = json_to_pmutt(json_obj['misc_models'])
return cls(**json_obj)
class Nasa9(EmpiricalBase):
"""Stores the NASA9 polynomials for species.
Inherits from :class:`~pmutt.empirical.EmpiricalBase`
:math:`\\frac {Cp} {R} = a_{1} T^{-2} + a_{2} T^{-1} + a_{3} + a_{4} T
+ a_{5} T^{2} + a_{6} T^{3} + a_{7} T^{4}`
    :math:`\\frac {H} {RT} = -a_{1} T^{-2} +
    a_{2} \\frac {\\ln {T}} {T} + a_{3} + a_{4} \\frac {T} {2} + a_{5}
\\frac {T^{2}} {3} + a_{6} \\frac {T^{3}} {4} + a_{7} \\frac {T^{4}} {5} +
a_{8} \\frac {1} {T}`
:math:`\\frac {S} {R} = -a_{1}\\frac {T^{-2}} {2} - a_2 \\frac {1} {T} +
a_{3} \\ln {T} + a_{4} T + a_{5} \\frac {T^{2}} {2} + a_{6}
\\frac {T^{3}} {3} + a_{7}\\frac {T^{4}} {4} + a_{9}`
Attributes
----------
nasas : list of :class:`~pmutt.empirical.nasa.SingleNasa9`
NASA9 polynomials for each temperature interval
T_low : float
Lower temperature bound (in K). Determined from inputted `nasas`
T_high : float
High temperature bound (in K). Determined from inputted `nasas`
"""
def __init__(self, name, nasas, n_sites=1, **kwargs):
super().__init__(name=name, **kwargs)
self.n_sites = n_sites
self.nasas = nasas
def __iter__(self):
for nasa in self.nasas:
yield nasa
def __getitem__(self, key):
return self.nasas[key]
def __len__(self):
return len(self.nasas)
@property
def nasas(self):
return self._nasas
@nasas.setter
def nasas(self, val):
self._nasas = copy(val)
@property
def T_low(self):
T_lows = [nasa.T_low for nasa in self.nasas]
return np.min(T_lows)
@property
def T_high(self):
        T_highs = [nasa.T_high for nasa in self.nasas]
return np.max(T_highs)
def _get_nasa(self, T):
"""Gets the relevant :class:`~pmutt.empirical.nasa.SingleNasa9` object
given a temperature
Attributes
----------
T : float
Temperature in float
Returns
-------
nasa : :class:`~pmutt.empirical.nasa.SingleNasa9` object
Relevant NASA9 polynomial for T
Raises
------
ValueError:
Raised if no valid :class:`~pmutt.empirical.nasa.SingleNasa9`
exists for T
"""
for nasa in self.nasas:
if T <= nasa.T_high and T >= nasa.T_low:
return nasa
else:
err_msg = ('Requested T ({} K) has no valid SingleNasa9 object for '
'species, {}. The global T_low is {} K and global '
'T_high is {} K.'
''.format(T, self.name, self.T_low, self.T_high))
raise ValueError(err_msg)
def get_CpoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
CpoR = np.zeros(len(T))
for i, T_i in enumerate(T):
nasa = self._get_nasa(T_i)
CpoR[i] = nasa.get_CpoR(T=T_i) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
CpoR = nasa.get_CpoR(T=T) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(CpoR) == 1:
CpoR = CpoR.item(0)
return CpoR
def get_Cp(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
Cp : float or (N,) `numpy.ndarray`_
Heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_CpoR(T=T) * R_adj
def get_HoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
HoRT = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
nasa = self._get_nasa(T=T_i)
HoRT[i] = nasa.get_HoRT(T=T_i) \
+ np.sum(_get_mix_quantity(
misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
HoRT = nasa.get_HoRT(T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(HoRT) == 1:
HoRT = HoRT.item(0)
return HoRT
def get_H(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
H : float or (N,) `numpy.ndarray`_
Enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_HoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
def get_SoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
SoR : float or (N,) `numpy.ndarray`_
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
SoR = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
nasa = self._get_nasa(T=T_i)
SoR[i] = nasa.get_SoR(T=T_i) \
+ np.sum(_get_mix_quantity(
misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
SoR = nasa.get_SoR(T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(SoR) == 1:
SoR = SoR.item(0)
return SoR
def get_S(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
S : float or (N,) `numpy.ndarray`_
Entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_SoR(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * R_adj
def get_GoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless Gibbs free energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
GoRT : float or (N,) `numpy.ndarray`_
Dimensionless Gibbs free energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
GoRT = self.get_HoRT(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs) \
- self.get_SoR(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs)
return GoRT
def get_G(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the Gibbs energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
G : float or (N,) `numpy.ndarray`_
Gibbs energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_GoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
@classmethod
def from_data(cls,
name,
T,
CpoR,
T_ref,
HoRT_ref,
SoR_ref,
elements=None,
T_mid=None,
fit_T_mid=True,
**kwargs):
"""Calculates the NASA polynomials using thermodynamic data
Parameters
----------
name : str
Name of the species
T : (N,) `numpy.ndarray`_
Temperatures in K used for fitting CpoR.
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity corresponding to T.
T_ref : float
            Reference temperature in K used for fitting empirical coefficients.
HoRT_ref : float
Dimensionless reference enthalpy that corresponds to T_ref.
SoR_ref : float
Dimensionless entropy that corresponds to T_ref.
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
T_mid : iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_low = min(T)
T_high = max(T)
# Find midpoint temperature, and a[0] through a[4] parameters
a = _fit_CpoR9(T=T, CpoR=CpoR, T_low=T_low, T_high=T_high, T_mid=T_mid)
# Fit a[7] parameter using reference enthalpy
a = _fit_HoRT9(T_ref=T_ref, HoRT_ref=HoRT_ref, a=a, T_mid=T_mid)
# Fit a[8] parameter using reference entropy
a = _fit_SoR9(T_ref=T_ref, SoR_ref=SoR_ref, a=a, T_mid=T_mid)
nasas = []
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for a_row, T_low, T_high in zip(a, T_interval, T_interval[1:]):
nasas.append(SingleNasa9(T_low=T_low, T_high=T_high, a=a_row))
return cls(name=name, nasas=nasas, elements=elements, **kwargs)
@classmethod
def from_model(cls,
name,
model,
T_low,
T_high,
elements=None,
T_mid=None,
n_interval=2,
n_T=50,
fit_T_mid=True,
**kwargs):
"""Calculates the NASA polynomials using the model passed
Parameters
----------
name : str
Name of the species
model : Model object or class
Model to generate data. Must contain the methods `get_CpoR`,
`get_HoRT` and `get_SoR`
T_low : float
            Lower limit temperature in K
T_high : float
Higher limit temperature in K
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
        T_mid : (n_interval,) numpy.ndarray
Temperatures (in K) to use at intervals. See `fit_T_mid` for
behavior.
n_interval : int, optional
Number of NASA9 polynomials to create. Default is 2
n_T : int, optional
Number of temperature values to evaluate between each interval.
            Larger values result in a better fit but take longer to run.
Default is 50.
fit_T_mid : bool, optional
            If True, the passed T_mid values are used as initial guesses and
            may be adjusted during fitting. If False, T_mid values are not changed
kwargs : keyword arguments
            Used to initialize model if a class is passed.
Returns
-------
Nasa9 : Nasa9 object
Nasa object with polynomial terms fitted to data.
"""
# Initialize the model object
if inspect.isclass(model):
model = model(name=name, elements=elements, **kwargs)
# Optimize T_mids
if fit_T_mid:
# If guesses not specified, use even spacing
if T_mid is None:
T_mid0 = np.linspace(T_low, T_high, n_interval + 1)[1:-1]
else:
T_mid0 = T_mid
res = minimize(method='Nelder-Mead',
x0=T_mid0,
fun=_calc_T_mid_mse_nasa9,
args=(T_low, T_high, model, n_T))
T_mid = res.x
# Generate heat capacity data for from_data
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for i, (T1, T2) in enumerate(zip(T_interval, T_interval[1:])):
if i == 0:
T = np.linspace(T1, T2, n_T)
else:
T = np.concatenate([T, np.linspace(T1, T2, n_T)])
# Calculate heat capacity
try:
CpoR = model.get_CpoR(T=T)
except ValueError:
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
else:
if not _is_iterable(CpoR) or len(CpoR) != len(T):
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
# Generate enthalpy and entropy data
HoRT_ref = model.get_HoRT(T=T_low)
SoR_ref = model.get_SoR(T=T_low)
return cls.from_data(name=name,
T=T,
CpoR=CpoR,
T_ref=T_low,
HoRT_ref=HoRT_ref,
SoR_ref=SoR_ref,
T_mid=T_mid,
model=model,
elements=elements,
fit_T_mid=False,
**kwargs)
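    # A minimal usage sketch (illustrative values only; `my_model` stands for
    # any object exposing get_CpoR, get_HoRT and get_SoR, e.g. a StatMech model):
    #
    #   h2o_nasa9 = Nasa9.from_model(name='H2O',
    #                                model=my_model,
    #                                T_low=300.,
    #                                T_high=2000.,
    #                                elements={'H': 2, 'O': 1})
    #   CpoR_800 = h2o_nasa9.get_CpoR(T=800.)  # evaluate the fitted polynomials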
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
obj_dict = super().to_dict()
obj_dict['class'] = str(self.__class__)
obj_dict['type'] = 'nasa9'
obj_dict['nasa'] = [nasa.to_dict() for nasa in self.nasas]
obj_dict['n_sites'] = self.n_sites
return obj_dict
def to_omkm_yaml(self):
"""Returns a dictionary compatible with Cantera's YAML format
Returns
-------
yaml_dict : dict
Dictionary compatible with Cantera's YAML format
"""
yaml_dict = {
'name': self.name,
'composition': self.elements,
'thermo': {'model': 'NASA9',
'reference-pressure': '1 bar'},
}
if self.n_sites is not None:
yaml_dict['sites'] = self.n_sites
# Ensure that sorted NASAs are consistent whether using T_low or T_high
nasas_sorted_T_low = sorted(self.nasas, key=lambda nasa: nasa.T_low)
nasas_sorted_T_high = sorted(self.nasas, key=lambda nasa: nasa.T_high)
assert nasas_sorted_T_low == nasas_sorted_T_high
# Add temperature ranges and polynomials
yaml_dict['thermo']['temperature-ranges'] = []
yaml_dict['thermo']['data'] = []
for nasa in nasas_sorted_T_low:
yaml_dict['thermo']['temperature-ranges'].append(float(nasa.T_low))
yaml_dict['thermo']['data'].append(nasa.a.tolist())
yaml_dict['thermo']['temperature-ranges'].append(float(nasa.T_high))
return yaml_dict
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
        Nasa9 : Nasa9 object
"""
json_obj = remove_class(json_obj)
# Reconstruct statmech model
json_obj['nasas'] = [json_to_pmutt(nasa) for nasa in json_obj['nasas']]
json_obj['model'] = json_to_pmutt(json_obj['model'])
json_obj['misc_models'] = json_to_pmutt(json_obj['misc_models'])
return cls(**json_obj)
def to_cti(self):
"""Writes the object in Cantera's CTI format.
Returns
-------
CTI_str : str
Object represented as a CTI string.
"""
elements = {key: int(val) for key, val in self.elements.items()}
if self.n_sites is None:
size_str = ''
else:
size_str = ' size={},'.format(self.n_sites)
cti_str = ('species(name="{}", atoms={},{}\n'
' thermo=('
''.format(self.name, obj_to_cti(elements), size_str))
for i, nasa in enumerate(self.nasas):
line_indent = (i != 0)
cti_str += '{},\n'.format(nasa.to_cti(line_indent=line_indent))
cti_str = '{})\n'.format(cti_str[:-2])
return cti_str
class SingleNasa9(EmpiricalBase):
"""Stores the NASA9 polynomial for a defined interval.
Inherits from :class:`~pmutt.empirical.EmpiricalBase`
Attributes
----------
T_low : float
Lower temperature bound (in K)
T_high : float
High temperature bound (in K)
a : (9,) `numpy.ndarray`_
NASA9 polynomial to use between T_low and T_high
"""
def __init__(self, T_low, T_high, a):
self.T_low = T_low
self.T_high = T_high
self.a = a
def get_CpoR(self, T):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
T = np.array(T)
CpoR = get_nasa9_CpoR(a=self.a, T=T)
return CpoR
def get_HoRT(self, T):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
T = np.array(T)
HoRT = get_nasa9_HoRT(a=self.a, T=T)
return HoRT
def get_SoR(self, T):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
        SoR : float or (N,) `numpy.ndarray`_
            Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
        T = np.array(T)
        SoR = get_nasa9_SoR(a=self.a, T=T)
        return SoR
from recsys.preprocess import *
from sklearn import model_selection
import numpy as np
import pandas as pd
from recsys.utility import *
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
train = get_train()
target_playlist = get_target_playlists()
target_tracks = get_target_tracks()
# Uncomment if you want to test
# train, test, target_playlist, target_tracks = train_test_split(train, test_size=0.20)
most_popular = get_most_popular_tracks(train)
tracks_in_playlist = get_playlist_track_list2(train)
tracks_to_suggest = most_popular.index.values
predictions = []
predictions = pd.DataFrame(target_playlist)
predictions.index = target_playlist['playlist_id']
predictions['track_ids'] = [np.array([]) for i in range(len(predictions))]
for it,row in target_playlist.iterrows():
count = 0
i = 0
pred = []
while count < 5:
if tracks_to_suggest[i] not in tracks_in_playlist.loc[row['playlist_id']]['track_ids']:
# Predict track i
# IMPORTANT: should we check if the track to suggest is in target_tracks?
pred.append(tracks_to_suggest[i])
count += 1
i += 1
    predictions.loc[row['playlist_id']] = predictions.loc[row['playlist_id']].set_value('track_ids', np.array(pred))
"""
Mask R-CNN
Train on the Paper dataset and implement warp and threshold.
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 paper.py train --dataset=/path/to/paper/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 paper.py train --dataset=/path/to/paper/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 paper.py train --dataset=/path/to/paper/dataset --weights=imagenet
# Apply warp and threshold to an image
python3 paper.py warp --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply warp and threshold to video using the last weights you trained
python3 paper.py warp --weights=last --video=<URL or path to file>
"""
import os
import sys
import json
import glob
import cv2
import time
import datetime
import numpy as np
import skimage.draw
from matplotlib import pyplot as plt
import imutils
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import visualize
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CCA:
def __init__(self, output_process = False):
self.output_process = output_process
def __call__(self, image):
# 2nd argument is either 4 or 8, denoting the type of Connected Component Analysis
(numLabels, labels, stats, centroids) = cv2.connectedComponentsWithStats(image,8, cv2.CV_32S)
max_area = -1
max_area_label = -1
if self.output_process:
print("numlabels -- ",numLabels)
for i in range(1,numLabels):
temp_area = stats[i, cv2.CC_STAT_AREA]
if self.output_process:
print(temp_area)
if temp_area > max_area :
max_area = temp_area
max_area_label = i
res_image = (labels == max_area_label).astype("uint8") * 255
return res_image
class Dilation:
def __init__(self, kernel_size = 3, iterations = 25, output_process = False):
self._kernel_size = kernel_size
self._iterations = iterations
self.output_process = output_process
def __call__(self, image):
start = time.time()
kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE,
(self._kernel_size, self._kernel_size)
)
dilated = cv2.dilate(image,kernel,iterations = self._iterations )
end = time.time()
if self.output_process:
print("After executing Dilation ---" , (end-start))
return dilated
class Closer:
def __init__(self, kernel_size = 3, iterations = 10, output_process = False):
self._kernel_size = kernel_size
self._iterations = iterations
self.output_process = output_process
def __call__(self, image):
start = time.time()
kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE,
(self._kernel_size, self._kernel_size)
)
closed = cv2.morphologyEx(
image,
cv2.MORPH_CLOSE,
kernel,
iterations = self._iterations
)
end = time.time()
if self.output_process:
print("After executing Closer ---" , (end-start))
return closed
class OtsuThresholder:
def __init__(self, thresh1 = 0, thresh2 = 255, output_process = False):
self.output_process = output_process
self.thresh1 = thresh1
self.thresh2 = thresh2
def __call__(self, image):
start = time.time()
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
T_, thresholded1 = cv2.threshold(image, self.thresh1, self.thresh2, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
thresholded2 = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,5,2)
end = time.time()
if self.output_process:
print("After executing Otsu thresholder ---" , (end-start))
return thresholded1,thresholded2
def hand_remove(img):
otsu_obj = OtsuThresholder(thresh1 = 128, thresh2 = 255, output_process = False)
close_obj = Closer(iterations = 5,output_process = False)
dilate_obj = Dilation(iterations = 1,output_process = False)
cca_obj = CCA(output_process = False)
p,q = otsu_obj(img)
p = close_obj(p)
p = cca_obj(~p)
p = dilate_obj(p)
p = q | p
p = dilate_obj(p)
return p
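# A minimal usage sketch of the pipeline above (hypothetical image path,
# shown for illustration only):
#
#   img = cv2.imread("page_photo.jpg")   # BGR photo of a document page
#   binary = hand_remove(img)            # Otsu + closing + CCA + dilation
#   cv2.imwrite("page_binary.png", binary)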
class PaperConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "paper"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + paper
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class PaperDataset(utils.Dataset):
# def load_paper(self, dataset_dir, subset):
# """Load a subset of the Paper dataset.
# dataset_dir: Root directory of the dataset.
# subset: Subset to load: train or val
# """
# # Add classes. We have only one class to add.
# self.add_class("paper", 1, "paper")
# # Train or validation dataset?
# assert subset in ["train", "val"]
# dataset_dir = os.path.join(dataset_dir, subset)
# img_dir = "image/"
# txt_dir = "text/"
# data_path = os.path.join(dataset_dir, img_dir)
# txt_dir = os.path.join(dataset_dir, txt_dir)
# # files = glob.glob(data_path + '/*')
# files = [os.path.normpath(i) for i in glob.glob(data_path + '/*')]
# # print(files)
# #files.sort() #We sort the images in alphabetical order to match them to the xml files containing the annotations of the bounding boxes
# for f1 in files:
# img = cv2.imread(f1)
# height, width = img.shape[:2]
# # print(height, width)
# pp = f1
# pp = pp.split('\\')
# pp = pp[8]
# pp = pp.split('.')
# pp = pp[0]
# img_name = pp + '.jpg'
# print(img_name)
# p = txt_dir + pp + '.txt'
# image_path = data_path + pp + '.jpg'
# file1 = open(p, "r")
# Fc = file1.read()
# Fc = json.loads(Fc)
# Fc = np.array(Fc)
# Fc = Fc.flatten()
# Fc = np.int32(Fc)
# # print(Fc)
# self.add_image(
# "paper",
# image_id=img_name, # use file name as a unique image id
# path=image_path,
# width=width, height=height,
# polygons=Fc)
def load_pp(self, img_name, image_path, width, height, Fc):
"""Load a subset of the Paper dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("paper", 1, "paper")
self.add_image(
"paper",
image_id=img_name, # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=Fc)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a paper dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "paper":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
# print(info)
mask = np.zeros([info["height"], info["width"], 1], dtype=np.uint8)
ycord = [info["polygons"][0],info["polygons"][2],info["polygons"][4],info["polygons"][6]]
xcord = [info["polygons"][1],info["polygons"][3],info["polygons"][5],info["polygons"][7]]
print(xcord)
rr, cc = skimage.draw.polygon(xcord, ycord)
mask[rr, cc, 0] = 1
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "paper":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = PaperDataset()
dataset_train.load_paper(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = PaperDataset()
dataset_val.load_paper(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='heads')
# def gd1(pt,lst):
# pt = pt / 2
# lt =[]
# rt =[]
# for i in range(4):
# if lst[i][0]<=pt:
# lt.append([lst[i][0],lst[i][1]])
# else :
# rt.append([lst[i][0],lst[i][1]])
# return lt,rt
def orientation(o,a,b):
    return (b[0][1]-a[0][1])*(a[0][0]-o[0][0]) - (a[0][1]-o[0][1])*(b[0][0]-a[0][0])
def dist(a,b):
    return (a[0][0]-b[0][0])*(a[0][0]-b[0][0]) + (a[0][1]-b[0][1])*(a[0][1]-b[0][1])
def comp(a,b,po):
ori = orientation(po,a,b)
if ori==0 :
return dist(po,b)>=dist(po,a)
return ori>0
def orient(pts):
global po
if pts.shape[0]!=4:
print("need exactly 4 points")
return pts;
ind = 0
for i in range(4):
if pts[i][0][1]<pts[ind][0][1] or (pts[i][0][1]==pts[ind][0][1] and pts[i][0][0]<pts[ind][0][0]):
ind =i
pts[[0,ind]]= pts[[ind,0]]
for i in range(1,4):
for j in range (i+1,4):
if comp(pts[i],pts[j],pts[0]):
pts[[i,j]]=pts[[j,i]]
return pts
# def gd(lst,pt):
# lt =[]
# rt =[]
# pt = pt / 2 + 50
# rect = np.zeros((4, 2), dtype = "float32")
# for i in range(4):
# if lst[i][0]<=pt:
# lt.append([lst[i][0],lst[i][1]])
# else :
# rt.append([lst[i][0],lst[i][1]])
# # print(lt)
# # print(rt)
# rect[3] = lt[0]
# rect[2] = lt[1]
# rect[0] = rt[0]
# rect[1] = rt[1]
# if lt[0][1]>lt[1][1]:
# rect[3] =lt[1]
# rect[2] =lt[0]
# if rt[0][1]>rt[1][1]:
# rect[0] =rt[1]
# rect[1] =rt[0]
# return rect
def gd(lst):
rect = np.zeros((4, 2), dtype = "float32")
lt =[]
rt =[]
for i in range(4):
for j in range(i+1,4):
if(lst[i][0]>lst[j][0]):
lst[[i,j]]= lst[[j,i]]
lt.append(lst[0])
lt.append(lst[1])
rt.append(lst[2])
rt.append(lst[3])
rect[3] = lt[0] # bl
rect[2] = lt[1] # br
rect[0] = rt[0] # tl
rect[1] = rt[1] # tr
if lt[0][1]>lt[1][1]:
rect[3] =lt[1]
rect[2] =lt[0]
if rt[0][1]>rt[1][1]:
rect[0] =rt[1]
rect[1] =rt[0]
return rect
def order_points(pts,width):
# initialzie a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
width = width / 2
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[2]
rect[2] = pts[0]
# rect[0] = pts[np.argmin(s)]
# rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[1]
rect[3] = pts[3]
# rect[1] = pts[np.argmin(diff)]
# rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
# print("pts---",pts)
# rect = order_points(pts,width)
rect = gd(pts)
# print("rect---",rect)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# print("warped shape--",warped.shape)
# return the warped image
return warped
# def generate_warp(image, mask):
# """Apply warp and threshold effect.
# image: RGB image [height, width, 3]
# mask: instance segmentation mask [height, width, instance count]
# Returns result image.
# """
# # Make a grayscale copy of the image. The grayscale copy still
# # has 3 RGB channels, though.
# gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# # Copy color pixels from the original color image where mask is set
# if mask.shape[-1] > 0:
# # We're treating all instances as one, so collapse the mask into one layer
# mask = (np.sum(mask, -1, keepdims=True) >= 1)
# warp = np.where(mask, image, gray).astype(np.uint8)
# else:
# warp = gray.astype(np.uint8)
# return warp
# def detect_and_warp(model, image_path=None, video_path=None):
# assert image_path or video_path
# class_names = ['BG', 'paper']
# # Image or video?
# if image_path:
# # Run model detection and generate the warp and threshold effect
# print("Running on {}".format(args.image))
# # Read image
# image = skimage.io.imread(args.image)
# # Detect objects
# r = model.detect([image], verbose=1)[0]
# # warp and threshold
# # warp = generate_warp(image, r['masks'])
# visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'], making_image=True)
# file_name = 'warp.png'
# # Save output
# # file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
# # save_file_name = os.path.join(out_dir, file_name)
# # skimage.io.imsave(save_file_name, warp)
# elif video_path:
# import cv2
# # Video capture
# vcapture = cv2.VideoCapture(video_path)
# # width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# # height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# width = 1280
# height = 720
# # fps = vcapture.get(cv2.CAP_PROP_FPS)
# fps = 5
# # Define codec and create video writer
# file_name = "warp_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now())
# vwriter = cv2.VideoWriter(file_name,
# cv2.VideoWriter_fourcc(*'MJPG'),
# fps, (width, height))
# count = 0
# success = True
# #For video, we wish classes keep the same mask in frames, generate colors for masks
# colors = visualize.random_colors(len(class_names))
# while success:
# print("frame: ", count)
# # Read next image
# plt.clf()
# plt.close()
# success, image = vcapture.read()
# if success and count % 5 == 0:
# # OpenCV returns images as BGR, convert to RGB
# image = image[..., ::-1]
# # Detect objects
# r = model.detect([image], verbose=0)[0]
# # warp and threshold
# # warp = generate_warp(image, r['masks'])
# warp = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'], making_video=True)
# # Add image to video writer
# vwriter.write(warp)
# count += 1
# vwriter.release()
# print("Saved to ", file_name)
def generate_warp(image, mask):
"""Apply warp and threshold effect.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
# Make a grayscale copy of the image. The grayscale copy still
# has 3 RGB channels, though.
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# Copy color pixels from the original color image where mask is set
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) >= 1)
mask1 = ~mask
warp = np.where(mask, image, 0).astype(np.uint8)
warp = np.where(mask1, warp, 255).astype(np.uint8)
else:
warp = gray.astype(np.uint8)
return warp
# def detect_and_warp(model, image_path=None, video_path=None):
# assert image_path or video_path
# # Image or video?
# if image_path:
# # Run model detection and generate the warp and threshold effect
# print("Running on {}".format(args.image))
# # Read image
# image = skimage.io.imread(args.image)
# # Detect objects
# r = model.detect([image], verbose=1)[0]
# # warp and threshold
# warp = generate_warp(image, r['masks'])
# # Save output
# file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
# skimage.io.imsave(file_name, warp)
# elif video_path:
# import cv2
# # Video capture
# vcapture = cv2.VideoCapture(video_path)
# width1 = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# height1 = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# width = 500
# height = 888
# fps = vcapture.get(cv2.CAP_PROP_FPS)
# # fps = 5
# # Define codec and create video writer
# file_name = "warp_{:%Y%m%dT%H%M%S}.mp4".format(datetime.datetime.now())
# vwriter = cv2.VideoWriter(file_name,
# cv2.VideoWriter_fourcc(*'X264'),
# fps, (width, height))
# count = 0
# success = True
# sm1 = [0, 0]
# succ = False
# while success:
# print("frame: ", count)
# # Read next image
# success, image = vcapture.read()
# orig = image
# if success:
# # OpenCV returns images as BGR, convert to RGB
# image = image[..., ::-1]
# # Detect objects
# if count % 15 ==0:
# r = model.detect([image], verbose=0)[0]
# # warp and threshold
# warp = generate_warp(image, r['masks'])
# # RGB -> BGR to save image to video
# warp = warp[..., ::-1]
# # print(warp.shape)
# gry = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
# kernel = np.ones((8,8), np.uint8)
# warp = cv2.dilate(gry,kernel)
# gry = cv2.GaussianBlur(gry, (5, 5), 0)
# edged = cv2.Canny(gry, 75, 200)
# # print(edged.shape)
# # TEST 01
# cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cnts = imutils.grab_contours(cnts)
# cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# # loop over the contours
# for c in cnts:
# peri = cv2.arcLength(c, True)
# approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# # if our approximated contour has four points, then we
# # can assume that we have found our screen
# if len(approx) == 4:
# screenCnt = approx
# succ = True
# break
# edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# if succ:
# cv2.drawContours(edged, [screenCnt], -1, (0, 255, 0), 2)
# # print("edged shape--",edged.shape)
# # edged = cv2.resize(edged, (width,height), interpolation = cv2.INTER_AREA)
# # TEST 01 END
# # edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# # Add image to video writer
# # screenCnt1 = screenCnt
# print("screenCnt---",screenCnt)
# sm = sum(screenCnt)
# sm = sm[0]
# print("sum----",sm)
# # screenCnt = orient(screenCnt)
# # print("Here lies Bellman--",screenCnt)
# if (((sm[0]<sm1[0]-50) or (sm[0] > sm1[0] + 50)) or ((sm[1] < sm1[1]-50) or (sm[1] > sm1[1] + 50))):
# screenCnt1 = screenCnt
# sm1 = sm
# print("hereeee")
# warped = four_point_transform(orig, screenCnt1.reshape(4, 2))
# print("sum1---",sm1)
# print("screenCnt1---",screenCnt1)
# # convert the warped image to grayscale, then threshold it
# # to give it that 'black and white' paper effect
# # warped = cv2.cvtColor(warped)
# # T = threshold_local(warped, 11, offset = 10, method = "gaussian")
# # warped = (warped > T).astype("uint8") * 255
# # print("warped111 shape--",warped.shape)
# warped = cv2.resize(warped, (width,height), interpolation = cv2.INTER_AREA)
# print("warpedres shape--",warped.shape)
# vwriter.write(warped)
# count += 1
# vwriter.release()
# print("Saved to ", file_name)
def detect_and_warp(model, image_path=None, video_path=None):
assert image_path or video_path
# Image or video?
if image_path:
# Run model detection and generate the warp and threshold effect
print("Running on {}".format(args.image))
# Read image
image = skimage.io.imread(args.image)
# Detect objects
r = model.detect([image], verbose=1)[0]
# warp and threshold
warp = generate_warp(image, r['masks'])
# Save output
file_name = "warp_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimage.io.imsave(file_name, warp)
elif video_path:
import cv2
# Video capture
vcapture = cv2.VideoCapture(video_path)
width1 = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height1 = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = 500
height = 888
fps = vcapture.get(cv2.CAP_PROP_FPS)
# fps = 5
# Define codec and create video writer
file_name = "warp_{:%Y%m%dT%H%M%S}.mp4".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'X264'),
fps, (width, height))
count = 0
success = True
sm1 = [0, 0]
succ = False
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
orig = image
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
if count % 15 ==0:
r = model.detect([image], verbose=0)[0]
# warp and threshold
warp = generate_warp(image, r['masks'])
# RGB -> BGR to save image to video
warp = warp[..., ::-1]
print(warp.shape)
gry = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
kernel = np.ones((8,8), np.uint8)
warp = cv2.dilate(gry,kernel)
gry = cv2.GaussianBlur(gry, (5, 5), 0)
edged = cv2.Canny(gry, 75, 200)
print(edged.shape)
# TEST 01
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# loop over the contours
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# if our approximated contour has four points, then we
# can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
succ = True
break
edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
if succ:
cv2.drawContours(edged, [screenCnt], -1, (0, 255, 0), 2)
# print("edged shape--",edged.shape)
# edged = cv2.resize(edged, (width,height), interpolation = cv2.INTER_AREA)
# TEST 01 END
# edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR)
# Add image to video writer
# screenCnt1 = screenCnt
# print("screenCnt---",screenCnt)
sm = sum(screenCnt)
sm = sm[0]
# print("sum----",sm)
# screenCnt = orient(screenCnt)
# print("Here lies Bellman--",screenCnt)
if (((sm[0]<sm1[0]-50) or (sm[0] > sm1[0] + 50)) or ((sm[1] < sm1[1]-50) or (sm[1] > sm1[1] + 50))):
screenCnt1 = screenCnt
sm1 = sm
warped = four_point_transform(orig, screenCnt1.reshape(4, 2))
# print("sum1---",sm1)
# print("screenCnt1---",screenCnt1)
# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
# warped = cv2.cvtColor(warped)
# T = threshold_local(warped, 11, offset = 10, method = "gaussian")
# warped = (warped > T).astype("uint8") * 255
# print("warped111 shape--",warped.shape)
warped = cv2.resize(warped, (width,height), interpolation = cv2.INTER_AREA)
# print("warpedres shape--",warped.shape)
res = hand_remove(warped)
vwriter.write(res)
count += 1
vwriter.release()
print("Saved to ", file_name)
############################################################
# RLE Encoding
############################################################
def rle_encode(mask):
"""Encodes a mask in Run Length Encoding (RLE).
Returns a string of space-separated values.
"""
assert mask.ndim == 2, "Mask must be of shape [Height, Width]"
# Flatten it column wise
m = mask.T.flatten()
# Compute gradient. Equals 1 or -1 at transition points
    g = np.diff(np.concatenate([[0], m, [0]]))
""" FuelMap file building tools
"""
import sys
import os
from math import pow, sqrt
from shutil import copy2
import numpy as np
import pkg_resources
import yaml
from netCDF4 import Dataset
import f90nml
from .fuels import (
_ROSMODEL_FUELCLASS_REGISTER,
_ROSMODEL_NB_PROPERTIES,
BalbiFuel,
)
from .patch import (
DataPatch,
LinePatch,
RectanglePatch,
)
from .fuel_database import (
FuelDatabase,
)
from .utility import (
fire_array_2d_to_3d,
fill_fuel_array_from_patch,
)
class FuelMap:
"""Class for fuel map construction
    This `FuelMap` class allows you to create a fuel map object and save it in netCDF format to be used as an input for a MesoNH-Blaze simulation.
In order to build a fuel map the following file tree is needed:
.. code-block:: text
my_project/
├─ create_my_fuelmap.py
├─ EXSEG1.nam
├─ inifile_MesoNH.des
├─ inifile_MesoNH.nc
    The MesoNH namelist `EXSEG1.nam` is used to retrieve information about the fire mesh,
    the fire rate of spread parameterization and the MesoNH initialization files.
    The initialization file (here `inifile_MesoNH.nc`) is used to get atmospheric mesh information.
The MesoNH file `inifile_MesoNH.des` will be duplicated to `FuelMap.des` in order to match MesoNH file reader requirements.
After having set all patches and data treatments to the `FuelMap.fuelmaparray`,
the :func:`~pyrolib.fuels.FuelMap.write` method can be called to save the file `FuelMap.nc`.
After this operation, the project folder should be:
.. code-block:: text
my_project/
├─ create_my_fuelmap.py
├─ EXSEG1.nam
├─ FuelMap.des
├─ FuelMap.nc
├─ FuelMap2d.nc
├─ inifile_MesoNH.des
├─ inifile_MesoNH.nc
    The file `FuelMap2d.nc` is optionally created through the :func:`~pyrolib.fuels.FuelMap.write` method.
It contains the same information that `FuelMap.nc` but conserves the 2d format of data to be more readable for error checking.
It is recommended to use this file to check your set up.
Parameters
----------
fuel_db : pyrolib.FuelDatabase
fuel database containing fuel data.
namelistname : str, optional
MesoNH namelist name (default: 'EXSEG1.nam').
MesoNHversion : str, optional
Version of MesoNH needed (>=5.4.4) (default: '5.5.0')
"""
def __init__(self, fuel_db: FuelDatabase, namelistname: str="EXSEG1.nam", MesoNHversion: str="5.5.0", workdir: str=""):
self.fuel_db = fuel_db
self.namelist = namelistname
self.mnh_version = MesoNHversion
self.workdir = workdir
# Read default values for MNHBLAZE namelist from Default_MNH_namelist.yml
# Values should be compliant with default_desfmn.f90
defaultpath = pkg_resources.resource_stream("pyrolib", "/".join(("data", "Default_MNH_namelist.yml")))
with open(defaultpath.name, "r") as ymlfile:
alldata = yaml.safe_load(ymlfile)
current_version = f"v{self.mnh_version.replace('.', '')}"
# set default value for namelist variables
self.mnhinifile = alldata[current_version]["mnhinifile"]
self.cpropag_model = alldata[current_version]["cpropag_model"]
self.nrefinx = alldata[current_version]["nrefinx"]
self.nrefiny = alldata[current_version]["nrefiny"]
# Default values
self.xfiremeshsize = np.array([0, 0]) # Fire mesh size (dxf, dyf)
self.firemeshsizes = None
self.xfiremesh = None
self.yfiremesh = None
self.nx = 0
self.ny = 0
self.__get_info_from_namelist()
self.nbpropertiesfuel = _ROSMODEL_NB_PROPERTIES[self.cpropag_model]
self.fuel_index_correspondance = {}
# allocate fuel data array
self.fuelmaparray = np.zeros((self.nbpropertiesfuel, self.firemeshsizes[1], self.firemeshsizes[0]))
self.ignitionmaparray = 1e6 * np.ones((self.firemeshsizes[1], self.firemeshsizes[0]))
self.walkingignitionmaparray = -1.0 * np.ones_like(self.ignitionmaparray)
def __get_info_from_namelist(self):
"""Retrieve informations on the MesoNH-Blaze run from namelist and initialization file"""
if self.workdir == "":
projectpath = os.getcwd()
else:
projectpath = self.workdir
# Check if Namelist exists
if not os.path.exists(f"{projectpath:s}/{self.namelist:s}"):
raise IOError(f"File {self.namelist:s} not found")
# get MNH init file name
mnh_nml = f90nml.read(f"{projectpath:s}/{self.namelist:s}")
# Check parameters in namelist
if "nam_lunitn" in mnh_nml.keys():
if "cinifile" in mnh_nml["nam_lunitn"].keys():
self.mnhinifile = mnh_nml["nam_lunitn"]["cinifile"]
if "nam_fire" in mnh_nml.keys():
if "cpropag_model" in mnh_nml["nam_fire"].keys():
self.cpropag_model = mnh_nml["nam_fire"]["cpropag_model"]
if "nrefinx" in mnh_nml["nam_fire"].keys():
self.nrefinx = mnh_nml["nam_fire"]["nrefinx"]
if "nrefiny" in mnh_nml["nam_fire"].keys():
self.nrefiny = mnh_nml["nam_fire"]["nrefiny"]
# Check if INIFILE.des exists
if not os.path.exists(f"{projectpath:s}/{self.mnhinifile:s}.des"):
raise IOError(f"File {self.mnhinifile:s}.des not found")
# Check if INIFILE.nc exists
if not os.path.exists(f"{projectpath:s}/{self.mnhinifile:s}.nc"):
raise IOError(f"File {self.mnhinifile:s}.nc not found")
# Import XHAT and YHAT
MNHData = Dataset(f"{projectpath:s}/{self.mnhinifile:s}.nc")
self.xhat = MNHData.variables["XHAT"][:]
self.yhat = MNHData.variables["YHAT"][:]
MNHData.close()
# get sizes
self.nx = len(self.xhat)
self.ny = len(self.yhat)
self.xfiremeshsize[0] = float(self.xhat[1] - self.xhat[0]) / float(self.nrefinx)
        self.xfiremeshsize[1] = float(self.yhat[1] - self.yhat[0]) / float(self.nrefiny)
self.firemeshsizes = [self.nx * self.nrefinx, self.ny * self.nrefiny]
# Get mesh position of fuel cells
self.xfiremesh = np.linspace(
self.xhat[0], self.xhat[-1] + (self.xhat[1] - self.xhat[0]), self.nx * self.nrefinx, endpoint=False
)
self.xfiremesh += 0.5 * (self.xfiremesh[1] - self.xfiremesh[0])
self.yfiremesh = np.linspace(
self.yhat[0], self.yhat[-1] + (self.yhat[1] - self.yhat[0]), self.ny * self.nrefiny, endpoint=False
)
self.yfiremesh += 0.5 * (self.yfiremesh[1] - self.yfiremesh[0])
def __add_rectangle_patch(
self, xpos: tuple, ypos: tuple, fuel_key: str = None, ignition_time: float = None, unburnable: bool = None
):
"""Add rectangle patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
        Three data filling methods are available (one needs to be chosen):
- Fuel : assign a fuel type in the masked area through its index.
The fuel assigned depends on its index and the selected rate of spread parameterization.
- Ignition : Specify an ignition time for the whole patch.
- Unburnable : Specify that the patch can not burn (ROS = 0 m s-1 in that area).
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ (x1, y1) │
│ +----------------------+ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxx Fuel Patch xxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ +----------------------+ │
│ (x0, y0) │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
fuel_key : str, optional
Key of Fuel in FuelDatabase to place in the patch (default: `None`)
ignition_time : float, optional
Ignition time of patch (default: `None`)
unburnable : bool, optional
Flag to set patch as a non burnable area (default: `None`)
"""
# Create mask
P = RectanglePatch(self.fuelmaparray, xpos, ypos, self.xfiremesh, self.yfiremesh, self.xfiremeshsize)
# assign data
self.__assign_data_to_data_array(P, fuel_key, None, ignition_time, unburnable)
def add_fuel_rectangle_patch(self, xpos: tuple, ypos: tuple, fuel_key: str):
"""Add rectangle fuel patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It assigns a fuel type in the masked area through its index.
The fuel assigned depends on its index and the selected rate of spread parameterization in the Méso-NH namelist.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ (x1, y1) │
│ +----------------------+ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxx Fuel Patch xxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ +----------------------+ │
│ (x0, y0) │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
fuel_key : str
key of Fuel in fuel database to place in the patch
"""
self.__add_rectangle_patch(xpos, ypos, fuel_key=fuel_key)
def add_unburnable_rectangle_patch(self, xpos: tuple, ypos: tuple):
"""Add rectangle unburnable patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It specifies that the patch can not burn (ROS = 0 m s-1 in that area).
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ (x1, y1) │
│ +----------------------+ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxx Patch xxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ +----------------------+ │
│ (x0, y0) │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
"""
self.__add_rectangle_patch(xpos, ypos, unburnable=True)
def add_ignition_rectangle_patch(self, xpos: tuple, ypos: tuple, ignition_time: float):
"""Add rectangle patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It specifies an ignition time for the whole patch.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ (x1, y1) │
│ +----------------------+ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxx Patch xxxxx│ │
│ │xxxxx xxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ │xxxxxxxxxxxxxxxxxxxxxx│ │
│ +----------------------+ │
│ (x0, y0) │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
ignition_time : float
Ignition time of patch
"""
self.__add_rectangle_patch(xpos, ypos, ignition_time=ignition_time)
def __add_line_patch(
self,
xpos: tuple,
ypos: tuple,
fuel_key: str = None,
walking_ignition_times: list = None,
ignition_time: float = None,
unburnable: bool = None,
):
"""Add line patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
        Four data filling methods are available (one needs to be chosen):
- Fuel : assign a fuel type in the masked area through its index.
The fuel assigned depends on its index and the selected rate of spread parameterization.
- Walking Ignition : Specify an ignition time `t_a` for point A (x0, y0)
and `t_b` for point B (x1, y1) where `t_b > t_a`.
- Ignition : Specify an ignition time for the whole patch.
- Unburnable : Specify that the patch can not burn (ROS = 0 m s-1 in that area).
        The mask is determined by a Bresenham algorithm.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ -> (x1, y1) │
│ _/ │
│ __/ │
│ _/ │
│ _/ │
│ __/ │
│ (x0, y0) _/ │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
fuel_key : str, optional
            Key of Fuel in FuelDatabase to place in the patch (default: `None`)
walking_ignition_times : list, optional
Ignition times of points A and B of the ignition line, respectively (default: `None`)
ignition_time : float, optional
Ignition time of patch (default: `None`)
unburnable : bool, optional
Flag to set patch as a non burnable area (default: `None`)
"""
# Create mask
patch = LinePatch(self.fuelmaparray, xpos, ypos, self.xfiremesh, self.yfiremesh, self.xfiremeshsize)
# # assign data
self.__assign_data_to_data_array(patch, fuel_key, walking_ignition_times, ignition_time, unburnable)
def add_fuel_line_patch(self, xpos: tuple, ypos: tuple, fuel_key: str):
"""Add line patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It assigns a fuel type in the masked area through its index.
The fuel assigned depends on its index and the selected rate of spread parameterization.
        The mask is determined by a Bresenham algorithm.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ -> (x1, y1) │
│ _/ │
│ __/ │
│ _/ │
│ _/ │
│ __/ │
│ (x0, y0) _/ │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
fuel_key : str
Key of Fuel in fuel database to place in the patch
"""
self.__add_line_patch(xpos, ypos, fuel_key=fuel_key)
def add_walking_ignition_line_patch(self, xpos: tuple, ypos: tuple, walking_ignition_times: list):
"""Add line patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It specifies an ignition time `t_a` for point A (x0, y0)
and `t_b` for point B (x1, y1) where `t_b > t_a`.
        The mask is determined by a Bresenham algorithm.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ -> (x1, y1) │
│ _/ │
│ __/ │
│ _/ │
│ _/ │
│ __/ │
│ (x0, y0) _/ │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
walking_ignition_times : list
Ignition times of points A and B of the ignition line, respectively
"""
self.__add_line_patch(xpos, ypos, walking_ignition_times=walking_ignition_times)
def add_ignition_line_patch(self, xpos: tuple, ypos: tuple, ignition_time: float):
"""Add line patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
It specifies an ignition time for the whole patch.
        The mask is determined by a Bresenham algorithm.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ -> (x1, y1) │
│ _/ │
│ __/ │
│ _/ │
│ _/ │
│ __/ │
│ (x0, y0) _/ │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
ignition_time : float
Ignition time of patch
"""
self.__add_line_patch(xpos, ypos, ignition_time=ignition_time)
def add_unburnable_line_patch(self, xpos: tuple, ypos: tuple):
"""Add line patch between (xpos[0], ypos[0]) and (xpos[1], ypos[1]).
This method first sets the mask corresponding to the following scheme,
then assigns the needed data in the appropriated array.
        It specifies that the patch can not burn (ROS = 0 m s-1 in that area).
        The mask is determined by a Bresenham algorithm.
.. aafig::
+--------------------------------------------------+
│MesoNH domain │
│ │
│ │
│ -> (x1, y1) │
│ _/ │
│ __/ │
│ _/ │
│ _/ │
│ __/ │
│ (x0, y0) _/ │
│ │
+--------------------------------------------------+
Parameters
-----
xpos : tuple
Position of west and east boundaries of the patch
ypos : tuple
Position of south and north boundaries of the patch
"""
self.__add_line_patch(xpos, ypos, unburnable=True)
def __assign_data_to_data_array(
self,
patch: DataPatch,
fuel_key: str = None,
walkingignitiontimes: tuple = None,
ignitiontime: float = None,
unburnable: bool = None,
):
"""
This function assigns data as a function of argument passed
4 types of data can be allocated in the patch:
- Fuel properties
Select a fuel number (it should be contained in the FuelDatabase object loaded in the FuelMap object).
The corresponding fuel properties of the selected Fuel are assigned in the patch
- Walking ignition times (only for LinePatch)
allocate ignition time from point A (x0, y0) at ta to point B (x1, y1) at tb with tb > ta
The ignition time along the line is linearly interpolated with the distance relative to point A.
- Ignition time
Modify the ignition map with the specified time.
The whole patch will ignite at the same time.
- Unburnable
Every fuel property is set to 0 in the patch leading to a no propagation zone.
            Be careful, when implementing new fire spread parameterizations, not to introduce a division by zero through this process.
"""
# case 1 : FuelIndex is set
if isinstance(fuel_key, str):
if fuel_key in self.fuel_db.fuels.keys():
# check if needed fuel (corresponding fuel class and number of FuelIndex)
needed_fuelclass = _ROSMODEL_FUELCLASS_REGISTER[self.cpropag_model]
if needed_fuelclass in self.fuel_db.fuels[fuel_key].keys():
# retrieve fuel index
if fuel_key in self.fuel_index_correspondance.keys():
fuelindex = self.fuel_index_correspondance[fuel_key]
else:
maxindex = 0
for idx in self.fuel_index_correspondance.values():
maxindex = max(maxindex, idx)
fuelindex = maxindex + 1
self.fuel_index_correspondance[fuel_key] = fuelindex
# create property vector for this fuel
propvector = self.fuel_db.fuels[fuel_key][needed_fuelclass].get_property_vector(fuelindex, self.nbpropertiesfuel)
self.fuelmaparray = fill_fuel_array_from_patch(
self.fuelmaparray,
patch.datamask,
propvector,
self.nbpropertiesfuel,
self.firemeshsizes[0],
self.firemeshsizes[1],
)
else:
print(f"Fuel < {fuel_key} > do not exist in database with the needed Fuel Class < {needed_fuelclass} >.")
else:
print(f"Fuel < {fuel_key} > not found in the fuel database. Nothing appended")
return
# case 2 : walking ignition process
# (only for LinePatch)
if walkingignitiontimes is not None:
# compute total distance between points A and B
totaldist = sqrt(pow(patch.xpos[1] - patch.xpos[0], 2) + pow(patch.ypos[1] - patch.ypos[0], 2))
# get time difference between tb and ta
ignitiondt = walkingignitiontimes[1] - walkingignitiontimes[0]
# compute ignition time for each line point
for ind in patch.line:
# distance from A
dist = sqrt(pow(self.xfiremesh[ind[0]] - patch.xpos[0], 2) + pow(self.yfiremesh[ind[1]] - patch.ypos[0], 2))
# linear interpolation
self.walkingignitionmaparray[ind[1], ind[0]] = walkingignitiontimes[0] + dist * ignitiondt / totaldist
return
# case 3 : ignition of whole patch is set
if ignitiontime is not None:
self.ignitionmaparray[patch.datamask == 1] = ignitiontime
return
# case 4 : Unburnable is set
if unburnable is not None:
# create property vector of 0
propvector = np.zeros(self.nbpropertiesfuel)
self.fuelmaparray = fill_fuel_array_from_patch(
self.fuelmaparray,
patch.datamask,
propvector,
self.nbpropertiesfuel,
self.firemeshsizes[0],
self.firemeshsizes[1],
)
return
print("WARNING: No information given on what to do. Nothing done")
def dump_mesonh(self, verbose: int = 0):
"""Write Fuel map as netCFD file named FuelMap.nc for Méso-NH
Parameters
----------
verbose : int, optional
verbose level (0: no prints, 1: low verbosity, 2: high verbosity) (default: 0)
"""
if self.workdir == "":
projectpath = os.getcwd()
else:
projectpath = self.workdir
# copy .des file
copy2(f"{projectpath:s}/{self.mnhinifile:s}.des", f"{projectpath:s}/FuelMap.des")
# Create new netcdf file to store fuel data
if verbose >= 1:
print(f">>> Create FuelMap.nc")
NewFile = Dataset(f"{projectpath:s}/FuelMap.nc", "w")
if verbose >= 2:
print(f">> Store MesoNH file info")
# need to be compliant with MesoNH output files nomenclature
NewFile.Conventions = "CF-1.7 COMODO-1.4"
NewFile.MNH_REAL = "8"
NewFile.MNH_INT = "4"
NewFile.MNH_cleanly_closed = "yes"
NewFile.MNH_REDUCE_DIMENSIONS_IN_FILES = "1"
NewFile.createDimension("X", self.nx)
NewFile.createDimension("Y", self.ny)
NewFile.createDimension("F", self.nrefinx * self.nrefiny)
NewFile.createDimension("size3", 3)
NewFile.createDimension("char16", 16)
MNHversion = np.array(self.mnh_version.split("."), dtype=int)
varia = NewFile.createVariable("MNHVERSION", int, ("size3"), fill_value=-2147483647)
varia.long_name = "MesoNH version"
varia.valid_min = np.intc(-2147483646)
varia.valid_max = np.intc(2147483647)
varia[:] = MNHversion
varia = NewFile.createVariable("MASDEV", int, ())
varia.long_name = "MesoNH version (without bugfix)"
varia = MNHversion[0]
varia = NewFile.createVariable("BUGFIX", int, ())
varia.long_name = "MesoNH bugfix number"
varia = MNHversion[1]
varia = NewFile.createVariable("STORAGE_TYPE", "c", ("char16"))
varia.long_name = "STORAGE_TYPE"
varia.comment = "Storage type for the information written in the FM files"
varia[:] = "TT "
varia = NewFile.createVariable("FILETYPE", "c", ("char16"))
varia.long_name = "type of this file"
varia[:] = "BlazeData "
# x grid
if verbose >= 2:
print(f">> Store grid")
ni = NewFile.createVariable("X", np.float64, ("X"))
ni.COMMENT = "x-dimension"
ni.GRID = np.intc(0)
ni.standard_name = "x coordinates"
ni.units = "m"
ni.axis = "X"
ni[:] = self.xhat
# y grid
nj = NewFile.createVariable("Y", np.float64, ("Y"))
nj.COMMENT = "y-dimension"
nj.GRID = np.intc(0)
nj.standard_name = "y coordinates"
nj.units = "m"
nj.axis = "Y"
nj[:] = self.yhat
# fire grid
firelevel = NewFile.createVariable("F", np.float64, ("F"))
firelevel.COMMENT = "fire-dimension"
firelevel.GRID = np.intc(0)
firelevel.standard_name = "Fire dimension"
firelevel.axis = "F"
firelevel[:] = np.array(np.arange(0, self.nrefinx * self.nrefiny), dtype=np.float64)
# ignition map
if verbose >= 2:
print(f">> Store ignition map")
IgnitionNC = NewFile.createVariable("Ignition", np.float64, ("F", "Y", "X"))
IgnitionNC.COMMENT = "Ignition map"
        IgnitionNC.GRID = np.intc(4)
import copy
from logging import getLogger
from collections import deque
import os
import gym
import numpy as np
import cv2
from pfrl.wrappers import ContinuingTimeLimit, RandomizeAction, Monitor
from pfrl.wrappers.atari_wrappers import ScaledFloatFrame, LazyFrames
cv2.ocl.setUseOpenCL(False)
logger = getLogger(__name__)
def wrap_env(
env, test,
monitor, outdir,
frame_skip,
gray_scale, frame_stack,
randomize_action, eval_epsilon,
action_choices):
# wrap env: time limit...
# Don't use `ContinuingTimeLimit` for testing, in order to avoid unexpected behavior on submissions.
# (Submission utility regards "done" as an episode end, which will result in endless evaluation)
if not test and isinstance(env, gym.wrappers.TimeLimit):
logger.info('Detected `gym.wrappers.TimeLimit`! Unwrap it and re-wrap our own time limit.')
env = env.env
max_episode_steps = env.spec.max_episode_steps
env = ContinuingTimeLimit(env, max_episode_steps=max_episode_steps)
# wrap env: observation...
# NOTE: wrapping order matters!
if test and monitor:
env = Monitor(
env, os.path.join(outdir, env.spec.id, 'monitor'),
mode='evaluation' if test else 'training', video_callable=lambda episode_id: True)
if frame_skip is not None:
env = FrameSkip(env, skip=frame_skip)
if gray_scale:
env = GrayScaleWrapper(env, dict_space_key='pov')
env = ObtainPoVWrapper(env)
env = MoveAxisWrapper(env, source=-1, destination=0) # convert hwc -> chw as Pytorch requires.
env = ScaledFloatFrame(env)
if frame_stack is not None and frame_stack > 0:
env = FrameStack(env, frame_stack, channel_order='chw')
env = ClusteredActionWrapper(env, clusters=action_choices)
if randomize_action:
env = RandomizeAction(env, eval_epsilon)
return env
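# A minimal usage sketch (hypothetical env id and k-means action centroids;
# the argument values are illustrative only):
#
#   env = gym.make("MineRLTreechop-v0")
#   env = wrap_env(env, test=False, monitor=False, outdir="results",
#                  frame_skip=4, gray_scale=False, frame_stack=4,
#                  randomize_action=False, eval_epsilon=0.001,
#                  action_choices=kmeans_action_centroids)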
class FrameSkip(gym.Wrapper):
"""Return every `skip`-th frame and repeat given action during skip.
Note that this wrapper does not "maximize" over the skipped frames.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
total_reward = 0.0
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
class FrameStack(gym.Wrapper):
def __init__(self, env, k, channel_order='hwc', use_tuple=False):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.observations = deque([], maxlen=k)
self.stack_axis = {'hwc': 2, 'chw': 0}[channel_order]
self.use_tuple = use_tuple
if self.use_tuple:
pov_space = env.observation_space[0]
inv_space = env.observation_space[1]
else:
pov_space = env.observation_space
low_pov = np.repeat(pov_space.low, k, axis=self.stack_axis)
high_pov = np.repeat(pov_space.high, k, axis=self.stack_axis)
pov_space = gym.spaces.Box(low=low_pov, high=high_pov, dtype=pov_space.dtype)
if self.use_tuple:
low_inv = np.repeat(inv_space.low, k, axis=0)
high_inv = np.repeat(inv_space.high, k, axis=0)
inv_space = gym.spaces.Box(low=low_inv, high=high_inv, dtype=inv_space.dtype)
self.observation_space = gym.spaces.Tuple(
(pov_space, inv_space))
else:
self.observation_space = pov_space
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.observations.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.observations.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.observations) == self.k
if self.use_tuple:
frames = [x[0] for x in self.observations]
inventory = [x[1] for x in self.observations]
return (LazyFrames(list(frames), stack_axis=self.stack_axis),
LazyFrames(list(inventory), stack_axis=0))
else:
return LazyFrames(list(self.observations), stack_axis=self.stack_axis)
class ObtainPoVWrapper(gym.ObservationWrapper):
"""Obtain 'pov' value (current game display) of the original observation."""
def __init__(self, env):
super().__init__(env)
self.observation_space = self.env.observation_space.spaces['pov']
def observation(self, observation):
return observation['pov']
class UnifiedObservationWrapper(gym.ObservationWrapper):
"""Take 'pov', 'compassAngle', 'inventory' and concatenate with scaling.
Each element of 'inventory' is converted to a square whose side length is region_size.
The color of each square is correlated to the reciprocal of (the number of the corresponding item + 1).
"""
def __init__(self, env, region_size=8):
super().__init__(env)
self._compass_angle_scale = 180 / 255 # NOTE: `ScaledFloatFrame` will scale the pixel values with 255.0 later
self.region_size = region_size
pov_space = self.env.observation_space.spaces['pov']
low_dict = {'pov': pov_space.low}
high_dict = {'pov': pov_space.high}
if 'compassAngle' in self.env.observation_space.spaces:
compass_angle_space = self.env.observation_space.spaces['compassAngle']
low_dict['compassAngle'] = compass_angle_space.low
high_dict['compassAngle'] = compass_angle_space.high
if 'inventory' in self.env.observation_space.spaces:
inventory_space = self.env.observation_space.spaces['inventory']
low_dict['inventory'] = {}
high_dict['inventory'] = {}
for key in inventory_space.spaces.keys():
low_dict['inventory'][key] = inventory_space.spaces[key].low
high_dict['inventory'][key] = inventory_space.spaces[key].high
low = self.observation(low_dict)
high = self.observation(high_dict)
self.observation_space = gym.spaces.Box(low=low, high=high)
def observation(self, observation):
obs = observation['pov']
pov_dtype = obs.dtype
if 'compassAngle' in observation:
compass_scaled = observation['compassAngle'] / self._compass_angle_scale
compass_channel = np.ones(shape=list(obs.shape[:-1]) + [1], dtype=pov_dtype) * compass_scaled
obs = np.concatenate([obs, compass_channel], axis=-1)
if 'inventory' in observation:
assert len(obs.shape[:-1]) == 2
region_max_height = obs.shape[0]
region_max_width = obs.shape[1]
rs = self.region_size
if min(region_max_height, region_max_width) < rs:
raise ValueError("'region_size' is too large.")
num_element_width = region_max_width // rs
inventory_channel = np.zeros(shape=list(obs.shape[:-1]) + [1], dtype=pov_dtype)
for idx, key in enumerate(observation['inventory']):
item_scaled = np.clip(255 - 255 / (observation['inventory'][key] + 1), # Inversed
0, 255)
item_channel = np.ones(shape=[rs, rs, 1], dtype=pov_dtype) * item_scaled
width_low = (idx % num_element_width) * rs
height_low = (idx // num_element_width) * rs
if height_low + rs > region_max_height:
raise ValueError("Too many elements on 'inventory'. Please decrease 'region_size' of each component")
inventory_channel[height_low:(height_low + rs), width_low:(width_low + rs), :] = item_channel
            obs = np.concatenate([obs, inventory_channel], axis=-1)
        return obs
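        # Worked example (illustrative): with a 64x64 pov and region_size=8, up to
        # 64 // 8 = 8 inventory items fit per row; item k occupies the 8x8 square at
        # row (k // 8), column (k % 8) of the appended channel, and its value approaches
        # 255 as the item count grows (255 * (1 - 1 / (count + 1))).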
# DMD algorithms by <NAME>.
#
# TODO:
# - Should we create an ABC interface for DMD?
# - __init__.py and separate files
#
import numpy as np
from numpy.linalg import svd, pinv, eig
from scipy.linalg import expm
from .process import _threshold_svd, dag
class DMD:
def __init__(self, X2, X1, ts, **kwargs):
""" X2 = A X1
Args:
X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
ts (:obj:`ndarray` of float): Time measurements
**kwargs: see Keyword arguments.
Keyword arguments:
threshold (real, int): Truncate the singular values associated with DMD modes. default None.
threshold_type (str): One of {'number', 'percent'}. default 'percent'.
Attributes:
X2 (:obj:`ndarray` of float): Left side data matrix
X1 (:obj:`ndarray` of float): Right side data matrix
t0 (float): Initial time.
dt (float): Step size.
orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
A (:obj:`ndarray` of float): Learned drift operator.
Atilde (:obj:`ndarray` of float): Projected A.
eigs (list of float): Eigenvalues of Atilde.
modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
"""
self.X2 = X2
self.X1 = X1
self.t0 = ts[0]
self.dt = ts[1] - ts[0]
self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
# I. Compute SVD
threshold = kwargs.get('threshold', None)
if threshold is None:
U, S, Vt = svd(self.X1, full_matrices=False)
else:
threshold_type = kwargs.get('threshold_type', 'percent')
U, S, Vt = _threshold_svd(self.X1, threshold, threshold_type)
# II: Compute operators: X2 = A X1 and Atilde = U*AU
        self.Atilde = dag(U) @ self.X2 @ dag(Vt) @ np.diag(1 / S)
self.A = self.X2 @ dag(Vt) @ np.diag(1 / S) @ dag(U)
# III. DMD Modes
# Atilde W = W Y (Eigendecomposition)
        self.eigs, W = eig(self.Atilde)
# Two versions (eigenvectors of A)
# (i) DMD_exact = X2 V S^-1 W
# (ii) DMD_proj = U W
dmd_modes = kwargs.get('dmd_modes', 'exact')
if dmd_modes == 'exact':
self.modes = self.X2 @ dag(Vt) @ np.diag(1 / S) @ W
elif dmd_modes == 'projected':
self.modes = U @ W
else:
raise ValueError('In DMD initialization, unknown dmd_mode type.')
@classmethod
def from_full(cls, X, ts, **kwargs):
X1 = X[:, :-1]
X2 = X[:, 1:]
return cls(X2, X1, ts, **kwargs)
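    # Usage sketch (hypothetical): fit DMD to a snapshot matrix whose columns are states
    # at successive times, then reconstruct or extrapolate with the learned eigenvalues
    # and modes.
    #
    #     ts = np.linspace(0, 10, 101)
    #     X = np.array([np.cos(ts), np.sin(ts)])           # (n_states, n_times)
    #     model = DMD.from_full(X, ts)
    #     X_hat = model.predict_dst()                      # reconstruction on training times
    #     X_new = model.predict_cts(ts=np.linspace(10, 12, 21), x0=X[:, -1])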
def time_spectrum(self, ts, system='discrete'):
"""Returns a continuous approximation to the time dynamics of A.
Note that A_dst = e^(A_cts dt). Suppose (operator, eigs) pairs are denoted (A_dst, Y) for the discrete case
        and (A_cts, Omega) for the continuous case. The eigenvalue correspondence is Omega = log(Y) / dt.
Args:
ts (:obj:`ndarray` of float): Times.
system ({'continuous', 'discrete'}): default 'discrete'.
Returns:
:obj:`ndarray` of float: Evaluations of modes at ts.
"""
if np.isscalar(ts):
# Cast eigs to complex numbers for logarithm
if system == 'discrete':
omega = np.log(self.eigs + 0j) / self.dt
elif system == 'continuous':
omega = self.eigs + 0j
else:
raise ValueError('In time_spectrum, invalid system value.')
return np.exp(omega * (ts - self.t0))
else:
return np.array([self.time_spectrum(it, system=system) for it in ts]).T
def _predict(self, ts, x0, system):
left = self.modes
right = pinv(self.modes) @ x0
if np.isscalar(ts):
return left @ np.diag(self.time_spectrum(ts, system)) @ right
else:
return np.array([left @ np.diag(self.time_spectrum(it, system)) @ right for it in ts]).T
def predict_dst(self, ts=None, x0=None):
"""Predict the future state using continuous approximation to the discrete A.
Args:
ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
x0 (:obj:`ndarray` of float): The initial value. default self.x0.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
x0 = self.X1[:, 0] if x0 is None else x0
ts = self.orig_timesteps if ts is None else ts
return self._predict(ts, x0, 'discrete')
def predict_cts(self, ts=None, x0=None):
"""Predict the future state using the continuous operator A.
Args:
ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
x0 (:obj:`ndarray` of float): The initial value. default self.x0.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
x0 = self.X1[:, 0] if x0 is None else x0
ts = self.orig_timesteps if ts is None else ts
return self._predict(ts, x0, 'continuous')
class DMDc:
def __init__(self, X2, X1, U, ts, **kwargs):
""" X2 = A X1 + B U
Args:
X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
ts (:obj:`ndarray` of float): Time measurements
**kwargs: see Keyword arguments.
Keyword arguments:
threshold (real, int): Truncate the singular values associated with DMD modes. default None.
threshold_type (str): One of {'number', 'percent'}. default 'percent'.
Attributes:
X2 (:obj:`ndarray` of float): Left side data matrix
X1 (:obj:`ndarray` of float): Right side data matrix
U (:obj:`ndarray` of float): Control signal data matrix
t0 (float): Initial time.
dt (float): Step size.
orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
A (:obj:`ndarray` of float): Learned drift operator.
Atilde (:obj:`ndarray` of float): Projected A.
B (:obj:`ndarray` of float): Learned control operator.
Btilde (:obj:`ndarray` of float): projected B.
eigs (list of float): Eigenvalues of Atilde.
modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
"""
self.X1 = X1
self.X2 = X2
self.U = U if U.shape[1] == self.X1.shape[1] else U[:, :-1] # ONLY these 2 options
Omega = np.vstack([self.X1, self.U])
self.t0 = ts[0]
self.dt = ts[1] - ts[0]
self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
# I. Compute SVDs
threshold = kwargs.get('threshold', None)
if threshold is None:
Ug, Sg, Vgt = svd(Omega, full_matrices=False)
U, S, Vt = svd(self.X2, full_matrices=False)
else:
# Allow for independent thresholding
t1, t2 = 2 * [threshold] if np.isscalar(threshold) else threshold
threshold_type = kwargs.get('threshold_type', 'percent')
Ug, Sg, Vgt = _threshold_svd(Omega, t1, threshold_type)
U, S, Vt = _threshold_svd(self.X2, t2, threshold_type)
# II. Compute operators
n, _ = self.X2.shape
left = self.X2 @ dag(Vgt) @ np.diag(1 / Sg)
self.A = left @ dag(Ug[:n, :])
self.B = left @ dag(Ug[n:, :])
# III. DMD modes
self.Atilde = dag(U) @ self.A @ U
self.Btilde = dag(U) @ self.B
self.eigs, W = eig(self.Atilde)
self.modes = self.A @ U @ W
@classmethod
def from_full(cls, X, U, ts, **kwargs):
X2 = X[:, 1:]
X1 = X[:, :-1]
return cls(X2, X1, U, ts, **kwargs)
def predict_dst(self, control=None, x0=None):
""" Predict the future state using discrete evolution.
Evolve the system from X0 as long as control is available, using
the discrete evolution X_2 = A X_1 + B u_1.
        Default behavior (control=None) is to use the original control. (If the response of the
        drift operator A alone is desired, pass a zero control of the desired length, e.g. from
        zero_control.)
Args:
control (:obj:`ndarray` of float): The control signal.
x0 (:obj:`ndarray` of float): The initial value.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
U = self.U if control is None else control
xt = self.X1[:, 0] if x0 is None else x0
res = [xt]
for ut in U[:, :-1].T:
xt_1 = self.A @ xt + self.B @ ut
xt = xt_1
res.append(xt_1)
return np.array(res).T
def predict_cts(self, control=None, x0=None, dt=None):
""" Predict the future state using continuous evolution.
Evolve the system from X0 as long as control is available, using
the continuous evolution while u is constant,
X_dot = A X + B u
x(t+dt) = e^{dt A}(x(t) + dt B u(t))
        Default behavior (control=None) is to use the original control. (If the response of the
        drift operator A alone is desired, pass a zero control of the desired length, e.g. from
        zero_control.) Be sure that dt matches the training dt if time-delay embeddings are used.
Args:
control (:obj:`ndarray` of float): The control signal.
A zero-order hold is assumed between time steps.
The dt must match the training data if time-delays are used.
x0 (:obj:`ndarray` of float): The initial value.
dt (float): The time-step between control inputs.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
U = self.U if control is None else control
dt = self.dt if dt is None else dt
xt = self.X1[:, 0] if x0 is None else x0
res = [xt]
for ut in U[:, :-1].T:
xt_1 = expm(dt * self.A) @ (xt + dt * self.B @ ut)
xt = xt_1
res.append(xt_1)
return np.array(res).T
def zero_control(self, n_steps=None):
n_steps = len(self.orig_timesteps) if n_steps is None else n_steps
return np.zeros([self.U.shape[0], n_steps])
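    # Usage sketch (hypothetical): identify the drift A and control operator B from snapshots
    # X and a known control sequence U, then roll the model forward under the original or a
    # zero control.
    #
    #     model = DMDc.from_full(X, U, ts)                 # X: (n, T), U: (m, T), ts: (T,)
    #     X_hat = model.predict_dst()                      # rollout under the training control
    #     X_free = model.predict_dst(control=model.zero_control())   # drift dynamics only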
class biDMD:
def __init__(self, X2, X1, U, ts, **kwargs):
"""X2 = A X1 + U B X1
Args:
X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
ts (:obj:`ndarray` of float): Time measurements
**kwargs: see Keyword arguments.
Keyword arguments:
shift (int): Number of time delays in order to match times in the nonlinear term. default 0.
threshold (real, int): Truncate the singular values associated with DMD modes. default None.
threshold_type (str): One of {'number', 'percent'}. default 'percent'.
Attributes:
X2 (:obj:`ndarray` of float): Left side data matrix
X1 (:obj:`ndarray` of float): Right side data matrix
U (:obj:`ndarray` of float): Control signal data matrix
Ups (:obj:`ndarray` of float): augmented state U*X1.
t0 (float): Initial time.
dt (float): Step size.
orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
A (:obj:`ndarray` of float): Learned drift operator.
Atilde (:obj:`ndarray` of float): Projected A.
B (:obj:`ndarray` of float): Learned nonlinear control operator.
Btilde (:obj:`ndarray` of float): projected B.
eigs (list of float): Eigenvalues of Atilde.
modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
"""
self.U = U
self.X1 = X1
self.X2 = X2
self.t0 = ts[0]
self.dt = ts[1] - ts[0]
self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
# store useful dimension
n_time = len(self.orig_timesteps)
# Partially unwrap delay embedding to make sure the correct control signals
# are combined with the correct data times. The unwrapped (=>) operators:
# X1 => (delays+1) x (measured dimensions) x (measurement times)
# U => (delays+1) x (number of controls) x (measurement times)
# Ups => (delays+1) x (controls) x (measured dimensions) x (measurement times)
# => (delays+1 x controls x measured dimensions) x (measurement times)
# Re-flatten all but the time dimension of Ups to set the structure of the
        # data matrix. This will set the structure of the B operator to match our
# time-delay function.
self.shift = kwargs.get('shift', 0)
self.Ups = np.einsum('sct, smt->scmt',
self.U.reshape(self.shift + 1, -1, n_time),
self.X1.reshape(self.shift + 1, -1, n_time)
).reshape(-1, n_time)
Omega = np.vstack([self.X1, self.Ups])
# I. Compute SVDs
threshold = kwargs.get('threshold', None)
if threshold is None:
Ug, Sg, Vgt = svd(Omega, full_matrices=False)
U, S, Vt = svd(self.X2, full_matrices=False)
else:
# Allow for independent thresholding
t1, t2 = 2 * [threshold] if np.isscalar(threshold) else threshold
threshold_type = kwargs.get('threshold_type', 'percent')
Ug, Sg, Vgt = _threshold_svd(Omega, t1, threshold_type)
U, S, Vt = _threshold_svd(self.X2, t2, threshold_type)
# II. Compute operators
n, _ = self.X2.shape
left = self.X2 @ dag(Vgt) @ np.diag(1 / Sg)
self.A = left @ dag(Ug[:n, :])
self.B = left @ dag(Ug[n:, :])
# III. DMD modes
self.Atilde = dag(U) @ self.A @ U
self.Btilde = dag(U) @ self.B
self.eigs, W = eig(self.Atilde)
self.modes = self.A @ U @ W
def predict_dst(self, control=None, x0=None):
""" Predict the future state using discrete evolution.
Evolve the system from X0 as long as control is available, using
the discrete evolution:
x_1 = A x_0 + B (u.x_0)
= [A B] [x_0, u.x_0]^T
Args:
control (:obj:`ndarray` of float): The control signal.
            x0 (:obj:`ndarray` of float): The initial value.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
res = [xt]
for t in range(control.shape[1] - 1):
# Outer product then flatten to correctly combine the different
# times present due to time-delays. That is, make sure that
# u(t)'s multiply x(t)'s
# _ct => (time-delays + 1) x (number of controls)
# _xt => (time-delays + 1) x (measured dimensions)
# _ups_t => (time-delays + 1) x (controls) x (measurements)
# Flatten to get the desired vector.
_ct = control[:, t].reshape(self.shift + 1, -1)
_xt = xt.reshape(self.shift + 1, -1)
ups_t = np.einsum('sc,sm->scm', _ct, _xt).flatten()
xt_1 = self.A @ xt + self.B @ ups_t
xt = xt_1
res.append(xt_1)
return np.array(res).T
def predict_cts(self, control=None, x0=None, dt=None):
""" Predict the future state using continuous evolution.
Evolve the system from X0 as long as control is available, using
the continuous evolution while u is constant,
x_{t+1} = e^{A dt + u B dt } x_t
Args:
control (:obj:`ndarray` of float): The control signal.
A zero-order hold is assumed between time steps.
The dt must match the training data if time-delays are used.
x0 (:obj:`ndarray` of float): The initial value.
dt (float): The time-step between control inputs.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
dt = self.dt if dt is None else dt
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
# store useful dimensions
delay_dim = self.shift + 1
control_dim = self.U.shape[0] // delay_dim
measure_1_dim = self.X1.shape[0] // delay_dim
to_dim = self.X2.shape[0]
res = [xt]
for t in range(control.shape[1] - 1):
# Correctly combine u(t) and B(t)
# Initial:
# B <= (time-delays+1 x measurements_2) x (time-delays+1 x controls x measurements_1)
# Reshape:
# B => (time-delays+1 x measurements_2) x (time-delays+1) x (controls) x (measurements_1)
# _ct => (time-delays+1) x (controls)
# _uBt => (time-delays+1 x measurements_2) x (time-delays+1) x (measurements_1)
# => (time-delays+1 x measurements_2) x (time-delays+1 x measurements_1)
# Notice that _uBt is formed by a sum over all controls in order to act on the
# state xt which has dimensions of (delays x measurements_1).
_uBt = np.einsum('ascm,sc->asm',
self.B.reshape(to_dim, delay_dim, control_dim, measure_1_dim),
control[:, t].reshape(delay_dim, control_dim)
).reshape(to_dim, delay_dim * measure_1_dim)
xt_1 = expm((self.A + _uBt) * dt) @ xt
xt = xt_1
res.append(xt_1)
return np.array(res).T
def zero_control(self, n_steps=None):
n_steps = len(self.orig_timesteps) if n_steps is None else n_steps
return np.zeros([self.Ups.shape[0], n_steps])
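    # Usage sketch (hypothetical): the bilinear term couples the control with the state,
    # i.e. x_{t+1} = A x_t + B acting on the outer product of u_t and x_t. U must have one
    # column per column of X1.
    #
    #     model = biDMD(X[:, 1:], X[:, :-1], U, ts)
    #     X_hat = model.predict_dst()                      # discrete bilinear rollout
    #     X_cts = model.predict_cts(dt=ts[1] - ts[0])      # e^{(A + uB) dt} propagation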
class biDMDc:
def __init__(self, X2, X1, U, ts, **kwargs):
""" X2 = A X1 + U B X1 + D U
Args:
X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
ts (:obj:`ndarray` of float): Time measurements
**kwargs: see Keyword arguments.
Keyword arguments:
shift (int): Number of time delays in order to match times in the nonlinear term. default 0.
threshold (real, int): Truncate the singular values associated with DMD modes. default None.
threshold_type (str): One of {'number', 'percent'}. default 'percent'.
Attributes:
X2 (:obj:`ndarray` of float): Left side data matrix
X1 (:obj:`ndarray` of float): Right side data matrix
U (:obj:`ndarray` of float): Control signal data matrix
Ups (:obj:`ndarray` of float): augmented state U*X1.
t0 (float): Initial time.
dt (float): Step size.
orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
A (:obj:`ndarray` of float): Learned drift operator.
Atilde (:obj:`ndarray` of float): Projected A.
B (:obj:`ndarray` of float): Learned nonlinear control operator.
Btilde (:obj:`ndarray` of float): projected B.
D (:obj:`ndarray` of float): Learned control operator.
eigs (list of float): Eigenvalues of Atilde.
modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
"""
self.U = U
self.X1 = X1
self.X2 = X2
self.t0 = ts[0]
self.dt = ts[1] - ts[0]
self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
# store useful dimension
n_time = len(self.orig_timesteps)
self.shift = kwargs.get('shift', 0)
delay_dim = self.shift + 1
# Partially unwrap delay embedding to make sure the correct control signals
# are combined with the correct data times. The unwrapped (=>) operators:
# X1 => (delays+1) x (measured dimensions) x (measurement times)
# U => (delays+1) x (number of controls) x (measurement times)
# Ups => (delays+1) x (controls) x (measured dimensions) x (measurement times)
# => (delays+1 x controls x measured dimensions) x (measurement times)
# Re-flatten all but the time dimension of Ups to set the structure of the
# data matrix. This will set the structure of the B operator to match our
# time-delay function.
self.Ups = np.einsum('sct, smt->scmt',
self.U.reshape(delay_dim, -1, n_time),
self.X1.reshape(delay_dim, -1, n_time)
).reshape(-1, n_time)
Omega = np.vstack([self.X1, self.Ups, self.U])
# I. Compute SVDs
threshold = kwargs.get('threshold', None)
if threshold is None:
Ug, Sg, Vgt = svd(Omega, full_matrices=False)
U, S, Vt = svd(self.X2, full_matrices=False)
else:
# Allow for independent thresholding
t1, t2 = 2 * [threshold] if np.isscalar(threshold) else threshold
threshold_type = kwargs.get('threshold_type', 'percent')
Ug, Sg, Vgt = _threshold_svd(Omega, t1, threshold_type)
U, S, Vt = _threshold_svd(self.X2, t2, threshold_type)
# II. Compute operators
c = self.U.shape[0] // delay_dim
n = self.X1.shape[0]
left = self.X2 @ dag(Vgt) @ np.diag(1 / Sg)
# Omega = X + uX + u => dim'ns: n + c*n + c
self.A = left @ dag(Ug[:n, :])
self.B = left @ dag(Ug[n:(c + 1) * n, :])
self.D = left @ dag(Ug[(c + 1) * n:, :])
# III. DMD modes
self.Atilde = dag(U) @ self.A @ U
self.Btilde = dag(U) @ self.B
self.Dtilde = dag(U) @ self.D
        self.eigs, W = eig(self.Atilde)
        self.modes = self.A @ U @ W
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robust Shared Response Model (RSRM)
The implementation is based on the following publications:
.. [Turek2017] "Capturing Shared and Individual Information in fMRI Data",
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
under review, 2017.
"""
# Authors: <NAME> (Intel Labs), 2017
import logging
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
__all__ = [
"RSRM"
]
logger = logging.getLogger(__name__)
class RSRM(BaseEstimator, TransformerMixin):
"""Robust Shared Response Model (RSRM)
Given multi-subject data, factorize it as a shared response R among all
subjects, an orthogonal transform W per subject, and an individual
(outlying) sparse component S per subject:
.. math:: X_i \\approx W_i R + S_i, \\forall i=1 \\dots N
This unsupervised model allows to learn idiosyncratic information for
subjects and simultaneously improve the shared response estimation.
The model has similar properties to the Shared Response Model (SRM) with
the addition of the individual components.
The model is estimated solving the following optimization problem:
.. math::
\\min_{W_i, S_i, R}\\sum_i \\frac{1}{2}\\|X_i - W_i R - S_i\\|_F^2
.. math:: + \\gamma\\|S_i\\|_1
.. math:: s.t. \\qquad W_i^TW_i = I \\quad \\forall i=1 \\dots N
The solution to this problem is obtained by applying a Block-Coordinate
Descent procedure. More details can be found in [Turek2017]_.
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components. Higher values yield sparser individual components.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
r_ : array, shape=[features, timepoints]
The shared response.
s_ : list of array, element i has shape=[voxels_i, timepoints]
The individual components for each subject.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of timepoints for the alignment data must be the same across
subjects.
The Robust Shared Response Model is approximated using the
Block-Coordinate Descent (BCD) algorithm proposed in [Turek2017]_.
This is a single node version.
"""
def __init__(self, n_iter=10, features=50, gamma=1.0, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.lam = gamma
self.rand_seed = rand_seed
def fit(self, X):
"""Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject.
"""
logger.info('Starting RSRM')
# Check that the regularizer value is positive
if 0.0 >= self.lam:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough timepoints to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment timepoints "
"between subjects.")
# Create a new random state
        self.random_state_ = np.random.RandomState(self.rand_seed)
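    # Usage sketch (hypothetical): each list element is one subject's voxels-by-timepoints
    # matrix; after fitting, w_ holds the per-subject orthogonal maps, r_ the shared
    # response and s_ the sparse individual terms.
    #
    #     data = [np.random.randn(900, 200), np.random.randn(1100, 200)]
    #     model = RSRM(n_iter=10, features=50, gamma=1.0)
    #     model.fit(data)
    #     shared = model.r_                                # (features, timepoints)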
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import torch
from torch.utils.data import Dataset
from scipy.sparse import coo_matrix
class TrainTestDataset(Dataset):
def __init__(self, triples, nrelation, entity2edges, edge2entities, edge2relation, entity2relation, context_hops):
self.len = len(triples)
self.triples = triples
self.nrelation = nrelation
self.entity2edges = entity2edges
self.edge2entities = edge2entities
self.edge2relation = edge2relation
self.entity2relation = entity2relation
self.context_hops = context_hops
self.__getitem__(0)
def __len__(self):
return self.len
def __getitem__(self, idx):
head, relation, tail = self.triples[idx]
label = torch.tensor(relation)
label_set = np.zeros((self.nrelation,))
for i in self.entity2relation[tail]:
label_set[i] = 1
label_set = torch.tensor(label_set)
# subgraph sampling
tail_edges_list, tail_masks, tail_edge2relation_list = self._get_neighbors_and_masks(
relation, tail, idx)
entity_pair = torch.LongTensor([head, tail])
return entity_pair, label, label_set, tail_edges_list, tail_masks, tail_edge2relation_list
@staticmethod
def collate_fn(data):
entity_pairs = torch.stack([_[0] for _ in data], dim=0)
labels = torch.stack([_[1] for _ in data], dim=0)
label_sets = torch.stack([_[2] for _ in data], dim=0)
tail_edges_lists = []
for i in range(len(data[0][3])):
tail_edges_lists.append(torch.stack([torch.tensor(_[3][i]) for _ in data], dim=0))
tail_masks = []
for i in range(1, len(data[0][4])):
tail_masks.append(torch.stack([torch.tensor(_[4][i]) for _ in data], dim=0))
tail2relation = []
for i in range(1, len(data[0][5])):
tail2relation.append(torch.stack([torch.tensor(_[5][i]) for _ in data], dim=0))
return entity_pairs, labels, label_sets, tail_edges_lists, tail_masks, tail2relation
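    # Usage sketch (hypothetical): the custom collate_fn rebatches the per-hop edge
    # tensors, so pass it explicitly to the DataLoader.
    #
    #     dataset = TrainTestDataset(triples, nrelation, entity2edges, edge2entities,
    #                                edge2relation, entity2relation, context_hops=2)
    #     loader = torch.utils.data.DataLoader(dataset, batch_size=128, shuffle=True,
    #                                          collate_fn=TrainTestDataset.collate_fn)
    #     entity_pairs, labels, label_sets, edges, masks, relations = next(iter(loader))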
def _get_neighbors_and_masks(self, relation, entity, edge):
edges_list = [relation]
masks = [1]
edge2relation_list = [1]
for i in range(self.context_hops):
if i == 0:
neighbor_entities = entity
else:
neighbor_entities = np.take(self.edge2entities, edges_list[-1], 0)
neighbor_edges = np.take(self.entity2edges, neighbor_entities, 0)
edges_list.append(neighbor_edges)
mask = neighbor_edges - edge
mask = (mask != 0)
relations = np.take(self.edge2relation, edges_list[-1], 0)
# remove null relation
mask1 = relations - self.nrelation
mask1 = (mask1 != 0)
mask = mask * mask1
masks.append(mask)
edge2relation_list.append(relations)
        return np.array(edges_list), np.array(masks), np.array(edge2relation_list)
#! /usr/bin/Python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import argparse
import sys
vector_file = sys.argv[1]
if len(sys.argv) != 6:
print('arguments wrong!')
print(len(sys.argv))
exit()
else:
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for coco_evaluator."""
import io
import os
# Import libraries
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
import six
import tensorflow as tf
from official.vision.beta.evaluation import coco_evaluator
from official.vision.beta.evaluation import coco_utils
_COCO_JSON_FILE = '/placer/prod/home/snaggletooth/test/data/coco/instances_val2017.json'
_SAVED_COCO_JSON_FILE = 'tmp.json'
def get_groundtruth_annotations(image_id, coco, include_mask=False):
anns = coco.loadAnns(coco.getAnnIds([image_id]))
if not anns:
return None
image = coco.loadImgs([image_id])[0]
groundtruths = {
'boxes': [],
'classes': [],
'is_crowds': [],
'areas': [],
}
if include_mask:
groundtruths['masks'] = []
for ann in anns:
# Creates detections from groundtruths.
# Converts [x, y, w, h] to [y1, x1, y2, x2] box format.
box = [ann['bbox'][1],
ann['bbox'][0],
(ann['bbox'][1] + ann['bbox'][3]),
(ann['bbox'][0] + ann['bbox'][2])]
# Creates groundtruths.
groundtruths['boxes'].append(box)
groundtruths['classes'].append(ann['category_id'])
groundtruths['is_crowds'].append(ann['iscrowd'])
groundtruths['areas'].append(ann['area'])
if include_mask:
mask_img = Image.fromarray(coco.annToMask(ann).astype(np.uint8))
with io.BytesIO() as stream:
mask_img.save(stream, format='PNG')
mask_bytes = stream.getvalue()
groundtruths['masks'].append(mask_bytes)
for key, val in groundtruths.items():
groundtruths[key] = np.stack(val, axis=0)
groundtruths['source_id'] = image['id']
groundtruths['height'] = image['height']
groundtruths['width'] = image['width']
groundtruths['num_detections'] = len(anns)
for k, v in six.iteritems(groundtruths):
groundtruths[k] = np.expand_dims(v, axis=0)
return groundtruths
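# Worked example (illustrative): COCO stores boxes as [x, y, width, height]; the code
# above converts to [y1, x1, y2, x2], so a COCO box [10, 20, 30, 40] (x=10, y=20,
# w=30, h=40) becomes [20, 10, 60, 40].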
def get_predictions(image_id, coco, include_mask=False):
anns = coco.loadAnns(coco.getAnnIds([image_id]))
if not anns:
return None
image = coco.loadImgs([image_id])[0]
predictions = {
'detection_boxes': [],
'detection_classes': [],
'detection_scores': [],
}
if include_mask:
predictions['detection_masks'] = []
for ann in anns:
# Creates detections from groundtruths.
    # Converts [x, y, w, h] to [y1, x1, y2, x2] box format.
box = [ann['bbox'][1],
ann['bbox'][0],
(ann['bbox'][1] + ann['bbox'][3]),
(ann['bbox'][0] + ann['bbox'][2])]
predictions['detection_boxes'].append(box)
predictions['detection_classes'].append(ann['category_id'])
predictions['detection_scores'].append(1)
if include_mask:
mask = coco.annToMask(ann)
predictions['detection_masks'].append(mask)
for key, val in predictions.items():
predictions[key] = np.expand_dims(np.stack(val, axis=0), axis=0)
  predictions['source_id'] = np.array([image['id']])
  return predictions
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import time
import json
import os
import cv2
import io
import tensorflow as tf
from .cvfuncs import CvFuncs
from pprint import pprint
from random import shuffle
from PIL import Image
from keras.backend.tensorflow_backend import set_session
from keras.utils import np_utils
from keras.models import Model, load_model, model_from_json
from keras.preprocessing import image
from sklearn.preprocessing import LabelEncoder
from skimage.transform import resize
from skimage.color import rgb2gray
def tf_new_session(device_id = "0", memory_fraction = 1.0):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = memory_fraction
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = device_id
sess = tf.Session(config=config)
# see https://github.com/keras-team/keras/issues/4780
sess.run(tf.global_variables_initializer())
return sess
def set_tf_session_for_keras(device_id = "0", memory_fraction = 1.0):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = memory_fraction
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = device_id
sess = tf.Session(config=config)
# see https://github.com/keras-team/keras/issues/4780
sess.run(tf.global_variables_initializer())
set_session(sess)
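# Usage sketch (hypothetical): pin Keras/TensorFlow to GPU "0" and cap it at 40% of
# device memory before any model is built; "weights.h5" is a placeholder path.
#
#     set_tf_session_for_keras(device_id="0", memory_fraction=0.4)
#     model = load_model("weights.h5")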
def load_image_into_numpy_array(path_to_image, imgdim=(96,96,1), grayScale = True):
if None != imgdim:
img = image.load_img(path_to_image, grayscale = grayScale, target_size=(imgdim[0], imgdim[1]))
else:
img = image.load_img(path_to_image, grayscale = grayScale, target_size=None)
x = image.img_to_array(img).astype(np.uint8)
return x
def bytes_to_image_for_classification(imgbytes):
img = Image.open(io.BytesIO(imgbytes))
img = img.convert("RGB")
ret = np.array(img)
return ret
def load_image_for_classification(path_to_image, imgdim=(96,96,1),expandDims=True,grayScale = True):
if imgdim != None:
img = image.load_img(path_to_image, grayscale = grayScale, target_size=(imgdim[0], imgdim[1]))
else:
img = image.load_img(path_to_image, grayscale = grayScale)
x = image.img_to_array(img).astype(np.uint8)
if expandDims is True:
x = np.expand_dims(x, axis=0)
x = x / 255
return x
def load_images_for_classification(path_to_images, imgdim=(96,96,1)):
h,w,c = imgdim
    loaded_images = np.empty((len(path_to_images), 1, h, w, c), float)  # builtin float: np.float alias removed in newer NumPy
for i in range(0,len(path_to_images)):
path = path_to_images[i]
loaded_image = load_image_for_classification(path, imgdim, True)
loaded_images[i] = loaded_image
return loaded_images
def convertToGrayscaleForClassification(img):
imgDim = img.shape
img = rgb2gray(img)
img = np.reshape(img, (imgDim[0],imgDim[1],1))
return img
def standardFitByClass(plate_img, plate_class):
x = plate_img
channels = plate_img.shape[2]
if plate_class == 'qa.priv_broad':
x = resize(plate_img, (70,260), preserve_range=True)
elif plate_class == 'qa.priv_norm':
x = resize(plate_img, (110,200), preserve_range=True)
return x.astype(np.uint8)
def extractTextRoiFromPlate(plate_img, plate_class):
plate_img = standardFitByClass(plate_img, plate_class)
original_shape = plate_img.shape
if plate_class == 'qa.priv_broad':
roi_y_start = 0
roi_y_end = original_shape[0]
roi_x_start = int(original_shape[1] * 0.3)
roi_x_end = original_shape[1]
elif plate_class == 'qa.priv_norm':
roi_y_start = int(original_shape[0] * 0.3)
roi_y_end = original_shape[0]
roi_x_start = 0
roi_x_end = original_shape[1]
else:
roi_y_start = int(original_shape[0] * 0.3)
roi_y_end = original_shape[0]
roi_x_start = 0
roi_x_end = original_shape[1]
extractedRoi = plate_img[roi_y_start:roi_y_end, roi_x_start:roi_x_end, :].astype(np.uint8)
return extractedRoi
def overlayImageOnBlackCanvas(img, canvas_shape = (400,400,3)):
h,w,c = img.shape
computed_canvas_shape = canvas_shape
resizeAtEnd = False
if h>canvas_shape[0] or w>canvas_shape[1]:
max_dim = max(h,w)
computed_canvas_shape = (max_dim,max_dim,c)
resizeAtEnd = True
canvas = np.zeros(computed_canvas_shape).astype(np.uint8)
insert_y = (computed_canvas_shape[0] - h) //2
insert_x = (computed_canvas_shape[1] - w) //2
canvas[insert_y: insert_y+h , insert_x:insert_x+w] = img
if resizeAtEnd is True:
canvas = resize(canvas, canvas_shape, preserve_range=True).astype(np.uint8)
return canvas
def getImageSlices(img, stride, window_size):
h,w,c = img.shape
arr = np.empty((0,h,window_size,c),np.uint8)
for i in range(0,(w-window_size)//stride):
x_start = i*stride
x_end = x_start + window_size
sub = img[:,x_start:x_end,:]
arr = np.concatenate( (arr, np.expand_dims(sub, axis=0)), axis = 0)
return arr
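# Shape check (illustrative): for a 70x260x3 plate image with stride=10 and
# window_size=100, the loop produces (260 - 100) // 10 = 16 slices, i.e. an array of
# shape (16, 70, 100, 3).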
def getNewCVFuncs(debugEnabled=False):
#set params
cvfuncs = CvFuncs()
cvfuncs.reset()
cvfuncs._charHeightMin = 20
cvfuncs._charHeightMax = 70
cvfuncs._b_charHeightMin = 20
cvfuncs._b_charHeightMax = 70
cvfuncs.max_allowed_char_width = 40
cvfuncs.debugEnabled = debugEnabled
cvfuncs.imageStoreDir = "."
return cvfuncs
def find_rois(image, debugEnabled=False):
t_start = time.time()
cvfunc = getNewCVFuncs(debugEnabled)
cvfunc.debugEnabled = False
rects, rois = cvfunc.processPlate(image,"test")
t_end = time.time()
#print("Took [{}] s. to find [{}] rois".format((t_end - t_start), len(rois)))
return rects, rois
def make_dataset(loc, split = 0.2, imgdim=(96,96,1), grayScale = True, max_test_files = 4096):
    # The path contains subfolders; each folder's name is the class label for the files inside it.
t_start = time.time()
#dictionary of foldername -> list
train_files = {}
for root, directory, files in os.walk(loc):
if root != loc:
label = os.path.basename(root)
train_files[label] = [ os.path.join(root,x) for x in os.listdir(root)]
shuffle(train_files[label])
tmp_keys = list(train_files.keys())
#print(len(train_files[tmp_keys[0]]), split_index)
#split the data into train and dev
num_train_files = 0
num_dev_files = 0
max_test_files_per_class = max_test_files // len(tmp_keys)
print("Max X_test size is [{}] - per class [{}]".format(max_test_files, max_test_files_per_class))
train_files_list = []
dev_files_list = []
dev_files = {}
for k in tmp_keys:
print("Processing class [{}]".format(k), end='')
split_index = int(len(train_files[k]) * float(split))
#take only max_test_files as test samples.. big enough
if split_index > max_test_files_per_class:
split_index = max_test_files_per_class
num_train_files += (len(train_files[k]) - split_index)
num_dev_files += split_index
dev_files[k] = train_files[k][:split_index]
train_files[k] = train_files[k][split_index:]
#add train files to the list to be returned
for f in train_files[k]:
train_files_list.append((k,f))
for f in dev_files[k]:
dev_files_list.append((k,f))
print("| train_files [{}] & dev_files [{}]".format(len(train_files[k]), len(dev_files[k])))
unique_classes = np.unique(tmp_keys)
unique_classes.sort()
t_end = time.time()
print("Took [{}] s. to make dataset".format((t_end-t_start)))
return num_train_files, num_dev_files, tmp_keys, train_files_list, dev_files_list, list(unique_classes)
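# Usage sketch (hypothetical; "data/chars" is a placeholder path, and the return value of
# load_minibatch is assumed to be the batch of images and labels): build a train/dev split
# from a folder-per-class layout, then draw shuffled minibatches from the train list.
#
#     n_train, n_dev, classes, train_list, dev_list, uniq = make_dataset("data/chars", split=0.2)
#     batch = load_minibatch(uniq, train_list, batch_size=64, batch_number=0)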
def load_minibatch(classes, train_files_list, batch_size, batch_number,imgdim=(96,96,1), grayScale = True):
batch_start_index = batch_size * batch_number
# t_1 = time.time()
X_index = 0
X = np.empty((batch_size,imgdim[0],imgdim[1],imgdim[2]),np.uint8)
Y = []
# t_2 = time.time()
for i in range(batch_start_index, batch_start_index+batch_size):
train_item = train_files_list[i]
X[X_index] = load_image_into_numpy_array(train_item[1], imgdim, grayScale = grayScale)
Y.append(train_item[0])
X_index += 1
# t_3 = time.time()
#ensure we have len(classes) = len(np.unique(Y))
    Y_unique = np.unique(Y)
import numpy as np
def arrow(headHeight, headRadius, shaftRadius, ns=8):
profile = np.array([[0, 0, 0], [0, shaftRadius, 0], [1 - headHeight, shaftRadius, 0], [1 - headHeight, headRadius, 0], [1, 0, 0]], dtype=np.float32)
coneNormal = np.array([headRadius, headHeight, 0])
coneNormal /= np.linalg.norm(coneNormal)
normals = np.array([[-1, 0, 0], [0, 1, 0], [0, 1, 0], coneNormal, coneNormal], dtype=np.float32)
a = 2 * np.pi / ns
c, s = np.cos(a), np.sin(a)
R = np.array([[1, 0, 0],
[0, c, -s],
[0, s, c]])
npp = len(profile)
# Each strip of the revolution consists of one triangle at each end + two triangles to triangulate each inner quad
stripTris = np.array([[0, 1, npp + 1]] +
[[i , i + 1, npp + i] for i in range(1, npp - 1)] +
[[i + 1, npp + (i + 1), npp + i] for i in range(1, npp - 1)] +
[[npp + (npp - 2), npp - 2, npp - 1]], dtype=np.uint32)
nst = len(stripTris)
V = np.empty((ns * npp, 3), dtype=np.float32)
N = np.empty((ns * npp, 3), dtype=np.float32)
F = np.empty((ns * nst, 3), dtype=np.uint32)
for i in range(ns):
vs = i * npp
ve = (i + 1) * npp
V[vs:ve] = profile
N[vs:ve] = normals
fs = i * nst
fe = (i + 1) * nst
F[fs:fe] = (stripTris + vs) % len(V)
profile = profile @ R
normals = normals @ R
return V, N, F
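# Usage sketch (hypothetical): build an 8-sided arrow mesh of unit length along +x.
# With 5 profile points and 8 triangles per strip, V has 8 * 5 vertices and F has
# 8 * 8 faces.
#
#     V, N, F = arrow(headHeight=0.2, headRadius=0.12, shaftRadius=0.05, ns=8)
#     assert V.shape == (40, 3) and F.shape == (64, 3)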
def cylinder(radius, ns=8):
angles = np.linspace(0, 2 * np.pi, ns, endpoint=False)
    # Circle cross-section in the y-z plane (completion assumes the same +x axis convention as arrow above)
    circlePoints = np.column_stack((np.zeros_like(angles), radius * np.cos(angles), radius * np.sin(angles)))
import numpy as np
from baselines.ecbp.agents.buffer.ps_learning_process import PSLearningProcess
# from baselines.ecbp.agents.graph.build_graph_mer_attention import *
from baselines.ecbp.agents.graph.build_graph_mer_bvae_attention import *
import logging
from multiprocessing import Pipe
import os
from baselines.ecbp.agents.psmp_learning_target_agent import PSMPLearnTargetAgent
import cv2
class BVAEAttentionAgent(PSMPLearnTargetAgent):
def __init__(self, encoder_func,decoder_func, exploration_schedule, obs_shape, vector_input=True, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4, eval_epsilon=0.1, queue_threshold=5e-5, batch_size=32, density=True, trainable=True,
num_neg=10, tf_writer=None):
self.conn, child_conn = Pipe()
self.replay_buffer = np.empty((buffer_size + 10,) + obs_shape, np.float32 if vector_input else np.uint8)
self.ec_buffer = PSLearningProcess(num_actions, buffer_size, latent_dim*2, obs_shape, child_conn, gamma,
density=density)
self.obs = None
self.z = None
self.cur_capacity = 0
self.ind = -1
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.queue_threshold = queue_threshold
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.batch_size = batch_size
self.rmax = 100000
self.logger = logging.getLogger("ecbp")
self.log("psmp learning agent here")
self.eval_epsilon = eval_epsilon
self.train_step = 4
self.alpha = 1
self.burnin = 2000
self.burnout = 10000000000
self.update_target_freq = 10000
self.buffer_capacity = 0
self.trainable = trainable
self.num_neg = num_neg
self.loss_type = ["attention"]
input_type = U.Float32Input if vector_input else U.Uint8Input
# input_type = U.Uint8Input
self.hash_func, self.unmask_z_func,self.train_func, self.eval_func, self.norm_func, self.attention_func, self.value_func, self.reconstruct_func,self.update_target_func = build_train_mer_bvae_attention(
input_type=input_type,
obs_shape=obs_shape,
encoder_func=encoder_func,
decoder_func=decoder_func,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_norm_clipping=10,
latent_dim=latent_dim,
loss_type=self.loss_type,
batch_size=batch_size,
num_neg=num_neg,
c_loss_type="sqmargin",
)
self.finds = [0, 0]
self.ec_buffer.start()
def train(self):
# sample
# self.log("begin training")
# print("training",self.writer)
noise = np.random.randn(9,self.batch_size,self.latent_dim)
samples = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_u = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_v = self.send_and_receive(4, (self.batch_size, self.num_neg))
index_u, _, _, _, value_u, _, _, _ = samples_u
index_v, _, _, _, value_v, _, _, _ = samples_v
index_tar, index_pos, index_neg, reward_tar, value_tar, action_tar, neighbours_index, neighbours_value = samples
if len(index_tar) < self.batch_size:
return
obs_tar = [self.replay_buffer[ind] for ind in index_tar]
obs_pos = [self.replay_buffer[ind] for ind in index_pos]
obs_neg = [self.replay_buffer[ind] for ind in index_neg]
obs_neighbour = [self.replay_buffer[ind] for ind in neighbours_index]
obs_u = [self.replay_buffer[ind] for ind in index_u]
obs_v = [self.replay_buffer[ind] for ind in index_v]
# print(obs_tar[0].shape)
if "regression" in self.loss_type:
value_original = self.norm_func(np.array(obs_tar))
value_tar = np.array(value_tar)
self.log(value_original, "value original")
self.log(value_tar, "value tar")
value_original = np.array(value_original).squeeze() / self.alpha
assert value_original.shape == np.array(value_tar).shape, "{}{}".format(value_original.shape,
np.array(value_tar).shape)
value_tar[np.isnan(value_tar)] = value_original[np.isnan(value_tar)]
assert not np.isnan(value_tar).any(), "{}{}".format(value_original, obs_tar)
input = [noise,obs_tar]
if "contrast" in self.loss_type:
input += [obs_pos, obs_neg]
if "regression" in self.loss_type:
input += [np.nan_to_num(value_tar)]
if "linear_model" in self.loss_type:
input += [action_tar]
if "contrast" not in self.loss_type:
input += [obs_pos]
if "fit" in self.loss_type:
input += [obs_neighbour, np.nan_to_num(neighbours_value)]
if "regression" not in self.loss_type:
input += [np.nan_to_num(value_tar)]
if "causality" in self.loss_type:
input += [reward_tar, action_tar]
if "weight_product" in self.loss_type:
value_u = np.nan_to_num(np.array(value_u))
value_v = np.nan_to_num(np.array(value_v))
input += [obs_u, obs_v, obs_u, obs_v, value_u, value_v]
if "attention" in self.loss_type:
value_original = self.value_func(noise,np.array(obs_tar))
value_tar = np.array(value_tar)
value_original = np.array(value_original).squeeze()
            value_tar[np.isnan(value_tar)] = value_original[np.isnan(value_tar)]
'''
Climatological mean
'''
import sys
from glob import glob
import h5py
import numpy as np
import numba as nb
import pandas as pd
from datetime import datetime, timedelta
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/utils/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/QC_OBS/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/QC_OBS/utils')
import graph_utils as gu
import data_utils as du
import BCH_utils as bu
from namelist import *
@nb.njit()
def climo_mean(fcst, flag_pick):
N_grids = fcst.shape[-1]
    MEAN = np.empty((12, N_grids))
"""
pyart.testing.sample_objects
============================
Functions for creating sample Radar and Grid objects.
.. autosummary::
:toctree: generated/
make_empty_ppi_radar
make_target_radar
make_velocity_aliased_radar
make_single_ray_radar
make_empty_grid
make_target_grid
"""
import numpy as np
from .sample_files import _EXAMPLE_RAYS_FILE
from ..config import get_metadata
from ..core.radar import Radar
from ..core.grid import Grid
def make_empty_ppi_radar(ngates, rays_per_sweep, nsweeps):
"""
Return an Radar object, representing a PPI scan.
Parameters
----------
ngates : int
Number of gates per ray.
rays_per_sweep : int
Number of rays in each PPI sweep.
nsweeps : int
Number of sweeps.
Returns
-------
radar : Radar
Radar object with no fields, other parameters are set to default
values.
"""
nrays = rays_per_sweep * nsweeps
time = get_metadata('time')
_range = get_metadata('range')
latitude = get_metadata('latitude')
longitude = get_metadata('longitude')
altitude = get_metadata('altitude')
sweep_number = get_metadata('sweep_number')
sweep_mode = get_metadata('sweep_mode')
fixed_angle = get_metadata('fixed_angle')
sweep_start_ray_index = get_metadata('sweep_start_ray_index')
sweep_end_ray_index = get_metadata('sweep_end_ray_index')
azimuth = get_metadata('azimuth')
elevation = get_metadata('elevation')
fields = {}
scan_type = 'ppi'
metadata = {'instrument_name': 'fake_radar'}
time['data'] = np.arange(nrays, dtype='float64')
time['units'] = 'seconds since 1989-01-01T00:00:01Z'
_range['data'] = np.linspace(0, 1000, ngates).astype('float32')
latitude['data'] = np.array([36.5], dtype='float64')
longitude['data'] = np.array([-97.5], dtype='float64')
altitude['data'] = np.array([200], dtype='float64')
sweep_number['data'] = np.arange(nsweeps, dtype='int32')
sweep_mode['data'] = np.array(['azimuth_surveillance'] * nsweeps)
fixed_angle['data'] = np.array([0.75] * nsweeps, dtype='float32')
sweep_start_ray_index['data'] = np.arange(0, nrays, rays_per_sweep,
dtype='int32')
sweep_end_ray_index['data'] = np.arange(rays_per_sweep - 1, nrays,
rays_per_sweep, dtype='int32')
azimuth['data'] = np.arange(nrays, dtype='float32')
elevation['data'] = np.array([0.75] * nrays, dtype='float32')
return Radar(time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
instrument_parameters=None)
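# Usage sketch (hypothetical): build an empty two-sweep PPI volume (720 rays total) and
# attach a field of matching shape.
#
#     radar = make_empty_ppi_radar(ngates=100, rays_per_sweep=360, nsweeps=2)
#     radar.fields['reflectivity'] = {'data': np.zeros((720, 100), dtype='float32')}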
def make_target_radar():
"""
Return a PPI radar with a target like reflectivity field.
"""
radar = make_empty_ppi_radar(50, 360, 1)
fields = {
'reflectivity': get_metadata('reflectivity')}
fdata = np.zeros((360, 50), dtype='float32')
fdata[:, 0:10] = 0.
fdata[:, 10:20] = 10.
fdata[:, 20:30] = 20.
fdata[:, 30:40] = 30.
fdata[:, 40:50] = 40.
fields['reflectivity']['data'] = fdata
radar.fields = fields
return radar
def make_velocity_aliased_radar(alias=True):
"""
Return a PPI radar with a target like reflectivity field.
Set alias to False to return a de-aliased radar.
"""
radar = make_empty_ppi_radar(50, 360, 1)
radar.range['meters_between_gates'] = 1.0
radar.range['meters_to_center_of_first_gate'] = 1.0
radar.instrument_parameters = {
'nyquist_velocity': {'data': np.array([10.0] * 360)}}
fields = {
'reflectivity': get_metadata('reflectivity'),
'velocity': get_metadata('velocity')}
# fake reflectivity data, all zero reflectivity
fdata = np.zeros((360, 50), dtype='float32')
fields['reflectivity']['data'] = fdata
# fake velocity data, all zeros except a wind burst on at ~13 degrees.
# burst is partially aliased.
vdata = np.zeros((360 * 1, 50), dtype='float32')
for i, idx in enumerate(range(13, -1, -1)):
vdata[i, idx:idx + i + 1] = np.arange(0.5, 0.5 + i * 1. + 0.001)
vdata[:14, 14:27] = vdata[:14, 12::-1] # left/right flip
vdata[14:27] = vdata[12::-1, :] # top/bottom flip
aliased = np.where(vdata > 10.0)
if alias:
vdata[aliased] += -20.
fields['velocity']['data'] = vdata
radar.fields = fields
return radar
def make_single_ray_radar():
"""
Return a PPI radar with a single ray taken from a ARM C-SAPR Radar
Radar object returned has 'reflectivity_horizontal',
'norm_coherent_power', 'copol_coeff', 'dp_phase_shift', and 'diff_phase'
fields with no metadata but a 'data' key. This radar is used for unit
tests in correct modules.
"""
radar = make_empty_ppi_radar(983, 1, 1)
radar.range['data'] = 117.8784 + np.arange(983) * 119.91698
f = np.load(_EXAMPLE_RAYS_FILE)
for field_name in f:
radar.fields[field_name] = {'data': f[field_name]}
f.close()
return radar
def make_empty_grid(grid_shape, grid_limits):
"""
Make an empty grid object without any fields or metadata.
Parameters
----------
grid_shape : 3-tuple of floats
Number of points in the grid (x, y, z).
grid_limits : 3-tuple of 2-tuples
Minimum and maximum grid location (inclusive) in meters for the
x, y, z coordinates.
Returns
-------
grid : Grid
Empty Grid object, centered near the ARM SGP site (Oklahoma).
"""
time = {
'data': np.array([0.0]),
'units': 'seconds since 2000-01-01T00:00:00Z',
'calendar': 'gregorian',
'standard_name': 'time',
'long_name': 'Time in seconds since volume start'}
time_start = {
'data': np.array([0.0]),
'units': 'seconds since 2000-01-01T00:00:00Z',
'calendar': 'gregorian',
'standard_name': 'time',
'long_name': 'Time in seconds since volume start'}
time_end = {
'data': np.array([0.0]),
'units': 'seconds since 2000-01-01T00:00:00Z',
'calendar': 'gregorian',
'standard_name': 'time',
'long_name': 'Time in seconds since volume start'}
# grid coordinate dictionaries
nx, ny, nz = grid_shape
(x0, x1), (y0, y1), (z0, z1) = grid_limits
xaxis = {'data': np.linspace(x0, x1, nx),
'long_name': 'X-coordinate in Cartesian system',
'axis': 'X',
'units': 'm'}
yaxis = {'data': np.linspace(y0, y1, ny),
'long_name': 'Y-coordinate in Cartesian system',
'axis': 'Y',
'units': 'm'}
zaxis = {'data': np.linspace(z0, z1, nz),
'long_name': 'Z-coordinate in Cartesian system',
'axis': 'Z',
'units': 'm',
'positive': 'up'}
altorigin = {'data': np.array([300.]),
'long_name': 'Altitude at grid origin',
'units': 'm',
'standard_name': 'altitude',
}
latorigin = {'data': np.array([36.74]),
'long_name': 'Latitude at grid origin',
'units': 'degree_N',
'standard_name': 'latitude',
'valid_min': -90.,
'valid_max': 90.
}
lonorigin = {'data': np.array([-98.1]),
'long_name': 'Longitude at grid origin',
'units': 'degree_E',
'standard_name': 'longitude',
'valid_min': -180.,
'valid_max': 180.
}
axes = {'time': time,
'time_start': time_start,
'time_end': time_end,
'z_disp': zaxis,
'y_disp': yaxis,
'x_disp': xaxis,
'alt': altorigin,
'lat': latorigin,
'lon': lonorigin}
return Grid({}, axes, {})
def make_target_grid():
"""
Make a sample Grid with a rectangular target.
"""
grid_shape = (320, 400, 2)
grid_limits = ((-300000, 300000), (-400000, 400000), (0, 500))
grid = make_empty_grid(grid_shape, grid_limits)
    fdata = np.zeros((2, 400, 320), dtype='float32')
import numpy as np
def scan(X,Y):
'''
Calculates the solution for the constrained regression called SCAN
given in the publication: Maag et al. "SCAN: Multi-Hop Calibration for Mobile Sensor Arrays".
In particuluar it solves: min_B trace( (Y-BX)(Y-BX)^T ) subject to BXX^TB^T = YY^T
Inputs:
    X: size [n x m] (n: number of sensors, m: number of samples)
Y: size [n x m]
returns B: [n x n]
'''
Ux,Dx,Vx = np.linalg.svd(X,full_matrices=False)
Uy,Dy,Vy = np.linalg.svd(Y,full_matrices=False)
Dx = np.diag(Dx)
Dy = np.diag(Dy)
Vx = np.transpose(Vx)
Vy = np.transpose(Vy)
    M = np.matmul(np.transpose(Vx), Vy)
import os, sys
import numpy as np
import csv,argparse
import collections
from pyAudioAnalysis import audioSegmentation as aS
def read_segmentation_gt(gt_file):
"""
This function reads a segmentation ground truth file,
following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gt_file: the path of the CSV segment file
RETURNS:
- seg_start: a np array of segments' start positions
- seg_end: a np array of segments' ending positions
- seg_label: a list of respective class labels (strings)
"""
print(gt_file)
with open(gt_file, 'rt') as f_handle:
reader = csv.reader(f_handle, delimiter=' ')
start_end_times = {}
start_times = []
end_times = []
labels = []
for row in reader:
if len(row) == 4:
start_end_times[float(row[2])]=float(row[3])
sorted_start = collections.OrderedDict(sorted(start_end_times.items()))
start_point=0.0
sp_count=len(start_end_times)
for start, end in sorted_start.items():
if start!=start_point:
start_times.append(start_point)
end_times.append(start)
labels.append('ns')
start_times.append(start)
end_times.append(end)
labels.append('speech')
start_point=end
else:
start_times.append(start)
end_times.append(end)
labels.append('speech')
start_point = end
# start_times = []
# end_times = []
# labels = []
# for row in reader:
# # if type== 'p' and len(row) == 4:
# # start_times.append(float(row[1]))
# # end_times.append(float(row[2]))
# # labels.append((row[3]))
# if len(row) == 4:
# start_times.append(float(row[2]))
# end_times.append(float(row[3]))
# labels.append(('speech'))
return np.array(start_times), np.array(end_times), labels, sp_count
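# Worked example (illustrative): a space-separated ground-truth file with the rows
#     "A 1 0.50 1.20"
#     "A 2 2.00 2.75"
# (speech start/end in the 3rd and 4th columns) yields
#     start_times = [0.0, 0.5, 1.2, 2.0], end_times = [0.5, 1.2, 2.0, 2.75],
#     labels = ['ns', 'speech', 'ns', 'speech'], sp_count = 2,
# i.e. the gaps between speech segments are filled with 'ns' (non-speech).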
def XXread_segmentation_gt(gt_file):
"""
This function reads a segmentation ground truth file,
following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gt_file: the path of the CSV segment file
RETURNS:
- seg_start: a np array of segments' start positions
- seg_end: a np array of segments' ending positions
- seg_label: a list of respective class labels (strings)
"""
with open(gt_file, 'rt') as f_handle:
reader = csv.reader(f_handle, delimiter='\t')
start_times = []
end_times = []
labels = []
for row in reader:
if len(row) == 3:
start_times.append(float(row[0]))
end_times.append(float(row[1]))
labels.append((row[2]))
return np.array(start_times), np.array(end_times), labels
def segments_to_labels(start_times, end_times, labels, window):
"""
This function converts segment endpoints and respective segment
labels to fix-sized class labels.
ARGUMENTS:
- start_times: segment start points (in seconds)
- end_times: segment endpoints (in seconds)
- labels: segment labels
- window: fix-sized window (in seconds)
RETURNS:
- flags: np array of class indices
- class_names: list of classnames (strings)
"""
flags = []
class_names = list(set(labels))
index = window / 2.0
while index < end_times[-1]:
for i in range(len(start_times)):
if start_times[i] < index <= end_times[i]:
break
flags.append(class_names.index(labels[i]))
index += window
return np.array(flags), class_names
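# Worked example (illustrative): with segments [0.0, 1.0] 'ns' and [1.0, 2.0] 'speech'
# and window=0.5, the window mid-points 0.25, 0.75, 1.25, 1.75 give one label per
# window: ['ns', 'ns', 'speech', 'speech'], encoded as indices into class_names.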
def load_ground_truth_segments(gt_file, window):
"""
    This function reads a gold-label segmentation file and converts it to fixed-window
    class labels. The file follows a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gt_file: the path of the CSV segment file
- window: fix-sized window (in seconds) to segment
RETURNS:
"""
seg_start, seg_end, seg_labels, sp_count = read_segmentation_gt(gt_file)
labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,
window)
labels_temp = []
for index, label in enumerate(labels):
# "align" labels with GT
if class_names[labels[index]] in class_names:
labels_temp.append(class_names.index(class_names[
labels[index]]))
else:
labels_temp.append(-1)
labels = np.array(labels_temp)
return labels, class_names, sp_count
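# Example usage (illustrative; the file name and window size are made up):
#   labels, class_names, sp_count = load_ground_truth_segments("session1.gt", 0.05)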
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
accuracy = 0
cm = | np.array([]) | numpy.array |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import RandomSampler, BatchSampler
from .utils import calculate_accuracy
from .trainer import Trainer
from .utils import EarlyStopping
class CPCTrainer(Trainer):
    # TODO: Make it work for all modes; right now it only defaults to pcl.
def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
super().__init__(encoder, wandb, device)
self.config = config
for k, v in config.items():
setattr(self, k, v)
self.device = device
self.steps_gen = lambda: range(self.steps_start, self.steps_end, self.steps_step)
self.discriminators = {i: nn.Linear(self.gru_size, self.encoder.hidden_size).to(device) for i in self.steps_gen()}
self.gru = nn.GRU(input_size=self.encoder.hidden_size, hidden_size=self.gru_size, num_layers=self.gru_layers, batch_first=True).to(device)
self.labels = {i: torch.arange(self.batch_size * (self.sequence_length - i - 1)).to(device) for i in self.steps_gen()}
params = list(self.encoder.parameters()) + list(self.gru.parameters())
for disc in self.discriminators.values():
params += disc.parameters()
self.optimizer = torch.optim.Adam(params, lr=config['lr'])
self.early_stopper = EarlyStopping(patience=self.patience, verbose=False, wandb=self.wandb, name="encoder")
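        # Architecture summary: the encoder maps each observation to a latent
        # vector, the GRU summarises past latents into a context, and one
        # linear "discriminator" per prediction step scores future latents
        # against that context with an InfoNCE-style loss (see do_one_epoch).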
def generate_batch(self, episodes):
episodes = [episode for episode in episodes if len(episode) >= self.sequence_length]
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=len(episodes) * self.sequence_length),
self.batch_size, drop_last=True)
for indices in sampler:
episodes_batch = [episodes[x] for x in indices]
sequences = []
for episode in episodes_batch:
start_index = np.random.randint(0, len(episode) - self.sequence_length+1)
seq = episode[start_index: start_index + self.sequence_length]
sequences.append(torch.stack(seq))
yield torch.stack(sequences)
def do_one_epoch(self, epoch, episodes):
mode = "train" if self.encoder.training and self.gru.training else "val"
steps = 0
step_losses = {i: [] for i in self.steps_gen()}
step_accuracies = {i: [] for i in self.steps_gen()}
data_generator = self.generate_batch(episodes)
for sequence in data_generator:
with torch.set_grad_enabled(mode == 'train'):
sequence = sequence.to(self.device)
sequence = sequence / 255.
channels, w, h = self.config['obs_space'][-3:]
flat_sequence = sequence.view(-1, channels, w, h)
flat_latents = self.encoder(flat_sequence)
latents = flat_latents.view(
self.batch_size, self.sequence_length, self.encoder.hidden_size)
contexts, _ = self.gru(latents)
loss = 0.
for i in self.steps_gen():
predictions = self.discriminators[i](contexts[:, :-(i+1), :]).contiguous().view(-1, self.encoder.hidden_size)
targets = latents[:, i+1:, :].contiguous().view(-1, self.encoder.hidden_size)
logits = torch.matmul(predictions, targets.t())
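                        # InfoNCE-style objective: logits[j, k] scores prediction j
                        # against latent target k; the positive target for prediction j
                        # sits at index j, so the labels are simply arange(num_pairs).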
step_loss = F.cross_entropy(logits, self.labels[i])
step_losses[i].append(step_loss.detach().item())
loss += step_loss
preds = torch.argmax(logits, dim=1)
step_accuracy = preds.eq(self.labels[i]).sum().float() / self.labels[i].numel()
step_accuracies[i].append(step_accuracy.detach().item())
if mode == "train":
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
steps += 1
epoch_losses = {i: np.mean(step_losses[i]) for i in step_losses}
epoch_accuracies = {i: | np.mean(step_accuracies[i]) | numpy.mean |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from pathlib import Path
from moviepy.editor import VideoFileClip
from line_class import Line
def test_functions(path,foo=None,cmap=None):
"""
    Apply a function (foo) to every image in a folder (path) and display
    the results; if no function is given, the original images are shown.
Args:
path: path to images folders
foo: function
Return:
None
"""
p = Path(path)
imgs = list(p.glob("*.jpg"))
fig, axs = plt.subplots(2,len(imgs)//2,figsize=(30,10))
axs = axs.flatten()
for i,img in enumerate(imgs):
image = mpimg.imread(img)
if foo:
image = foo(image)
if len(image.shape) < 3:
cmap = "gray"
axs[i].imshow(image,cmap=cmap)
def color_selection(img, threshold):
"""
    Return a binary mask of the image pixels whose HLS values fall between the threshold bounds
Args:
img: RGB image. Use the mpimg.imread()
threshold: Tuple with the low and high threshold from a HLS image
np.array([0, 200, 0]), np.array([255, 255,70]))
return:
        Black and white mask. White pixels are those whose HLS values fall between the threshold bounds
"""
img_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
mask = cv2.inRange(img_hls, *threshold)
return mask
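# Example usage (illustrative): select near-white pixels in HLS space,
# mirroring the threshold quoted in the docstring above.
#   white_mask = color_selection(image, (np.array([0, 200, 0]), np.array([255, 255, 70])))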
def yellow_white_selection(img):
"""
    Return the image masked by the combined white and yellow color selections.
white_threshold = [0, 200, 0] [255, 255,70]
yellow_threshold = [0, 80, 150] [80, 180, 255]
    If you want to use other values, use the color_selection function directly.
Args:
img: RGB image. Use the mpimg.imread()
return:
        RGB image in which only the pixels selected by the white or yellow thresholds are kept; everything else is black.
"""
white_threshold = (np.array([0, 200, 0]), np.array([255, 255,70]))
white_mask = color_selection(img, white_threshold)
yellow_threshold = (np.array([0, 80, 150]), np.array([80, 180, 255]))
yellow_mask = color_selection(img, yellow_threshold)
color_mask = cv2.bitwise_or(white_mask, yellow_mask)
return cv2.bitwise_or(img, img, mask=color_mask)
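# Example usage (illustrative, assuming a "test_images" folder of .jpg files):
#   test_functions("test_images", foo=yellow_white_selection)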
def region_of_interest(img, vertices=None):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = | np.zeros_like(img) | numpy.zeros_like |
import time as tm
import numpy as np
from pylab import *
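# Classical stationary iterative solvers for A x = b. Each routine starts from
# the initial guess `x`, sweeps over the rows once per iteration, and stops when
# the max-norm change between successive iterates drops below `eps`, returning
# the approximate solution and the iteration count. (The `xs` argument is unused.)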
def Jacobi(A, b, x, eps=1e-4, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += (b[i] - A[i].dot(x_old)) / A[i, i]
if abs(x_old - x).max() < eps:
return x, cnt
def GS(A, b, x, eps=1e-4, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += (b[i] - A[i].dot(x)) / A[i, i]
if abs(x_old - x).max() < eps:
return x, cnt
def SOR(A, b, x, eps=1e-4, w=0.9, xs=None):
x = x.copy()
cnt = 0
while True:
cnt += 1
x_old = x.copy()
for i in range(b.shape[0]):
x[i] += w * (b[i] - A[i].dot(x)) / A[i, i]
if abs(x_old - x).max() < eps:
return x, cnt
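# Illustrative comparison (not part of the original script): for a small
# diagonally dominant system all three schemes converge, e.g.
#   A = np.array([[4., 1.], [1., 3.]]); b = np.array([1., 2.]); x0 = np.zeros(2)
#   Jacobi(A, b, x0)        # -> (approx. [0.0909, 0.6364], iteration count)
#   GS(A, b, x0)            # Gauss-Seidel typically needs fewer iterations
#   SOR(A, b, x0, w=0.9)    # under-relaxation with w < 1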
def solve(eps, a, n):
print('eps =', eps, ', a =', a, ', n =', n)
A = np.zeros((n, n))
h = 1 / n
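    # Assemble the tridiagonal system; the coefficients below appear to come
    # from an upwind/forward-difference discretisation of a convection-diffusion
    # problem (eps * u'' plus a first-order term) on a uniform grid of spacing h.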
for i in range(n):
A[i, i] = -2 * eps - h
for i in range(n - 1):
A[i + 1, i] = eps
A[i, i + 1] = eps + h
# print(A)
x = | np.arange(0 + h, 1 + h, h) | numpy.arange |