#!/usr/bin/env pythonw
# -*- coding: cp1252 -*-
import codecs
from datetime import date
import os
import random
import re
import sys
import time
import urllib
import zipfile
import io
import numpy as np
import pandas as pd
from scipy import stats
from scipy.optimize import fminbound
from scipy.integrate import quad
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.pylab import polyfit
import matplotlib.ticker as mtick
from matplotlib import colors
from matplotlib import cm
try:
import requests
except ImportError:
requests = None
encoding = "ISO-8859-1"
from .mapping import map_magic
from pmagpy import contribution_builder as cb
from pmagpy import spline
from pmagpy import version
from pmag_env import set_env
from . import pmag
from . import pmagplotlib
from . import data_model3 as data_model
from .contribution_builder import Contribution
from . import validate_upload3 as val_up3
from numpy.linalg import inv, eig
has_cartopy, cartopy = pmag.import_cartopy()
if has_cartopy:
import cartopy.crs as ccrs
def igrf(input_list, mod='', ghfile=""):
"""
Determine declination, inclination and intensity from a geomagnetic
field model. The default model used is the IGRF model
(http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html) with other
models available for selection with the available options detailed
in the mod parameter below.
Parameters:
input_list : list with format [Date, Altitude, Latitude, Longitude]
date must be in decimal year format XXXX.XXXX (Common Era)
altitude is in kilometers
mod : desired model
"" : Use the IGRF13 model by default
'custom' : use values supplied in ghfile
or choose from this list
['arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b','shadif14','shawq2k','shawqIA','ggf100k']
where:
- arch3k (Korte et al., 2009)
- cals3k (Korte and Constable, 2011)
- cals10k.1b (Korte et al., 2011)
- pfm9k (Nilsson et al., 2014)
- hfm10k is the hfm.OL1.A1 of Constable et al. (2016)
- cals10k.2 (Constable et al., 2016)
- shadif14 (Pavon-Carrasco et al., 2014)
- shawq2k (Campuzano et al., 2019)
- shawqIA (Osete et al., 2020)
        - ggf100k (Panovska et al., 2018) [only models from -99950 in 200 year increments allowed]
        - the first four of these models are constrained to agree
          with gufm1 (Jackson et al., 2000) for the past four centuries
        ghfile : path to file with l m g h data
Returns:
igrf_array
array of magnetic field values (0: dec; 1: inc; 2: intensity (in nT))
Examples:
>>> local_field = ipmag.igrf([2013.6544, .052, 37.87, -122.27])
>>> local_field
array([1.431355648576314e+01, 6.148304376287219e+01, 4.899264739340517e+04])
>>> ipmag.igrf_print(local_field)
Declination: 14.314
Inclination: 61.483
Intensity: 48992.647 nT
"""
    if ghfile != "":
        # read the l, m, g, h columns and flatten them into a gh list
        lmgh = np.loadtxt(ghfile).transpose()
        gh = [lmgh[2][0]]
        for i in range(1, lmgh.shape[1]):
            gh.append(lmgh[2][i])
            gh.append(lmgh[3][i])
        if len(gh) == 0:
            print('no valid gh file')
            return
        mod = 'custom'
if mod == "":
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0])
elif mod != 'custom':
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0], mod=mod)
else:
x, y, z, f = pmag.docustom(
input_list[3] % 360., input_list[2], input_list[1], gh)
igrf_array = pmag.cart2dir((x, y, z))
return igrf_array
def igrf_print(igrf_array):
"""
Print out Declination, Inclination, Intensity from an array returned from
the ipmag.igrf() function.
Parameters:
igrf_array : array that is output from ipmag.igrf() function
Examples:
An array generated by the ``ipmag.igrf()`` function is passed to
``ipmag.igrf_print()``
>>> local_field = ipmag.igrf([2013.6544, .052, 37.87, -122.27])
>>> ipmag.igrf_print(local_field)
Declination: 14.314
Inclination: 61.483
Intensity: 48992.647 nT
"""
print("Declination: %0.3f" % (igrf_array[0]))
print("Inclination: %0.3f" % (igrf_array[1]))
print("Intensity: %0.3f nT" % (igrf_array[2]))
def dms2dd(degrees, minutes, seconds):
"""
Convert latitude/longitude of a location that is in
degrees, minutes, seconds to decimal degrees
Parameters:
degrees : degrees of latitude/longitude
minutes : minutes of latitude/longitude
seconds : seconds of latitude/longitude
Returns:
float
decimal degrees of location
Examples:
Convert 180 degrees 4 minutes 23 seconds to decimal degrees:
>>> ipmag.dms2dd(180,4,23)
180.07305555555556
"""
dd = float(degrees) + float(minutes)/60 + float(seconds)/(60 * 60)
return dd
def fisher_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Fisher mean and associated parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list of [dec,inc,1.0]). Returns a dictionary with the
Fisher mean and statistical parameters.
Parameters:
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
or
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
Returns:
dictionary
Dictionary containing the Fisher mean parameters.
This dictionary can be printed in a more readable fashion using the
``ipmag.print_direction_mean()`` function if it is a directional mean or
``ipmag.print_pole_mean()`` function if it is a pole mean.
Examples:
Use lists of declination and inclination to calculate a Fisher mean:
>>> ipmag.fisher_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'alpha95': 7.292891411309177,
'csd': 6.4097743211340896,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'k': 159.69251473636305,
'n': 4,
'r': 3.9812138971889026}
Use a di_block to calculate a Fisher mean (will give the same output as the
example with the lists):
>>> ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
    if di_block is None:
        di_block = make_di_block(dec, inc)
    return pmag.fisher_mean(di_block)
def fisher_angular_deviation(dec=None, inc=None, di_block=None, confidence=95):
'''
The angle from the true mean within which a chosen percentage of directions
lie can be calculated from the Fisher distribution. This function uses the
calculated Fisher concentration parameter to estimate this angle from
directional data. The 63 percent confidence interval is often called the
angular standard deviation.
Parameters:
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
or
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
confidence : 50 percent, 63 percent or 95 percent (default is 95 percent)
Returns:
float
theta is returned which is the critical angle of interest from the
mean which contains the percentage of directions specified by the
confidence parameter
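    Examples:
        An illustrative estimate of the angular standard deviation (the 63
        percent confidence angle) of a synthetic Fisher distribution; the
        directions are randomly drawn, so the value varies between runs:
        >>> directions = ipmag.fishrot(k=30, n=50, dec=200, inc=45)
        >>> ipmag.fisher_angular_deviation(di_block=directions, confidence=63)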
'''
    if di_block is None:
        di_block = make_di_block(dec, inc)
    mean = pmag.fisher_mean(di_block)
    if confidence == 50:
        theta = 67.5/np.sqrt(mean['k'])
    elif confidence == 63:
        theta = 81.0/np.sqrt(mean['k'])
    elif confidence == 95:
        theta = 140.0/np.sqrt(mean['k'])
    else:
        raise ValueError('confidence must be 50, 63 or 95')
    return theta
def bingham_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Bingham mean and associated statistical parameters from
either a list of declination values and a separate list of inclination
values or from a di_block (a nested list of [dec, inc, 1.0]).
Returns a dictionary with the Bingham mean and statistical parameters.
Parameters:
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
        be used. Either dec, inc lists or a di_block need to be passed to the function.
Returns:
dictionary containing the Bingham mean and associated statistics.
Examples:
Use lists of declination and inclination to calculate a Bingham mean:
>>> ipmag.bingham_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 220.84075754194598,
'Einc': -13.745780972597291,
'Eta': 9.9111522306938742,
'Zdec': 280.38894136954474,
'Zeta': 9.8653370276451113,
'Zinc': 64.23509410796224,
'dec': 136.32637167111312,
'inc': 21.34518678073179,
'n': 4}
Use a di_block to calculate a Bingham mean (will give the same output as the
example above with the lists):
>>> ipmag.bingham_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
    if di_block is None:
        di_block = make_di_block(dec, inc)
    return pmag.dobingham(di_block)
def kent_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Kent mean and associated statistical parameters from either
a list of declination values and a separate list of inclination values or
    from a di_block (a nested list of [dec,inc,1.0]). Returns a
dictionary with the Kent mean and statistical parameters.
Parameters:
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
        be used. Either dec, inc lists or a di_block need to be passed to the function.
Returns:
dictionary containing Kent mean and associated statistics.
Examples:
Use lists of declination and inclination to calculate a Kent mean:
>>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 280.38683553668795,
'Einc': 64.236598921744289,
'Eta': 0.72982112760919715,
'Zdec': 40.824690028412761,
'Zeta': 6.7896823241008795,
'Zinc': 13.739412321974067,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'n': 4}
Use a di_block to calculate a Kent mean (will give the same output as the
example above with the dec, inc lists):
>>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
    if di_block is None:
        di_block = make_di_block(dec, inc)
    return pmag.dokent(di_block, len(di_block))
def kent_distribution_95(dec=None, inc=None, di_block=None):
"""
    Calculates the Kent mean and provides the parameters associated with
    the region containing 95% of the directions from either a list of
    declination values and a separate list of inclination values or from a
    di_block (a nested list of [dec,inc,1.0]). Returns a
dictionary with the Kent mean and statistical parameters.
Parameters:
dec: list of declinations
inc: list of inclinations
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
        be used. Either dec, inc lists or a di_block need to be passed to the function.
Returns:
dictionary containing Kent mean and associated statistics.
Examples:
Use lists of declination and inclination to calculate a Kent mean:
>>> ipmag.kent_distribution_95(dec=[140,127,142,136],inc=[21,23,19,22])
{'dec': 136.30838974272072,
'inc': 21.347784026899987,
'n': 4,
'Zdec': 40.82469002841276,
'Zinc': 13.739412321974067,
'Edec': 280.38683553668795,
'Einc': 64.23659892174429,
'Zeta': 13.677129096579478,
'Eta': 1.4597607031196376}
Use a di_block to calculate a Kent mean (will give the same output as the
example with the lists):
>>> ipmag.kent_distribution_95(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
    if di_block is None:
        di_block = make_di_block(dec, inc)
    return pmag.dokent(di_block, len(di_block), distribution_95=True)
def mean_bootstrap_confidence(dec=None,inc=None,di_block=None,num_sims=10000,alpha=0.05):
"""
Estimates the bootstrap confidence region for the mean of a collection of paleomagnetic
directions based on the approach of Heslop et al. (2023). This approach involves the projection
onto a tangent plane for a tractable statistical analysis in two dimensions.
Parameters:
dec (list or None): List of declination values. Default is None.
inc (list or None): List of inclination values. Default is None.
di_block (list or None): List of [dec, inc] pairs. Default is None.
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block needs to be provided.
num_sims (int): Number of bootstrap iterations. Default is 10,000.
        alpha (float): Significance level. Default is 0.05, corresponding to a 95% confidence region.
Returns:
tuple: A tuple containing:
        (1) a dictionary of parameters that includes the estimated mean
direction and the T statistic which is the basis of the bootstrap confidence region,
(2) list of [dec, inc] pairs that represent the boundary of the confidence region.
        The bootstrap confidence region cannot readily be reported in a compact form,
        so it is instead returned as a long list of points along the boundary of the region.
References:
Heslop, D., Scealy, J. L., Wood, A. T. A., Tauxe, L., & Roberts, A. P. (2023).
A bootstrap common mean direction test. Journal of Geophysical Research: Solid Earth, 128, e2023JB026983.
https://doi.org/10.1029/2023JB026983
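    Examples:
        An illustrative call on synthetic directions; both the input directions
        and the bootstrap are random, so results vary between runs:
        >>> directions = ipmag.fishrot(k=25, n=40, dec=10, inc=55)
        >>> pars, confidence_DI = ipmag.mean_bootstrap_confidence(di_block=directions)
        >>> pars['dec'], pars['inc']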
"""
if di_block is None:
di_block = make_di_block(dec, inc)
pars = {}
X = np.transpose(pmag.dir2cart(di_block))
n = np.shape(X)[1] #number of directions
mhat = np.mean(X,axis=1)
mhat /= np.linalg.norm(mhat) #estimate mean direction
mean_direction = pmag.cart2dir(mhat)
pars["dec"] = mean_direction[0]
pars["inc"] = mean_direction[1]
T_b = np.zeros(num_sims) #predefine output array for bootstrapped T values
for i in range(num_sims): #loop through bootstrap iterations
        idx = np.random.randint(0,n,n) #select observation indices with replacement
X_b = X[:,idx] #form bootstrap sample
mhat_b = np.mean(X_b,axis=1) #mean direction of bootstrap sample
mhat_b /= np.linalg.norm(mhat_b)
Mhat_b = pmag.form_Mhat(mhat_b) #\hat{M} for bootstrap sample
Ghat_b = pmag.form_Ghat(X_b,Mhat_b) #\hat{G} for bootstrap sample
T_b[i] = pmag.find_T(mhat,n,Mhat_b,Ghat_b) #T for bootstrap sample
Tc = np.quantile(T_b,1-alpha) #find 1-alpha quantile of T
pars["T_critical"] = Tc
Mhat = pmag.form_Mhat(mhat)
Ghat = pmag.form_Ghat(X,Mhat)
Xc = pmag.find_CR(mhat,Mhat,Ghat,n,Tc) #calculate points along boundary of 95% confidence region
confidence_DI = pmag.cart2dir(np.transpose(Xc)) #convert to dec and inc for plotting
return pars, confidence_DI
def print_direction_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
directional data.
Parameters:
mean_dictionary : output dictionary of pmag.fisher_mean()
Returns:
prints the mean and associated statistics
Examples:
Generate a Fisher mean using ``ipmag.fisher_mean()`` and then print it nicely
using ``ipmag.print_direction_mean()``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_direction_mean(my_mean)
Dec: 136.3 Inc: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (a_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Dec: ' + str(round(mean_dictionary['dec'], 1)) +
' Inc: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (a_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1)))
def print_pole_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
mean paleomagnetic poles.
Parameters:
mean_dictionary : output dictionary of pmag.fisher_mean()
Returns:
prints the mean and associated statistics
Examples:
Generate a Fisher mean using ``ipmag.fisher_mean()`` and then print it nicely
using ``ipmag.print_pole_mean()``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_pole_mean(my_mean)
Plon: 136.3 Plat: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (A_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Plon: ' + str(round(mean_dictionary['dec'], 1)) +
' Plat: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (A_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1)))
def print_kent_mean(mean_dictionary):
"""
Does a pretty job printing a Kent mean and associated statistics.
Parameters:
mean_dictionary: output dictionary of ipmag.kent_mean
Returns:
prints the mean and associated statistics
Examples:
Generate a Kent mean using ``ipmag.kent_mean()`` and then print it nicely
using ``ipmag.print_kent_mean()``
>>> my_di_block = [[183.2931831390693, 80.70169305002725, 1.0],
[75.50744693411644, 79.57922789535208, 1.0],
[162.32513875820177, 76.51741087479394, 1.0],
[143.8749848879392, 85.79156599168213, 1.0],
[177.12011881027854, 84.02007456929348, 1.0]]
>>> my_kent_mean = ipmag.kent_mean(di_block = my_di_block)
>>> ipmag.print_kent_mean(my_kent_mean)
Plon: 150.4 Plat: 83.3
Major axis lon: 31.4 Major axis lat: 3.2
Minor axis lon: 301.0 Minor axis lat: 5.8
Major axis angle of 95% ellipse (Zeta): 6.5
Minor axis angle of 95% ellipse (Eta): 2.8
Number of directions in mean (n): 5
"""
print('Plon: ' + str(round(mean_dictionary['dec'], 1)) +
' Plat: ' + str(round(mean_dictionary['inc'], 1)))
print('Major axis lon: ' + str(round(mean_dictionary['Zdec'], 1)) +
' Major axis lat: ' + str(round(mean_dictionary['Zinc'], 1)))
print('Minor axis lon: ' + str(round(mean_dictionary['Edec'], 1)) +
' Minor axis lat: ' + str(round(mean_dictionary['Einc'], 1)))
print('Major axis angle of 95% ellipse (Zeta): ' +
str(round(mean_dictionary['Zeta'], 1)))
print('Minor axis angle of 95% ellipse (Eta): ' +
str(round(mean_dictionary['Eta'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
def fishrot(k=20, n=100, dec=0, inc=90, di_block=True):
"""
    Generates Fisher distributed unit vectors from a specified distribution
    using the pmag.py functions pmag.fshdev() and pmag.dodirot_V().
Parameters:
k : kappa precision parameter (default is 20)
n : number of vectors to determine (default is 100)
dec : mean declination of distribution (default is 0)
inc : mean inclination of distribution (default is 90)
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns:
- di_block, a nested list of [dec,inc,1.0] (default)
- dec, inc, a list of dec and a list of inc (if di_block = False)
Examples:
>>> ipmag.fishrot(k=20, n=5, dec=40, inc=60)
array([[55.30451720381376 , 56.186057037482435, 1. ],
[25.593998008087908, 63.544360587984784, 1. ],
[29.263675539971246, 54.58964868129066 , 1. ],
[61.28572459596148 , 51.5004074156194 , 1. ],
[55.20784339888985 , 54.186746152272484, 1. ]])
"""
# Generation of samples
declinations, inclinations = pmag.fshdev(np.repeat(k, n))
# Rotation to have desired mean direction
resampled_di = np.vstack([declinations, inclinations]).T
resampled_di_rotated = pmag.dodirot_V(resampled_di, np.repeat(dec, n), np.repeat(inc, n))
if di_block:
return np.insert(resampled_di_rotated, 2, np.ones(n), axis=1)
else:
return resampled_di_rotated[:,0], resampled_di_rotated[:,1]
def fisher_mean_resample(alpha95=20, n=100, dec=0, inc=90, di_block=True):
"""
    Generates resamples of directional means from a Fisher mean with a specified
    alpha95. This is equivalent to sampling from the distribution of the mean direction.
Parameters:
        alpha95 : 95% confidence on mean direction (default is 20)
n : number of vectors to determine (default is 100)
dec : mean declination of distribution (default is 0)
inc : mean inclination of distribution (default is 90)
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns:
- di_block, a nested list of [dec,inc,1.0] (default)
- dec, inc, a list of dec and a list of inc (if di_block = False)
Examples:
>>> ipmag.fisher_mean_resample(alpha95=5, n=5, dec=40, inc=60)
    [[41.47587050719005, 62.44682515754436, 1.0],
[33.738299775944085, 55.88461662263949, 1.0],
[42.98707546462242, 60.21460942319564, 1.0],
[28.282076485842992, 59.67015046929257, 1.0],
[41.87081053973009, 57.18064045614739, 1.0]]
"""
directions = []
declinations = []
inclinations = []
k = 140.0**2 / alpha95**2
if di_block:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
directions.append([drot, irot, 1.])
return directions
else:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
declinations.append(drot)
inclinations.append(irot)
return declinations, inclinations
def kentrot(kent_dict, n=100, di_block=True):
'''
Generates Kent distributed unit vectors from a specified distribution
using the pmag.py function pmag.kentdev().
Parameters:
kent_dict: a dictionary for Kent distribution parameters. It should at least have:
dec: mean axis dec,
inc: mean axis inc,
Zdec: major axis dec,
Zinc: major axis inc,
Edec: minor axis dec,
Einc: minor axis inc,
R1: Kent distribution size quantity for calculating kappa and beta,
            R2: Kent distribution shape quantity for calculating kappa and beta
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns:
**di_block**, a nested list of [dec,inc,1.0] (default)
**dec, inc**, a list of dec and a list of inc (if di_block = False)
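    Examples:
        An illustrative call with a hand-built parameter dictionary (the values
        below, including R1 and R2, are arbitrary placeholders; the draws are
        random, so output varies between runs):
        >>> kent_dict = {'dec': 136., 'inc': 21., 'Zdec': 40., 'Zinc': 14.,
            'Edec': 280., 'Einc': 64., 'R1': 0.98, 'R2': 0.01}
        >>> directions = ipmag.kentrot(kent_dict, n=50)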
'''
# get kent states from dictionary
mean_lon = kent_dict['dec']
mean_lat = kent_dict['inc']
major_lon = kent_dict['Zdec']
major_lat = kent_dict['Zinc']
minor_lon = kent_dict['Edec']
minor_lat = kent_dict['Einc']
R1 = kent_dict['R1']
R2 = kent_dict['R2']
# calculate kappa and beta
kappa = 1/(2-2*R1-R2)+1/(2-2*R1+R2)
beta = 1/2*(1/(2-2*R1-R2)-1/(2-2*R1+R2))
# generate unrotated decs and incs
decs, incs = pmag.kentdev(kappa, beta, n)
    # convert to Cartesian coordinates
X = pmag.dir2cart(np.array([decs, incs]).T)
# new axes:
new_axes = pmag.dir2cart([[360-minor_lon,-minor_lat],[major_lon,major_lat],[mean_lon,mean_lat]])
# old axes:
old_axes = pmag.dir2cart([[-90,0],[0,0],[0,90]])
# construct coordinate rotation matrix
rotation_matrix = np.inner(new_axes,old_axes)
rotated_X = X@rotation_matrix
rotated_dir = pmag.cart2dir(rotated_X).T
rotated_decs, rotated_incs = rotated_dir[0]-90, rotated_dir[1]
if di_block:
return make_di_block(rotated_decs, rotated_incs)
else:
return rotated_decs, rotated_incs
def tk03(n=100, dec=0, lat=0, rev='no', G1=-18e3, G2=0, G3=0, B_threshold=0):
"""
Generates vectors drawn from the TK03.gad model of secular
variation (Tauxe and Kent, 2004) at given latitude and rotated
about a vertical axis by the given declination. Returns a nested list of
of [dec,inc,intensity].
Parameters
----------
n : number of vectors to determine (default is 100)
dec : mean declination of data set (default is 0)
lat : latitude at which secular variation is simulated (default is 0)
rev : if reversals are to be included this should be 'yes' (default is 'no')
G1 : specify average g_1^0 fraction (default is -18e3 in nT, minimum = 1)
G2 : specify average g_2^0 fraction (default is 0)
G3 : specify average g_3^0 fraction (default is 0)
B_threshold : return vectors with B>B_threshold (in nT) (default is 0 which
returns all vectors)
Returns
----------
tk_03_output : a nested list of declination, inclination, and intensity (in nT)
Examples
--------
>>> ipmag.tk03(n=5, dec=0, lat=0)
[[14.752502674158681, -36.189370642603834, 16584.848620957589],
[9.2859465437113311, -10.064247301056071, 17383.950391596223],
[2.4278460589582913, 4.8079990844938019, 18243.679003572055],
[352.93759572283585, 0.086693343935840397, 18524.551174838372],
[352.48366219759953, 11.579098286352332, 24928.412830772766]]
"""
tk_03_output = []
k=0
while k < n:
gh = pmag.mktk03(8, k, G2, G3, G1=G1) # terms and random seed
        # get a random longitude between 0 and 360
        lon = random.randint(0, 360)
vec = pmag.getvec(gh, lat, lon) # send field model and lat to getvec
vec[0] += dec
if vec[0] >= 360.:
vec[0] -= 360.
if k % 2 == 0 and rev == 'yes':
vec[0] += 180.
vec[1] = -vec[1]
if vec[2]>B_threshold:
tk_03_output.append([vec[0], vec[1], vec[2]])
k+=1
return tk_03_output
def unsquish(incs, f):
"""
This function applies a flattening factor (f) to unflatten inclination
data (incs) and returns 'unsquished' values.
Parameters
----------
incs : list of inclination values or a single value
f : flattening factor
A value greater than 0.0 and less than or equal to 1.0 that is used
        to unflatten inclination values. 1.0 implies no flattening and will
result in no change.
Returns
----------
incs_unsquished : List of unflattened inclinations (in degrees)
Examples
--------
    Take a list of inclinations, flatten them using ``ipmag.squish`` and then
    use ``ipmag.unsquish`` and the flattening factor to unflatten (i.e.
    "unsquish") them:
>>> inclinations = [43,47,41]
>>> squished_incs = ipmag.squish(inclinations,0.4)
>>> ipmag.unsquish(squished_incs,0.4)
[43.0, 47.0, 41.0]
"""
    try:
        length = len(incs)
        incs_unsquished = []
        for n in range(0, length):
            inc_rad = np.deg2rad(incs[n])  # convert to radians
            inc_new_rad = (1.0/f) * np.tan(inc_rad)
            # convert back to degrees
            inc_new = np.rad2deg(np.arctan(inc_new_rad))
            incs_unsquished.append(inc_new)
        return incs_unsquished
    except TypeError:  # incs is a single value rather than a list
        inc_rad = np.deg2rad(incs)  # convert to radians
        inc_new_rad = (1.0/f) * np.tan(inc_rad)
        inc_new = np.rad2deg(np.arctan(inc_new_rad))  # convert back to degrees
        return inc_new
def squish(incs, f):
"""
    This function applies a flattening factor (f) to inclination data
(incs) and returns 'squished' values.
Parameters
----------
incs : list of inclination values or a single value
f : flattening factor
A value between 0.0 and 1.0 where 1.0 is no flattening and 0.0 is
complete flattening.
Returns
---------
incs_squished : List of flattened directions (in degrees)
Examples
--------
Take a list of inclinations and flatten (i.e. "squish") them:
>>> inclinations = [43,47,41]
>>> ipmag.squish(inclinations,0.4)
[20.455818908027187, 23.216791019112204, 19.173314360172309]
"""
    try:
        length = len(incs)
        incs_squished = []
        for n in range(0, length):
            inc_rad = incs[n] * np.pi / 180.  # convert to radians
            inc_new_rad = f * np.tan(inc_rad)
            inc_new = np.arctan(inc_new_rad) * 180. / \
                np.pi  # convert back to degrees
            incs_squished.append(inc_new)
        return incs_squished
    except TypeError:  # incs is a single value rather than a list
        inc_rad = incs * np.pi / 180.  # convert to radians
        inc_new_rad = f * np.tan(inc_rad)
        inc_new = np.arctan(inc_new_rad) * 180. / \
            np.pi  # convert back to degrees
        return inc_new
def f_factor_calc(inc_observed, inc_field):
"""
Calculate the flattening factor (f) from an observed inclination in
comparison to the expected inclination.
Parameters:
inc_observed : the observed inclination (e.g. magnetization of sediment)
inc_field : inclination of field in which magnetization was acquired
Returns:
f_factor : the flattening factor
Examples:
Calculate the f factor for an inclination that was shallowed from 40 degrees
to 25 degrees:
>>> ipmag.f_factor_calc(25,40)
0.5557238268604126
"""
inc_observed_rad = np.deg2rad(inc_observed)
inc_field_rad = np.deg2rad(inc_field)
f_factor = np.tan(inc_observed_rad)/np.tan(inc_field_rad)
return f_factor
def do_flip(dec=None, inc=None, di_block=None, unit_vector=True):
"""
This function returns the antipode (i.e. it flips) of directions.
The function can take dec and inc as separate lists if they are of equal
length and explicitly specified or are the first two arguments. It will then
return a list of flipped decs and a list of flipped incs. If a di_block (a
nested list of [dec, inc, 1.0]) is specified then it is used and the function
returns a di_block with the flipped directions.
Parameters:
dec : list of declinations
inc : list of inclinations
di_block : a nested list of [dec, inc, 1.0]
A di_block can be provided instead of dec, inc lists in which case it will
        be used. Either dec, inc lists or a di_block need to be passed to the function.
unit_vector : if True will return [dec,inc,1.]; if False will return [dec,inc]
(default is True)
Returns:
either **dec_flip, inc_flip** as lists of flipped declinations and inclinations
or **dflip** as a nested list of [dec, inc, 1.0] or [dec, inc]
Examples
----------
Lists of declination and inclination can be flipped to their antipodes:
>>> decs = [1.0, 358.0, 2.0]
>>> incs = [10.0, 12.0, 8.0]
>>> ipmag.do_flip(decs, incs)
([181.0, 178.0, 182.0], [-10.0, -12.0, -8.0])
The function can also take a di_block and returns a flipped di_block:
>>> directions = [[1.0,10.0],[358.0,12.0,],[2.0,8.0]]
>>> ipmag.do_flip(di_block=directions)
[[181.0, -10.0, 1.0], [178.0, -12.0, 1.0], [182.0, -8.0, 1.0]]
"""
if di_block is None:
dec_flip = []
inc_flip = []
for n in range(0, len(dec)):
dec_flip.append((dec[n] - 180.) % 360.0)
inc_flip.append(-inc[n])
return dec_flip, inc_flip
else:
dflip = []
for rec in di_block:
d, i = (rec[0] - 180.) % 360., -rec[1]
if unit_vector==True:
dflip.append([d, i, 1.0])
if unit_vector==False:
dflip.append([d, i])
return dflip
def bootstrap_fold_test(Data, num_sims=1000, min_untilt=-10, max_untilt=120, bedding_error=0, save=False, save_folder='.', fmt='svg', ninety_nine=False):
"""
Conduct a bootstrap fold test (Tauxe and Watson, 1994)
Three plots are generated: 1) equal area plot of uncorrected data;
2) tilt-corrected equal area plot; 3) bootstrap results showing the trend
of the largest eigenvalues for a selection of the pseudo-samples (red
dashed lines), the cumulative distribution of the eigenvalue maximum (green
line) and the confidence bounds that enclose 95% of the pseudo-sample
maxima. If the confidence bounds enclose 100% unfolding, the data "pass"
the fold test.
Parameters:
Data : a numpy array of directional data [dec, inc, dip_direction, dip]
dec, inc are the declination and inclination of the paleomagnetic directions
dip_direction, dip are the orientation of the bedding
num_sims : number of bootstrap samples (default is 1000)
min_untilt : minimum percent untilting applied to the data (default is -10%)
max_untilt : maximum percent untilting applied to the data (default is 120%)
        bedding_error : circular standard deviation for uncertainty on bedding poles (default is 0, i.e. no bedding error)
save : optional save of plots (default is False)
save_folder : path to directory where plots should be saved
fmt : format of figures to be saved (default is 'svg')
ninety_nine : changes confidence bounds from 95 percent to 99 if True
Returns:
- uncorrected data equal area plot
- tilt-corrected data equal area plot
- bootstrap results and CDF of the eigenvalue maximum
Examples:
Data in separate lists of dec, inc, dip_direction, dip data can be made into
the needed array using the ``ipmag.make_diddd_array`` function.
>>> dec = [132.5,124.3,142.7,130.3,163.2]
>>> inc = [12.1,23.2,34.2,37.7,32.6]
>>> dip_direction = [265.0,265.0,265.0,164.0,164.0]
>>> dip = [20.0,20.0,20.0,72.0,72.0]
>>> data_array = ipmag.make_diddd_array(dec,inc,dip_direction,dip)
>>> data_array
array([[ 132.5, 12.1, 265. , 20. ],
[ 124.3, 23.2, 265. , 20. ],
[ 142.7, 34.2, 265. , 20. ],
[ 130.3, 37.7, 164. , 72. ],
[ 163.2, 32.6, 164. , 72. ]])
This array can then be passed to the function:
>>> ipmag.bootstrap_fold_test(data_array)
"""
if bedding_error != 0:
kappa = (81.0/bedding_error)**2
else:
kappa = 0
plt.figure(figsize=[5, 5])
plot_net(1)
pmagplotlib.plot_di(1, Data) # plot directions
plt.text(-1.1, 1.15, 'Geographic')
if save:
plt.savefig(os.path.join(save_folder, 'eq_geo') + '.' + fmt)
D, I = pmag.dotilt_V(Data)
TCs = np.array([D, I]).transpose()
plt.figure(figsize=[5, 5])
plot_net(2)
pmagplotlib.plot_di(2, TCs) # plot directions
plt.text(-1.1, 1.15, 'Tilt-corrected')
if save:
plt.savefig(os.path.join(save_folder, 'eq_tc') + '.' + fmt)
plt.show()
print('doing ', num_sims, ' iterations...please be patient.....')
Percs = list(range(min_untilt, max_untilt))
Cdf = []
Untilt = []
plt.figure()
for n in range(num_sims): # do bootstrap data sets - plot first 25 as dashed red line
# if n%50==0:print n
Taus = [] # set up lists for taus
PDs = pmag.pseudo(Data)
if kappa != 0:
for k in range(len(PDs)):
d, i = pmag.fshdev(kappa)
dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
PDs[k][2] = dipdir
PDs[k][3] = dip
for perc in Percs:
tilt = np.array([1., 1., 1., 0.01 * perc])
D, I = pmag.dotilt_V(PDs * tilt)
TCs = np.array([D, I]).transpose()
ppars = pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n < 25:
plt.plot(Percs, Taus, 'r--')
# tilt that gives maximum tau
Untilt.append(Percs[Taus.index(np.max(Taus))])
Cdf.append(float(n)/float(num_sims))
plt.plot(Percs, Taus, 'k')
plt.xlabel('% Untilting')
plt.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
plt.plot(Untilt, Cdf, 'g')
lower = int(.025 * num_sims)
upper = int(.975 * num_sims)
plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
title = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'percent unfolding')
if ninety_nine is True:
print('tightest grouping of vectors obtained at (99% confidence bounds):')
print(int(.005 * num_sims), ' - ',
int(.995 * num_sims), 'percent unfolding')
print("")
print('tightest grouping of vectors obtained at (95% confidence bounds):')
print(title)
print('range of all bootstrap samples: ')
print(Untilt[0], ' - ', Untilt[-1], 'percent unfolding')
plt.title(title)
if save:
plt.savefig(os.path.join(save_folder, 'bootstrap_CDF') + '.' + fmt)
plt.show()
def common_mean_bootstrap(Data1, Data2, NumSims=1000,
color1='r', color2='b',
save=False, save_folder='.', fmt='svg',
figsize=(7, 2.3), x_tick_bins=4,verbose=True):
"""
Conduct a bootstrap test (Tauxe, 2010) for a common mean on two declination,
inclination data sets. Plots are generated of the cumulative distributions
of the Cartesian coordinates of the means of the pseudo-samples (one for x,
one for y and one for z). If the 95 percent confidence bounds for each
component overlap, the two set of directions "pass" the test and are
consistent with sharing a common mean.
Parameters:
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
        if Data2 contains only a single direction it is plotted for comparison rather than bootstrapped
NumSims : number of bootstrap samples (default is 1000)
save : optional save of plots (default is False)
save_folder : path to directory where plots should be saved
fmt : format of figures to be saved (default is 'svg')
figsize : optionally adjust figure size (default is (7, 2.3))
x_tick_bins : because they occasionally overlap depending on the data, this
argument allows you adjust number of tick marks on the x axis of graphs
(default is 4)
Returns:
        **three plots** (cumulative distributions of the X, Y, Z of bootstrapped means),
**result** (a boolean where 0 is fail and 1 is pass)
Examples:
Develop two populations of directions using ``ipmag.fishrot()``. Use the
function to determine if they share a common mean.
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_bootstrap(directions_A, directions_B)
"""
BDI1 = pmag.di_boot(Data1)
cart1 = pmag.dir2cart(BDI1).transpose()
X1, Y1, Z1 = cart1[0], cart1[1], cart1[2]
if np.array(Data2).shape[0] > 2:
BDI2 = pmag.di_boot(Data2)
cart2 = pmag.dir2cart(BDI2).transpose()
X2, Y2, Z2 = cart2[0], cart2[1], cart2[2]
else:
cart = pmag.dir2cart(Data2).transpose()
minimum = int(0.025 * len(X1))
maximum = int(0.975 * len(X1))
fignum = 1
fig = plt.figure(figsize=figsize)
plt.subplot(1, 3, 1)
X1, y = pmagplotlib.plot_cdf(fignum, X1, "X component", color1, "")
bounds1 = [X1[minimum], X1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, color1, '-')
if np.array(Data2).shape[0] > 2:
X2, y = pmagplotlib.plot_cdf(fignum, X2, "X component", color2, "")
bounds2 = [X2[minimum], X2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, color2, '--')
else:
pmagplotlib.plot_vs(fignum, [cart[0]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
x_overlap = pmag.interval_overlap(bounds1,bounds2)
plt.subplot(1, 3, 2)
Y1, y = pmagplotlib.plot_cdf(fignum, Y1, "Y component", color1, "")
bounds1 = [Y1[minimum], Y1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, color1, '-')
if np.array(Data2).shape[0] > 2:
Y2, y = pmagplotlib.plot_cdf(fignum, Y2, "Y component", color2, "")
bounds2 = [Y2[minimum], Y2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, color2, '--')
else:
pmagplotlib.plot_vs(fignum, [cart[1]], 'k', '--')
plt.ylim(0, 1)
y_overlap = pmag.interval_overlap(bounds1,bounds2)
plt.subplot(1, 3, 3)
Z1, y = pmagplotlib.plot_cdf(fignum, Z1, "Z component", color1, "")
bounds1 = [Z1[minimum], Z1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, color1, '-')
if np.array(Data2).shape[0] > 2:
Z2, y = pmagplotlib.plot_cdf(fignum, Z2, "Z component", color2, "")
bounds2 = [Z2[minimum], Z2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, color2, '--')
else:
pmagplotlib.plot_vs(fignum, [cart[2]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
z_overlap = pmag.interval_overlap(bounds1,bounds2)
plt.tight_layout()
if save:
plt.savefig(os.path.join(
save_folder, 'common_mean_bootstrap') + '.' + fmt,
dpi=300,bbox_inches='tight')
plt.show()
    # summarize which Cartesian components, if any, are statistically distinct
    distinct = [axis for axis, overlap in
                zip(('x', 'y', 'z'), (x_overlap, y_overlap, z_overlap))
                if overlap == 0]
    if not distinct:
        if verbose:
            print('Pass')
        result = 1
    else:
        if len(distinct) == 3:
            axes = 'x, y and z'
        else:
            axes = ' and '.join(distinct)
        if verbose:
            print('Fail, distinct in ' + axes)
        result = 0
    return result
def common_mean_bootstrap_H23(Data1, Data2, num_sims=10000, alpha=0.05, plot=True, reversal=False,
save=False, save_folder='.', fmt='svg',verbose=False):
"""
Perform a bootstrap common mean direction test following Heslop et al. (2023).
This function uses a nonparametric bootstrap approach to test the null hypothesis of common
mean directions between two datasets. It extends the bootstrap common mean direction test of
    Tauxe et al. (1991) by incorporating a null hypothesis significance testing framework.
Parameters:
Data1 (array): Directional data of the first set; each row is [declination, inclination].
Data2 (array): Directional data of the second set; each row is [declination, inclination].
num_sims (int, optional): Number of bootstrap simulations to run. Default is 10000.
alpha (float, optional): Significance level for hypothesis testing. Default is 0.05.
plot (bool, optional): If True, produces a histogram plot of the test statistic. Default is True.
reversal (bool, optional): If True, considers antipodal directions for the second dataset. Default is False.
save (bool, optional): If True, saves the histogram plot. Default is False.
save_folder (str, optional): Directory where the histogram plot will be saved. Default is the current directory.
fmt (str, optional): File format for saving the histogram plot. Default is 'svg'.
Returns:
tuple: Contains the following elements:
- result (int): 0 if null hypothesis is rejected, 1 otherwise.
- Lmin (float): The test statistic value.
- Lmin_c (float): The critical test statistic value.
- p (float): The p-value of the test.
References:
Heslop, D., Scealy, J. L., Wood, A. T. A., Tauxe, L., & Roberts, A. P. (2023).
A bootstrap common mean direction test. Journal of Geophysical Research: Solid Earth, 128, e2023JB026983.
https://doi.org/10.1029/2023JB026983
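    Examples:
        An illustrative run on two synthetic data sets; the directions and the
        bootstrap are random, so the statistics vary between runs:
        >>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
        >>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
        >>> result, Lmin, Lmin_c, p = ipmag.common_mean_bootstrap_H23(directions_A, directions_B)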
"""
X1 = np.transpose(pmag.dir2cart(Data1)) # normal directions in Cartesian coordinates (one direction per column)
if reversal:
X2 = -np.transpose(pmag.dir2cart(Data2)) # inverted reversed directions in Cartesian coordinates
else:
X2 = np.transpose(pmag.dir2cart(Data2))
n1 = np.shape(X1)[1] #number of observations in first data set
n2 = np.shape(X2)[1] #number of observations in second data set
n = n1 + n2 #total number of observations
X12 = np.hstack((X1,X2)) #form pooled data set
mhat1 = np.mean(X1,axis=1)
mhat1 /= np.linalg.norm(mhat1) #mean of first data set
mhat2 = np.mean(X2,axis=1)
mhat2 /= np.linalg.norm(mhat2) #mean of second data set
mhat12 = np.mean(X12,axis=1)
mhat12 /= np.linalg.norm(mhat12) #mean of pooled data set
Mhat1 = pmag.form_Mhat(mhat1) #Mhat of first data set
Ghat1 = pmag.form_Ghat(X1,Mhat1) #Ghat of first data set
Mhat2 = pmag.form_Mhat(mhat2) #Mhat of second data set
Ghat2 = pmag.form_Ghat(X2,Mhat2) #Ghat of second data set
Ahat = Mhat1.getH()*np.linalg.inv(Ghat1)*Mhat1
Ahat += Mhat2.getH()*np.linalg.inv(Ghat2)*Mhat2
Ahat *= n
D,V = np.linalg.eig(Ahat)
idx = np.argmin(D)
Lmin = D[idx] #minimum eigenvalue
mhat0 = V[:,idx] #eigenvector corresponding to pooled sample mean
Q1 = pmag.form_Q(mhat0,mhat1) #rotation matrix for first data set
X10 = np.matmul(Q1,X1) #rotated version of first data set
Q2 = pmag.form_Q(mhat0,mhat2) #rotation matrix for second data set
X20 = np.matmul(Q2,X2) #rotated version of second data set
Lmin_b = np.zeros(num_sims) #predefine output array for minimum eigenvalues
    T_b = np.zeros(num_sims) # predefine output array for T_b (equation 11)
for i in range(num_sims): #loop through bootstrap iterations
        idx1 = np.random.randint(0,n1,n1) #select observation indices with replacement
X10_b = np.asarray(X10[:,idx1]) #form bootstrap sample from rotated version of first data set
mhat10_b = np.mean(X10_b,axis=1) #mean direction of bootstrap sample
mhat10_b /= np.linalg.norm(mhat10_b)
mhat10_b = (np.asarray(mhat10_b)).flatten()
Mhat10_b = pmag.form_Mhat(mhat10_b) #\hat{M} for bootstrap sample
Ghat10_b = pmag.form_Ghat(X10_b,Mhat10_b) #\hat{G} for bootstrap sample
        idx2 = np.random.randint(0,n2,n2) #select observation indices with replacement
X20_b = np.asarray(X20[:,idx2]) #form bootstrap sample from rotated version of second data set
mhat20_b = np.mean(X20_b,axis=1) #mean direction of bootstrap sample
mhat20_b /= np.linalg.norm(mhat20_b)
mhat20_b = (np.asarray(mhat20_b)).flatten()
Mhat20_b = pmag.form_Mhat(mhat20_b) #\hat{M} for bootstrap sample
Ghat20_b = pmag.form_Ghat(X20_b,Mhat20_b) #\hat{G} for bootstrap sample
Ahat_b = Mhat10_b.getH()*np.linalg.inv(Ghat10_b)*Mhat10_b #bootstrap estimate of \hat{A}_0 (equation 8)
Ahat_b += Mhat20_b.getH()*np.linalg.inv(Ghat20_b)*Mhat20_b
Ahat_b *= n
D_b,V_b = np.linalg.eig(Ahat_b) #Eigenvalues and eigenvectors
        Lmin_b[i] = np.min(D_b) #minimum eigenvalue for bootstrap sample
T_b[i] = np.matmul(np.matmul(np.transpose(mhat0),Ahat_b),mhat0) #Bootstrap T for pooled data (equation 11)
p = (1+np.sum(Lmin_b>=Lmin))/(num_sims+1) # p-value (step 8 of CMDT, Section 3)
# (n.b., if p > 0.05 cannot reject null of common means at alpha = 0.05)
Lmin_c = np.quantile(Lmin_b,1-alpha) #test critical value
# (n.b., if Lmin > Lmin_c reject null of common means at alpha significance level)
if verbose: print("Heslop et al. (2023) test statistic value = {:.2f}".format(Lmin))
if verbose: print("Heslop et al. (2023) critical test statistic value = {:.2f}".format(Lmin_c))
if verbose: print("Estimated p-value = {:.2f}".format(p))
if p < alpha:
if verbose:print("Reject null of common means at alpha = {:.2f} confidence level".format(alpha))
result = 0
else:
if verbose:print("Cannot reject null of common means at alpha = {:.2f} confidence level".format(alpha))
result = 1
    if plot:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        plt.hist(Lmin_b, bins=int(np.sqrt(num_sims)), color="0.6", ec="0.6")
        y_min, y_max = ax1.get_ylim()
        plt.plot([Lmin, Lmin], [y_min, y_max], '--r', label='Test statistic')
        plt.plot([Lmin_c, Lmin_c], [y_min, y_max], '-k', label='Critical value')
if y_max<Lmin:y_max=Lmin+5
plt.ylim([y_min,y_max])
plt.xlim(np.min(Lmin_b),np.max(Lmin_b)+10)
plt.xlabel(r'$\lambda_{\rm{min}}^{(b)}$')
plt.ylabel('Frequency')
plt.minorticks_on()
plt.rcParams.update({'font.size': 12})
plt.legend(loc='upper right')
if result==0:
plt.text(.8,.7,'Fail',color='red',transform=ax1.transAxes)
else:
plt.text(.8,.7,'Pass',color='blue',transform=ax1.transAxes)
if save:
plt.savefig(os.path.join(
save_folder, 'bootstrap_test_histogram') + '.' + fmt)
plt.show()
return result, Lmin, Lmin_c, p
def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
"""
Conduct a Watson V test for a common mean on two directional data sets.
This function calculates Watson's V statistic from input lists through
Monte Carlo simulation in order to test whether two populations of
directional data could have been drawn from a common mean. The critical
angle between the two sample mean directions and the corresponding
McFadden and McElhinny (1990) classification is printed.
Parameters:
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
NumSims : number of Monte Carlo simulations (default is 5000)
print_result : default is to print the test result (True)
        plot : the default is no plot ('no');
            passing 'yes' will plot the CDF from the Monte Carlo simulations
save : optional save of plots (default is False)
save_folder : path to where plots will be saved (default is current)
fmt : format of figures to be saved (default is 'svg')
Returns:
**printed text** (text describing the test result is printed),
**result** (a boolean where 0 is fail and 1 is pass),
**angle** (angle between the Fisher means of the two data sets),
**critical_angle** (critical angle for the test to pass),
**classification** (MM1990 classification for a positive test),
Examples:
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean.
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_watson(directions_A, directions_B)
"""
pars_1 = pmag.fisher_mean(Data1)
pars_2 = pmag.fisher_mean(Data2)
cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r'] # k1*r1+k2*r2
xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0] # k1*x1+k2*x2
xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1] # k1*y1+k2*y2
xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2] # k1*z1+k2*z2
Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
V = 2 * (Sw - Rw)
# keep weighted sum for later when determining the "critical angle"
# let's save it as Sr (notation of McFadden and McElhinny, 1990)
Sr = Sw
# do monte carlo simulation of datasets with same kappas as data,
# but a common mean
Vp = [] # set of Vs from simulations
for k in range(NumSims):
# get a set of N1 fisher distributed vectors with k1,
# calculate fisher stats
Dirp = []
for i in range(pars_1["n"]):
Dirp.append(pmag.fshdev(pars_1["k"]))
pars_p1 = pmag.fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2,
# calculate fisher stats
Dirp = []
for i in range(pars_2["n"]):
Dirp.append(pmag.fshdev(pars_2["k"]))
pars_p2 = pmag.fisher_mean(Dirp)
# get the V for these
Vk = pmag.vfunc(pars_p1, pars_p2)
Vp.append(Vk)
# sort the Vs, get Vcrit (95th percentile one)
Vp.sort()
k = int(.95 * NumSims)
Vcrit = Vp[k]
# equation 18 of McFadden and McElhinny, 1990 calculates the critical
# value of R (Rwc)
Rwc = Sr - (Vcrit/2)
# following equation 19 of McFadden and McElhinny (1990) the critical
# angle is calculated. If the observed angle (also calculated below)
# between the data set means exceeds the critical angle the hypothesis
# of a common mean direction may be rejected at the 95% confidence
# level. The critical angle is simply a different way to present
# Watson's V parameter so it makes sense to use the Watson V parameter
# in comparison with the critical value of V for considering the test
# results. What calculating the critical angle allows for is the
# classification of McFadden and McElhinny (1990) to be made
# for data sets that are consistent with sharing a common mean.
k1 = pars_1['k']
k2 = pars_2['k']
R1 = pars_1['r']
R2 = pars_2['r']
critical_angle = np.degrees(np.arccos(((Rwc**2) - ((k1 * R1)**2)
- ((k2 * R2)**2))/
(2 * k1 * R1 * k2 * R2)))
D1 = (pars_1['dec'], pars_1['inc'])
D2 = (pars_2['dec'], pars_2['inc'])
angle = pmag.angle(D1, D2)
if print_result:
print("Results of Watson V test: ")
print("")
print("Watson's V: " '%.1f' % (V))
print("Critical value of V: " '%.1f' % (Vcrit))
if V < Vcrit:
if print_result:
print('"Pass": Since V is less than Vcrit, the null hypothesis')
print('that the two populations are drawn from distributions')
print('that share a common mean direction can not be rejected.')
result = 1
    else:  # V >= Vcrit (a tie is treated as a fail)
if print_result:
print('"Fail": Since V is greater than Vcrit, the two means can')
print('be distinguished at the 95% confidence level.')
result = 0
classification = ''
if print_result:
print("")
print("M&M1990 classification:")
print("")
print("Angle between data set means: " '%.1f' % (angle))
print("Critical angle for M&M1990: " '%.1f' % (critical_angle))
if print_result:
if V > Vcrit:
print("")
elif V < Vcrit:
if critical_angle < 5:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'A'")
classification = 'A'
elif critical_angle < 10:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'B'")
classification = 'B'
elif critical_angle < 20:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'C'")
classification = 'C'
else:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'INDETERMINATE;")
classification = 'indeterminate'
if plot == 'yes':
CDF = {'cdf': 1}
# pmagplotlib.plot_init(CDF['cdf'],5,5)
plt.figure(figsize=(3.5, 2.5))
p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
# pmagplotlib.draw_figs(CDF)
if save:
plt.savefig(os.path.join(
save_folder, 'common_mean_watson') + '.' + fmt)
pmagplotlib.show_fig(CDF['cdf'])
return result, angle[0], critical_angle, classification
def common_mean_bayes(Data1, Data2, reversal_test=False):
'''
Estimate the probability that two Fisher-distributed sets of
directions originate from populations with a common mean using
the Bayesian framework of Heslop and Roberts (2018). This version
of the test is the one involving distributions with common precision.
Parameters:
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
        reversal_test : whether to flip one population to its antipode
(default is False)
Returns:
**BF0** (Bayes factor),
**P** (posterior probability of the hypothesis),
**support** (category of support based on classification of P)
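    Examples:
        An illustrative run on two synthetic data sets; the directions are
        randomly drawn, so the Bayes factor and posterior probability vary
        between runs:
        >>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
        >>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
        >>> BF0, P, support = ipmag.common_mean_bayes(directions_A, directions_B)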
'''
X1=pmag.dir2cart(Data1)
X2=pmag.dir2cart(Data2)
if reversal_test==True:
X12=np.concatenate((X1,-X2), axis=0) #pool site directions
else:
X12=np.concatenate((X1,X2), axis=0) #pool site directions
def log_like(k,N,R):
return N*np.log(k/4/np.pi)-N*log_sinh(k)+np.log(4*np.pi) \
+log_sinh(k*R)-np.log(k*R)
def log_sinh(k):
if k>700:
s=k-np.log(2.0)
else:
s=np.log(np.sinh(k))
return s
def log_prior(k):
return np.log(4.0)+2.0*np.log(k)-np.log(np.pi)-2.0*np.log(1.+k**2)
def integrand1(k,x):
R=np.sqrt(np.sum(np.sum(x,axis=0)**2))
N=np.size(x,axis=0)
val = np.exp(log_like(k,N,R)+log_prior(k))
return val
def integrand2(k,x1,x2):
R1=np.sqrt(np.sum(np.sum(x1,axis=0)**2))
N1=np.size(x1,axis=0)
R2=np.sqrt(np.sum(np.sum(x2,axis=0)**2))
N2=np.size(x2,axis=0)
val = np.exp(log_like(k,N1,R1)+log_like(k,N2,R2)+log_prior(k))
return val
mL1 = quad(integrand1, 0, np.inf, args=(X12))[0] #Bayes factor numerator (Eqn 13)
mL2 = quad(integrand2, 0, np.inf, args=(X1,X2))[0] #Bayes factor denominator (Eqn 14)
BF0=mL1/mL2
P=BF0/(1.+BF0)
if P<0.01:
support = 'Different means: very strong support'
if P>=0.01 and P<0.05:
support = 'Different means: strong support'
if P>=0.05 and P<0.25:
support = 'Different means: positive support'
if P>=0.25 and P<0.75:
support = 'Ambiguous: weak support'
if P>=0.75 and P<0.95:
support = 'Common mean: positive support'
if P>=0.95 and P<0.99:
support = 'Common mean: strong support'
if P>=0.99:
support = 'Common mean: very strong support'
print(support)
return BF0, P, support
def separate_directions(dec=None, inc=None, di_block=None):
"""
Separates directional data into two modes based on the principal direction.
Parameters:
dec (list, optional): List of declinations. Defaults to None.
inc (list, optional): List of inclinations. Defaults to None.
di_block (list of lists, optional): Nested list of [dec,inc]. Can be provided
instead of separate dec, inc lists. If provided, it takes precedence.
Returns:
tuple: Depending on input, either:
- dec1, inc1, dec2, inc2: Lists of declinations and inclinations for the
two modes (if separate dec, inc lists are provided)
- polarity1, polarity2: Nested lists of [dec,inc] for the two modes (if
di_block is provided)
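    Examples:
        Separate a synthetic mixed-polarity data set into its two modes; the
        directions are randomly drawn, so output varies between runs:
        >>> normal = ipmag.fishrot(k=20, n=20, dec=5, inc=-60)
        >>> reverse = ipmag.fishrot(k=20, n=20, dec=185, inc=60)
        >>> mixed = np.vstack([normal, reverse])[:, :2].tolist()
        >>> mode1, mode2 = ipmag.separate_directions(di_block=mixed)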
"""
if di_block is None:
di_block = make_di_block(dec, inc, unit_vector=False)
polarity1, polarity2 = pmag.separate_directions(di_block)
if dec is not None and inc is not None:
if len(polarity1) > 0 :
dec1 = polarity1[:, 0].tolist()
inc1 = polarity1[:, 1].tolist()
else:
dec1 = None
inc1 = None
if len(polarity2) > 0 :
dec2 = polarity2[:, 0].tolist()
inc2 = polarity2[:, 1].tolist()
else:
dec2 = None
inc2 = None
return dec1, inc1, dec2, inc2
    else:
        return polarity1, polarity2  # the two separated modes
def reversal_test_bootstrap(dec=None, inc=None, di_block=None, plot_stereo=False,
color1='blue', color2='red',
save=False, save_folder='.', fmt='svg',verbose=True):
"""
Conduct a reversal test using bootstrap statistics (Tauxe, 2010) to
determine whether two populations of directions could be from an antipodal
common mean.
Parameters:
dec: list of declinations
inc: list of inclinations
di_block: a nested list of [dec,inc]
A di_block can be provided in which case it will be used instead of
dec, inc lists.
plot_stereo : before plotting the CDFs, plot stereonet with the
bidirectionally separated data (default is False)
save : boolean argument to save plots (default is False)
save_folder : directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
Returns:
A boolean where 0 is fail and 1 is pass is returned.
    Plots of the cumulative distributions of the Cartesian components are shown,
    along with an equal area plot if `plot_stereo = True`.
Examples:
Populations of roughly antipodal directions are developed here using
``ipmag.fishrot``. These directions are combined into a single di_block
given that the function determines the principal component and splits the
data accordingly by polarity.
>>> directions_n = ipmag.fishrot(k=20, n=30, dec=5, inc=-60)
>>> directions_r = ipmag.fishrot(k=35, n=25, dec=182, inc=57)
    >>> directions = np.vstack([directions_n, directions_r]).tolist()
>>> ipmag.reversal_test_bootstrap(di_block=directions, plot_stereo = True)
Data can also be input to the function as separate lists of dec and inc.
In this example, the di_block from above is split into lists of dec and inc
which are then used in the function:
>>> direction_dec, direction_inc, direction_moment = ipmag.unpack_di_block(directions)
>>> ipmag.reversal_test_bootstrap(dec=direction_dec,inc=direction_inc, plot_stereo = True)
"""
if di_block is None:
all_dirs = make_di_block(dec, inc)
else:
all_dirs = di_block
directions1, directions2 =pmag.flip(all_dirs)
if plot_stereo:
# plot equal area with two modes
plt.figure(num=0, figsize=(4, 4))
plot_net(0)
        plot_di(di_block=directions1, color=color1)
        plot_di(di_block=do_flip(di_block=directions2), color=color2)
    result = common_mean_bootstrap(directions1, directions2,
                                   color1=color1, color2=color2,
                                   save=save, save_folder=save_folder, fmt=fmt,
                                   verbose=verbose)
return result
def reversal_test_bootstrap_H23(dec=None, inc=None, di_block=None, num_sims=10000, alpha=0.05, plot=True,
save=False, save_folder='.', fmt='svg',verbose=True):
"""
Bootstrap reversal test following Heslop et al. (2023).
This function calls common_mean_bootstrap_H23 with directional data that have been flipped,
for a reversal test. The directional data can be provided either as separate
declination and inclination arrays or as a di_block array.
Parameters:
dec (array): Array of declinations, only considered if di_block is None.
inc (array): Array of inclinations, only considered if di_block is None.
di_block (array, optional): Directional data as [dec, inc] for each sample. If provided,
dec and inc are ignored.
        num_sims (int, optional): Number of bootstrap simulations. Default is 10000.
alpha (float, optional): Significance level for hypothesis testing. Default is 0.05.
plot (bool, optional): If True, produce a histogram plot of the test statistic. Default is True.
save (bool, optional): If True, save the histogram plot. Default is False.
save_folder (str, optional): Directory where the histogram plot will be saved. Default is the current directory.
        fmt (str, optional): File format for saving the histogram plot. Default is 'svg'.
        verbose (bool, optional): If True, print the test result. Default is True.
Returns:
tuple: Contains the following elements:
- result (int): 0 if null hypothesis is rejected, 1 otherwise.
- Lmin (float): The test statistic value.
- Lmin_c (float): The critical test statistic value.
- p (float): The p-value of the test.
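    Examples:
        An illustrative sketch following the pattern of the other reversal
        tests (simulated directions, so results vary from run to run):
        >>> directions_n = ipmag.fishrot(k=20, n=30, dec=5, inc=-60)
        >>> directions_r = ipmag.fishrot(k=35, n=25, dec=182, inc=57)
        >>> directions = directions_n + directions_r
        >>> result, Lmin, Lmin_c, p = ipmag.reversal_test_bootstrap_H23(di_block=directions)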
"""
if di_block is None:
all_dirs = make_di_block(dec, inc)
else:
all_dirs = di_block
F1, F2 = pmag.flip(all_dirs)
return common_mean_bootstrap_H23(F1, F2, num_sims=num_sims, alpha=alpha, plot=plot,
save=save, save_folder=save_folder, fmt=fmt,verbose=verbose)
def reversal_test_MM1990(dec=None, inc=None, di_block=None, plot_CDF=False,
plot_stereo=False, save=False, save_folder='.', fmt='svg'):
"""
    Calculates Watson's V statistic from input data through Monte Carlo
simulation in order to test whether normal and reversed populations could
have been drawn from a common mean. Also provides the critical angle between
the two sample mean directions and the corresponding McFadden and McElhinny
(1990) classification. This function is a wrapper around the
ipmag.common_mean_watson() function with the first step of splitting
the data into two polarities using the pmag.flip() function and flipping
    the reversed directions to their antipodes.
Parameters:
dec (list, optional): List of declinations.
inc (list, optional): List of inclinations.
di_block (list of lists, optional): Nested list of [dec,inc]. If provided, it
takes precedence over separate dec and inc lists.
plot_CDF (bool, optional): If True, plot the CDF accompanying the results. Defaults to False.
plot_stereo (bool, optional): If True, plot stereonet with bidirectionally separated data. Defaults to False.
save (bool, optional): If True, save the plots. Defaults to False.
save_folder (str, optional): Directory path for saving plots. Defaults to current directory.
fmt (str, optional): Format of saved figures. Defaults to 'svg'.
Returns:
result (bool): 0 indicates fail, 1 indicates pass.
angle (float): Angle between the Fisher means of the two data sets.
critical_angle (float): Critical angle for the test to pass.
classification (str): MM1990 classification for a positive test.
Examples:
Populations of roughly antipodal directions are developed here using
``ipmag.fishrot``. These directions are combined into a single di_block
given that the function determines the principal component and splits the
data accordingly by polarity.
>>> directions_n = ipmag.fishrot(k=20, n=30, dec=5, inc=-60)
>>> directions_r = ipmag.fishrot(k=35, n=25, dec=182, inc=57)
>>> directions = directions_n + directions_r
>>> ipmag.reversal_test_MM1990(di_block=directions, plot_stereo = True)
Data can also be input to the function as separate lists of dec and inc.
In this example, the di_block from above is split into lists of dec and inc
which are then used in the function:
>>> direction_dec, direction_inc, direction_moment = ipmag.unpack_di_block(directions)
>>> ipmag.reversal_test_MM1990(dec=direction_dec,inc=direction_inc, plot_stereo = True)
"""
if di_block is None:
all_dirs = make_di_block(dec, inc)
else:
all_dirs = di_block
directions1, directions2 = pmag.flip(all_dirs)
if plot_stereo:
# plot equal area with two modes
plt.figure(num=0, figsize=(4, 4))
plot_net(0)
        plot_di(di_block=directions1, color='b')
        plot_di(di_block=do_flip(di_block=directions2), color='r')
    if plot_CDF:
        result, angle, critical_angle, classification = common_mean_watson(
            directions1, directions2, plot='yes',
            save=save, save_folder=save_folder, fmt=fmt)
    else:
        result, angle, critical_angle, classification = common_mean_watson(
            directions1, directions2,
            save=save, save_folder=save_folder, fmt=fmt)
return result, angle, critical_angle, classification
def conglomerate_test_Watson(R, n):
"""
    The Watson (1956) test of a directional data set for randomness compares
    the resultant vector length (R) of a group of directions to a critical
    value (Ro). If R exceeds Ro, the null hypothesis of randomness is
    rejected. If R is less than Ro, the null hypothesis of randomness cannot
    be rejected.
Parameters:
R : the resultant vector length of the directions
n : the number of directions
Returns:
**printed text** (text describing test result),
**result** (a dictionary with the Watson (1956) R values)
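    Examples:
        An illustrative call with hypothetical values: for n=10 directions the
        tabulated Ro_95 is 5.03, so a resultant vector length of R=4.0 means
        randomness cannot be rejected:
        >>> result = ipmag.conglomerate_test_Watson(4.0, 10)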
"""
Ro_values = {5: {95: 3.50, 99: 4.02}, 6: {95: 3.85, 99: 4.48},
7: {95: 4.18, 99: 4.89}, 8: {95: 4.48, 99: 5.26},
9: {95: 4.76, 99: 5.61}, 10: {95: 5.03, 99: 5.94},
11: {95: 5.29, 99: 6.25}, 12: {95: 5.52, 99: 6.55},
13: {95: 5.75, 99: 6.84}, 14: {95: 5.98, 99: 7.11},
15: {95: 6.19, 99: 7.36}, 16: {95: 6.40, 99: 7.60},
17: {95: 6.60, 99: 7.84}, 18: {95: 6.79, 99: 8.08},
19: {95: 6.98, 99: 8.33}, 20: {95: 7.17, 99: 8.55}}
if n < 5:
print('too few directions for a conglomerate test')
return
elif n < 21:
Ro_95 = Ro_values[n][95]
Ro_99 = Ro_values[n][99]
else:
Ro_95 = np.sqrt(7.815*(n/3))
Ro_99 = np.sqrt(11.345*(n/3))
print('R = ' + str(R))
print('Ro_95 = ' + str(Ro_95))
print('Ro_99 = ' + str(Ro_99))
if R < Ro_95:
print('This population "passes" a conglomerate test as the null hypothesis of randomness cannot be rejected at the 95% confidence level')
if R > Ro_95:
print(
'The null hypothesis of randomness can be rejected at the 95% confidence level')
if R > Ro_99:
print(
'The null hypothesis of randomness can be rejected at the 99% confidence level')
result = {'n': n, 'R': R, 'Ro_95': Ro_95, 'Ro_99': Ro_99}
return result
def fishqq(lon=None, lat=None, di_block=None,plot=True,save=False,fmt='png',save_folder='.'):
"""
Test whether a distribution is Fisherian and make a corresponding Q-Q plot.
The Q-Q plot shows the data plotted against the value expected from a
Fisher distribution. The first plot is the uniform plot which is the
Fisher model distribution in terms of longitude (declination). The second
plot is the exponential plot which is the Fisher model distribution in terms
of latitude (inclination). In addition to the plots, the test statistics Mu
(uniform) and Me (exponential) are calculated and compared against the
    critical test values. If Mu or Me exceed the critical values, the
    hypothesis that the distribution is Fisherian is rejected
    (see Fisher et al., 1987). These test statistics are returned in a dictionary.
Parameters:
lon : longitude or declination of the data
lat : latitude or inclination of the data
or
di_block: a nested list of [dec,inc]
A di_block can be provided in which case it will be used instead of
dec, inc lists.
plot : boolean to decide whether to make a plot (default is True)
save : boolean to decide whether plot is saved (default is False)
save_folder : relative directory where plots will be saved
(default is current directory, '.')
fmt : format of saved plot (default is 'png')
Returns:
dictionary
        - Dec, mean declination (or longitude)
        - Inc, mean inclination (or latitude)
- N, number of vectors
- Mu, Mu test statistic value for the data
- Mu_critical, critical value for Mu
- Me, Me test statistic value for the data
- Me_critical, critical value for Me
        if the data have two modes, each with N >= 10 (one normal, one
        reversed), two such dictionaries are returned (one per mode)
Examples:
In this example, directions are sampled from a Fisher distribution using
``ipmag.fishrot`` and then the ``ipmag.fishqq`` function is used to test
whether that distribution is Fisherian:
>>> directions = ipmag.fishrot(k=40, n=50, dec=200, inc=50)
>>> ipmag.fishqq(di_block = directions)
{'Dec': 199.73564290371894,
'Inc': 49.017612342358298,
'Me': 0.78330310031220352,
'Me_critical': 1.094,
'Mode': 'Mode 1',
'Mu': 0.69915926146177099,
'Mu_critical': 1.207,
'N': 50,
'Test_result': 'consistent with Fisherian model'}
The above example passed a di_block to the function as an input. Lists of
paired declination and inclination can also be used as inputs. Here the
directions di_block is unpacked to separate declination and inclination
    lists using the ``ipmag.unpack_di_block`` function, which are then used as
input to fishqq:
    >>> dec_list, inc_list, moments = ipmag.unpack_di_block(directions)
>>> ipmag.fishqq(lon=dec_list, lat=inc_list)
"""
if di_block is None:
all_dirs = make_di_block(lon, lat)
else:
all_dirs = di_block
ppars = pmag.doprinc(all_dirs) # get principal directions
QQ_dict1 = {}
QQ_dict2 = {}
QQ = {'unf': 1, 'exp': 2}
fignum=1
di=np.array(all_dirs).transpose()
decs=di[0]
incs=di[1]
all_dirs=np.column_stack((decs,incs))
nDIs,rDIs=pmag.separate_directions(all_dirs)
if len(nDIs) >= 10:
ppars = pmag.doprinc(nDIs) # get principal directions
Dnbar, Inbar = ppars['dec'], ppars['inc']
Nn = len(nDIs)
az=np.ones(Nn)*(Dnbar - 180.)
pl=np.ones(Nn)*(90.-Inbar)
Ds=nDIs.transpose()[0]
Is=nDIs.transpose()[1]
ndata=np.column_stack((Ds,Is,az,pl))
D1,I1=pmag.dotilt_V(ndata)
        D1 = (D1 - 180.) % 360  # rotate declinations back and wrap into 0-360
Dtit = 'Mode 1 Declinations'
Itit = 'Mode 1 Inclinations'
if plot:
plt.figure(fignum,figsize=(6, 3))
fignum+=1
Mu_n, Mu_ncr = pmagplotlib.plot_qq_unf(
QQ['unf'], D1, Dtit, subplot=True) # make plot
Me_n, Me_ncr = pmagplotlib.plot_qq_exp(
QQ['exp'], I1, Itit, subplot=True) # make plot
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_folder, 'QQ_mode1')+'.'+fmt, dpi=450)
if Mu_n <= Mu_ncr and Me_n <= Me_ncr:
F_n = 'Consistent with Fisher distribution'
else:
F_n = 'Fisher distribution rejected'
QQ_dict1['Mode'] = 'Mode 1'
QQ_dict1['Dec'] = Dnbar
QQ_dict1['Inc'] = Inbar
QQ_dict1['N'] = Nn
QQ_dict1['Mu'] = Mu_n
QQ_dict1['Mu_critical'] = Mu_ncr
QQ_dict1['Me'] = Me_n
QQ_dict1['Me_critical'] = Me_ncr
QQ_dict1['Test_result'] = F_n
if len(rDIs) >= 10:
ppars = pmag.doprinc(rDIs) # get principal directions
Drbar, Irbar = ppars['dec'] - 180., -ppars['inc']
Nr = len(rDIs)
az=np.ones(Nr)*(Drbar - 180.)
pl=np.ones(Nr)*(90.-Irbar)
Ds=rDIs.transpose()[0]
Is=rDIs.transpose()[1]
rdata=np.column_stack((Ds,Is,az,pl))
D2,I2=pmag.dotilt_V(rdata)
        D2 = (D2 - 180.) % 360  # rotate declinations back and wrap into 0-360
Dtit = 'Mode 2 Declinations'
Itit = 'Mode 2 Inclinations'
ppars = pmag.doprinc(rDIs) # get principal directions
        if 90 < ppars['dec'] < 270:
            Drbar = ppars['dec'] - 180.
        if ppars['inc'] < 0:
            Irbar = -ppars['inc']
if plot:
plt.figure(fignum,figsize=(6, 3))
Mu_r, Mu_rcr = pmagplotlib.plot_qq_unf(
QQ['unf'], D2, Dtit, subplot=True) # make plot
Me_r, Me_rcr = pmagplotlib.plot_qq_exp(
QQ['exp'], I2, Itit, subplot=True) # make plot
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_folder, 'QQ_mode2')+'.'+fmt, dpi=450)
if Mu_r <= Mu_rcr and Me_r <= Me_rcr:
F_r = 'Consistent with Fisher distribution'
else:
F_r = 'Fisher distribution rejected'
QQ_dict2['Mode'] = 'Mode 2'
QQ_dict2['Dec'] = Drbar
QQ_dict2['Inc'] = Irbar
QQ_dict2['N'] = Nr
QQ_dict2['Mu'] = Mu_r
QQ_dict2['Mu_critical'] = Mu_rcr
QQ_dict2['Me'] = Me_r
QQ_dict2['Me_critical'] = Me_rcr
QQ_dict2['Test_result'] = F_r
if QQ_dict2:
return QQ_dict1, QQ_dict2
elif QQ_dict1:
return QQ_dict1
else:
        print('you need N >= 10 for at least one mode')
def lat_from_inc(inc, a95=None):
"""
Calculate paleolatitude from inclination using the dipole equation.
    Parameters:
inc: (paleo)magnetic inclination in degrees
a95: 95% confidence interval from Fisher mean
Returns:
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
Examples:
Calculate the paleolatitude implied by an inclination of 45 degrees:
>>> ipmag.lat_from_inc(45)
26.56505117707799
Calculate the paleolatitude and the maximum and minimum paleolatitude
implied by an inclination of 20 degrees with an uncertainty on the
mean (a95) of 5:
>>> ipmag.lat_from_inc(20, a95=5)
(10.314104815618196, 13.12426812279171, 7.630740212430057)
"""
rad = np.pi/180.0
paleo_lat = np.arctan(0.5 * np.tan(inc * rad))/rad
if a95 is not None:
paleo_lat_max = np.arctan(0.5 * np.tan((inc + a95) * rad))/rad
paleo_lat_min = np.arctan(0.5 * np.tan((inc - a95) * rad))/rad
return paleo_lat, paleo_lat_max, paleo_lat_min
else:
return paleo_lat
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole.
Parameters:
ref_loc_lon: longitude of reference location in degrees E
ref_loc_lat: latitude of reference location in degrees N
        pole_plon: paleopole longitude (in degrees E)
        pole_plat: paleopole latitude (in degrees N)
Returns:
paleolatitude for location based on pole
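    Examples:
        Paleolatitude of a site at 45 N, 100 E given a pole at 60 N, 180 E
        (illustrative values; the result is approximately 42.3):
        >>> paleo_lat = ipmag.lat_from_pole(100, 45, 180, 60)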
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat)
def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation.
Parameter:
lat : latitude in degrees
Returns:
inclination calculated from latitude using the dipole equation
Examples:
        Calculate the inclination implied by a latitude of 45 degrees:
>>> ipmag.inc_from_lat(45)
63.434948822922
"""
rad = np.pi/180.0
inc = np.arctan(2 * np.tan(lat * rad))/rad
return inc
def plot_net(fignum=None, tick_spacing=10, ax=None):
"""
Draws circle and tick marks for equal area projection.
Parameters:
fignum: int or None
Figure number to use for creating a new figure if no axis is provided.
tick_spacing: int
Interval for declination tick marks, default is 10.
ax: matplotlib.axes.Axes or None
Axis to plot on. If None, the current axis will be used (or created if fignum is given).
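    Examples:
        A minimal sketch initializing an equal area net in figure 1:
        >>> plt.figure(num=1, figsize=(4, 4))
        >>> ipmag.plot_net(1)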
"""
if ax is None:
if fignum is not None:
plt.figure(num=fignum)
plt.clf()
ax = plt.gca()
ax.axis("off")
Dcirc = np.arange(0, 361.0)
Icirc = np.zeros(361, dtype=float)
Xcirc, Ycirc = [], []
for k in range(361):
XY = pmag.dimap(Dcirc[k], Icirc[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
ax.plot(Xcirc, Ycirc, "k")
# Put on the tick marks
Xsym, Ysym = [], []
for I in range(tick_spacing, 100, tick_spacing):
XY = pmag.dimap(0.0, I)
Xsym.append(XY[0])
Ysym.append(XY[1])
ax.scatter(Xsym, Ysym, color="black", marker="_", s=10)
Xsym, Ysym = [], []
for I in range(tick_spacing, 100, tick_spacing):
XY = pmag.dimap(90.0, I)
Xsym.append(XY[0])
Ysym.append(XY[1])
ax.scatter(Xsym, Ysym, color="black", marker="|", s=10)
Xsym, Ysym = [], []
for I in range(tick_spacing, 90, tick_spacing):
XY = pmag.dimap(180.0, I)
Xsym.append(XY[0])
Ysym.append(XY[1])
ax.scatter(Xsym, Ysym, color="black", marker="_", s=10)
Xsym, Ysym = [], []
for I in range(tick_spacing, 90, tick_spacing):
XY = pmag.dimap(270.0, I)
Xsym.append(XY[0])
Ysym.append(XY[1])
ax.scatter(Xsym, Ysym, color="black", marker="|", s=10)
for D in range(0, 360, tick_spacing):
Xtick, Ytick = [], []
for I in range(4):
XY = pmag.dimap(D, I)
Xtick.append(XY[0])
Ytick.append(XY[1])
ax.plot(Xtick, Ytick, "k")
ax.axis("equal")
ax.axis((-1.05, 1.05, -1.05, 1.05))
def plot_di(dec=None, inc=None, di_block=None, color='k', marker='o', markersize=20, legend='no', label='', connect_points=False, lw=0.25, lc='k', la=0.5, title=None, edge=None, alpha=1, zorder=2):
"""
Plot declination, inclination data on an equal area plot.
Before this function is called a plot needs to be initialized with code that looks
something like:
    >>> fignum = 1
    >>> plt.figure(num=fignum, figsize=(10,10), dpi=160)
    >>> ipmag.plot_net(fignum)
Parameters:
dec : declination being plotted
inc : inclination being plotted
di_block: a nested list of [dec,inc,1.0] (di_block can be provided instead of dec, inc in which case it will be used)
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default marker is a circle ('o')
markersize : default size is 20
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
label : the default label is blank ('')
connect_points : option to connect points in order of plotting, default is False
lw : linewidth of connecting lines
lc : color of connecting lines
la : alpha of connecting lines
        title : plot title (default is None, i.e. no title)
edge : marker edge color - if blank, is color of marker
alpha : opacity
zorder : zorder of marker
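    Examples:
        A minimal sketch plotting hypothetical directions on a new net:
        >>> plt.figure(num=1, figsize=(4, 4))
        >>> ipmag.plot_net(1)
        >>> ipmag.plot_di(di_block=[[350.1, 45.2], [128.5, -23.9]], color='r')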
"""
X_down = []
X_up = []
Y_down = []
Y_up = []
color_down = []
color_up = []
if di_block is not None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
dec, inc, intensity = di_lists
if len(di_lists) == 2:
dec, inc = di_lists
    try:
        len(dec)  # raises a TypeError when dec is a single value
for n in range(len(dec)):
XY = pmag.dimap(dec[n], inc[n])
if inc[n] >= 0:
X_down.append(XY[0])
Y_down.append(XY[1])
                if isinstance(color, list):
color_down.append(color[n])
else:
color_down.append(color)
else:
X_up.append(XY[0])
Y_up.append(XY[1])
                if isinstance(color, list):
color_up.append(color[n])
else:
color_up.append(color)
    except TypeError:  # dec and inc are single values rather than lists
XY = pmag.dimap(dec, inc)
if inc >= 0:
X_down.append(XY[0])
Y_down.append(XY[1])
color_down.append(color)
else:
X_up.append(XY[0])
Y_up.append(XY[1])
color_up.append(color)
if len(X_up) > 0:
if connect_points:
plt.plot(X_up, Y_up, ls = '-', linewidth=lw, color =lc, alpha = la, zorder=1)
plt.scatter(X_up, Y_up, facecolors='none', edgecolors=color_up,
s=markersize, marker=marker, label=label,alpha=alpha, zorder=zorder)
if len(X_down) > 0:
if connect_points:
plt.plot(X_down, Y_down, ls = '-', linewidth=lw, color =lc, alpha = la, zorder=1)
plt.scatter(X_down, Y_down, facecolors=color_down, edgecolors=edge,
s=markersize, marker=marker, label=label,alpha=alpha, zorder=zorder)
if legend == 'yes':
plt.legend(loc=2)
plt.tight_layout()
    if title is not None:
plt.title(title)
def plot_di_mean(dec, inc, a95, color='k', marker='o', markersize=20, label='', legend='no', zorder=2):
"""
Plot a mean direction (declination, inclination) with alpha_95 ellipse on
an equal area plot.
Before this function is called, a plot needs to be initialized with code
that looks something like:
    >>> fignum = 1
    >>> plt.figure(num=fignum, figsize=(10,10), dpi=160)
    >>> ipmag.plot_net(fignum)
Parameters:
dec : declination of mean being plotted
inc : inclination of mean being plotted
a95 : a95 confidence ellipse of mean being plotted
color : the default color is black. Other colors can be chosen (e.g. 'r').
marker : the default is a circle. Other symbols can be chosen (e.g. 's').
markersize : the default is 20. Other sizes can be chosen.
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
zorder : zorder of marker
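    Examples:
        A minimal sketch plotting a hypothetical mean direction with its
        alpha_95 circle of confidence:
        >>> plt.figure(num=1, figsize=(4, 4))
        >>> ipmag.plot_net(1)
        >>> ipmag.plot_di_mean(dec=200, inc=45, a95=5, color='b')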
"""
DI_dimap = pmag.dimap(dec, inc)
if inc < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label, zorder=zorder)
if inc >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label, zorder=zorder)
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(dec, inc, a95)
if legend == 'yes':
plt.legend(loc=2)
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, c=color)
plt.tight_layout()
def plot_di_mean_bingham(bingham_dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
"""
see plot_di_mean_ellipse
"""
plot_di_mean_ellipse(bingham_dictionary, fignum=fignum, color=color,
marker=marker, markersize=markersize, label=label, legend=legend)
def plot_di_mean_ellipse(dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
"""
Plot a mean direction (declination, inclination) confidence ellipse.
Parameters:
        dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent functions
        fignum : figure number passed to pmagplotlib.plot_ell (default is 1)
        color, marker, markersize, label, legend : plotting options as in plot_di_mean
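    Examples:
        A minimal sketch, assuming ``kent_dict`` is a dictionary returned by
        ``pmag.dokent``:
        >>> plt.figure(num=1, figsize=(4, 4))
        >>> ipmag.plot_net(1)
        >>> ipmag.plot_di_mean_ellipse(kent_dict)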
"""
    pars = [dictionary['dec'], dictionary['inc'],
            dictionary['Zeta'], dictionary['Zdec'], dictionary['Zinc'],
            dictionary['Eta'], dictionary['Edec'], dictionary['Einc']]
DI_dimap = pmag.dimap(dictionary['dec'], dictionary['inc'])
if dictionary['inc'] < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label)
if dictionary['inc'] >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
pmagplotlib.plot_ell(fignum, pars, color, 0, 1)
def plot_bootstrap_confidence(mean_dec, mean_inc, confidence_DI,
mean_color='k', confidence_color='k',
mean_marker='o', confidence_marker='.',
mean_markersize=20, confidence_markersize=1):
"""
Plot mean and bootstrap confidence outline on an equal area plot. The input confidence_DI
is the output from the mean_bootstrap_confidence() function.
Before this function is called a plot needs to be initialized with code that looks
something like:
    >>> fignum = 1
    >>> plt.figure(num=fignum, figsize=(10,10), dpi=160)
    >>> ipmag.plot_net(fignum)
Parameters:
mean_dec: Declination of the mean point.
mean_inc: Inclination of the mean point.
confidence_DI: A nested list of [dec, inc, 1.0] representing the bootstrap confidence.
mean_color: Color of the mean point. Default is black.
confidence_color: Color of the confidence points. Default is black.
mean_marker: Marker style for the mean point. Default is 'o' (circle).
        confidence_marker: Marker style for the confidence points. Default is '.' (point).
mean_markersize: Marker size for the mean point. Default is 20.
confidence_markersize: Marker size for the confidence points. Default is 1.
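    Examples:
        A minimal sketch, assuming ``boot_DI`` holds the bootstrap confidence
        directions returned by ``ipmag.mean_bootstrap_confidence()``:
        >>> plt.figure(num=1, figsize=(4, 4))
        >>> ipmag.plot_net(1)
        >>> ipmag.plot_bootstrap_confidence(200, 45, boot_DI)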
"""
plot_di(dec=mean_dec, inc=mean_inc, color=mean_color, marker=mean_marker,
markersize=mean_markersize)
plot_di(di_block=confidence_DI, color=confidence_color, marker=confidence_marker,
markersize=confidence_markersize)
def make_orthographic_map(central_longitude=0, central_latitude=0, figsize=(8, 8),
add_land=True, land_color='tan', land_edge_color='black',
add_ocean=False, ocean_color='lightblue', grid_lines=True,
lat_grid=[-80., -60., -30.,
0., 30., 60., 80.],
lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
'''
Function creates and returns an orthographic map projection using cartopy
Parameters:
central_longitude : central longitude of projection (default is 0)
central_latitude : central latitude of projection (default is 0)
figsize : size of the figure (default is 8x8)
        add_land : choose whether land is plotted on the map (default is True)
        land_color : specify land color (default is 'tan')
        land_edge_color : specify the land outline color (default is 'black')
        add_ocean : choose whether the ocean is plotted (default is False, change to True to plot)
        ocean_color : specify ocean color (default is 'lightblue')
        grid_lines : choose whether grid lines are plotted on the map (default is True)
        lat_grid : specify the latitude grid (default is [-80, -60, -30, 0, 30, 60, 80])
        lon_grid : specify the longitude grid (default is 30 degree spacing)
Examples:
>>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)
'''
# wrapper class to change the default resolution of the orthographic projection
# class LowerThresholdOrthographic(ccrs.Orthographic):
# @property
# def threshold(self):
# return 1e3
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.make_orthographic_map')
return
fig = plt.figure(figsize=figsize)
map_projection = ccrs.Orthographic(
central_longitude=central_longitude, central_latitude=central_latitude)
ax = plt.axes(projection=map_projection)
ax.set_global()
if add_ocean:
ax.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
if add_land:
ax.add_feature(cartopy.feature.LAND, zorder=0,
facecolor=land_color, edgecolor=land_edge_color)
if grid_lines:
ax.gridlines(xlocs=lon_grid, ylocs=lat_grid, linewidth=1,
color='black', linestyle='dotted')
return ax
def make_mollweide_map(central_longitude=0, figsize=(8, 8),
add_land=True, land_color='tan', land_edge_color='black',
add_ocean=False, ocean_color='lightblue', grid_lines=True,
lat_grid=[-180., -150., -120., -90., -60., -30.,
0., 30., 60., 90., 120., 150., 180.],
lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
'''
Function creates and returns a Mollweide map projection using cartopy
Parameters:
        central_longitude : central longitude of projection (default is 0)
        figsize : size of the figure (default is 8x8)
        add_land : choose whether land is plotted on the map (default is True)
        land_color : specify land color (default is 'tan')
        land_edge_color : specify the land outline color (default is 'black')
        add_ocean : choose whether the ocean is plotted (default is False, change to True to plot)
        ocean_color : specify ocean color (default is 'lightblue')
        grid_lines : choose whether grid lines are plotted on the map (default is True)
        lat_grid : specify the latitude grid (default is 30 degree spacing)
        lon_grid : specify the longitude grid (default is 30 degree spacing)
Examples:
>>> map_axis = make_mollweide_map(central_longitude=200)
'''
if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.make_mollweide_map')
return
fig = plt.figure(figsize=figsize)
map_projection = ccrs.Mollweide(central_longitude=central_longitude)
ax = plt.axes(projection=map_projection)
if add_ocean:
ax.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
if add_land:
ax.add_feature(cartopy.feature.LAND, zorder=0,
facecolor=land_color, edgecolor=land_edge_color)
ax.set_global()
if grid_lines:
ax.gridlines(xlocs=lon_grid, ylocs=lat_grid)
return ax
def make_robinson_map(central_longitude=0, figsize=(8, 8),
add_land=True, land_color='tan', add_ocean=False, ocean_color='lightblue', grid_lines=True,
lat_grid=[-180., -150., -120., -90., -60., -30.,
0., 30., 60., 90., 120., 150., 180.],
lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
'''
Function creates and returns a Robinson map projection using cartopy
Parameters:
        central_longitude : central longitude of projection (default is 0)
        figsize : size of the figure (default is 8x8)
        add_land : choose whether land is plotted on the map (default is True)
        land_color : specify land color (default is 'tan')
        add_ocean : choose whether the ocean is plotted (default is False, change to True to plot)
        ocean_color : specify ocean color (default is 'lightblue')
        grid_lines : choose whether grid lines are plotted on the map (default is True)
        lat_grid : specify the latitude grid (default is 30 degree spacing)
        lon_grid : specify the longitude grid (default is 30 degree spacing)
Examples:
        >>> map_axis = make_robinson_map(central_longitude=200)
'''
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.make_robinson_map')
return
fig = plt.figure(figsize=figsize)
map_projection = ccrs.Robinson(central_longitude=central_longitude)
ax = plt.axes(projection=map_projection)
if add_ocean:
ax.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
if add_land:
ax.add_feature(cartopy.feature.LAND, zorder=0,
facecolor=land_color, edgecolor='black')
ax.set_global()
if grid_lines:
ax.gridlines(xlocs=lon_grid, ylocs=lat_grid)
return ax
def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k',
marker='o', markersize=20, legend='no',outline=True,
filled_pole=False, fill_color='k', fill_alpha=1.0,
mean_alpha = 1.0, A95_alpha=1.0, zorder=100):
"""
This function plots a paleomagnetic pole and A95 error ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Parameters:
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
        color : symbol color; the default color is black. Other colors can be chosen (e.g. 'r')
        edgecolor : marker edge color (default is 'k')
        marker : the default marker is a circle. Other symbols can be chosen (e.g. 's')
        markersize : the default is 20. Other sizes can be chosen
        label : the default is no label. Labels can be assigned.
        legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
        outline : if True, the outline of the A95 ellipse is drawn (default is True)
        filled_pole : if True, the A95 ellipse will be filled with color
        fill_color : color of fill; the default is black.
        fill_alpha : transparency of filled ellipse (the default is 1.0; no transparency).
        mean_alpha : transparency of pole mean (the default is 1.0; no transparency).
        A95_alpha : transparency of the A95 ellipse outline (the default is 1.0; no transparency).
        zorder : plotting order (default is 100; higher will move to top of plot)
Examples:
>>> plon = 200
>>> plat = 60
>>> A95 = 6
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40, zorder=20)
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_pole')
return
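    # convert the A95 radius from degrees of arc to kilometers
    # (one degree of arc is ~111.32 km at the equator)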
A95_km = A95 * 111.32
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=zorder, transform=ccrs.PlateCarree(), alpha = mean_alpha)
    if not filled_pole:
        equi(map_axis, plon, plat, A95_km, color, alpha=A95_alpha)
    else:
        equi(map_axis, plon, plat, A95_km, fill_color, alpha=fill_alpha,
             outline=outline, fill=True)
if legend == 'yes':
plt.legend(loc=2)
def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k',
marker='o', markersize=20, legend='no',outline=True,
filled_pole=False, fill_color='k', fill_alpha=1.0, alpha=1.0, zorder=101, lw=1):
"""
This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Parameters:
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
filled_pole : if True, the A95 ellipse will be filled with color
fill_color : color of fill; the default is black.
fill_alpha : transparency of filled ellipse (the default is 1.0; no transparency).
        alpha : transparency of the pole symbols and unfilled ellipses (the default is 1.0; no transparency).
        zorder : plotting order (default is 101; higher will move to top of plot)
Examples:
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
"""
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=zorder, transform=ccrs.PlateCarree(), alpha=alpha)
    if not filled_pole:
        for n in range(len(A95)):
            A95_km = A95[n] * 111.32
            ellipse_color = color if isinstance(color, str) else color[n]
            equi(map_axis, plon[n], plat[n], A95_km, ellipse_color,
                 alpha=alpha, lw=lw)
    else:
        for n in range(len(A95)):
            A95_km = A95[n] * 111.32
            ellipse_fill = fill_color if isinstance(fill_color, str) else fill_color[n]
            equi(map_axis, plon[n], plat[n], A95_km, ellipse_fill,
                 alpha=fill_alpha, outline=outline, fill=True, lw=lw)
if legend == 'yes':
plt.legend(loc=2)
def plot_pole_ellipse(map_axis, dictionary,
color='k', edgecolor='k', marker='s',
markersize=20, label='', alpha=1.0, lw=1, lower=True, zorder=100):
"""
Plot a mean pole confidence ellipse associated with a Kent distribution
Parameters:
map_axis : the name of the current map axis that has been developed using cartopy
dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent functions
        color : symbol and ellipse color; the default color is black. Other colors can be chosen (e.g. 'r')
        edgecolor : marker edge color (default is 'k')
        marker : the default marker is a square ('s'). Other symbols can be chosen (e.g. 'o')
        markersize : the default is 20. Other sizes can be chosen
        label : the default is no label. Labels can be assigned.
        alpha : transparency of the ellipse (the default is 1.0; no transparency)
        lw : line width of the ellipse (default is 1)
        lower : hemisphere to plot the ellipse when calling function pmagplotlib.plot_ell (default is True)
        zorder : plotting order (default is 100; higher will move to top of plot)
Examples:
>>> kent_dict = {'dec': 287.53798364307437,
'inc': 88.56067392991959,
'n': 5,
'Zdec': 54.83073632264832,
'Zinc': 0.8721861867684042,
'Edec': 144.84816793561657,
'Einc': 1.1448791390804505,
'Zeta': 4.640345964184263,
'Eta': 6.8378968512569465,
'R1': 0.9914595207919079,
'R2': 0.006259515780690272}
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=90)
>>> ipmag.plot_pole_ellipse(map_axis,kent_dict, color='red',markersize=40)
"""
    pars = [dictionary['dec'], dictionary['inc'],
            dictionary['Zeta'], dictionary['Zdec'], dictionary['Zinc'],
            dictionary['Eta'], dictionary['Edec'], dictionary['Einc']]
map_axis.scatter(dictionary['dec'], dictionary['inc'], marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, transform=ccrs.PlateCarree(), zorder=zorder)
fignum=1
ellipse_points = np.array(pmagplotlib.plot_ell(fignum, pars, lower=lower, plot=False)).T
map_axis.plot(ellipse_points[0], ellipse_points[1], color=color,
transform=ccrs.Geodetic(), alpha=alpha, lw=lw,
zorder=zorder)
return map_axis
def plot_pole_dp_dm(map_axis, plon, plat, slon, slat, dp, dm, pole_label='pole', site_label='site',
pole_color='k', pole_edgecolor='k', pole_marker='o',
site_color='r', site_edgecolor='r', site_marker='s',
markersize=20, legend=True, transform=ccrs.PlateCarree()):
"""
This function plots a paleomagnetic pole and a dp/dm confidence ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Parameters:
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
slon : the longitude of the site (in degrees E)
slat : the latitude of the site (in degrees)
dp : the semi-minor axis of the confidence ellipse (in degrees)
dm : the semi-major axis of the confidence ellipse (in degrees)
pole_color : the default color is black. Other colors can be chosen (e.g. 'g')
site_color : the default color is red. Other colors can be chosen (e.g. 'g')
pole_marker : the default is a circle. Other symbols can be chosen (e.g. 's')
site_marker : the default is a square. Other symbols can be chosen (e.g. '^')
markersize : the default is 20. Other size can be chosen
pole_label : string that labels the pole.
site_label : string that labels the site
legend : the default is a legend (True). Putting False will suppress legend plotting.
transform : the default is the PlateCarree transform in Cartopy.
        Other transforms can be chosen (e.g. ccrs.Geodetic()), but this parameter
rarely needs to be changed by the user and is included for completeness
and in case of artifacts arising from the PlateCarree transform on some
map projections in which case the Geodetic transform may work better.
Examples:
>>> dec = 280
>>> inc = 45
>>> a95 = 5
>>> site_lat = 45
>>> site_lon = -100
>>> pole = pmag.dia_vgp(dec, inc, a95, site_lat, site_lon)
>>> pole_lon = pole[0]
>>> pole_lat = pole[1]
>>> dp = pole[2]
>>> dm = pole[3]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole_dp_dm(map_axis,pole_lon,pole_lat,site_lon,site_lat,dp,dm)
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_pole_dp_dm')
return
dp_km = dp*111.32
dm_km = dm*111.32
map_axis.scatter(plon, plat, marker=pole_marker,
color=pole_color, edgecolors=pole_edgecolor, s=markersize,
label=pole_label, zorder=101, transform=ccrs.PlateCarree())
map_axis.scatter(slon, slat, marker=site_marker,
color=site_color, edgecolors=site_edgecolor, s=markersize,
label=site_label, zorder=101, transform=ccrs.PlateCarree())
# the orientation of the ellipse needs to be determined using the
# two laws of cosines for spherical triangles where the triangle is
# A: site, B: north pole, C: paleomagnetic pole (see Fig. A.2 of Butler)
site_lon_rad = np.deg2rad(slon)
site_lat_rad = np.deg2rad(slat)
c_rad = np.deg2rad(90-slat)
pole_lon_rad = np.deg2rad(plon)
pole_lat_rad = np.deg2rad(plat)
a_rad = np.deg2rad(90-plat)
B_rad = np.abs(pole_lon_rad-site_lon_rad)
cos_b = np.cos(c_rad)*np.cos(a_rad) + np.sin(c_rad) * \
np.sin(a_rad)*np.cos(B_rad)
b_rad = np.arccos(cos_b)
sin_C = (np.sin(B_rad)/np.sin(b_rad))*np.sin(c_rad)
C_rad = np.arcsin(sin_C)
# need to make the rotation of the ellipse go the right way
if slon-plon > 180:
if plon >= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif slon-plon <= 180:
if plon >= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
ellipse(map_axis, plon, plat, dp_km, dm_km, C_deg, color=pole_color, transform=transform)
if legend:
plt.legend(loc=2)
def plot_poles_colorbar(map_axis, plons, plats, A95s, colorvalues, vmin, vmax,
colormap='viridis', edgecolor='k', marker='o', markersize=20,
alpha=1.0, colorbar=True, colorbar_label='pole age (Ma)',
                        outline=True, filled_pole=False, fill_alpha=1.0, lw=1):
"""
    This function plots multiple paleomagnetic poles and their A95 error ellipses on a cartopy map axis.
The poles are colored by the defined colormap.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Parameters:
map_axis : the name of the current map axis that has been developed using cartopy
plons : the longitude of the paleomagnetic pole being plotted (in degrees E)
plats : the latitude of the paleomagnetic pole being plotted (in degrees)
A95s : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
        colorvalues : values (e.g. ages) used to map each pole onto the colormap
        vmin : minimum value of the colormap range
        vmax : maximum value of the colormap range
colormap : the colormap used (default is 'viridis'; others should be put as a string with quotes, e.g. 'plasma')
edgecolor : the color desired for the symbol outline
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
colorbar : the default is to include a colorbar (True). Putting False will make it so no legend is plotted.
colorbar_label : label for the colorbar
Examples:
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> ages = [100,200,300]
>>> vmin = 0
>>> vmax = 300
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_poles_colorbar')
return
color_mapping = plt.cm.ScalarMappable(cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
colors = color_mapping.to_rgba(colorvalues).tolist()
plot_poles(map_axis, plons, plats, A95s,
label='', color=colors, edgecolor=edgecolor, marker=marker,
markersize=markersize,filled_pole=filled_pole,outline=outline,
fill_color=colors, fill_alpha=fill_alpha, alpha=alpha, lw=lw)
    if colorbar:
        sm = plt.cm.ScalarMappable(
            cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
        sm.set_array([])  # public API instead of the private sm._A hack
        plt.colorbar(sm, ax=map_axis, orientation='horizontal', shrink=0.8,
                     pad=0.05, label=colorbar_label)
def plot_vgp(map_axis, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o',
edge='black', markersize=20, alpha=1, legend=False, zorder=100):
"""
This function plots a paleomagnetic pole position onto a cartopy map axis.
Before this function is called, a map plot needs to be initialized with code
such as that in the ```ipmag.make_orthographic_map()``` function (see example below).
Parameters:
        map_axis : the name of the current map axis that has been developed using cartopy
        vgp_lon : the longitude of the pole(s) being plotted (in degrees E)
        vgp_lat : the latitude of the pole(s) being plotted (in degrees)
        di_block : a nested list of [vgp_lon, vgp_lat] (can be provided instead of vgp_lon, vgp_lat)
        color : the color desired for the symbol (default is 'k' aka black)
        marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
        edge : the color of the edge of the marker (default is black); can be set to None to have no edge
        markersize : size of the marker in pt (default is 20)
        alpha : the transparency of the points (default is 1 which is opaque, 0 is fully transparent)
        label : the default is no label. Labels can be assigned.
        legend : the default is no legend (False). Putting True will plot a legend.
        zorder : plotting order (default is 100; higher will move to top of plot)
Examples:
>>> vgps = ipmag.fishrot(dec=200,inc=30)
>>> vgp_lon_list,vgp_lat_list,intensities= ipmag.unpack_di_block(vgps)
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
        >>> ipmag.plot_vgp(map_axis,vgp_lon=vgp_lon_list,vgp_lat=vgp_lat_list,color='red',markersize=40,zorder=20)
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_vgp')
return
    if di_block is not None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
vgp_lon, vgp_lat, intensity = di_lists
if len(di_lists) == 2:
vgp_lon, vgp_lat = di_lists
    if edge is None:
map_axis.scatter(vgp_lon, vgp_lat, marker=marker, edgecolors=None,
s=markersize, color=color, label=label, zorder=zorder,
alpha=alpha, transform=ccrs.PlateCarree())
else:
map_axis.scatter(vgp_lon, vgp_lat, marker=marker, edgecolors=[edge],
s=markersize, color=color, label=label, zorder=zorder,
alpha=alpha, transform=ccrs.PlateCarree())
map_axis.set_global()
if legend:
plt.legend(loc=2)
def vgp_calc(dataframe, tilt_correction='yes', site_lon='site_lon', site_lat='site_lat',
dec_is='dec_is', inc_is='inc_is', dec_tc='dec_tc', inc_tc='inc_tc',
recalc_label=False):
"""
This function calculates paleomagnetic poles using directional data and site
location data within a pandas.DataFrame. The function adds the columns
'paleolatitude', 'vgp_lat', 'vgp_lon', 'vgp_lat_rev', and 'vgp_lon_rev'
to the dataframe. The '_rev' columns allow for subsequent choice as to which
polarity will be used for the VGPs.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
    tilt_correction : 'yes' is the default and uses tilt-corrected data (dec_tc, inc_tc); 'no' uses data that are not tilt-corrected and are in geographic coordinates
dataframe['site_lat'] : the name of the Dataframe column containing the latitude of the site
dataframe['site_lon'] : the name of the Dataframe column containing the longitude of the site
dataframe['inc_tc'] : the name of the Dataframe column containing the tilt-corrected inclination (used by default tilt-correction='yes')
dataframe['dec_tc'] : the name of the Dataframe column containing the tilt-corrected declination (used by default tilt-correction='yes')
    dataframe['inc_is'] : the name of the Dataframe column containing the in situ inclination (used when tilt_correction='no')
    dataframe['dec_is'] : the name of the Dataframe column containing the in situ declination (used when tilt_correction='no')
Returns
-------
dataframe['paleolatitude']
dataframe['colatitude']
dataframe['vgp_lat']
dataframe['vgp_lon']
dataframe['vgp_lat_rev']
dataframe['vgp_lon_rev']
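    Examples
    -------
    A minimal sketch with a hypothetical single-site DataFrame (column names
    match the defaults):
    >>> df = pd.DataFrame({'site_lat': [45.0], 'site_lon': [260.0],
    ...                    'dec_tc': [10.0], 'inc_tc': [60.0]})
    >>> df = ipmag.vgp_calc(df)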
"""
if recalc_label:
vgp_lat_name = 'vgp_lat_recalc'
vgp_lon_name = 'vgp_lon_recalc'
vgp_lat_rev_name = 'vgp_lat_rev_recalc'
vgp_lon_rev_name = 'vgp_lon_rev_recalc'
else:
vgp_lat_name = 'vgp_lat'
vgp_lon_name = 'vgp_lon'
vgp_lat_rev_name = 'vgp_lat_rev'
vgp_lon_rev_name = 'vgp_lon_rev'
dataframe.is_copy = False
if tilt_correction == 'yes':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_tc]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe[vgp_lat_name] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_tc]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_tc])))/
(np.cos(np.radians(dataframe[vgp_lat_name])))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe[vgp_lat_name]))
dataframe[vgp_lon_name] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe[vgp_lat_rev_name] = -dataframe[vgp_lat_name]
dataframe[vgp_lon_rev_name] = (dataframe[vgp_lon_name] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
if tilt_correction == 'no':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_is]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe[vgp_lat_name] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_is]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_is])))/
(np.cos(np.radians(dataframe[vgp_lat_name])))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe[vgp_lat_name]))
dataframe[vgp_lon_name] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe[vgp_lat_rev_name] = -dataframe[vgp_lat_name]
dataframe[vgp_lon_rev_name] = (dataframe[vgp_lon_name] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
return(dataframe)
def sb_vgp_calc(dataframe, site_correction='yes', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates the angular dispersion of VGPs and corrects
for within site dispersion (unless site_correction = 'no') to return
a value S_b. The input data needs to be within a pandas Dataframe.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
    the data frame needs to contain these columns:
    dataframe['site_lat'] : latitude of the site
    dataframe['site_lon'] : longitude of the site
    dataframe['k'] : Fisher precision parameter for directions
    dataframe['n'] : number of samples per site (used when site_correction='yes')
    dataframe['vgp_lat'] : VGP latitude
    dataframe['vgp_lon'] : VGP longitude
    ----- the following default column names can be changed by keyword argument -----
    dataframe['inc_tc'] : tilt-corrected inclination
    dataframe['dec_tc'] : tilt-corrected declination
    Returns
    -----------
    Sb : the VGP scatter, corrected for within-site dispersion unless site_correction='no'
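    Examples
    -----------
    A minimal sketch with a hypothetical DataFrame (values are made up):
    >>> df = pd.DataFrame({'site_lat': [45.0, 45.1, 44.9],
    ...                    'site_lon': [260.0, 260.2, 259.9],
    ...                    'dec_tc': [1.0, 358.2, 4.5],
    ...                    'inc_tc': [62.0, 59.1, 60.3],
    ...                    'vgp_lon': [320.1, 337.2, 311.8],
    ...                    'vgp_lat': [85.2, 81.7, 83.4],
    ...                    'k': [120.0, 95.0, 150.0],
    ...                    'n': [8, 7, 9]})
    >>> Sb = ipmag.sb_vgp_calc(df)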
"""
# calculate the mean from the directional data
dataframe_dirs = []
for n in range(0, len(dataframe)):
dataframe_dirs.append([dataframe[dec_tc][n],
dataframe[inc_tc][n], 1.])
dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs)
# calculate the mean from the vgp data
dataframe_poles = []
dataframe_pole_lats = []
dataframe_pole_lons = []
for n in range(0, len(dataframe)):
dataframe_poles.append([dataframe['vgp_lon'][n],
dataframe['vgp_lat'][n], 1.])
dataframe_pole_lats.append(dataframe['vgp_lat'][n])
dataframe_pole_lons.append(dataframe['vgp_lon'][n])
dataframe_pole_mean = pmag.fisher_mean(dataframe_poles)
# calculate mean paleolatitude from the directional data
dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc'])
angle_list = []
for n in range(0, len(dataframe)):
angle = pmag.angle([dataframe['vgp_lon'][n], dataframe['vgp_lat'][n]],
[dataframe_pole_mean['dec'], dataframe_pole_mean['inc']])
angle_list.append(angle[0])
dataframe['delta_mean_pole'] = angle_list
if site_correction == 'yes':
# use eq. 2 of Cox (1970) to translate the directional precision parameter
# into pole coordinates using the assumption of a Fisherian distribution in
# directional coordinates and the paleolatitude as calculated from mean
# inclination using the dipole equation
dataframe['K'] = dataframe['k']/(0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude']))**2
+ 9 * np.sin(np.deg2rad(dataframe['paleolatitude']))**4))
dataframe['Sw'] = 81/(dataframe['K']**0.5)
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2 - \
dataframe['Sw'][n]**2/dataframe['n'][n]
summation += quantity
N += 1
Sb = ((1.0/(N - 1.0)) * summation)**0.5
if site_correction == 'no':
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2
summation += quantity
N += 1
Sb = ((1.0/(N - 1.0)) * summation)**0.5
return Sb
def bin_trace(lon_samples, lat_samples, resolution):
"""
    Given a trace of samples in longitude and latitude, bin them into a
    regular latitude-longitude grid and return the grids along with the raw
    bin counts (the counts are normalized by sample number in
    density_distribution).
    Parameters:
        lon_samples: a list of longitudes
        lat_samples: a list of latitudes
        resolution: the number of divisions in latitude; the number of
            divisions in longitude is twice that.
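    Examples:
        A sketch with hypothetical VGP positions:
        >>> lons = [310.2, 311.5, 350.7, 10.1]
        >>> lats = [75.1, 78.3, 80.2, 82.4]
        >>> lon_grid, lat_grid, hist = ipmag.bin_trace(lons, lats, 30)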
"""
lats = np.linspace(-90., 90., resolution, endpoint=True)
lons = np.linspace(-180., 180., 2 * resolution, endpoint=True)
lon_grid, lat_grid = np.meshgrid(lons, lats)
hist = np.zeros_like(lon_grid)
dlon = 360. / (2. * resolution)
dlat = 180. / resolution
for lon, lat in zip(lon_samples, lat_samples):
lon = np.mod(lon, 360.)
if lon > 180.:
lon = lon - 360.
if lat < -90. or lat > 90.:
# Just skip invalid latitudes if they happen to arise
continue
        # clamp the edge cases lon == 180 or lat == 90 into the last bin
        lon_index = min(int(np.floor((lon + 180.) / dlon)), 2 * resolution - 1)
        lat_index = min(int(np.floor((lat + 90.) / dlat)), resolution - 1)
hist[lat_index, lon_index] += 1
lat_grid += dlat / 2.
lon_grid += dlon / 2.
return lon_grid, lat_grid, hist
def density_distribution(lon_samples, lat_samples, resolution=30):
'''
    Calculate the density distribution of a given set of vectors on a sphere.
Parameters:
lon_samples: a list of longitudes
lat_samples: a list of latitudes
        resolution: the resolution at which to calculate the distribution;
            the higher the number, the finer the resolution (default is 30)
    Returns:
        longitude grid, latitude grid, and normalized densities
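    Examples:
        A sketch with hypothetical VGP positions:
        >>> lons = [310.2, 311.5, 350.7, 10.1]
        >>> lats = [75.1, 78.3, 80.2, 82.4]
        >>> lon_grid, lat_grid, density = ipmag.density_distribution(lons, lats)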
'''
count = len(lon_samples)
lon_grid, lat_grid, hist = bin_trace(lon_samples, lat_samples, resolution)
return lon_grid, lat_grid, hist / count
def cumulative_density_distribution(lon_samples, lat_samples, resolution=30):
'''
    Compute the cumulative density distribution of a set of vectors on a unit sphere.
Parameters:
lon_samples: a list of longitudes
lat_samples: a list of latitudes
resolution: the resolution at which to calculate the vectors distribution.
the higher the number, the finer the resolution
Returns:
Tuple
longitude grid, latitude grid, and cumulative densities
'''
lon_grid, lat_grid, hist = bin_trace(lon_samples, lat_samples, resolution)
# Compute the cumulative density
hist = hist.ravel()
i_sort = np.argsort(hist)[::-1]
i_unsort = np.argsort(i_sort)
hist_cumsum = hist[i_sort].cumsum()
hist_cumsum /= hist_cumsum[-1]
return lon_grid, lat_grid, hist_cumsum[i_unsort].reshape(lat_grid.shape)
def plot_distributions(ax, lon_samples, lat_samples, to_plot='d', resolution=100, **kwargs):
'''
    Plot distributions of a group of vectors on a unit sphere.
Parameters:
ax: matplotlib axis
lon_samples: a list or array of longitude samples
lat_samples: a list or array of latitude samples
to_plot: the type of distribution plot to show, can be 'd' as colormesh, 'e' as contour, 's' as discrete scatter plots
resolution: the resolution at which to plot the distributions
kwargs: other keyword arguments inherited from matplotlib
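    Examples:
        A sketch contouring hypothetical VGP positions on a cartopy map axis,
        where ``lons`` and ``lats`` are lists of coordinates:
        >>> map_axis = ipmag.make_orthographic_map(central_longitude=0, central_latitude=90)
        >>> ipmag.plot_distributions(map_axis, lons, lats, to_plot='de')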
'''
    cmap = kwargs.pop('cmap', 'viridis')  # pop so cmap is not passed twice below
artists = []
if 'd' in to_plot:
lon_grid, lat_grid, density = density_distribution(
lon_samples, lat_samples, resolution)
density = np.ma.masked_where(density <= 0.05*density.max(), density)
a = ax.pcolormesh(lon_grid, lat_grid, density, cmap=cmap,
transform=ccrs.PlateCarree())
artists.append(a)
if 'e' in to_plot:
lon_grid, lat_grid, cumulative_density = cumulative_density_distribution(
lon_samples, lat_samples, resolution)
a = ax.contour(lon_grid, lat_grid, cumulative_density, levels=kwargs.get('levels',[
0.95]), colors=kwargs.get('colors', 'k'), transform=ccrs.PlateCarree(),
linewidths=kwargs.get('lw', 1), zorder = kwargs.get('zorder', 100))
artists.append(a)
if 's' in to_plot:
        # pop 'color' so it is not passed twice via **kwargs
        a = ax.scatter(lon_samples, lat_samples, color=kwargs.pop('color', 'C0'),
                       alpha=0.1, transform=ccrs.PlateCarree(), edgecolors=None, **kwargs)
artists.append(a)
return artists
def make_di_block(dec, inc, unit_vector=True):
"""
Some pmag.py and ipmag.py functions require or will take a list of unit
vectors [dec,inc,1.] as input. This function takes declination and
inclination data and make it into such a nested list of lists.
Parameters:
dec : list of declinations
inc : list of inclinations
unit_vector : if True will return [dec,inc,1.]; if False will return [dec,inc]
Returns:
di_block
nested list of declination, inclination lists
Examples:
>>> decs = [180.3, 179.2, 177.2]
>>> incs = [12.1, 13.7, 11.9]
>>> ipmag.make_di_block(decs,incs)
[[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
"""
di_block = []
    if unit_vector:
        for n in range(0, len(dec)):
            di_block.append([dec[n], inc[n], 1.0])
    else:
        for n in range(0, len(dec)):
            di_block.append([dec[n], inc[n]])
return di_block
def unpack_di_block(di_block):
"""
This function unpacks a nested list of [dec,inc,mag_moment] into a list of
declination values, a list of inclination values and a list of magnetic
moment values. Mag_moment values are optional, while dec and inc values are
required.
Parameters:
di_block : nested list of declination, inclination lists
Returns:
dec : list of declinations
inc : list of inclinations
mag_moment : list of magnetic moments (if present in di_block)
Examples:
The di_block nested lists of lists can be unpacked using the function
>>> directions = [[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
>>> ipmag.unpack_di_block(directions)
([180.3, 179.2, 177.2], [12.1, 13.7, 11.9], [1.0, 1.0, 1.0])
These unpacked values can be assigned to variables:
>>> dec, inc, moment = ipmag.unpack_di_block(directions)
"""
dec_list = []
inc_list = []
moment_list = []
for n in range(0, len(di_block)):
dec = di_block[n][0]
inc = di_block[n][1]
dec_list.append(dec)
inc_list.append(inc)
if len(di_block[n]) > 2:
moment = di_block[n][2]
moment_list.append(moment)
return dec_list, inc_list, moment_list
def make_diddd_array(dec, inc, dip_direction, dip):
"""
Some pmag.py functions such as the bootstrap fold test require a numpy array
of dec, inc, dip direction, dip [dec, inc, dd, dip] as input. This function
makes such an array.
Parameters:
dec : paleomagnetic declination in degrees
inc : paleomagnetic inclination in degrees
dip_direction : the dip direction of bedding (in degrees between 0 and 360)
dip: dip of bedding (in degrees)
Returns:
array
an array of [dec, inc, dip_direction, dip]
Examples:
Data in separate lists of dec, inc, dip_direction, dip data can be made into
an array.
>>> dec = [132.5,124.3,142.7,130.3,163.2]
>>> inc = [12.1,23.2,34.2,37.7,32.6]
>>> dip_direction = [265.0,265.0,265.0,164.0,164.0]
>>> dip = [20.0,20.0,20.0,72.0,72.0]
>>> data_array = ipmag.make_diddd_array(dec,inc,dip_direction,dip)
>>> data_array
array([[ 132.5, 12.1, 265. , 20. ],
[ 124.3, 23.2, 265. , 20. ],
[ 142.7, 34.2, 265. , 20. ],
[ 130.3, 37.7, 164. , 72. ],
[ 163.2, 32.6, 164. , 72. ]])
"""
diddd_block = []
for n in range(0, len(dec)):
diddd_block.append([dec[n], inc[n], dip_direction[n], dip[n]])
diddd_array = np.array(diddd_block)
return diddd_array
def shoot(lon, lat, azimuth, maxdist=None):
"""
Computes the endpoint of travelling a given distance along a geodesic from a
starting point and azimuth (a direct geodesic calculation on the WGS84
ellipsoid). This enables A95 error ellipses to be drawn around
paleomagnetic poles in conjunction with equi.
(from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/)
Parameters:
lon : longitude of the starting point (in degrees)
lat : latitude of the starting point (in degrees)
azimuth : azimuth of travel (in degrees)
maxdist : distance to travel (in km); required despite the None default
Returns:
(glon2, glat2, baz) : endpoint longitude, endpoint latitude, and back azimuth (all in degrees)
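Examples:
A minimal sketch (inputs are illustrative; travel 100 km due east from 45 N, 0 E):
>>> lon2, lat2, baz = ipmag.shoot(0., 45., 90., 100.)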
"""
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist/1.852
faz = azimuth * np.pi / 180.
EPS = 0.00000000005
# if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)): #why was this ever a thing? it works fine at the north pole
# raise ValueError("Only N-S courses are meaningful, starting at a pole!")
a = 6378.13/1.852
f = 1/298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
if (cf == 0):
b = 0.
else:
b = 2. * np.arctan2(tu, cf)
cu = 1.0/np.sqrt(1 + tu * tu)
su = tu * cu
sa = cu * sf
c2a = 1 - sa * sa
x = 1. + np.sqrt(1. + c2a * (1./(r * r) - 1.))
x = (x - 2.0)/ x
c = 1. - x
c = (x * x / 4. + 1.)/ c
d = (0.375 * x * x - 1.) * x
tu = s/ (r * a * c)
y = tu
c = y + 1
while (np.abs(y - c) > EPS):
sy = np.sin(y)
cy = np.cos(y)
cz = np.cos(b + y)
e = 2. * cz * cz - 1.
c = y
x = e * cy
y = e + e - 1.
y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
d / 4. - cz) * sy * d + tu
b = cu * cy * cf - su * sy
c = r * np.sqrt(sa * sa + b * b)
d = su * cy + cu * sy * cf
glat2 = (np.arctan2(d, c) + np.pi) % (2 * np.pi) - np.pi
c = cu * cy - su * sy * cf
x = np.arctan2(sy * sf, c)
c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
d = ((e * cy * c + cz) * sy * c + y) * sa
glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2 * np.pi)) - np.pi
baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
glon2 *= 180.0/np.pi
glat2 *= 180.0/np.pi
baz *= 180.0/np.pi
return (glon2, glat2, baz)
def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0, outline=True, fill=False, lw=1):
"""
This function enables A95 error ellipses to be drawn in cartopy around
paleomagnetic poles in conjunction with shoot
(modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
Parameters:
map_axis : cartopy axis
centerlon : longitude of the center of the ellipse
centerlat : latitude of the center of the ellipse
radius : radius of the ellipse in km (to plot an angular radius given
in degrees, multiply it by ~111.32 km per degree)
color : color of ellipse
alpha : transparency - if filled, the transparency will only apply
to the facecolor of the ellipse
outline : boolean specifying whether to draw an edge around a filled
ellipse (only used when fill is True)
fill : boolean specifying if the ellipse should be plotted as a filled polygon
lw : line width of the ellipse outline (default 1)
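Examples:
A minimal sketch, assuming a cartopy map axis (pole position and A95 are illustrative):
>>> ax = plt.axes(projection=ccrs.Orthographic(0, 90))
>>> ipmag.equi(ax, 350., 85., 5. * 111.32, 'k')  # a 5 degree circle, converted to km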
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.equi')
return
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
X = X[::-1]
Y = Y[::-1]
# for non-filled ellipses
if not fill:
plt.plot(X, Y, color=color,
transform=ccrs.Geodetic(), alpha=alpha, lw=lw)
# for filled ellipses
else:
XY = np.stack([X,Y],axis=1)
if outline:
circle_edge = Polygon(XY,
edgecolor=color,facecolor='none',
transform=ccrs.Geodetic())
map_axis.add_patch(circle_edge)
circle_face = Polygon(XY,
edgecolor='none',facecolor=color,alpha=alpha,
transform=ccrs.Geodetic())
map_axis.add_patch(circle_face)
def ellipse(map_axis, centerlon, centerlat, major_axis, minor_axis, angle, n=360, filled=False,
transform=None, **kwargs):
"""
This function enables general error ellipses to be drawn on the cartopy projection of the input map axis
using a center and a set of major and minor axes and a rotation angle east of north.
(Adapted from equi).
Parameters:
map_axis : cartopy axis
centerlon : longitude of the center of the ellipse
centerlat : latitude of the center of the ellipse
major_axis : major axis of ellipse in km
minor_axis : minor axis of ellipse in km
angle : angle of major axis in degrees east of north
n : number of points with which to approximate the ellipse
filled : boolean specifying if the ellipse should be plotted as a filled
polygon rather than as a set of line segments (currently unreliable)
transform : cartopy coordinate transform (defaults to ccrs.PlateCarree())
kwargs : any other keyword arguments are passed on to the line
Returns:
True if the ellipse was drawn as a line, False on failure; when
filled is True, the patch is added to the axis and nothing is returned
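Examples:
A minimal sketch, assuming a cartopy map axis (center, axes and angle are illustrative):
>>> ax = plt.axes(projection=ccrs.Orthographic(0, 45))
>>> ipmag.ellipse(ax, 30., 45., 500., 200., 30., color='k')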
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.ellipse')
return False
# set the default transform only after confirming cartopy is available
if transform is None:
transform = ccrs.PlateCarree()
angle = angle*(np.pi/180)
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in np.linspace(0, 360, n):
az_rad = azimuth*(np.pi/180)
radius = ((major_axis*minor_axis)/(((minor_axis*np.cos(az_rad-angle))
** 2 + (major_axis*np.sin(az_rad-angle))**2)**.5))
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append((360+glon2) % 360)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
if filled:
ellip = np.array((X, Y)).T
ellip = map_axis.projection.transform_points(
transform, ellip[:, 0], ellip[:, 1])
poly = Polygon(ellip[:, :2],**kwargs)
map_axis.add_patch(poly)
else:
try:
if "facecolor" in kwargs: kwargs["color"] = kwargs.pop("facecolor")
if "edgecolor" in kwargs: kwargs["color"] = kwargs.pop("edgecolor")
map_axis.plot(X, Y, transform=transform, **kwargs)
return True
except ValueError:
return False
def combine_magic(filenames, outfile='measurements.txt', data_model=3, magic_table='measurements',
dir_path=".", input_dir_path=""):
"""
Takes a list of magic-formatted files, concatenates them, and creates a
single file. Returns output filename if the operation was successful.
Parameters:
filenames : list of MagIC formatted files
outfile : name of output file [e.g., measurements.txt]
data_model : data model number (2.5 or 3), default 3
magic_table : name of magic table, default 'measurements'
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
Returns:
outfile name if success, False if failure
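Examples:
A minimal sketch (file names are hypothetical):
>>> ipmag.combine_magic(['measurements1.txt', 'measurements2.txt'], outfile='measurements.txt')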
"""
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
if float(data_model) == 3.0:
outfile = pmag.resolve_file_name(outfile, output_dir_path)
output_dir_path, file_name = os.path.split(outfile)
con = cb.Contribution(output_dir_path, read_tables=[])
# make sure files actually exist
filenames = [pmag.resolve_file_name(f, input_dir_path) for f in filenames]
#filenames = [os.path.realpath(f) for f in filenames]
filenames = [f for f in filenames if os.path.exists(f)]
if not filenames:
print("You have provided no valid file paths, so nothing will be combined")
return False
# figure out file type from first of files to join
with open(filenames[0]) as f:
file_type = f.readline().split()[1]
if file_type in ['er_specimens', 'er_samples', 'er_sites',
'er_locations', 'er_ages', 'pmag_specimens',
'pmag_samples', 'pmag_sites', 'pmag_results',
'magic_measurements', 'rmag_anisotropy',
'rmag_results', 'rmag_specimens']:
print(
'-W- You are working in MagIC 3 but have provided a MagIC 2.5 file: {}'.format(file_type))
return False
if file_type not in con.table_names:
file_type = magic_table
infiles = [pd.read_csv(infile, sep='\t', header=1)
for infile in filenames]
df = pd.concat(infiles, ignore_index=True, sort=True)
# drop any fully duplicated rows
df.drop_duplicates(inplace=True)
con.add_magic_table(dtype=file_type, df=df)
# drop any mostly empty rows IF they have duplicate index
parent, child = con.get_parent_and_child(file_type)
ignore_cols = [col[:-1] for col in [file_type, parent] if col]
ignore_cols.extend(['software_packages', 'citations'])
con.tables[file_type].drop_duplicate_rows(ignore_cols)
# correctly handle measurements.sequence column
if 'sequence' in con.tables[file_type].df:
con.tables[file_type].df['sequence'] = range(1, len(con.tables[file_type].df) + 1)
# write table to file, use custom name
res = con.write_table_to_file(file_type, custom_name=file_name)
return res
else:
datasets = []
if not filenames:
print("You must provide at least one file")
return False
for infile in filenames:
if not os.path.isfile(infile):
print("{} is not a valid file name".format(infile))
return False
try:
dataset, file_type = pmag.magic_read(infile)
except IndexError:
print('-W- Could not get records from {}'.format(infile))
print(' Skipping...')
continue
print("File ", infile, " read in with ", len(dataset), " records")
for rec in dataset:
datasets.append(rec)
Recs, keys = pmag.fillkeys(datasets)
if Recs:
pmag.magic_write(outfile, Recs, file_type)
print("All records stored in ", outfile)
return outfile
print("No file could be created")
return False
def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt', samp_file='er_samples.txt', age_file=None, sum_file=None, fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth', dir_path='.'):
"""
Returns a matplotlib figure with anisotropy data plotted against depth
(this is the legacy MagIC 2.5 version; see ani_depthplot for MagIC 3).
Available depth scales: 'sample_composite_depth', 'sample_core_depth', or
'age' (you must provide an age file to use this option).
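Examples:
A minimal sketch, assuming MagIC 2.5 format files in dir_path (directory name is hypothetical):
>>> fig, figname = ipmag.ani_depthplot2(dir_path='my_magic25_dir')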
"""
pcol = 4
tint = 9
plots = 0
# format files to use full path
# os.path.join(dir_path, ani_file)
ani_file = pmag.resolve_file_name(ani_file, dir_path)
if not os.path.isfile(ani_file):
print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
# os.path.join(dir_path, meas_file)
meas_file = pmag.resolve_file_name(meas_file, dir_path)
if age_file:
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'sample_core_depth'
# os.path.join(dir_path, samp_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
else:
# the ages file is read in place of the samples file
samp_file = pmag.resolve_file_name(age_file, dir_path)
depth_scale = 'age'
print(
'Warning: you have provided an er_ages format file, which will take precedence over er_samples')
else:
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = os.path.join(dir_path, sum_file)
dmin, dmax = float(dmin), float(dmax)
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
AniData, file_type = pmag.magic_read(ani_file) # read in tensor elements
if not age_file:
# read in sample depth info from er_sample.txt format file
Samps, file_type = pmag.magic_read(samp_file)
else:
# read in sample age info from er_ages.txt format file
Samps, file_type = pmag.magic_read(samp_file)
age_unit = Samps[0]['age_unit']
for s in Samps:
# change to upper case for every sample name
s['er_sample_name'] = s['er_sample_name'].upper()
Meas, file_type = pmag.magic_read(meas_file)
# print 'meas_file', meas_file
# print 'file_type', file_type
if file_type == 'magic_measurements':
isbulk = 1
Data = []
Bulks = []
BulkDepths = []
for rec in AniData:
# look for depth record for this sample
samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
rec['er_sample_name'].upper(), 'T')
# see if there are non-blank depth data
sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
if dmax != -1:
# fishes out records within depth bounds
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmax, 'max')
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmin, 'min')
if len(sampdepths) > 0: # if there are any....
# set the core depth of this record
rec['core_depth'] = sampdepths[0][depth_scale]
Data.append(rec) # fish out data with core_depth
if isbulk: # if there are bulk data
chis = pmag.get_dictitem(
Meas, 'er_specimen_name', rec['er_specimen_name'], 'T')
# get the non-zero values for this specimen
chis = pmag.get_dictitem(
chis, 'measurement_chi_volume', '', 'F')
if len(chis) > 0: # if there are any....
# put in microSI
Bulks.append(
1e6 * float(chis[0]['measurement_chi_volume']))
BulkDepths.append(float(sampdepths[0][depth_scale]))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
if len(Data) > 0:
location = Data[0]['er_location_name']
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
if len(Bulks) > 0:
pcol += 1
# get all the s1 values from Data as floats
s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(Tau1[-1]/Tau3[-1])
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
core_depth_key, core_label_key, Cores = read_core_csv_file(
sum_file)
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'sample_core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, 360], [depth, depth], 'b--')
if pcol == 4 and label == 1:
plt.text(360, depth + tint, core[core_label_key])
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, bmax], [depth, depth], 'b--')
if label == 1:
plt.text(1.1 * bmax, depth + tint,
core[core_label_key])
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, fig_name
else:
return False, "No data to plot"
def ani_depthplot(spec_file='specimens.txt', samp_file='samples.txt',
meas_file='measurements.txt', site_file='sites.txt',
age_file="", sum_file="", fmt='svg', dmin=-1, dmax=-1,
depth_scale='core_depth', dir_path='.', contribution=None):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'composite_depth', 'core_depth' or 'age' (you must provide an age file to use this option).
You must provide valid specimens and sites files, and either a samples or an ages file.
You may additionally provide measurements and a summary file (csv).
Parameters:
spec_file (str): default "specimens.txt"
samp_file (str): default "samples.txt"
meas_file (str): default "measurements.txt"
site_file (str): default "sites.txt"
age_file (str): default ""
sum_file (str): default ""
fmt (str): default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
dmin (number): default -1
minimum depth to plot (if -1, default to plotting all)
dmax (number): default -1
maximum depth to plot (if -1, default to plotting all)
depth_scale (str): default "core_depth"
scale to plot, ['composite_depth', 'core_depth', 'age'].
if 'age' is selected, you must provide an ages file.
dir_path (str): default "."
directory for input files
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns:
plot
matplotlib plot, or False if no plot could be created
name
figure name, or error message if no plot could be created
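Examples:
A minimal sketch (directory name is hypothetical):
>>> fig, fignames = ipmag.ani_depthplot(dir_path='my_magic_dir')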
"""
if depth_scale == 'sample_core_depth':
depth_scale = 'core_depth'
if depth_scale == 'sample_composite_depth':
depth_scale = 'composite_depth'
pcol = 4
tint = 9
plots = 0
dmin, dmax = float(dmin), float(dmax)
# if contribution object is not provided, read in data from files
if isinstance(contribution, cb.Contribution):
con = contribution
else:
# format files to use full path
meas_file = pmag.resolve_file_name(meas_file, dir_path)
spec_file = pmag.resolve_file_name(spec_file, dir_path)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
site_file = pmag.resolve_file_name(site_file, dir_path)
if age_file:
age_file = pmag.resolve_file_name(age_file, dir_path)
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'core_depth'
else:
samp_file = age_file
depth_scale = 'age'
print(
'Warning: you have provided an ages format file, which will take precedence over samples')
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = pmag.resolve_file_name(sum_file, dir_path)
core_df=pd.read_csv(sum_file)
depths=core_df['Top depth cored CSF (m)'].values
# contribution
dir_path = os.path.split(spec_file)[0]
tables = ['measurements', 'specimens', 'samples', 'sites']
con = cb.Contribution(dir_path, read_tables=tables,
custom_filenames={'measurements': meas_file, 'specimens': spec_file,
'samples': samp_file, 'sites': site_file})
for ftype in ['specimens', 'samples', 'sites']:
if not con.tables.get(ftype):
if ftype == 'samples':
if con.tables.get('ages'):
depth_scale = 'age'
continue
print("-W- This function requires a {} file to run.".format(ftype))
print(" Make sure you include one in your working directory")
return False, "missing required file type: {}".format(ftype)
# propagate needed values
con.propagate_cols(['core_depth'], 'samples', 'sites')
con.propagate_location_to_specimens()
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
ani_file = spec_file
SampData = con.tables['samples'].df
AniData = con.tables['specimens'].df
# add sample into specimens (AniData)
AniData = pd.merge(
AniData, SampData[['sample', depth_scale]], how='inner', on='sample')
# trim down AniData
cond = AniData[depth_scale].astype(bool)
AniData = AniData[cond]
# filter by the requested depth bounds
if dmax != -1:
AniData = AniData[AniData[depth_scale] < dmax]
if dmin != -1:
AniData = AniData[AniData[depth_scale] > dmin]
AniData['core_depth'] = AniData[depth_scale]
if not age_file:
Samps = con.tables['samples'].convert_to_pmag_data_list()
else:
con.add_magic_table(dtype='ages', fname=age_file)
Samps = con.tables['ages'].convert_to_pmag_data_list()
# get age unit
age_unit = con.tables['ages'].df['age_unit'].iloc[0]
# propagate ages down to sample level
for s in Samps:
# change to upper case for every sample name
s['sample'] = s['sample'].upper()
if 'measurements' in con.tables:
isbulk = 1
Meas = con.tables['measurements'].df # convert_to_pmag_data_list()
if isbulk:
Meas = Meas[Meas['specimen'].astype('bool')]
Meas = Meas[Meas['susc_chi_volume'].astype(bool)]
# add core_depth into Measurements dataframe
Meas = pd.merge(Meas[['susc_chi_volume', 'specimen']], AniData[[
'specimen', 'core_depth']], how='inner', on='specimen')
Bulks = list(Meas['susc_chi_volume'] * 1e6)
BulkDepths = list(Meas['core_depth'])
else:
Bulks, BulkDepths = [], []
# now turn Data from pandas dataframe to a list of dicts
Data = list(AniData.T.apply(dict))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
#
if len(Data) > 0:
location = Data[0].get('location', 'unknown')
if cb.is_null(location):
location = 'unknown'
try:
location = con.tables['sites'].df['location'][0]
except KeyError:
pass
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
if len(Bulks) > 0:
pcol += 1
Data = pmag.get_dictitem(Data, 'aniso_s', '', 'not_null')
# get all the s1 values from Data as floats
aniso_s = pmag.get_dictkey(Data, 'aniso_s', '')
aniso_s = [a.split(':') for a in aniso_s if a is not None]
#print('aniso_s', aniso_s)
s1 = [float(a[0]) for a in aniso_s]
s2 = [float(a[1]) for a in aniso_s]
s3 = [float(a[2]) for a in aniso_s]
s4 = [float(a[3]) for a in aniso_s]
s5 = [float(a[4]) for a in aniso_s]
s6 = [float(a[5]) for a in aniso_s]
# we are good with s1 - s6
nmeas = pmag.get_dictkey(Data, 'aniso_s_n_measurements', 'int')
sigma = pmag.get_dictkey(Data, 'aniso_s_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(Tau1[-1]/Tau3[-1])
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(11, 7)) # make the figure
# main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
if tau_min>.3: tau_min=.3
if tau_max<.36: tau_max=.36
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
ax3.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
ax4.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.set_ylim(dmax, dmin)
ax6.set_xlabel('Bulk Susc. (uSI)')
ax6.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, [fig_name]
else:
return False, "No data to plot"
def core_depthplot(input_dir_path='.', meas_file='measurements.txt', spc_file='',
samp_file='samples.txt', age_file='', sum_file='', wt_file='',
depth_scale='core_depth', dmin=-1, dmax=-1, sym='bo',
size=5, spc_sym='ro', spc_size=5, meth='', step=0, fmt='svg',
pltDec=True, pltInc=True, pltMag=True, pltLine=True, pltSus=True,
logit=False, pltTime=False, timescale=None, amin=-1, amax=-1,
norm=False, data_model_num=3,location=""):
"""
Plots declination, inclination, and intensity data against core depth or age.
The depth scale can be 'core_depth' or 'composite_depth' (for data model 3);
if an age file is provided, depth_scale will be set to 'age' by default.
You must provide at least a measurements, specimens and samples file to plot.
Parameters:
input_dir_path : str, default "."
file input directory
meas_file : str, default "measurements.txt"
input measurements file
spc_file : str, default ""
input specimens file
samp_file : str, default ""
input samples file
age_file : str, default ""
input ages file
sum_file : str, default ""
input csv summary file
wt_file : str, default ""
input file with weights
depth_scale : str, default "core_depth"
['core_depth', 'composite_depth']
dmin : number, default -1
minimum depth to plot (if -1, default to plotting all)
dmax : number, default -1
maximum depth to plot (if -1, default to plotting all)
sym : str, default "bo"
symbol color and shape, default blue circles
(see matplotlib documentation for more options)
size : int, default 5
symbol size
spc_sym : str, default 'ro'
specimen symbol color and shape, default red circles
(see matplotlib documentation for more options)
spc_size : int, default 5
specimen symbol size
meth : str, default ""
method code, one of ["LT-NO", "AF", "T", "ARM", "IRM", "X"]
step : int, default 0
treatment step for plotting:
for AF, in mT; for T, in C
fmt : str, default "svg"
format for figures, [svg,jpg,png,pdf]
pltDec : bool, default True
plot declination
pltInc : bool, default True
plot inclination
pltMag : bool, default True
plot magnetization
pltLine : bool, default True
connect dots with a line
pltSus : bool, default True
plot blanket treatment
logit : bool, default False
plot magnetization on a log scale
pltTime : bool, default False
plot the geomagnetic polarity timescale next to the data
timescale : str, default None
name of the timescale passed to pmag.get_ts (required if pltTime is True)
amin : int, default -1
minimum time to plot (if -1, default to plotting all)
amax : int, default -1
maximum time to plot (if -1, default to plotting all)
norm : bool, default False
normalize by weight
data_model_num : int, default 3
MagIC data model (please, use data model 3)
location : str, default ""
location name used for the plot title
Returns:
main_plot, figname
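Examples:
A minimal sketch (directory and treatment step are hypothetical):
>>> fig, figname = ipmag.core_depthplot(input_dir_path='core_dir', meth='AF', step=15)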
"""
data_model_num = int(data_model_num)
# replace MagIC 3 defaults with MagIC 2.5 defaults if needed
if data_model_num == 2 and meas_file == 'measurements.txt':
meas_file = 'magic_measurements.txt'
if data_model_num == 2 and samp_file == 'samples.txt':
samp_file = 'er_samples.txt'
if data_model_num == 2 and age_file == 'ages.txt':
age_file = 'er_ages.txt'
if data_model_num == 2 and depth_scale == "core_depth":
depth_scale = "sample_core_depth"
# initialize MagIC 3.0 vs 2.5 column names
loc_col_name = "location" if data_model_num == 3 else "er_location_name"
site_col_name = "site" if data_model_num == 3 else "er_site_name"
samp_col_name = "sample" if data_model_num == 3 else "er_sample_name"
spec_col_name = "specimen" if data_model_num == 3 else "er_specimen_name"
meth_col_name = "method_codes" if data_model_num == 3 else "magic_method_codes"
spec_dec_col_name = "dir_dec" if data_model_num == 3 else "specimen_dec"
spec_inc_col_name = "dir_inc" if data_model_num == 3 else "specimen_inc"
avg_weight_col_name = "weight" if data_model_num == 3 else "average_weight"
spec_weight_col_name = "weight" if data_model_num == 3 else "specimen_weight"
age_col_name = "age" if data_model_num == 3 else "average_age"
height_col_name = "height" if data_model_num == 3 else "average_height"
average_dec_col_name = "dir_dec" if data_model_num == 3 else "average_dec"
average_inc_col_name = "dir_inc" if data_model_num == 3 else "average_inc"
# initialize other variables
width = 10
Ssym, Ssize = 'cs', 5
pcol = 3
pel = 3
maxInt = -1000
minInt = 1e10
maxSuc = -1000
minSuc = 10000
main_plot = None
if size:
size = int(size)
if spc_size:
spc_size = int(spc_size)
title = ""
if location:
title = location
# file formats not supported for the moment
ngr_file = "" # nothing needed, not implemented fully in original script
suc_file = "" # nothing else needed, also was not implemented in original script
res_file = "" # need also res_sym, res_size
wig_file = "" # if wig_file: pcol+=1; width+=2
# which plots to make
if not pltDec:
pcol -= 1
pel -= 1
width -= 2
if not pltInc:
pcol -= 1
pel -= 1
width -= 2
if not pltMag:
pcol -= 1
pel -= 1
width -= 2
# method and step
if not step or meth == 'LT-NO':
step = 0
method = 'LT-NO'
elif meth == "AF":
step = round(float(step) * 1e-3, 6)
method = 'LT-AF-Z'
elif meth == 'T':
step = round(float(step) + 273, 6)
method = 'LT-T-Z'
elif meth == 'ARM':
method = 'LT-AF-I'
step = round(float(step) * 1e-3, 6)
elif meth == 'IRM':
method = 'LT-IRM'
step = round(float(step) * 1e-3, 6)
# not supporting susceptibility at the moment LJ
elif meth == 'X':
method = 'LP-X'
pcol += 1
# susceptibility units were historically read from the command line;
# fall back to volume units when '-LP' is not among the arguments
unit = 'vol'
if '-LP' in sys.argv:
ind = sys.argv.index('-LP')
unit = sys.argv[ind + 2]
if unit == 'mass':
suc_key = 'susc_chi_mass' if data_model_num == 3 else 'measurement_chi_mass'
elif unit == 'vol':
suc_key = 'susc_chi_volume' if data_model_num == 3 else 'measurement_chi_volume'
else:
print('error in susceptibility units')
return False, 'error in susceptibility units'
else:
print('method: {} not supported'.format(meth))
return False, 'method: "{}" not supported'.format(meth)
if wt_file:
norm = True
if dmin and dmax:
dmin, dmax = float(dmin), float(dmax)
else:
dmin, dmax = -1, -1
if pltTime:
amin = float(amin)
amax = float(amax)
pcol += 1
width += 2
if not (amax and timescale):
return False, "To plot time, you must provide amin, amax, and timescale"
#
#
# read in 3.0 data and translate to 2.5
if meas_file:
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
if spc_file:
spc_file = pmag.resolve_file_name(spc_file, input_dir_path)
if samp_file:
samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
if age_file:
age_file = pmag.resolve_file_name(age_file, input_dir_path)
if data_model_num == 3:
fnames = {'specimens': spc_file, 'samples': samp_file,
'ages': age_file, 'measurements': meas_file}
fnames = {k: v for (k, v) in fnames.items() if v}
con = cb.Contribution(input_dir_path, custom_filenames=fnames)
for dtype in ['measurements', 'specimens']:
if dtype not in con.tables:
print(
'-E- You must have a {} file in your input directory ({}) to run core_depthplot'.format(dtype, input_dir_path))
print(' If needed, you can specify your input directory on the command line with "core_depthplot.py -ID dirname ... "')
print(' Or with ipmag.core_depthplot(input_dir_path=dirname, ...)')
# return False, '-E- You must have a {} file in your input directory ({}) to run core_depthplot'.format(dtype, input_dir_path)
# propagate data to measurements
con.propagate_name_down('sample', 'measurements')
con.propagate_name_down('site', 'measurements')
# propagate depth info from sites --> samples
con.propagate_cols(
['core_depth', 'composite_depth'], 'samples', 'sites')
# propagate depth info from samples-> specimens
con.propagate_cols(
['core_depth', 'composite_depth'], 'specimens', 'samples')
if age_file == "":
# get sample data straight from the contribution
Samps = []
if 'samples' in con.tables:
Samps = con.tables['samples'].convert_to_pmag_data_list()
else:
depth_scale = 'age'
Samps = []
# get age data from contribution
if 'ages' in con.tables:
# we need to get sample in here
# this doesn't do the trick by itself
con.propagate_ages()
con.propagate_cols(['age', 'age_unit'], 'samples', 'sites')
Samps = con.tables['samples'].convert_to_pmag_data_list()
age_unit = ""
if spc_file:
Specs3 = []
# get specimen data from contribution
Specs = []
if 'specimens' in con.tables:
Specs = con.tables['specimens'].convert_to_pmag_data_list()
if res_file:
warn = '-W- result file option is not currently available for MagIC data model 3'
print(warn)
return False, warn
#Results, file_type = pmag.magic_read(res_file)
if norm:
#warn = '-W- norm option is not currently available for MagIC data model 3'
# print(warn)
# return False, warn
Specs3, file_type = pmag.magic_read(wt_file)
# translate specimen records to 2.5
ErSpecs = []
# for spec in Specs3:
# ErSpecs.append(map_magic.mapping(spec, spec_magic3_2_magic2_map))
ErSpecs = Specs3
print(len(ErSpecs), ' specimens read in from ', wt_file)
if not os.path.isfile(spc_file):
if not os.path.isfile(meas_file):
return False, "You must provide either a magic_measurements file or a pmag_specimens file"
if not age_file and not samp_file:
print('-W- You must provide either an age file or a sample file')
return False, '-W- You must provide either an age file or a sample file'
# read in 2.5 data
elif data_model_num == 2:
if age_file == "":
if samp_file:
samp_file = os.path.join(input_dir_path, samp_file)
Samps, file_type = pmag.magic_read(samp_file)
else:
depth_scale = 'age'
if age_file:
age_file = os.path.join(input_dir_path, age_file)
Samps, file_type = pmag.magic_read(age_file)
age_unit = ""
if spc_file:
Specs, file_type = pmag.magic_read(spc_file)
if res_file:
Results, file_type = pmag.magic_read(res_file)
if norm:
ErSpecs, file_type = pmag.magic_read(wt_file)
print(len(ErSpecs), ' specimens read in from ', wt_file)
if not os.path.isfile(spc_file):
if not os.path.isfile(meas_file):
return False, "You must provide either a magic_measurements file or a pmag_specimens file"
else:
return False, "Invalid data model number: {}".format(str(data_model_num))
Cores = []
core_depth_key = "Top depth cored CSF (m)"
if sum_file:
# os.path.join(input_dir_path, sum_file)
sum_file = pmag.resolve_file_name(sum_file, input_dir_path)
with open(sum_file, 'r') as fin:
indat = fin.readlines()
if "Core Summary" in indat[0]:
headline = 1
else:
headline = 0
keys = indat[headline].replace('\n', '').split(',')
if "Core Top (m)" in keys:
core_depth_key = "Core Top (m)"
if "Top depth cored CSF (m)" in keys:
core_depth_key = "Top depth cored CSF (m)"
if "Core Label" in keys:
core_label_key = "Core Label"
if "Core label" in keys:
core_label_key = "Core label"
for line in indat[2:]:
if 'TOTALS' not in line:
CoreRec = {}
for k in range(len(keys)):
CoreRec[keys[k]] = line.split(',')[k]
Cores.append(CoreRec)
if len(Cores) == 0:
print('no Core depth information available: import core summary file')
sum_file = ""
Data = []
if 'core_depth' in depth_scale or depth_scale == 'mbsf':
ylab = "Depth (mbsf)"
depth_scale = 'core_depth'
elif depth_scale == 'age':
ylab = "Age"
elif 'composite_depth' in depth_scale or depth_scale == 'mcd':
ylab = "Depth (mcd)"
depth_scale = 'composite_depth'
else:
print('Warning: You have provided unsupported depth scale: {}.\nUsing default (mbsf) instead.'.format(
depth_scale))
depth_scale = 'core_depth'
ylab = "Depth (mbsf)"
# fix depth scale for data model 2 if needed
if data_model_num == 2 and not depth_scale.startswith('sample_'):
if depth_scale != "age":
depth_scale = "sample_" + depth_scale
# collect the data for plotting declination
Depths, Decs, Incs, Ints = [], [], [], []
SDepths, SDecs, SIncs, SInts = [], [], [], []
SSucs = []
samples = []
methods, steps, m2 = [], [], []
if os.path.isfile(meas_file): # plot the bulk measurement data
if data_model_num == 3:
Meas = []
if 'measurements' in con.tables:
Meas = con.tables['measurements'].convert_to_pmag_data_list()
# has measurement_magn_mass ....
dec_key, inc_key = 'dir_dec', 'dir_inc'
meth_key, temp_key, ac_key, dc_key = 'method_codes', 'treat_temp', 'treat_ac_field', 'treat_dc_field'
intlist = ['magnitude', 'magn_moment',
'magn_volume', 'magn_mass']
meas_key = "magn_moment"
elif data_model_num == 2:
intlist = ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
temp_key, ac_key, dc_key = 'treatment_temp', 'treatment_ac_field', 'treatment_dc_field'
dec_key, inc_key = 'measurement_dec', 'measurement_inc'
Meas, file_type = pmag.magic_read(meas_file)
meas_key = 'measurement_magn_moment'
#
print(len(Meas), ' measurements read in from ', meas_file)
#
for m in intlist: # find the intensity key with data
# get all non-blank data for this specimen
meas_data = pmag.get_dictitem(Meas, m, '', 'F')
if len(meas_data) > 0:
print('using intensity key:', m)
meas_key = m
break
# fish out the desired method code
m1 = pmag.get_dictitem(Meas, meth_col_name, method, 'has')
if method == 'LT-T-Z':
m2 = pmag.get_dictitem(m1, temp_key, str(
step), 'eval') # fish out the desired step
elif 'LT-AF' in method:
m2 = pmag.get_dictitem(m1, ac_key, str(step), 'eval')
elif 'LT-IRM' in method:
m2 = pmag.get_dictitem(m1, dc_key, str(step), 'eval')
elif 'LP-X' in method:
m2 = pmag.get_dictitem(m1, suc_key, '', 'F')
else:
# for LT-NO (no treatment) there is no step to filter on
m2 = m1
if len(m2) > 0:
for rec in m2: # fish out depths and weights
D = pmag.get_dictitem(
Samps, samp_col_name, rec[samp_col_name], 'T')
if not D: # if using an age_file, you may need to sort by site
D = pmag.get_dictitem(
Samps, site_col_name, rec[site_col_name], 'T')
depth = pmag.get_dictitem(D, depth_scale, '', 'F')
if len(depth) > 0:
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + depth[0]['age_unit'] + ')'
rec[depth_scale] = float(depth[0][depth_scale])
rec[meth_col_name] = rec[meth_col_name] + \
':' + depth[0][meth_col_name]
if norm:
specrecs = pmag.get_dictitem(
ErSpecs, spec_col_name, rec[spec_col_name], 'T')
specwts = pmag.get_dictitem(
specrecs, spec_weight_col_name, "", 'F')
if len(specwts) > 0:
rec[spec_weight_col_name] = specwts[0][spec_weight_col_name]
# fish out data with core_depth and (if needed)
# weights
Data.append(rec)
else:
# fish out data with core_depth and (if needed) weights
Data.append(rec)
if title == "":
pieces = rec[samp_col_name].split('-')
location = rec.get(loc_col_name, '')
title = location
SData = pmag.sort_diclist(Data, depth_scale)
for rec in SData: # fish out bulk measurement data from desired depths
if dmax == -1 or float(rec[depth_scale]) < dmax and float(rec[depth_scale]) > dmin:
Depths.append((rec[depth_scale]))
if method == "LP-X":
SSucs.append(float(rec[suc_key]))
else:
if pltDec:
Decs.append(float(rec[dec_key]))
if pltInc:
Incs.append(float(rec[inc_key]))
if not norm and pltMag:
Ints.append(float(rec[meas_key]))
if norm and pltMag:
Ints.append(
float(rec[meas_key]) / float(rec[spec_weight_col_name]))
if len(SSucs) > 0:
maxSuc = max(SSucs)
minSuc = min(SSucs)
if len(Ints) > 1:
maxInt = max(Ints)
minInt = min(Ints)
if len(Depths) == 0:
print('no bulk measurement data matched your request')
else:
print(len(Depths), "depths found")
SpecDepths, SpecDecs, SpecIncs = [], [], []
FDepths, FDecs, FIncs = [], [], []
if spc_file: # add depths to spec data
# get all the discrete data with best fit lines
BFLs = pmag.get_dictitem(Specs, meth_col_name, 'DE-BFL', 'has')
for spec in BFLs:
if location == "":
location = spec.get(loc_col_name, "")
samp = pmag.get_dictitem(
Samps, samp_col_name, spec[samp_col_name], 'T')
if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
# filter for depth
if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
# fish out data with core_depth
SpecDepths.append(float(samp[0][depth_scale]))
# fish out data with core_depth
SpecDecs.append(float(spec[spec_dec_col_name]))
# fish out data with core_depth
SpecIncs.append(float(spec[spec_inc_col_name]))
else:
print('no core_depth found for: ', spec[spec_col_name])
# get all the discrete data with Fisher means
FMs = pmag.get_dictitem(Specs, meth_col_name, 'DE-FM', 'has')
for spec in FMs:
if location == "":
location = spec.get(loc_col_name, "")
samp = pmag.get_dictitem(
Samps, samp_col_name, spec[samp_col_name], 'T')
if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
# filter for depth
if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
# fish out data with core_depth
FDepths.append(float(samp[0][depth_scale]))
# fish out data with core_depth
FDecs.append(float(spec[spec_dec_col_name]))
# fish out data with core_depth
FIncs.append(float(spec[spec_inc_col_name]))
else:
print('no core_depth found for: ', spec[spec_col_name])
ResDepths, ResDecs, ResIncs = [], [], []
if 'age' in depth_scale: # set y-key
res_scale = age_col_name
else:
res_scale = height_col_name
if res_file: # creates lists of Result Data
for res in Results:
meths = res[meth_col_name].split(":")
if 'DE-FM' in meths:
# filter for depth
if dmax == -1 or float(res[res_scale]) < dmax and float(res[res_scale]) > dmin:
# fish out data with core_depth
ResDepths.append(float(res[res_scale]))
# fish out data with core_depth
ResDecs.append(float(res['average_dec']))
# fish out data with core_depth
ResIncs.append(float(res['average_inc']))
Susc, Sus_depths = [], []
if dmin == -1:
if len(Depths) > 0:
dmin, dmax = Depths[0], Depths[-1]
if len(FDepths) > 0:
dmin, dmax = FDepths[0], FDepths[-1]
if pltSus and len(SDepths) > 0:
if SDepths[0] < dmin:
dmin = SDepths[0]
if SDepths[-1] > dmax:
dmax = SDepths[-1]
if len(SpecDepths) > 0:
if min(SpecDepths) < dmin:
dmin = min(SpecDepths)
if max(SpecDepths) > dmax:
dmax = max(SpecDepths)
if len(ResDepths) > 0:
if min(ResDepths) < dmin:
dmin = min(ResDepths)
if max(ResDepths) > dmax:
dmax = max(ResDepths)
# wig_file and suc_file not currently supported options
# if suc_file:
# with open(suc_file, 'r') as s_file:
# sucdat = s_file.readlines()
# keys = sucdat[0].replace('\n', '').split(',') # splits on underscores
# for line in sucdat[1:]:
# SucRec = {}
# for k in range(len(keys)):
# SucRec[keys[k]] = line.split(',')[k]
# if float(SucRec['Top Depth (m)']) < dmax and float(SucRec['Top Depth (m)']) > dmin and SucRec['Magnetic Susceptibility (80 mm)'] != "":
# Susc.append(float(SucRec['Magnetic Susceptibility (80 mm)']))
# if Susc[-1] > maxSuc:
# maxSuc = Susc[-1]
# if Susc[-1] < minSuc:
# minSuc = Susc[-1]
# Sus_depths.append(float(SucRec['Top Depth (m)']))
#WIG, WIG_depths = [], []
# if wig_file:
# wigdat, file_type = pmag.magic_read(wig_file)
# swigdat = pmag.sort_diclist(wigdat, depth_scale)
# keys = list(wigdat[0].keys())
# for key in keys:
# if key != depth_scale:
# plt_key = key
# break
# for wig in swigdat:
# if float(wig[depth_scale]) < dmax and float(wig[depth_scale]) > dmin:
# WIG.append(float(wig[plt_key]))
# WIG_depths.append(float(wig[depth_scale]))
tint = 4.5
plot = 1
#print('Decs', len(Decs))
#print('Depths', len(Depths), 'SpecDecs', len(SpecDecs))
#print('SpecDepths', len(SpecDepths), 'ResDecs', len(ResDecs))
#print('ResDepths', len(ResDepths), 'SDecs', len(SDecs))
#print('SDepths', len(SDepths), 'SIincs', len(SIncs))
#print('Incs', len(Incs))
if (Decs and Depths) or (SpecDecs and SpecDepths) or (ResDecs and ResDepths) or (SDecs and SDepths) or (SInts and SDepths) or (SIncs and SDepths) or (Incs and Depths):
main_plot = plt.figure(1, figsize=(width, 8)) # this works
# pylab.figure(1,figsize=(width,8))
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num)
if pltDec:
ax = plt.subplot(1, pcol, plot)
if pltLine:
plt.plot(Decs, Depths, 'k')
if len(Decs) > 0:
plt.plot(Decs, Depths, sym, markersize=size)
if len(Decs) == 0 and pltLine and len(SDecs) > 0:
plt.plot(SDecs, SDepths, 'k')
if len(SDecs) > 0:
plt.plot(SDecs, SDepths, Ssym, markersize=Ssize)
if spc_file:
plt.plot(SpecDecs, SpecDepths, spc_sym, markersize=spc_size)
if spc_file and len(FDepths) > 0:
plt.scatter(
FDecs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
if res_file:
plt.plot(ResDecs, ResDepths, res_sym, markersize=res_size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 360.], [depth, depth], 'b--')
if pel == plot:
plt.text(360, depth + tint, core[core_label_key])
if pel == plot:
plt.axis([0, 400, dmax, dmin])
else:
plt.axis([0, 360., dmax, dmin])
plt.xlabel('Declination')
plt.ylabel(ylab)
plot += 1
pmagplotlib.delticks(ax) # dec xticks are too crowded otherwise
else:
print('no data!')
return False, 'No data found to plot\nTry again with different parameters'
if pltInc:
plt.subplot(1, pcol, plot)
if pltLine:
plt.plot(Incs, Depths, 'k')
if len(Incs) > 0:
plt.plot(Incs, Depths, sym, markersize=size)
if len(Incs) == 0 and pltLine and len(SIncs) > 0:
plt.plot(SIncs, SDepths, 'k')
if len(SIncs) > 0:
plt.plot(SIncs, SDepths, Ssym, markersize=Ssize)
if spc_file and len(SpecDepths) > 0:
plt.plot(SpecIncs, SpecDepths, spc_sym, markersize=spc_size)
if spc_file and len(FDepths) > 0:
plt.scatter(
FIncs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
if res_file:
plt.plot(ResIncs, ResDepths, res_sym, markersize=res_size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
if pel == plot:
plt.text(90, depth + tint, core[core_label_key])
plt.plot([-90, 90], [depth, depth], 'b--')
plt.plot([0, 0], [dmax, dmin], 'k-')
if pel == plot:
plt.axis([-90, 110, dmax, dmin])
else:
plt.axis([-90, 90, dmax, dmin])
plt.xlabel('Inclination')
plt.ylabel('')
plot += 1
if pltMag and len(Ints) > 0 or len(SInts) > 0:
plt.subplot(1, pcol, plot)
for pow in range(-10, 10):
if maxInt * 10**pow > 1:
break
if not logit:
for k in range(len(Ints)):
Ints[k] = Ints[k] * 10**pow
for k in range(len(SInts)):
SInts[k] = SInts[k] * 10**pow
if pltLine and len(Ints) > 0:
plt.plot(Ints, Depths, 'k')
if len(Ints) > 0:
plt.plot(Ints, Depths, sym, markersize=size)
if len(Ints) == 0 and pltLine and len(SInts) > 0:
plt.plot(SInts, SDepths, 'k-')
if len(SInts) > 0:
plt.plot(SInts, SDepths, Ssym, markersize=Ssize)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
plt.plot([0, maxInt * 10**pow + .1], [depth, depth], 'b--')
if depth > dmin and depth < dmax:
plt.text(maxInt * 10**pow - .2 * maxInt * 10 **
pow, depth + tint, core[core_label_key])
plt.axis([0, maxInt * 10**pow + .1, dmax, dmin])
if not norm:
plt.xlabel('Intensity (10^-%i Am^2)' % pow)
else:
plt.xlabel('Intensity (10^-%i Am^2/kg)' % pow)
else:
if pltLine:
plt.semilogx(Ints, Depths, 'k')
if len(Ints) > 0:
plt.semilogx(Ints, Depths, sym, markersize=size)
if len(Ints) == 0 and pltLine and len(SInts) > 0:
plt.semilogx(SInts, SDepths, 'k')
if len(SInts) > 0:
plt.semilogx(SInts, SDepths, Ssym, markersize=Ssize)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
plt.semilogx([minInt, maxInt], [depth, depth], 'b--')
if depth > dmin and depth < dmax:
plt.text(maxInt - .2 * maxInt, depth +
tint, core[core_label_key])
minInt = plt.axis()[0]
plt.axis([minInt, maxInt, dmax, dmin])
if not norm:
plt.xlabel('Intensity (Am^2)')
else:
plt.xlabel('Intensity (Am^2/kg)')
plot += 1
if suc_file or len(SSucs) > 0:
plt.subplot(1, pcol, plot)
if len(Susc) > 0:
if pltLine:
plt.plot(Susc, Sus_depths, 'k')
if not logit:
plt.plot(Susc, Sus_depths, sym, markersize=size)
if logit:
plt.semilogx(Susc, Sus_depths, sym, markersize=size)
if len(SSucs) > 0:
if not logit:
plt.plot(SSucs, SDepths, sym, markersize=size)
if logit:
plt.semilogx(SSucs, SDepths, sym, markersize=size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if not logit:
plt.plot([minSuc, maxSuc], [depth, depth], 'b--')
if logit:
plt.semilogx([minSuc, maxSuc], [depth, depth], 'b--')
plt.axis([minSuc, maxSuc, dmax, dmin])
plt.xlabel('Susceptibility')
plot += 1
# if wig_file:
# plt.subplot(1, pcol, plot)
# plt.plot(WIG, WIG_depths, 'k')
# if sum_file:
# for core in Cores:
# depth = float(core[core_depth_key])
# plt.plot([WIG[0], WIG[-1]], [depth, depth], 'b--')
# plt.axis([min(WIG), max(WIG), dmax, dmin])
# plt.xlabel(plt_key)
# plot += 1
if pltTime:
ax1 = plt.subplot(1, pcol, plot)
ax1.axis([-.25, 1.5, amax, amin])
plot += 1
TS, Chrons = pmag.get_ts(timescale)
X, Y, Y2 = [0, 1], [], []
cnt = 0
if amin < TS[1]: # in the Brunhes
Y = [amin, amin] # minimum age
Y1 = [TS[1], TS[1]] # age of the B/M boundary
# color in Brunhes, black
ax1.fill_between(X, Y, Y1, facecolor='black')
for d in TS[1:]:
pol = cnt % 2
cnt += 1
if d <= amax and d >= amin:
ind = TS.index(d)
Y = [TS[ind], TS[ind]]
Y1 = [TS[ind + 1], TS[ind + 1]]
if pol:
# fill in every other time
ax1.fill_between(X, Y, Y1, facecolor='black')
ax1.plot([0, 1, 1, 0, 0], [amin, amin, amax, amax, amin], 'k-')
ax2 = ax1.twinx()
plt.ylabel("Age (Ma): " + timescale)
for k in range(len(Chrons) - 1):
c = Chrons[k]
cnext = Chrons[k + 1]
d = cnext[1] - (cnext[1] - c[1])/3.0
if d >= amin and d < amax:
# make the Chron boundary tick
ax2.plot([1, 1.5], [c[1], c[1]], 'k-')
ax2.text(1.05, d, c[0])
ax2.axis([-.25, 1.5, amax, amin])
figname = location + '_m:_' + method + '_core-depthplot.' + fmt
plt.title(location)
return main_plot, figname
def unpack_magic(infile=None, dir_path='.', input_dir_path='',
overwrite=False, print_progress=True,
data_model=3., separate_locs=False, txt="",excel=False):
"""
Wrapper function for ipmag.download_magic, to handle the unpacking of a MagIC contribution.
This function takes in a text file, typically downloaded from the MagIC database,
and then unpacks it into MagIC-formatted files. The name emphasizes the "unpacking"
nature of the operation over the "downloading" aspect.
Parameters:
infile : str, optional
Name of the MagIC-format file to unpack.
dir_path : str, optional
Directory path for output. Default is the current directory.
input_dir_path : str, optional
Path to the input file if different from dir_path. Default is dir_path.
overwrite : bool, optional
Whether to overwrite files in the current directory. Default is False.
print_progress : bool, optional
Whether to print progress messages. Default is True.
data_model : float, optional
Specifies the MagIC data model version, either 2.5 or 3. Default is 3.
separate_locs : bool, optional
If True, create separate directories for each location. Default is False.
txt : str, optional
Alternative to providing an infile, you can provide the file contents as a string.
Useful for directly downloading a MagIC file from EarthRef. Default is an empty string.
excel : bool, optional
If True, the input file is treated as an Excel spreadsheet. Default is False.
Returns:
bool
True if the unpacking operation is successful. False otherwise.
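Examples:
A minimal sketch (file name is hypothetical):
>>> ipmag.unpack_magic('magic_contribution.txt', dir_path='unpacked', separate_locs=True)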
"""
return download_magic(infile, dir_path, input_dir_path,
overwrite, print_progress,
data_model, separate_locs, txt, excel)
def download_magic(infile=None, dir_path='.', input_dir_path='',
overwrite=False, print_progress=True,
data_model=3., separate_locs=False, txt="",excel=False):
"""
Takes the name of a text file downloaded from the MagIC database and
unpacks it into MagIC-formatted files. By default, download_magic assumes
that you are doing everything in your current directory. If not, you may
provide the optional arguments dir_path (where you want the results to go) and
input_dir_path (where the downloaded file is, if that location is different from
dir_path).
Parameters:
infile : str
MagIC-format file to unpack
dir_path : str
output directory (default ".")
input_dir_path : str, default ""
path for input file if different from dir_path (default is the same)
overwrite: bool
overwrite current directory (default False)
print_progress: bool
verbose output (default True)
data_model : float
MagIC data model 2.5 or 3 (default 3)
separate_locs : bool
create a separate directory for each location (Location_*)
(default False)
txt : str, default ""
if infile is not provided, you may provide a string with file contents instead
(useful for downloading MagIC file directly from earthref)
excel : bool
input file is an excel spreadsheet (as downloaded from MagIC)
Returns:
bool
True if the unpacking operation is successful. False otherwise.
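Examples:
A minimal sketch (file name is hypothetical):
>>> ipmag.download_magic('magic_contribution.txt', dir_path='unpacked', overwrite=True)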
"""
if data_model == 2.5:
method_col = "magic_method_codes"
else:
method_col = "method_codes"
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
if infile:
infile = pmag.resolve_file_name(infile, input_dir_path)
if excel:
sheets = ['contribution', 'locations', 'sites', 'samples', 'specimens',
'measurements', 'ages', 'criteria', 'images']
for sheet in sheets:
try:
table = pd.read_excel(infile, header=3, sheet_name=sheet)
table.fillna("", inplace=True)
table.drop(columns=['Column: '], inplace=True)
table_dicts = table.to_dict('records')
outfile = os.path.join(dir_path, sheet + '.txt')
pmag.magic_write(outfile, table_dicts, sheet)
except Exception:  # sheet missing or malformed
print('sheet not found: ', sheet)
return True
# try to deal reasonably with unicode errors
try:
f = codecs.open(infile, 'r', "utf-8")
infile = f.readlines()
except UnicodeDecodeError:
f = codecs.open(infile, 'r', "Latin-1")
infile = f.readlines()
f.close()
else:
infile = txt.split("\n")
File = [] # will contain all non-blank lines from downloaded file
for line in infile:
line = line.replace('\n', '')
if line[0:4] == '>>>>' or len(line.strip()) > 0: # skip blank lines
File.append(line)
LN = 0 # tracks our progress iterating through File
type_list = []
filenum = 0
while LN < len(File) - 1:
line = File[LN]
if ">>>>" in line:
LN += 1
continue
file_type = line.split('\t')[1]
file_type = file_type.lower()
if file_type[-1] == "\n":
file_type = file_type[:-1]
if print_progress:
print('working on: ', repr(file_type))
if file_type not in type_list:
type_list.append(file_type)
else:
filenum += 1
LN += 1
line = File[LN]
# skip empty tables
if line == ">>>>>>>>>>":
LN += 1
continue
# detect MagIC compact format
magic_compact_title_lines = {}
while File[LN][0] == "*": # this means there are measurements made in MagIC compact format
this_title_line = File[LN].replace('\n', '').replace('*', '').split('\t')
magic_compact_title_lines[this_title_line[0]] = this_title_line[1]
LN += 1
# make key line detection specific
key_line = File[LN]
keys = key_line.replace('\n', '').split('\t')
if keys[0][0] == '.':
keys = key_line.replace('\n', '').replace('.', '').split('\t')
keys.append('RecNo')  # kludge for new MagIC download format
Recs = []
LN += 1
while LN < len(File):
line = File[LN]
# finish up one file type and then break
if ">>>>" in line and len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
# clean up method codes in place (strip stray spaces)
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":"
rec[method_col] = methods[:-1]
pmag.magic_write(outfile, Recs, file_type)
if print_progress:
print(file_type, " data put in ", outfile)
Recs = []
LN += 1
break
# keep adding records of the same file type
else:
rec = line.split('\t')
Rec = {}
if len(magic_compact_title_lines)>0:
for title in list(magic_compact_title_lines.keys()):
Rec[title] = magic_compact_title_lines[title]
if len(rec) == len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
Recs.append(Rec)
# in case of magic_search_results.txt, which has an extra
# column:
elif len(rec) - len(keys) == 1:
for k in range(len(rec) - 1):
Rec[keys[k]] = rec[k]
Recs.append(Rec)
elif len(rec) < len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
for k in range(len(rec), len(keys)):
Rec[keys[k]] = ""
Recs.append(Rec)
else:
print('WARNING: problem in file with line: ')
print(line)
print('skipping....')
LN += 1
if len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
# clean up method codes in place (strip stray spaces)
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":"
rec[method_col] = methods[:-1]
pmag.magic_write(outfile, Recs, file_type)
if print_progress:
print(file_type, " data put in ", outfile)
# look through locations table and create separate directories for each
# location
if separate_locs:
con = cb.Contribution(dir_path)
con.propagate_location_to_measurements()
con.propagate_location_to_samples()
con.propagate_location_to_specimens()
for dtype in con.tables:
con.write_table_to_file(dtype)
locs, locnum = [], 1
if 'locations' in con.tables:
locs = list(con.tables['locations'].df.index.unique())
if len(locs) > 0: # at least one location
# go through unique location names
for loc_name in locs:
if print_progress:
print('\nlocation_' + str(locnum) + ": ", loc_name)
lpath = os.path.join(dir_path, 'Location_' + str(locnum))
locnum += 1
try:
os.mkdir(lpath)
except FileExistsError:
print('directory ', lpath,
' already exists - overwriting everything: {}'.format(overwrite))
if not overwrite:
print("-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\nRerun with overwrite=True, or unpack this file in a different directory.".format(lpath))
return False
for file_type in con.tables:
recs = con.tables[file_type].convert_to_pmag_data_list()
if print_progress:
print(len(recs), ' read in')
lrecs = pmag.get_dictitem(recs, 'location', loc_name, 'T')
if len(lrecs) > 0:
outfile_name = os.path.join(lpath, file_type + ".txt")
pmag.magic_write(outfile_name, lrecs, file_type)
if print_progress:
print(len(lrecs), ' stored in ', outfile_name)
return True
[docs]
def download_magic_from_id(magic_id, directory='.', share_key=""):
"""
Downloads a contribution from earthref.org/MagIC using the provided ID
and saves it to the specified directory. If the directory does not exist, it is created.
If a share_key is provided, it downloads a private contribution.
Parameters:
magic_id (str): Unique ID for a MagIC contribution.
directory (str): Path to save the file. Defaults to current directory.
share_key (str): Share key for downloading from Private Contribution; default is "" for public contribution.
Returns:
bool: True if successful, False otherwise.
str: File name if successful, or an error message if failed.
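Examples:
Download a public contribution into a 'data' subdirectory (the ID
here is only a placeholder):
>>> result, fname = ipmag.download_magic_from_id('12345', directory='data')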
"""
magic_id = str(magic_id) # Ensure magic_id is a string to avoid errors in API request
# Normalize the directory path to ensure consistent directory formatting
if not directory.endswith('/'):
directory += '/'
# Ensure the directory exists
if not os.path.exists(directory):
os.makedirs(directory)
# Format file path
file_name = f'magic_contribution_{magic_id}.txt'
out_path = os.path.join(directory, file_name)
# Define API endpoint
api = f'https://api.earthref.org/v1/MagIC/data'
params = {'id': magic_id, 'key': share_key} if share_key else {'id': magic_id}
# Perform the request
response = requests.get(api, params=params)
if response.status_code == 200 and response.text:
# Write the content to the file only if the request was successful and the content is not empty
with open(out_path, 'w') as file:
file.write(response.text)
print("Download successful. File saved to:", out_path)
return True, file_name
else:
# Handle different cases of failure
error_message = "Failed to download: " + (response.reason if response.status_code != 200 else "The file is empty.")
print("Download failed with error:", error_message)
return False, error_message
[docs]
def download_magic_from_doi(doi):
"""
Download a public contribution matching the provided DOI
from earthref.org/MagIC.
Parameters:
doi : str
DOI for a MagIC contribution
Returns:
result : bool
True if the download succeeded, False otherwise
message : str
empty if successful, otherwise an error message
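Examples:
Fetch the contribution matching a reference DOI, saving it to
magic_contribution.txt (the DOI shown is only a placeholder):
>>> result, message = ipmag.download_magic_from_doi('10.1029/example-doi')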
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
try:
response = requests.get(api.format('download'), params={'doi': doi, 'n_max_contributions': 1})
except requests.exceptions.ConnectionError:
return False, "Couldn't connect to MagIC site, please check your internet connection"
except Exception as ex:
print('Unexpected problem downloading from MagIC:', str(ex), type(ex))
return False, str(ex)
if (response.status_code == 200):
contribution_zip = zipfile.ZipFile(io.BytesIO(response.content))
for filename in contribution_zip.namelist():
if (re.match(r'^\d+\/magic_contribution_\d+\.txt', filename)):
contribution_text = io.TextIOWrapper(contribution_zip.open(filename)).read()
with open('magic_contribution.txt', 'wt') as fh:
fh.write(contribution_text)
print(filename, 'extracted to magic_contribution.txt', '\n')
return True, ""
elif (response.status_code == 204):
return False, 'Public contribution with a reference DOI = \'{}\' not found in MagIC'.format(doi)
else:
return False, 'Error: ' + response.json()['err'][0]['message']
[docs]
def validate_with_public_endpoint(contribution_file,verbose=False):
"""
validate contribution to MagIC using public endpoint
Parameters
----------
contribution_file: str
file to validate
verbose : bool
if True, print error messages
Returns
---------
response : dict
response['status'] : bool
True if the validation request succeeded
response['validation'] : dict
dictionary of validation errors and warnings
response['warnings'] : None or str
error message if the request failed
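Examples
--------
Validate a prepared upload file (the file name is only a placeholder):
>>> result = ipmag.validate_with_public_endpoint('upload.txt', verbose=True)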
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
validation_results = {}
response = {}
response['status'] = False
response['validation'] = []
response['warnings'] = None
with open(contribution_file, 'rb') as f:
validation_response = requests.post(api.format('validate'),
headers={'Content-Type': 'text/plain'},
data=f)
if verbose:
print('status_code=',validation_response.status_code)
if validation_response.status_code==200:
response['status']=True
validation_results = validation_response.json()['validation']
response['validation']=validation_results
if verbose:
for error in response['validation']['errors']:
print (error['message'],'in rows: ')
print (error['rows'])
elif validation_response.status_code == 500:
response['status'] = False
response['warnings'] = "Status code 500"
else:
response['warnings'] = validation_response.json()['errors'][0]['message']
print ('unable to validate contribution')
return response
[docs]
def upload_magic2(concat=0, dir_path='.', data_model=None):
"""
Finds all MagIC 2.5 files in a given directory and compiles them into an
upload.txt file which can be uploaded to the MagIC database. Returns a
tuple of either: (False, error_message, errors) if there was a problem
creating/validating the upload file, or (filename, '', None) if the upload
file was successfully created and validated.
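Examples:
Compile the MagIC 2.5 files in the working directory into an upload
file (a minimal sketch):
>>> ipmag.upload_magic2(dir_path='.')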
"""
SpecDone = []
locations = []
concat = int(concat)
files_list = ["er_expeditions.txt", "er_locations.txt", "er_samples.txt", "er_specimens.txt", "er_sites.txt", "er_ages.txt", "er_citations.txt", "er_mailinglist.txt", "magic_measurements.txt",
"rmag_hysteresis.txt", "rmag_anisotropy.txt", "rmag_remanence.txt", "rmag_results.txt", "pmag_specimens.txt", "pmag_samples.txt", "pmag_sites.txt", "pmag_results.txt", "pmag_criteria.txt", "magic_instruments.txt"]
file_names = [os.path.join(dir_path, f) for f in files_list]
# begin the upload process
up = os.path.join(dir_path, "upload.txt")
if os.path.exists(up):
os.remove(up)
RmKeys = ['citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
'Site', 'Object Number', 'dir_n_specimens']
print("-I- Removing: ", RmKeys)
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
CheckSign = ['specimen_b_beta']
last = file_names[-1]
methods, first_file = [], 1
for File in file_names:
# read in the data
Data, file_type = pmag.magic_read(File)
if (file_type != "bad_file") and (file_type != "empty_file"):
print("-I- file", File, " successfully read in")
if len(RmKeys) > 0:
for rec in Data:
# remove unwanted keys
for key in RmKeys:
if key == 'specimen_Z' and key in list(rec.keys()):
# rename this key to lower case
rec['specimen_z'] = rec.pop(key)
if key in list(rec.keys()):
del rec[key] # get rid of unwanted keys
# make sure b_beta is positive
# ignore blanks
if 'specimen_b_beta' in list(rec.keys()) and rec['specimen_b_beta'] != "":
if float(rec['specimen_b_beta']) < 0:
# make sure value is positive
rec['specimen_b_beta'] = str(
-float(rec['specimen_b_beta']))
print('-I- adjusted to positive: ',
'specimen_b_beta', rec['specimen_b_beta'])
# make all declinations/azimuths/longitudes in range
# 0=>360.
rec = pmag.adjust_all_to_360(rec)
if file_type == 'er_locations':
for rec in Data:
locations.append(rec['er_location_name'])
if file_type in ['pmag_samples', 'pmag_sites', 'pmag_specimens']:
# if there is NO pmag data for specimens (samples/sites),
# do not try to write it to file
# (this causes validation errors, elsewise)
ignore = True
for rec in Data:
if ignore == False:
break
keys = list(rec.keys())
exclude_keys = ['er_citation_names', 'er_site_name', 'er_sample_name',
'er_location_name', 'er_specimen_names', 'er_sample_names']
for key in exclude_keys:
if key in keys:
keys.remove(key)
for key in keys:
if rec[key]:
ignore = False
break
if ignore:
continue
if file_type == 'er_samples': # check to only upload top priority orientation record!
NewSamps, Done = [], []
for rec in Data:
if rec['er_sample_name'] not in Done:
orient, az_type = pmag.get_orient(
Data, rec['er_sample_name'])
NewSamps.append(orient)
Done.append(rec['er_sample_name'])
Data = NewSamps
print(
'only highest priority orientation record from er_samples.txt read in ')
if file_type == 'er_specimens': # only specimens that have sample names
NewData, SpecDone = [], []
for rec in Data:
if rec['er_sample_name'] in Done:
NewData.append(rec)
SpecDone.append(rec['er_specimen_name'])
else:
print('no valid sample record found for: ')
print(rec)
Data = NewData
# print 'only measurements that have specimen/sample info'
if file_type == 'magic_measurements': # only measurements that have specimen names
no_specs = []
NewData = []
for rec in Data:
if rec['er_specimen_name'] in SpecDone:
NewData.append(rec)
else:
print('no valid specimen record found for: ')
print(rec)
no_specs.append(rec)
# print set([record['er_specimen_name'] for record in
# no_specs])
Data = NewData
# write out the data
if len(Data) > 0:
if first_file == 1:
keystring = pmag.first_rec(up, Data[0], file_type)
first_file = 0
else:
keystring = pmag.first_up(up, Data[0], file_type)
for rec in Data:
# collect the method codes
if "magic_method_codes" in list(rec.keys()):
meths = rec["magic_method_codes"].split(':')
for meth in meths:
if meth.strip() not in methods:
if meth.strip() != "LP-DIR-":
methods.append(meth.strip())
try:
pmag.putout(up, keystring, rec)
except IOError:
print('-W- File input error: slowing down')
time.sleep(1)
pmag.putout(up, keystring, rec)
# write out the file separator
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
print(file_type, 'written to ', up)
else:
print('File:', File)
print(file_type, 'is bad or non-existent - skipping ')
# write out the methods table
first_rec, MethRec = 1, {}
for meth in methods:
MethRec["magic_method_code"] = meth
if first_rec == 1:
meth_keys = pmag.first_up(up, MethRec, "magic_methods")
first_rec = 0
try:
pmag.putout(up, meth_keys, MethRec)
except IOError:
print('-W- File input error: slowing down')
time.sleep(1)
pmag.putout(up, meth_keys, MethRec)
if concat == 1:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
if os.path.isfile(up):
from . import validate_upload2 as validate_upload
validated = False
validated, errors = validate_upload.read_upload(up, data_model)
else:
print("no data found, upload file not created")
return False, "no data found, upload file not created", None
# rename upload.txt according to location + timestamp
format_string = "%d.%b.%Y"
if locations:
location = locations[0].replace(' ', '_')
new_up = location + '_' + time.strftime(format_string) + '.txt'
else:
new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
new_up = os.path.join(dir_path, new_up)
if os.path.isfile(new_up):
fname, extension = os.path.splitext(new_up)
for i in range(1, 100):
if os.path.isfile(fname + "_" + str(i) + extension):
continue
else:
new_up = fname + "_" + str(i) + extension
break
os.rename(up, new_up)
print("Finished preparing upload file: {} ".format(new_up))
if not validated:
print("-W- validation of upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be published.".format(new_up))
return False, "Validation of your upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be published.".format(new_up), errors
return new_up, '', None
def upload_magic3(concat=1, dir_path='.', dmodel=None, vocab="", contribution=None):
print('-W- ipmag.upload_magic3 is deprecated, please switch to using ipmag.upload_magic')
return upload_magic(concat, dir_path, dmodel, vocab, contribution)
[docs]
def upload_magic(concat=False, dir_path='.',input_dir_path='.',validate=True,verbose=True):
"""
Finds all MagIC 3.0 files in a given directory and compiles them into an
upload.txt file which can be uploaded to the MagIC database.
If validate is True, the compiled upload file is checked against MagIC's
public validation endpoint before returning.
Parameters
----------
concat : bool
if True, append to an existing upload.txt file in dir_path;
if False, write a new file (default is False)
dir_path : string for output directory (default ".")
input_dir_path : str, default "."
validate : boolean
validate upload file on MagIC's public endpoint
verbose : boolean
if True print progress and validation results
Returns
----------
tuple
(False, error_message, None, None) if there was a problem creating
or validating the upload file,
or (filename, validation_response, None, None) if the file creation
was fully successful
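Examples
----------
Compile the MagIC 3.0 files in the working directory and validate the
result with the public endpoint (a minimal sketch):
>>> new_file, val_response, _, _ = ipmag.upload_magic(dir_path='.', validate=True)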
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
locations = []
concat = int(concat)
dtypes = ["locations", "samples", "specimens", "sites", "ages", "measurements",
"criteria","images"] # don't upload the contribution.txt file!
fnames = [os.path.join(input_dir_path, dtype + ".txt") for dtype in dtypes]
file_names = [fname for fname in fnames if os.path.exists(fname)]
first_file=True
if not file_names:
# if no contribution is provided and no contribution could be created,
# you are out of luck
print("-W- No 3.0 files found in your directory: {}, upload file not created".format(input_dir_path))
return False, "no 3.0 files found, upload file not created", None, None
last_file_type=file_names[-1]
# begin the upload file preparation
up = os.path.join(dir_path, "upload.txt")
if os.path.exists(up):
os.remove(up)
RmKeys = ('citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
'Site', 'Object Number', 'version', 'site_definition')
#print("-I- Removing: ", RmKeys)
extra_RmKeys = {'measurements': ['sample', 'site', 'location','treat_mw_energy'],
'specimens': ['site', 'location', 'age', 'age_unit', 'age_high',
'age_low', 'age_sigma', 'specimen_core_depth','result_type'],
'samples': ['location', 'age', 'age_unit', 'age_high', 'age_low',
'age_sigma', 'core_depth', 'composite_depth','result_type'],
'sites': ['texture', 'azimuth', 'azimuth_dec_correction', 'dip',
'orientation_quality', 'sample_alternatives', 'timestamp','result_type'],
'ages': ['level']}
dmodel = data_model.DataModel()
for file_type in file_names:
df = pd.read_csv(file_type,sep='\t',header=1)
df.fillna("",inplace=True)
if len(df):
print("-I- {} file successfully read in".format(file_type))
# make some adjustments to clean up data
# drop non MagIC keys
DropKeys = set(RmKeys).intersection(df.columns)
if len(DropKeys)>0:
print(
'-I- dropping these columns: {} from the {} table'.format(', '.join(DropKeys), file_type))
df.drop(DropKeys, axis=1, inplace=True)
ftype=file_type.split('/')[-1].split('.')[0]
if ftype in list(extra_RmKeys.keys()):
DropKeys = list(extra_RmKeys[ftype])
DropKeys = set(DropKeys).intersection(df.columns)
if len(DropKeys)>0:
print(
'-I- dropping these columns: {} from the {} table'.format(', '.join(DropKeys), file_type))
df.drop(DropKeys, axis=1, inplace=True)
n_cols = df.filter(like='_n', axis=1)
if 'lat_n' in n_cols.columns:
n_cols.drop(columns=['lat_n'], inplace=True)  # exclude lat_n from the trailing-'.0' removal below
other_int_cols = ['contribution_id', 'pole_w_q', 'pole_bc_q', 'order', 'sequence', 'hyst_loop', 'treat_step_num']
# remove a trailing '.0' from integer-like columns; str.strip(".0")
# would strip any leading/trailing '.' and '0' characters (mangling
# values like '100'), so use an anchored regex instead
for col in n_cols:
if col in df.columns:
df[col] = df[col].astype('str').str.replace(r'\.0$', '', regex=True)
for col in other_int_cols:
if col in df.columns:
df[col] = df[col].astype('str').str.replace(r'\.0$', '', regex=True)
# convert int_scat to True/False
if 'int_scat' in df.columns:
df.loc[df['int_scat']=='t','int_scat']='True'
df.loc[df['int_scat']=='f','int_scat']='False'
# make sure int_md is positive
if 'int_md' in df.columns:
try:
df['int_md'] = df['int_md'].replace('', np.nan)
df.loc[df['int_md'].astype('float')<0,'int_md']=np.nan
except Exception as ex:
print("-W-", ex)
# make sure int_b_beta is positive
if 'int_b_beta' in df.columns:
# get rid of empty strings
df = df.replace(r'\s+( +\.)|#', np.nan,
regex=True).replace('', np.nan)
try:
df['int_b_beta'] = df['int_b_beta'].astype(
float).apply(abs)
except ValueError:
print("-W- Non numeric values found in int_b_beta column.\n Could not apply absolute value.")
# make all declinations/azimuths/longitudes in range 0=>360.
relevant_cols = val_up3.get_degree_cols(df)
for col in relevant_cols:
df[col] = df[col].apply(pmag.adjust_val_to_360)
# get list of location names
if 'locations' in file_type:
locations = sorted(df['location'].unique())
# write out the data
if len(df):
df.replace('-9999','',inplace=True)
#for col in df.columns:
# df.loc[df[col]=='-9999',col]=""
ftype = file_type.split('/')[-1].split('.')[0]
if first_file:
pmag.magic_write(up, df, ftype, dataframe=True)
first_file=False
else:
pmag.magic_write(up, df, ftype, dataframe=True, append=True)
# write out the file separator
if last_file_type != file_type:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
print("-I-", file_type, 'written to ', up)
else: # last file, no newline at end of file
print("-I-", file_type, 'written to ', up)
# if there was no understandable data
else:
print(file_type, 'is bad or non-existent - skipping ')
# add to existing file
if concat:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
if not os.path.isfile(up):
print("no data found, upload file not created")
return False, "no data found, upload file not created", None, None
# rename upload.txt according to location + timestamp
format_string = "%d.%b.%Y"
if locations:
locs = set(locations)
locs = sorted(locs)[:3]
#location = locations[0].replace(' ', '_')
try:
locs = [loc.replace(' ', '-') for loc in locs]
except AttributeError:
locs = ["unknown_location"]
location = "_".join(locs)
new_up = location + '_' + time.strftime(format_string) + '.txt'
else:
new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
new_up = os.path.join(dir_path, new_up)
if os.path.isfile(new_up):
fname, extension = os.path.splitext(new_up)
for i in range(1, 100):
if os.path.isfile(fname + "_" + str(i) + extension):
continue
else:
new_up = fname + "_" + str(i) + extension
break
if not up:
print("-W- Could not create an upload file")
return False, "Could not create an upload file", None, None
os.rename(up, new_up)
print("Finished preparing upload file: {} ".format(new_up))
val_response={}
if validate:
print ('Validating upload file with public endpoint')
try:
val_response=validate_with_public_endpoint(new_up,verbose=verbose)
except Exception as ex:
print("-E- Couldn't connect to MagIC for validation")
print(ex)
return False, "Created {} but could not connect to MagIC for validation".format(new_up), None, None
return new_up, val_response, None, None
[docs]
def contribution_to_magic(contribution, dir_path='.'):
"""
Write a contribution object to MagIC-formatted files in the specified directory.
Compiles these files into an upload.txt file which can be uploaded into the
MagIC database using the upload_magic function.
Parameters
----------
contribution : Contribution
A contribution object containing tables to be written to a MagIC-formatted file.
dir_path : str
The directory path where the MagIC-formatted file will be written.
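Examples
--------
Write out an in-memory contribution and build an upload file from it
(`con` here stands for an existing Contribution object):
>>> ipmag.contribution_to_magic(con, dir_path='my_project')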
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# write out each table present in the contribution
for dtype in ['specimens', 'samples', 'sites', 'locations', 'measurements']:
if dtype in contribution.tables:
contribution.tables[dtype].write_magic_file(dir_path=dir_path)
upload_magic(dir_path=dir_path, input_dir_path=dir_path)
[docs]
def create_private_contribution(username="",password=""):
"""
Create a private contribution on earthref.org/MagIC.
Parameters:
username : str
personal username for MagIC
password : str
password for username
Returns:
response : dict
response['status_code'] : bool
True if creation of the private workspace succeeded
response['url'] : str
URL of the request
response['method'] : str
'POST'
response['id'] : str
if successful, the MagIC ID number created
response['errors'] : str or None
if unsuccessful, an error message
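Examples:
Create a private workspace (the credentials shown are placeholders):
>>> response = ipmag.create_private_contribution('my_username', 'my_password')
>>> new_id = response['id']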
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
response={}
response['status_code']=False
response['errors']='Failed to contact database'
response['method']='POST'
try:
create_response = requests.post(api.format('private'), auth=(username, password))
if create_response.status_code==201:
response['status_code']=True
response['errors']=None
response['method']='POST'
response['url']=create_response.request.url
response['id']=create_response.json()['id']
else:
response['status_code']=False
response['method']='POST'
response['url']=create_response.request.url
response['id']='None'
response['errors']=create_response.json()['errors'][0]['message']
except Exception as ex:
response['errors'] = 'Failed to contact database: {}'.format(ex)
return response
[docs]
def delete_private_contribution(contribution_id,username="",password=""):
"""
Delete a private contribution on earthref.org/MagIC.
Parameters:
contribution_id: int
ID of MagIC contribution to delete
username : str
personal username for MagIC
password : str
password for username
Returns:
response : dict
response['status_code'] : bool
True if deletion of the contribution succeeded
response['url'] : str
URL of the request
response['method'] : str
'DELETE'
response['id'] : str
if successful, the MagIC ID of the deleted contribution
response['errors'] : str
if unsuccessful, an error message
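Examples:
Delete a private contribution by its ID (the ID and credentials shown
are placeholders):
>>> response = ipmag.delete_private_contribution(12345, 'my_username', 'my_password')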
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
response={}
response['status_code']=False
response['errors']='Failed to contact database'
response['method']='DELETE'
try:
delete_response = requests.delete(api.format('private'),
params={'id':contribution_id},
auth=(username, password))
if delete_response.status_code==200:
response['status_code']=True
response['url']=delete_response.request.url
response['id']=contribution_id
response['errors']='None'
else:
response['status_code']=False
response['url']=delete_response.request.url
response['id']='None'
response['errors']=delete_response.json()['errors'][0]['message']
except Exception as ex:
response['errors'] = 'Failed to contact database: {}'.format(ex)
return response
[docs]
def upload_to_private_contribution(contribution_id, upload_file,username="",password=""):
"""
Upload to a private contribution on earthref.org/MagIC.
Parameters
----------
contribution_id: int
ID of the MagIC contribution to upload to
upload_file: str
file to upload (complete path)
username : str
personal username for MagIC
password : str
password for username
Returns
---------
response : dict
response['status_code'] : bool
True if the upload to the private workspace succeeded
response['url'] : str
URL of the request
response['method'] : str
'PUT'
response['errors'] : str
if unsuccessful, an error message
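Examples
--------
Upload a prepared file into a private workspace (the ID, file name,
and credentials shown are placeholders):
>>> response = ipmag.upload_to_private_contribution(12345, 'upload.txt', 'my_username', 'my_password')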
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
response={}
response['status_code']=False
response['errors']='Failed to contact database'
response['method']='PUT'
response['upload_file']=upload_file
try:
with open(upload_file, 'rb') as f:
upload_response = requests.put(api.format('private'),
params={'id':contribution_id},
auth=(username, password),
headers={'Content-Type': 'text/plain'},
data=f)
if upload_response.status_code==202:
response['status_code']=True
response['url']=upload_response.request.url
response['errors']='None'
else:
response['status_code'] = False
response['url'] = upload_response.request.url
response['errors'] = 'Upload failed with status code {}'.format(upload_response.status_code)
except Exception as ex:
print('trouble uploading:')
print(ex)
return response
[docs]
def validate_private_contribution(contribution_id,username="",password="",verbose=True):
"""
validate private contribution in MagIC
Parameters
----------
contribution_id: int
ID of the MagIC contribution to validate
username : str
personal username for MagIC
password : str
password for username
verbose : bool
if True, print error messages
Returns
---------
response : dict
response['status_code'] : bool
True if validation of the private contribution succeeded
response['url'] : str
URL of the request
response['validation_results'] : dictionary of validation results
response['method'] : str
'PUT'
response['errors'] : str
if unsuccessful, an error message
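Examples
--------
Validate a private contribution by its ID (the ID and credentials
shown are placeholders):
>>> response = ipmag.validate_private_contribution(12345, 'my_username', 'my_password')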
"""
api = 'https://api.earthref.org/v1/MagIC/{}'
import json
response={}
response['status_code']=False
response['errors']='Failed to contact database'
response['method']='PUT'
response['contribution_id']=contribution_id
try:
validate_response = requests.put(api.format('private/validate'),
params={'id': contribution_id},
auth=(username, password))
if validate_response.status_code == 200:
response['status_code'] = True
response['url'] = validate_response.request.url
response['errors'] = 'None'
errors_dict = json.loads(validate_response.text)
response['validation_results'] = errors_dict['validation']['errors']
if verbose:
print('Validated contribution with ID', contribution_id, ':\n', response['validation_results'])
else:
response['status_code'] = False
response['url'] = validate_response.request.url
response['errors'] = validate_response.json()['errors'][0]['message']
response['validation_results'] = 'None'
print('A private contribution with ID', contribution_id,
' could not be found in your private workspace for validation\n')
except Exception as ex:
print('trouble validating:', ex)
return response
[docs]
def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'):
"""
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages.
@param -> infile: path from the WD to the pmag speciemns table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from th WD to the place to write the magic instruments file
@param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string
"""
# initialize some variables
plotsites = False # cannot use draw_figs from within ipmag
Comps = [] # list of components
version_num = pmag.get_version()
args = sys.argv
model_lat_file = ""
Dcrit, Icrit, nocrit = 0, 0, 0
corrections = []
nocorrection = ['DA-NL', 'DA-AC', 'DA-CR']
# do some data adjustments
for cor in cors:
nocorrection.remove('DA-' + cor)
corrections.append('DA-' + cor)
# make sure each priority code carries the DA-AC- prefix
priorities = [p if p.startswith('DA-AC-') else 'DA-AC-' + p for p in priorities]
# translate coord into coords
if coord == 's':
coords = ['-1']
if coord == 'g':
coords = ['0']
if coord == 't':
coords = ['100']
if coord == 'b':
coords = ['0', '100']
if vgps_level == 'sample':
vgps = 1 # save sample level VGPS/VADMs
else:
vgps = 0 # site level
if do_site_intensity:
nositeints = 0
else:
nositeints = 1
# change these all to True/False instead of 1/0
if not skip_intensities:
# set model lat and
if use_sample_latitude and use_paleolatitude:
print("you should set a paleolatitude file OR use present day lat - not both")
return False
elif use_sample_latitude:
get_model_lat = 1
elif use_paleolatitude:
get_model_lat = 2
try:
# model_lat_file should name a file of whitespace-separated
# site/sample names and model latitudes in dir_path
model_lat_file = os.path.join(dir_path, model_lat_file)
mlat = open(model_lat_file, 'r')
ModelLats = []
for line in mlat.readlines():
ModelLat = {}
tmp = line.split()
ModelLat["er_site_name"] = tmp[0]
ModelLat["site_model_lat"] = tmp[1]
ModelLat["er_sample_name"] = tmp[0]
ModelLat["sample_lat"] = tmp[1]
ModelLats.append(ModelLat)
mlat.close()
except Exception:
print("use_paleolatitude option requires a valid paleolatitude file")
else:
get_model_lat = 0 # skips VADM calculation entirely
if plotsites and not skip_directions: # plot by site - set up plot window
EQ = {}
EQ['eqarea'] = 1
# define figure 1 as equal area projection
pmagplotlib.plot_init(EQ['eqarea'], 5, 5)
# I don't know why this has to be here, but otherwise the first plot
# never plots...
pmagplotlib.plot_net(EQ['eqarea'])
pmagplotlib.draw_figs(EQ)
infile = os.path.join(dir_path, infile)
measfile = os.path.join(dir_path, measfile)
instout = os.path.join(dir_path, instout)
sampfile = os.path.join(dir_path, sampfile)
sitefile = os.path.join(dir_path, sitefile)
agefile = os.path.join(dir_path, agefile)
specout = os.path.join(dir_path, specout)
sampout = os.path.join(dir_path, sampout)
siteout = os.path.join(dir_path, siteout)
resout = os.path.join(dir_path, resout)
critout = os.path.join(dir_path, critout)
if use_criteria == 'none':
Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria
crit_data = pmag.default_criteria(nocrit)
elif use_criteria == 'default':
crit_data = pmag.default_criteria(nocrit) # use default criteria
elif use_criteria == 'existing':
crit_data, file_type = pmag.magic_read(
critout) # use pmag_criteria file
print("Acceptance criteria read in from ", critout)
accept = {}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity
# data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang'] = critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma'] = '%10.3e' % (
eval(critrec['sample_int_sigma_uT']) * 1e-6)
if key not in list(accept.keys()) and critrec[key] != '':
accept[key] = critrec[key]
if use_criteria == 'default':
pmag.magic_write(critout, [accept], 'pmag_criteria')
print("\n Pmag Criteria stored in ", critout, '\n')
# now we're done slow dancing
# read in site data - has the lats and lons
SiteNFO, file_type = pmag.magic_read(sitefile)
# read in sample data
SampNFO, file_type = pmag.magic_read(sampfile)
# find all the sites with height info.
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F')
if agefile:
AgeNFO, file_type = pmag.magic_read(
agefile) # read in the age information
# read in specimen interpretations
Data, file_type = pmag.magic_read(infile)
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
comment, orient = "", []
samples, sites = [], []
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name'] = ""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name'] = ""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):
rec['specimen_int'] = ''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "":
rec['specimen_comp_name'] = 'A'
if rec['specimen_comp_name'] not in Comps:
Comps.append(rec['specimen_comp_name'])
if "specimen_tilt_correction" not in list(rec.keys()):
rec["specimen_tilt_correction"] = "-1"  # assume sample coordinates
rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip('\n')
if rec["specimen_tilt_correction"] not in orient:
# collect available coordinate systems
orient.append(rec["specimen_tilt_correction"])
if "specimen_direction_type" not in list(rec.keys()):
# assume direction is line - not plane
rec["specimen_direction_type"] = 'l'
if "specimen_dec" not in list(rec.keys()):
# if no declination, set direction type to blank
rec["specimen_direction_type"] = ''
if "specimen_n" not in list(rec.keys()):
rec["specimen_n"] = '' # put in n
if "specimen_alpha95" not in list(rec.keys()):
rec["specimen_alpha95"] = '' # put in alpha95
if "magic_method_codes" not in list(rec.keys()):
rec["magic_method_codes"] = ''
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts, SpecDirs, SpecPlanes = [], [], []
samples.sort() # get sorted list of samples and sites
sites.sort()
if not skip_intensities: # don't skip intensities
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
if nocrit == 0: # use selection criteria
for rec in IntData: # do selection criteria
kill = pmag.grade(rec, accept, 'specimen_int')
if len(kill) == 0:
# intensity record to be included in sample, site
# calculations
SpecInts.append(rec)
else:
SpecInts = IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections) > 0 and len(SpecInts) > 0:
for cor in corrections:
# only take specimens with the required corrections
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'has')
if len(nocorrection) > 0 and len(SpecInts) > 0:
for cor in nocorrection:
# exclude the corrections not specified for inclusion
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'not')
# take top priority specimen of its name in remaining specimens (only one
# per customer)
PrioritySpecInts = []
specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'er_specimen_name', spec, 'T')
if len(ThisSpecRecs) == 1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs) > 1:  # more than one
prec = []
for p in priorities:
# this specimen's records with the priority method code
PrioRecs = pmag.get_dictitem(
ThisSpecRecs, 'magic_method_codes', p, 'has')
if len(PrioRecs) > 0:
prec.append(PrioRecs[0])
if len(prec) > 0:
PrioritySpecInts.append(prec[0])  # take the best one
SpecInts = PrioritySpecInts # this has the first specimen record
if not skip_directions: # don't skip directions
# retrieve specimens with directed lines and planes
AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F')
# get all specimens with specimen_n information
Ns = pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F')
if nocrit != 1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill = pmag.grade(rec, accept, 'specimen_dir')
if len(kill) == 0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs = AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes)
# that pass muster
# list of all sample data and list of those that pass the DE-SAMP criteria
PmagSamps, SampDirs = [], []
PmagSites, PmagResults = [], [] # list of all site data and selected results
SampInts = []
for samp in samples: # run through the sample names
if avg_directions_by_sample: # average by sample if desired
# get all the directional data for this sample
SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T')
if len(SampDir) > 0: # there are some directions
for coord in coords: # step through desired coordinate systems
# get all the directions for this sample
CoordDir = pmag.get_dictitem(
SampDir, 'specimen_tilt_correction', coord, 'T')
if len(CoordDir) > 0: # there are some with this coordinate system
if not avg_all_components: # look component by component
for comp in Comps:
# get all directions from this component
CompDir = pmag.get_dictitem(
CoordDir, 'specimen_comp_name', comp, 'T')
if len(CompDir) > 0: # there are some
# get a sample average from all specimens
PmagSampRec = pmag.lnpbykey(
CompDir, 'sample', 'specimen')
# decorate the sample record
PmagSampRec["er_location_name"] = CompDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if CompDir[0].get('specimen_flag', 'g') == 'g':  # assume 'g' if flag not provided
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_comp_name'] = comp
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['er_specimen_names'] = pmag.get_list(
CompDir, 'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes'] = pmag.get_list(
CompDir, 'magic_method_codes') # get a list of the methods used
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
else:
kill = []
if len(kill) == 0:
SampDirs.append(PmagSampRec)
if vgps == 1: # if sample level VGP info desired, do that now
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
# print(PmagSampRec)
PmagSamps.append(PmagSampRec)
if avg_all_components: # average all components together basically same as above
PmagSampRec = pmag.lnpbykey(
CoordDir, 'sample', 'specimen')
PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if all(i.get('specimen_flag', 'g') == 'g' for i in CoordDir):
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = ""
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['sample_comp_name'] = pmag.get_list(
CoordDir, 'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names'] = pmag.get_list(
CoordDir, 'er_specimen_name')  # get specimen names averaged
PmagSampRec['magic_method_codes'] = pmag.get_list(
CoordDir, 'magic_method_codes') # assemble method codes
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
if len(kill) == 0:  # passes muster
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if avg_intensities_by_sample: # average by sample if desired
# get all the intensity data for this sample
SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T')
if len(SampI) > 0: # there are some
# get average intensity stuff
PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample')
# decorate sample record
PmagSampRec["sample_description"] = "sample intensity"
PmagSampRec["sample_direction_type"] = ""
PmagSampRec['er_site_name'] = SampI[0]["er_site_name"]
PmagSampRec['er_sample_name'] = samp
PmagSampRec['er_location_name'] = SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['er_specimen_names'] = pmag.get_list(
SampI, 'er_specimen_name')
PmagSampRec['magic_method_codes'] = pmag.get_list(
SampI, 'magic_method_codes')
if nocrit != 1: # apply criteria!
kill = pmag.grade(PmagSampRec, accept, 'sample_int')
if len(kill) == 0:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:
PmagSampRec = {} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes'] = ""
if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}:
if get_model_lat == 1: # use sample latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO)
# get rid of the model lat key
del PmagResRec['model_lat']
elif get_model_lat == 2: # use model latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats)
if PmagResRec != {}:
PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT"
if PmagResRec != {}:
PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n'] = PmagSampRec['sample_int_n']
PmagResRec['vadm_n'] = PmagSampRec['sample_int_n']
PmagResRec['data_type'] = 'i'
PmagResults.append(PmagResRec)
if len(PmagSamps) > 0:
# fill in missing keys from different types of records
TmpSamps, keylist = pmag.fillkeys(PmagSamps)
# save in sample output file
pmag.magic_write(sampout, TmpSamps, 'pmag_samples')
print(' sample averages written to ', sampout)
#
# create site averages from specimens or samples as specified
#
for site in sites:
for coord in coords:
if not avg_directions_by_sample:
key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired
if avg_directions_by_sample:
key, dirlist = 'sample', SampDirs # if sample averages at site level desired
# get all the sites with directions
tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T')
# use only the last coordinate if avg_all_components==False
tmp1 = pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T')
# fish out site information (lat/lon, etc.)
sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T')
if len(sd) > 0:
sitedat = sd[0]
if not avg_all_components: # do component wise averaging
for comp in Comps:
# get all components comp
siteD = pmag.get_dictitem(
tmp1, key + '_comp_name', comp, 'T')
# remove bad data from means
quality_siteD = []
# remove any records for which specimen_flag or sample_flag are 'b'
# assume 'g' if flag is not provided
for rec in siteD:
spec_quality = rec.get('specimen_flag', 'g')
samp_quality = rec.get('sample_flag', 'g')
if (spec_quality == 'g') and (samp_quality == 'g'):
quality_siteD.append(rec)
siteD = quality_siteD
if len(siteD) > 0: # there are some for this site and component name
# get an average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the site record
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if avg_directions_by_sample:
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
else:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'].strip(":")
if plotsites:
print(PmagSiteRec['er_site_name'])
# plot and list the data
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
# get the last orientation system specified
siteD = tmp1[:]
if len(siteD) > 0: # there are some
# get the average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the record
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'].strip(":")
if not avg_directions_by_sample:
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if plotsites:
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',
site, ' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagSiteRec['magic_software_packages'] = version_num
if agefile != "":
PmagSiteRec = pmag.get_age(
PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge)
PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "":
if int(PmagSiteRec["site_n_planes"]) > 0:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"]) > 2:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM"
kill = pmag.grade(PmagSiteRec, accept, 'site_dir')
if len(kill) == 0:
PmagResRec = {} # set up dictionary for the pmag_results table entry
PmagResRec['data_type'] = 'i' # decorate it a bit
PmagResRec['magic_software_packages'] = version_num
PmagSiteRec['site_description'] = 'Site direction included in results table'
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
dec = float(PmagSiteRec["site_dec"])
inc = float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "":
a95 = float(PmagSiteRec["site_alpha95"])
else:
a95 = 180.
sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[
0] # fish out site information (lat/lon, etc.)
lat = float(sitedat['site_lat'])
lon = float(sitedat['site_lon'])
plon, plat, dp, dm = pmag.dia_vgp(
dec, inc, a95, lat, lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction'] == '-1':
C = ' (spec coord) '
if PmagSiteRec['site_tilt_correction'] == '0':
C = ' (geog. coord) '
if PmagSiteRec['site_tilt_correction'] == '100':
C = ' (strat. coord) '
PmagResRec["pmag_result_name"] = "VGP Site: " + \
PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"] = "Site VGP, coord system = " + \
str(coord) + ' component: ' + comp
PmagResRec['er_site_names'] = PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['er_citation_names'] = 'This study'
PmagResRec['er_analyst_mail_names'] = user
PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"]
if avg_directions_by_sample:
PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name']
PmagResRec["average_dec"] = PmagSiteRec["site_dec"]
PmagResRec["average_inc"] = PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"]
PmagResRec["average_n"] = PmagSiteRec["site_n"]
PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"] = PmagSiteRec["site_n"]
PmagResRec["average_k"] = PmagSiteRec["site_k"]
PmagResRec["average_r"] = PmagSiteRec["site_r"]
PmagResRec["average_lat"] = '%10.4f ' % (lat)
PmagResRec["average_lon"] = '%10.4f ' % (lon)
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagResRec["vgp_lat"] = '%7.1f ' % (plat)
PmagResRec["vgp_lon"] = '%7.1f ' % (plon)
PmagResRec["vgp_dp"] = '%7.1f ' % (dp)
PmagResRec["vgp_dm"] = '%7.1f ' % (dm)
PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"]
if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-GEO"
if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT"
PmagSiteRec['site_polarity'] = ""
if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle = pmag.angle([0, 0], [0, (90 - plat)])
if angle <= 55.:
PmagSiteRec["site_polarity"] = 'n'
if angle > 55. and angle < 125.:
PmagSiteRec["site_polarity"] = 't'
if angle >= 125.:
PmagSiteRec["site_polarity"] = 'r'
PmagResults.append(PmagResRec)
if avg_by_polarity:
# find the tilt corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '100', 'T')
if len(crecs) < 2:
# if there aren't any, find the geographic corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '0', 'T')
if len(crecs) > 2: # if there are some,
comp = pmag.get_list(crecs, 'site_comp_name').split(':')[
0] # find the first component
# fish out all of the first component
crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T')
precs = []
for rec in crecs:
precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'],
'name': rec['er_site_name'], 'loc': rec['er_location_name']})
# calculate average by polarity
polpars = pmag.fisher_by_pol(precs)
# hunt through all the modes (normal=A, reverse=B, all=ALL)
for mode in list(polpars.keys()):
PolRes = {}
PolRes['er_citation_names'] = 'This study'
PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode
PolRes["data_type"] = "a"
PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec'])
PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc'])
PolRes["average_n"] = '%i' % (polpars[mode]['n'])
PolRes["average_r"] = '%5.4f' % (polpars[mode]['r'])
PolRes["average_k"] = '%6.0f' % (polpars[mode]['k'])
PolRes["average_alpha95"] = '%7.1f' % (
polpars[mode]['alpha95'])
PolRes['er_site_names'] = polpars[mode]['sites']
PolRes['er_location_names'] = polpars[mode]['locs']
PolRes['magic_software_packages'] = version_num
PmagResults.append(PolRes)
if not skip_intensities and nositeints != 1:
for site in sites: # now do intensities for each site
if plotsites:
print(site)
if not avg_intensities_by_sample:
key, intlist = 'specimen', SpecInts # if using specimen level data
if avg_intensities_by_sample:
key, intlist = 'sample', PmagSamps # if using sample level data
# get all the intensities for this site
Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T')
if len(Ints) > 0: # there are some
# get average intensity stuff for site table
PmagSiteRec = pmag.average_int(Ints, key, 'site')
# get average intensity stuff for results table
PmagResRec = pmag.average_int(Ints, key, 'average')
if plotsites: # if site by site examination requested - print this site out to the screen
for rec in Ints:
print(rec['er_' + key + '_name'], ' %7.1f' %
(1e6 * float(rec[key + '_int'])))
if len(Ints) > 1:
print('Average: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints))
print('Sigma: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc'])
input('Press [Enter] to continue\n')
er_location_name = Ints[0]["er_location_name"]
# decorate the records
PmagSiteRec["er_location_name"] = er_location_name
PmagSiteRec["er_citation_names"] = "This study"
PmagResRec["er_location_names"] = er_location_name
PmagResRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagResRec["er_analyst_mail_names"] = user
PmagResRec["data_type"] = 'i'
if not avg_intensities_by_sample:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name') # list of all samples used
PmagResRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name')
PmagSiteRec['er_site_name'] = site
PmagResRec['er_site_names'] = site
PmagSiteRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
PmagResRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
kill = pmag.grade(PmagSiteRec, accept, 'site_int')
if nocrit == 1 or len(kill) == 0:
b, sig = float(PmagResRec['average_int']), ""
if PmagResRec['average_int_sigma'] != "":
sig = float(PmagResRec['average_int_sigma'])
# fish out site direction
sdir = pmag.get_dictitem(
PmagResults, 'er_site_names', site, 'T')
# get the VDM for this record using last average
# inclination (hope it is the right one!)
if len(sdir) > 0 and sdir[-1]['average_inc'] != "":
inc = float(sdir[-1]['average_inc'])
# get magnetic latitude using dipole formula
mlat = pmag.magnetic_lat(inc)
# get VDM with magnetic latitude
PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat))
PmagResRec["vdm_n"] = PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), mlat)
PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig)
else:
PmagResRec["vdm_sigma"] = ""
mlat = "" # define a model latitude
if get_model_lat == 1: # use present site latitude
mlats = pmag.get_dictitem(
SiteNFO, 'er_site_name', site, 'T')
if len(mlats) > 0:
mlat = mlats[0]['site_lat']
# use a model latitude from some plate reconstruction model
# (or something)
elif get_model_lat == 2:
mlats = pmag.get_dictitem(
ModelLats, 'er_site_name', site, 'T')
if len(mlats) > 0:
PmagResRec['model_lat'] = mlats[0]['site_model_lat']
mlat = PmagResRec['model_lat']
if mlat != "":
# get the VADM using the desired latitude
PmagResRec["vadm"] = '%8.3e ' % (
pmag.b_vdm(b, float(mlat)))
if sig != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), float(mlat))
PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig)
PmagResRec["vadm_n"] = PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"] = ""
# fish out site information (lat/lon, etc.)
sitedat = pmag.get_dictitem(
SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')
if len(sitedat) > 0:
sitedat = sitedat[0]
PmagResRec['average_lat'] = sitedat['site_lat']
PmagResRec['average_lon'] = sitedat['site_lon']
else:
PmagResRec['average_lat'] = 'UNKNOWN'
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['magic_software_packages'] = version_num
PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site
PmagResRec["result_description"] = "V[A]DM of site"
PmagResRec["pmag_criteria_codes"] = "ACCEPT"
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites) > 0:
Tmp, keylist = pmag.fillkeys(PmagSites)
pmag.magic_write(siteout, Tmp, 'pmag_sites')
print(' sites written to ', siteout)
else:
print("No Site level table")
if len(PmagResults) > 0:
TmpRes, keylist = pmag.fillkeys(PmagResults)
pmag.magic_write(resout, TmpRes, 'pmag_results')
print(' results written to ', resout)
else:
print("No Results level table")
def orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True,
samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False,
orient_file='orient.txt', samp_file='samples.txt', site_file='sites.txt',
output_dir_path='.', input_dir_path='', append=False, data_model=3):
"""
Convert tab-delimited field notebook information to MagIC-formatted tables (er_samples and er_sites).
INPUT FORMAT
Input files must be tab delimited and have in the first line:
tab location_name
Note: The "location_name" will facilitate searching in the MagIC database. Data from different
"locations" should be put in separate files. The definition of a "location" is rather loose.
Note that this is the literal word 'tab', not a tab character (a tab character is indicated below by '\t').
The second line has the names of the columns (tab delimited), e.g.:
site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd
defaults:
orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True, samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False, orient_file='orient.txt', samp_file='samples.txt', site_file='sites.txt', output_dir_path='.', input_dir_path='', append=False, data_model=3):
orientation conventions:
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth, lab arrow dip = field_dip - 90 (see the worked example after this list)
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] see http://earthref.org/PmagPy/cookbook/#field_info for more information. You can customize other format yourself, or email ltauxe@ucsd.edu for help.
[8] Lab arrow azimuth = mag_azimuth-180; Lab arrow dip = 90-field_dip
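For example (values made up for illustration), under convention [5] a field
measurement of mag_azimuth=115 and field_dip=62 gives lab azimuth = 115 and
lab arrow dip = 62 - 90 = -28.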
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: for all others, you will have to either customize this
yourself or e-mail ltauxe@ucsd.edu for help.
NOTE:
1) column order doesn't matter but the NAMES do.
2) sample_name, sample_lithology, sample_type, sample_class, lat and long are required. all others are optional.
3) If subsequent data are the same (e.g., date, bedding orientation, participants, stratigraphic_height),
you can leave the field blank and the program will fill in the last recorded information. BUT if you really want a blank stratigraphic_height, enter '-1'. The following are not inherited and must be specified for each entry: image_name, image_look, image_photographer and method_codes.
4) hhmm must be in the format: hh:mm and the hh must be in 24 hour time.
date must be mm/dd/yy (years < 50 will be converted to 20yy and >50 will be assumed 19yy). hours_from_gmt is the number of hours to SUBTRACT from hh to get to GMT.
5) image_name, image_look and image_photographer are colon delimited lists of file name (e.g., IMG_001.jpg) image look direction and the name of the photographer respectively. If all images had same look and photographer, just enter info once. The images will be assigned to the site for which they were taken - not at the sample level.
6) participants: Names of who helped take the samples. These must be a colon delimited list.
7) method_codes: Special method codes on a sample level, e.g., SO-GT5, which means the orientation has an uncertainty of >5 degrees,
for example if it broke off before orienting....
8) GPS_Az is the place to put directly determined GPS Azimuths, using, e.g., points along the drill direction.
9) sample_cooling_rate is the cooling rate in K per Ma
10) cooling_rate_corr is the percent cooling rate factor to apply to specimens from this sample
11) cooling_rate_mcd: data adjustment method code for the cooling rate correction; DA-CR-EG is educated guess; DA-CR-PS is percent estimated from pilot samples; DA-CR-TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates
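Examples:
A minimal usage sketch (the file names here are hypothetical and must
exist in the input directory; returns (True, None) on success):
>>> ipmag.orientation_magic(or_con=1, dec_correction_con=1,
...     orient_file='orient.txt', samp_file='samples.txt',
...     site_file='sites.txt', output_dir_path='.', data_model=3)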
"""
# initialize some variables
# bed_correction used to be BedCorr
# dec_correction_con used to be corr
# dec_correction used to be DecCorr
# meths is now method_codes
# delta_u is now hours_from_gmt
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
or_con, dec_correction_con, dec_correction = int(
or_con), int(dec_correction_con), float(dec_correction)
hours_from_gmt = float(hours_from_gmt)
stratpos = ""
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampOuts, SiteOuts, ImageOuts = [], [], []
samplelist, sitelist, imagelist = [], [], []
Z = 1
newbaseline, newbeddir, newbeddip = "", "", ""
fpars = []
sclass, lithology, sample_type = "", "", ""
newclass, newlith, newtype = '', '', ''
BPs = [] # bedding pole declinations, bedding pole inclinations
image_file = "er_images.txt"
#
# use 3.0. default filenames when in 3.0.
# but, still allow for custom names
data_model = int(data_model)
if data_model == 3:
if samp_file == "er_samples.txt":
samp_file = "samples.txt"
if site_file == "er_sites.txt":
site_file = "sites.txt"
image_file = "images.txt"
orient_file = pmag.resolve_file_name(orient_file, input_dir_path)
if not os.path.exists(orient_file):
return False, "No such file: {}. If the orientation file is not in your current working directory, make sure you have specified the correct input directory.".format(orient_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
image_file = os.path.join(output_dir_path, image_file)
# validate input
if '4' in samp_con[0]:
pattern = re.compile(r'[4][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 4, you must provide the number of characters with which to distinguish sample from site. [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX)")
if '7' in samp_con[0]:
pattern = re.compile(r'[7][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 7, you must provide the number of characters with which to distinguish sample from site. [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY")
if dec_correction_con == 2 and not dec_correction:
raise Exception(
"If using magnetic declination convention 2, you must also provide a declincation correction in degrees")
SampRecs, SiteRecs, ImageRecs = [], [], []
SampRecs_sorted, SiteRecs_sorted = {}, {}
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
# convert 3.0. sample file to 2.5 format
if data_model == 3:
SampRecs3 = SampRecs
SampRecs = []
for samp_rec in SampRecs3:
rec = map_magic.mapping(
samp_rec, map_magic.samp_magic3_2_magic2_map)
SampRecs.append(rec)
# magic_data dictionary sorted by sample_name
SampRecs_sorted = pmag.sort_magic_data(SampRecs, 'er_sample_name')
print('sample data to be appended to: ', samp_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
samp_file, ' will create new.')
try:
SiteRecs, file_type = pmag.magic_read(site_file)
# convert 3.0. site file to 2.5 format
if data_model == 3:
SiteRecs3 = SiteRecs
SiteRecs = []
for site_rec in SiteRecs3:
SiteRecs.append(map_magic.mapping(
site_rec, map_magic.site_magic3_2_magic2_map))
# magic_data dictionary sorted by site_name
SiteRecs_sorted = pmag.sort_magic_data(SiteRecs, 'er_site_name')
print('site data to be appended to: ', site_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
site_file, ' will create new.')
try:
ImageRecs, file_type = pmag.magic_read(image_file)
# convert from 3.0. --> 2.5
if data_model == 3:
ImageRecs3 = ImageRecs
ImageRecs = []
for image_rec in ImageRecs3:
ImageRecs.append(map_magic.mapping(
image_rec, map_magic.image_magic3_2_magic2_map))
print('image data to be appended to: ', image_file)
except:
print('problem with existing file: ',
image_file, ' will create new.')
#
# read in file to convert
#
OrData, location_name = pmag.magic_read(orient_file)
if location_name == "demag_orient":
location_name = ""
#
# step through the data sample by sample
#
# use map_magic in here...
for OrRec in OrData:
if 'mag_azimuth' not in list(OrRec.keys()):
OrRec['mag_azimuth'] = ""
if 'field_dip' not in list(OrRec.keys()):
OrRec['field_dip'] = ""
if OrRec['mag_azimuth'] == " ":
OrRec["mag_azimuth"] = ""
if OrRec['field_dip'] == " ":
OrRec["field_dip"] = ""
if 'sample_description' in list(OrRec.keys()):
sample_description = OrRec['sample_description']
else:
sample_description = ""
if 'cooling_rate_corr' in list(OrRec.keys()):
if 'cooling_rate_mcd' not in list(OrRec.keys()):
OrRec['cooling_rate_mcd'] = 'DA-CR'
sample_orientation_flag = 'g'
if 'sample_orientation_flag' in list(OrRec.keys()):
if OrRec['sample_orientation_flag'] == 'b' or OrRec["mag_azimuth"] == "":
sample_orientation_flag = 'b'
methcodes = method_codes # initialize method codes
if methcodes:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = methcodes + ":" + \
OrRec['method_codes'] # add notes
else:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = OrRec['method_codes'] # add notes
codes = methcodes.replace(" ", "").split(":")
sample_name = OrRec["sample_name"]
# patch added by rshaar 7/2016
# if sample_name already exists in er_samples.txt:
# merge the new data columns calculated by orientation_magic with the existing data columns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if sample_name in list(SampRecs_sorted.keys()):
Prev_MagRec = SampRecs_sorted[sample_name][-1]
MagRec = Prev_MagRec
else:
Prev_MagRec = {}
MagRec = {}
MagRec["er_citation_names"] = "This study"
# the following keys were calculated or defined in the code above:
for key in ['sample_igsn', 'sample_texture', 'sample_cooling_rate',
'cooling_rate_corr', 'cooling_rate_mcd']:
val = OrRec.get(key, '')
if val:
MagRec[key] = val
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
if location_name != "":
MagRec["er_location_name"] = location_name
elif "er_location_name" in list(Prev_MagRec.keys()):
MagRec["er_location_name"] = Prev_MagRec["er_location_name"]
else:
MagRec["er_location_name"] = ""
# the following keys are taken directly from OrRec dictionary:
for key in ["sample_height", "er_sample_alternatives", "sample_orientation_flag"]:
if key in list(OrRec.keys()) and OrRec[key] != "":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
# the following keys, if blank, used to be defined here as "Not Specified" :
for key in ["sample_class", "sample_lithology", "sample_type"]:
if key in list(OrRec.keys()) and OrRec[key] != "" and OrRec[key] != "Not Specified":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "" and Prev_MagRec[key] != "Not Specified":
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = "" # "Not Specified"
# (rshaar) From here parse new information and replace previous, if exists:
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = OrRec["sample_name"]
if "IGSN" in list(OrRec.keys()):
MagRec["sample_igsn"] = OrRec["IGSN"]
else:
MagRec["sample_igsn"] = ""
# MagRec["sample_height"],MagRec["sample_bed_dip_direction"],MagRec["sample_bed_dip"]="","",""
MagRec["sample_bed_dip_direction"], MagRec["sample_bed_dip"] = "", ""
# if "er_sample_alternatives" in OrRec.keys():
# MagRec["er_sample_alternatives"]=OrRec["sample_alternatives"]
sample = OrRec["sample_name"]
if OrRec['mag_azimuth'] == "" and OrRec['field_dip'] != "":
OrRec['mag_azimuth'] = '999'
if OrRec["mag_azimuth"] != "":
labaz, labdip = pmag.orient(
float(OrRec["mag_azimuth"]), float(OrRec["field_dip"]), or_con)
if labaz < 0:
labaz += 360.
else:
labaz, labdip = "", ""
if OrRec['mag_azimuth'] == '999':
labaz = ""
if "GPS_baseline" in list(OrRec.keys()) and OrRec['GPS_baseline'] != "":
newbaseline = OrRec["GPS_baseline"]
if newbaseline != "":
baseline = float(newbaseline)
MagRec['er_scientist_mail_names'] = OrRec.get('participants', '')
newlat = OrRec["lat"]
if newlat != "":
lat = float(newlat)
if lat == "":
print("No latitude specified for sample ", sample,
". Latitude is required for all samples.")
return False, "No latitude specified for sample " + sample + ". Latitude is required for all samples."
MagRec["sample_lat"] = '%11.5f' % (lat)
newlon = OrRec["long"]
if newlon != "":
lon = float(newlon)
if lon == "":
print("No longitude specified for sample ", sample,
". Longitude is required for all samples.")
return False, "No longitude specified for sample " + sample + ". Longitude is required for all samples."
MagRec["sample_lon"] = '%11.5f' % (lon)
if 'bedding_dip_direction' in list(OrRec.keys()):
newbeddir = OrRec["bedding_dip_direction"]
if newbeddir != "":
bed_dip_dir = OrRec['bedding_dip_direction']
if 'bedding_dip' in list(OrRec.keys()):
newbeddip = OrRec["bedding_dip"]
if newbeddip != "":
bed_dip = OrRec['bedding_dip']
MagRec["sample_bed_dip"] = bed_dip
MagRec["sample_bed_dip_direction"] = bed_dip_dir
# MagRec["sample_type"]=sample_type
if labdip != "":
MagRec["sample_dip"] = '%7.1f' % labdip
else:
MagRec["sample_dip"] = ""
if "date" in list(OrRec.keys()) and OrRec["date"] != "":
newdate = OrRec["date"]
if newdate != "":
date = newdate
mmddyy = date.split('/')
yy = int(mmddyy[2])
if yy > 100:
pass  # four-digit year supplied
elif yy > 50:
yy = 1900 + yy
else:
yy = 2000 + yy
decimal_year = yy + float(mmddyy[0])/12  # approximate decimal year (month resolution)
sample_date = '%i:%s:%s' % (yy, mmddyy[0], mmddyy[1])
time = OrRec['hhmm']
if time:
sample_date += (':' + time)
MagRec["sample_date"] = sample_date.strip(':')
if labaz != "":
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
else:
MagRec["sample_azimuth"] = ""
if "stratigraphic_height" in list(OrRec.keys()):
if OrRec["stratigraphic_height"] != "":
MagRec["sample_height"] = OrRec["stratigraphic_height"]
stratpos = OrRec["stratigraphic_height"]
elif OrRec["stratigraphic_height"] == '-1':
MagRec["sample_height"] = "" # make empty
elif stratpos != "":
# keep last record if blank
MagRec["sample_height"] = stratpos
#
# get magnetic declination (corrected with igrf value)
if dec_correction_con == 1 and MagRec['sample_azimuth'] != "":
try:
x, y, z, f = pmag.doigrf(lon, lat, 0, decimal_year)
except TypeError: # see issue 617
return None, "Only dates prior to 2020 supported"
Dir = pmag.cart2dir((x, y, z))
dec_correction = Dir[0]
if "bedding_dip" in list(OrRec.keys()):
if OrRec["bedding_dip"] != "":
MagRec["sample_bed_dip"] = OrRec["bedding_dip"]
bed_dip = OrRec["bedding_dip"]
else:
MagRec["sample_bed_dip"] = bed_dip
else:
MagRec["sample_bed_dip"] = '0'
if "bedding_dip_direction" in list(OrRec.keys()):
if OrRec["bedding_dip_direction"] != "" and bed_correction == 1:
dd = float(OrRec["bedding_dip_direction"]) + dec_correction
if dd > 360.:
dd = dd - 360.
MagRec["sample_bed_dip_direction"] = '%7.1f' % (dd)
dip_dir = MagRec["sample_bed_dip_direction"]
else:
MagRec["sample_bed_dip_direction"] = OrRec['bedding_dip_direction']
else:
MagRec["sample_bed_dip_direction"] = '0'
if average_bedding:
if str(MagRec["sample_bed_dip_direction"]) and str(MagRec["sample_bed_dip"]):
BPs.append([float(MagRec["sample_bed_dip_direction"]),
float(MagRec["sample_bed_dip"]) - 90., 1.])
if MagRec['sample_azimuth'] == "" and MagRec['sample_dip'] == "":
MagRec["sample_declination_correction"] = ''
methcodes = methcodes + ':SO-NO'
MagRec["magic_method_codes"] = methcodes
MagRec['sample_description'] = sample_description
#
# work on the site stuff too
if 'site_name' in list(OrRec.keys()) and OrRec['site_name'] != "":
site = OrRec['site_name']
elif 'site_name' in list(Prev_MagRec.keys()) and Prev_MagRec['site_name'] != "":
site = Prev_MagRec['site_name']
else:
# parse out the site name
site = pmag.parse_site(OrRec["sample_name"], samp_con, Z)
MagRec["er_site_name"] = site
site_description = "" # overwrite any prior description
if 'site_description' in list(OrRec.keys()) and OrRec['site_description'] != "":
site_description = OrRec['site_description'].replace(",", ";")
if "image_name" in list(OrRec.keys()):
images = OrRec["image_name"].split(":")
if "image_look" in list(OrRec.keys()):
looks = OrRec['image_look'].split(":")
else:
looks = []
if "image_photographer" in list(OrRec.keys()):
photographers = OrRec['image_photographer'].split(":")
else:
photographers = []
for image in images:
if image != "" and image not in imagelist:
imagelist.append(image)
ImageRec = {}
ImageRec['er_image_name'] = image
ImageRec['image_type'] = "outcrop"
ImageRec['image_date'] = sample_date
ImageRec['er_citation_names'] = "This study"
ImageRec['er_location_name'] = location_name
ImageRec['er_site_name'] = MagRec['er_site_name']
k = images.index(image)
if len(looks) > k:
ImageRec['er_image_description'] = "Look direction: " + looks[k]
elif len(looks) >= 1:
ImageRec['er_image_description'] = "Look direction: " + looks[-1]
else:
ImageRec['er_image_description'] = "Look direction: unknown"
if len(photographers) > k:
ImageRec['er_photographer_mail_names'] = photographers[k]
elif len(photographers) >= 1:
ImageRec['er_photographer_mail_names'] = photographers[-1]
else:
ImageRec['er_photographer_mail_names'] = "unknown"
ImageOuts.append(ImageRec)
if site not in sitelist:
sitelist.append(site) # collect unique site names
# patch added by rshaar 7/2016
# if site already exists in er_sites.txt:
# merge the new data columns calculated by orientation_magic with the existing data columns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if site in list(SiteRecs_sorted.keys()):
Prev_MagRec = SiteRecs_sorted[site][-1]
SiteRec = Prev_MagRec
else:
Prev_MagRec = {}
SiteRec = {}
SiteRec["er_citation_names"] = "This study"
SiteRec["er_site_name"] = site
SiteRec["site_definition"] = "s"
if "er_location_name" in SiteRec and SiteRec.get("er_location_name"):
pass
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "":
SiteRec[key] = Prev_MagRec[key]
else:
print('setting location name to ""')
SiteRec[key] = ""
for key in ["lat", "lon", "height"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_lat"]=MagRec["sample_lat"]
# SiteRec["site_lon"]=MagRec["sample_lon"]
# SiteRec["site_height"]=MagRec["sample_height"]
for key in ["class", "lithology", "type"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "Not Specified":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_class"]=MagRec["sample_class"]
# SiteRec["site_lithology"]=MagRec["sample_lithology"]
# SiteRec["site_type"]=MagRec["sample_type"]
if site_description != "": # overwrite only if site_description has something
SiteRec["site_description"] = site_description
SiteOuts.append(SiteRec)
if sample not in samplelist:
samplelist.append(sample)
if MagRec['sample_azimuth'] != "": # assume magnetic compass only
MagRec['magic_method_codes'] = MagRec['magic_method_codes'] + ':SO-MAG'
MagRec['magic_method_codes'] = MagRec['magic_method_codes'].strip(
":")
SampOuts.append(MagRec)
if MagRec['sample_azimuth'] != "" and dec_correction_con != 3:
az = labaz + dec_correction
if az > 360.:
az = az - 360.
CMDRec = {}
for key in list(MagRec.keys()):
CMDRec[key] = MagRec[key] # make a copy of MagRec
CMDRec["sample_azimuth"] = '%7.1f' % (az)
CMDRec["magic_method_codes"] = methcodes + ':SO-CMD-NORTH'
CMDRec["magic_method_codes"] = CMDRec['magic_method_codes'].strip(
':')
CMDRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
if dec_correction_con == 1:
CMDRec['sample_description'] = sample_description + \
':Declination correction calculated from IGRF'
else:
CMDRec['sample_description'] = sample_description + \
':Declination correction supplied by user'
CMDRec["sample_description"] = CMDRec['sample_description'].strip(
':')
SampOuts.append(CMDRec)
if "mag_az_bs" in list(OrRec.keys()) and OrRec["mag_az_bs"] != "" and OrRec["mag_az_bs"] != " ":
SRec = {}
for key in list(MagRec.keys()):
SRec[key] = MagRec[key] # make a copy of MagRec
labaz = float(OrRec["mag_az_bs"])
az = labaz + dec_correction
if az > 360.:
az = az - 360.
SRec["sample_azimuth"] = '%7.1f' % (az)
SRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
SRec["magic_method_codes"] = methcodes + \
':SO-SIGHT-BACK:SO-CMD-NORTH'
SampOuts.append(SRec)
#
# check for suncompass data
#
# there are sun compass data
if "shadow_angle" in list(OrRec.keys()) and OrRec["shadow_angle"] != "":
if hours_from_gmt == "":
#hours_from_gmt=raw_input("Enter hours to subtract from time for GMT: [0] ")
hours_from_gmt = 0
SunRec, sundata = {}, {}
shad_az = float(OrRec["shadow_angle"])
if not OrRec["hhmm"]:
print('If using the column shadow_angle for sun compass data, you must also provide the time for each sample. Sample ',
sample, ' has shadow_angle but is missing the "hh:mm" column.')
else: # calculate sun declination
sundata["date"] = '%i:%s:%s:%s' % (
yy, mmddyy[0], mmddyy[1], OrRec["hhmm"])
sundata["delta_u"] = hours_from_gmt
sundata["lon"] = lon # do not truncate!
sundata["lat"] = lat # do not truncate!
sundata["shadow_angle"] = OrRec["shadow_angle"]
# now you can truncate
sundec = '%7.1f' % (pmag.dosundec(sundata))
for key in list(MagRec.keys()):
SunRec[key] = MagRec[key] # make a copy of MagRec
SunRec["sample_azimuth"] = sundec # do not truncate!
SunRec["sample_declination_correction"] = ''
SunRec["magic_method_codes"] = methcodes + ':SO-SUN'
SunRec["magic_method_codes"] = SunRec['magic_method_codes'].strip(
':')
SampOuts.append(SunRec)
#
# check for differential GPS data
#
# there are diff GPS data
if "prism_angle" in list(OrRec.keys()) and OrRec["prism_angle"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
prism_angle = float(OrRec["prism_angle"])
laser_angle = float(OrRec["laser_angle"])
if OrRec["GPS_baseline"] != "":
baseline = float(OrRec["GPS_baseline"]) # new baseline
gps_dec = baseline + laser_angle + prism_angle - 90.
while gps_dec > 360.:
gps_dec = gps_dec - 360.
while gps_dec < 0:
gps_dec = gps_dec + 360.
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (gps_dec)
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
# there are differential GPS Azimuth data
if "GPS_Az" in list(OrRec.keys()) and OrRec["GPS_Az"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (float(OrRec["GPS_Az"]))
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
if average_bedding and BPs:
fpars = pmag.fisher_mean(BPs)
print('over-writing all bedding with average ')
Samps = []
for rec in SampOuts:
if average_bedding and fpars:
rec['sample_bed_dip_direction'] = '%7.1f' % (fpars['dec'])
rec['sample_bed_dip'] = '%7.1f' % (fpars['inc'] + 90.)
Samps.append(rec)
else:
Samps.append(rec)
for rec in SampRecs:
if rec['er_sample_name'] not in samplelist:  # keep prior samples that were not overwritten
Samps.append(rec)
for rec in SiteRecs:
if rec['er_site_name'] not in sitelist:  # keep prior sites that were not overwritten
SiteOuts.append(rec)
for rec in ImageRecs:
if rec['er_image_name'] not in imagelist:  # keep prior images that were not overwritten
ImageOuts.append(rec)
print('saving data...')
SampsOut, keys = pmag.fillkeys(Samps)
Sites, keys = pmag.fillkeys(SiteOuts)
if data_model == 3:
SampsOut3 = []
Sites3 = []
for samp_rec in SampsOut:
new_rec = map_magic.mapping(
samp_rec, map_magic.samp_magic2_2_magic3_map)
SampsOut3.append(new_rec)
for site_rec in Sites:
new_rec = map_magic.mapping(
site_rec, map_magic.site_magic2_2_magic3_map)
Sites3.append(new_rec)
wrote_samps = pmag.magic_write(samp_file, SampsOut3, "samples")
wrote_sites = pmag.magic_write(site_file, Sites3, "sites")
else:
wrote_samps = pmag.magic_write(samp_file, SampsOut, "er_samples")
wrote_sites = pmag.magic_write(site_file, Sites, "er_sites")
if wrote_samps:
print("Data saved in ", samp_file, ' and ', site_file)
else:
print("No data found")
if len(ImageOuts) > 0:
# need to do conversion here 3.0. --> 2.5
Images, keys = pmag.fillkeys(ImageOuts)
image_type = "er_images"
if data_model == 3:
# convert 2.5 --> 3.0.
image_type = "images"
Images2 = Images
Images = []
for image_rec in Images2:
Images.append(map_magic.mapping(
image_rec, map_magic.image_magic2_2_magic3_map))
pmag.magic_write(image_file, Images, image_type)
print("Image info saved in ", image_file)
return True, None
def azdip_magic(orient_file='orient.txt', samp_file="samples.txt", samp_con="1", Z=1, method_codes='FS-FD', location_name='unknown', append=False, output_dir='.', input_dir='.', data_model=3):
"""
takes space delimited AzDip file and converts to MagIC formatted tables
Parameters:
orient_file : name of azdip formatted input file
samp_file : name of samples.txt formatted output file
samp_con : integer of sample orientation convention
- [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default]
- [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
- [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
- [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
- [5] site name same as sample
- [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
- [7-Z] [XXXX]YYY: XXXX is the site designation with Z characters; sample name is XXXXYYY
method_codes : colon delimited string with the following as desired
- FS-FD field sampling done with a drill
- FS-H field sampling done with hand samples
- FS-LOC-GPS field location done with GPS
- FS-LOC-MAP field location done with map
- SO-POM a Pomeroy orientation device was used
- SO-ASC an ASC orientation device was used
- SO-MAG orientation with magnetic compass
location_name : location of samples
append : boolean. if True, append to the output file
output_dir : path to output file directory
input_dir : path to input file directory
data_model : MagIC data model.
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
i.e., field_dip is degrees from horizontal of the drill direction
Magnetic declination convention:
Az is already corrected in file
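Examples:
Worked example of the convention above (made-up values): Az=115, Dip=62
gives lab arrow azimuth = 115 and lab arrow dip = 90 - 62 = 28.
A minimal call sketch (the input file name is hypothetical):
>>> ipmag.azdip_magic(orient_file='azdip.txt', samp_file='samples.txt',
...     samp_con='1', method_codes='FS-FD:SO-POM', location_name='My Location')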
"""
#
# initialize variables
#
data_model = int(data_model)
if (data_model != 3) and (samp_file == "samples.txt"):
samp_file = "er_samples.txt"
if (data_model == 3) and (samp_file == "er_samples.txt"):
samp_file = "samples.txt"
DEBUG = 0
version_num = pmag.get_version()
or_con, corr = "3", "1"
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
participantlist = ""
sites = [] # list of site names
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampRecs, SiteRecs, ImageRecs, imagelist = [], [], [], []
newbaseline, newbeddir, newbeddip = "", "", ""
delta_u = "0"
sclass, lithology, sample_type = "", "", ""
newclass, newlith, newtype = '', '', ''
user = ""
DecCorr = 0.
samp_file = pmag.resolve_file_name(samp_file, output_dir)
orient_file = pmag.resolve_file_name(orient_file, input_dir)
input_dir = os.path.split(orient_file)[0]
output_dir = os.path.split(samp_file)[0]
#
#
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
print("sample data to be appended to: ", samp_file)
except:
print('problem with existing samp file: ',
samp_file, ' will create new')
#
# read in file to convert
#
azfile = open(orient_file, 'r')
AzDipDat = azfile.readlines()
azfile.close()
if not AzDipDat:
return False, 'No data in orientation file, please try again'
SampOut, samplist = [], []
for line in AzDipDat:
orec = line.split()
if len(orec) > 4:  # require all five columns: Samp Az Dip Strike Dip
labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), or_con)
bed_dip = float(orec[4])
if bed_dip != 0:
# convert strike to dip direction, assuming dip to the right of strike
bed_dip_dir = float(orec[3]) - 90.
else:
bed_dip_dir = float(orec[3])  # no dip; take the value as given
MagRec = {}
MagRec["er_location_name"] = location_name
MagRec["er_citation_names"] = "This study"
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = orec[0]
MagRec["sample_bed_dip"] = '%7.1f' % (bed_dip)
MagRec["sample_bed_dip_direction"] = '%7.1f' % (bed_dip_dir)
MagRec["sample_dip"] = '%7.1f' % (labdip)
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
methods = method_codes.replace(" ", "").split(":")
OR = 0
for method in methods:
method_type = method.split("-")
if "SO" in method_type:
OR = 1
if OR == 0:
method_codes = method_codes + ":SO-NO"
MagRec["magic_method_codes"] = method_codes
# parse out the site name
site = pmag.parse_site(orec[0], samp_con, Z)
MagRec["er_site_name"] = site
MagRec['magic_software_packages'] = version_num
SampOut.append(MagRec)
if MagRec['er_sample_name'] not in samplist:
samplist.append(MagRec['er_sample_name'])
for samp in SampRecs:
if samp['er_sample_name'] not in samplist:  # keep prior samples that were not overwritten
SampOut.append(samp)
Samps, keys = pmag.fillkeys(SampOut)
if data_model == 2:
# write to file
pmag.magic_write(samp_file, Samps, "er_samples")
else:
# translate sample records to MagIC 3
Samps3 = []
for samp in Samps:
Samps3.append(map_magic.mapping(
samp, map_magic.samp_magic2_2_magic3_map))
# write to file
pmag.magic_write(samp_file, Samps3, "samples")
print("Data saved in ", samp_file)
return True, None
def read_core_csv_file(sum_file):
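"""
Read an IODP/ODP-style core summary CSV file.
Returns (core_depth_key, core_label_key, Cores), where the first two are
the header column names found for core depth and core label, and Cores is
a list of dictionaries, one per core.
Returns (False, False, []) if the file is missing or has no core records.
"""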
Cores = []
core_depth_key = "Top depth cored CSF (m)"
core_label_key = ""  # filled in below once the header is parsed
if os.path.isfile(sum_file):
fin = open(sum_file, 'r')
indat = fin.readlines()
if "Core Summary" in indat[0]:
headline = 1
else:
headline = 0
keys = indat[headline].replace('\n', '').split(',')
if "Core Top (m)" in keys:
core_depth_key = "Core Top (m)"
if "Top depth cored CSF (m)" in keys:
core_depth_key = "Top depth cored CSF (m)"
if 'Top depth cored (m)' in keys:
core_depth_key = 'Top depth cored (m)'
if "Core Label" in keys:
core_label_key = "Core Label"
if "Core label" in keys:
core_label_key = "Core label"
if "Label ID" in keys:
core_label_key = "Label ID"
for line in indat[headline + 1:]:
if 'TOTALS' not in line:
CoreRec = {}
for k in range(len(keys)):
CoreRec[keys[k]] = line.split(',')[k]
Cores.append(CoreRec)
fin.close()
if len(Cores) == 0:
print('no Core depth information available: import core summary file')
return False, False, []
else:
return core_depth_key, core_label_key, Cores
else:
return False, False, []
class Site(object):
''' This Site class is for use within Jupyter/IPython notebooks. It reads in
MagIC-formatted data (text files) and compiles fits, separates them by type,
and plots equal-area projections inline. If means were not taken and output
within the Demag GUI, it should automatically compute the Fisher mean for each
fit type. Code is still a work in progress, but it is currently useful for
succinctly computing/displaying data in notebook format.
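A minimal usage sketch (the site name and data directory are hypothetical):
site = Site('SI01', 'data/SI01')
site.eq_plot_everything()
mean = site.get_fisher_mean('A')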
'''
def __init__(self, site_name, data_path, data_format="MagIC"):
'''
site_name: the name of the site
data_path: put all MagIC data (text files) in a single directory and
provide its path
data_format: MagIC-formatted data is necessary in this code, but future
compatibility with other formats possible (built-in CIT_magic conversion?)
*other keyword arguments not necessary*
'''
# os, plt, pd and re are imported at module level
dir_name = os.path.relpath(data_path)
self.all_file_names = os.listdir(dir_name)
# default the paths to None so the checks below work even if a file type is missing
self.er_sites_path = None
self.mean_path = None
self.data_path = None
self.file_names = []
for file_name in self.all_file_names:
if re.match(r'.*txt', file_name) is not None:
self.file_names.append(file_name)
for i in self.file_names:
path_to_open = os.path.join(dir_name, i)
text_file = open(path_to_open, 'r')
first_line = text_file.readlines()[0]
text_file.close()
if 'er_sites' in first_line:
self.er_sites_path = path_to_open
elif 'pmag_sites' in first_line:
self.mean_path = path_to_open
elif 'pmag_specimens' in first_line:
self.data_path = path_to_open
self.name = site_name
# self.data_path = data_path # default name of 'pmag_specimens'
self.data_format = data_format # default name of 'pmag_sites'
# self.mean_path = mean_path # default name of 'er_sites'
#self.er_sites_path = er_sites_path
if self.data_format == "MagIC":
self.fits = pd.read_csv(self.data_path, sep="\t", skiprows=1)
if self.mean_path is not None:
self.means = pd.read_csv(self.mean_path, sep="\t", skiprows=1)
if self.er_sites_path is not None:
self.location = pd.read_csv(
self.er_sites_path, sep="\t", skiprows=1)
else:
raise Exception("Please convert data to MagIC format")
self.parse_all_fits()
self.lat = float(self.location.site_lat)
self.lon = float(self.location.site_lon)
# the following exception won't be necessary if parse_all_fits is
# working properly
if self.mean_path is None:
raise Exception(
'Make fisher means within the demag GUI - functionality for handling this is in progress')
def parse_fits(self, fit_name):
'''USE PARSE_ALL_FITS unless otherwise necessary
Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order
to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED
'''
fits = self.fits.loc[self.fits.specimen_comp_name ==
fit_name].loc[self.fits.specimen_tilt_correction == 0]
fits.reset_index(inplace=True)
means = self.means.loc[self.means.site_comp_name ==
fit_name].loc[self.means.site_tilt_correction == 0]
means.reset_index(inplace=True)
mean_name = str(fit_name) + "_mean"
setattr(self, fit_name, fits)
setattr(self, mean_name, means)
def parse_all_fits(self):
# This is run upon initialization of the Site class
self.fit_types = self.fits.specimen_comp_name.unique().tolist()
for fit_type in self.fit_types:
self.parse_fits(fit_type)
print("Data separated by ", self.fit_types,
"fits and can be accessed by <site_name>.<fit_name>")
def get_fit_names(self):
return self.fit_types
def get_fisher_mean(self, fit_name):
mean_name = str(fit_name) + "_mean"
if self.mean_path is not None:
self.fisher_dict = {'dec': float(getattr(self, mean_name).site_dec),
'inc': float(getattr(self, mean_name).site_inc),
'alpha95': float(getattr(self, mean_name).site_alpha95),
'k': float(getattr(self, mean_name).site_k),
'r': float(getattr(self, mean_name).site_r),
'n': float(getattr(self, mean_name).site_n)}
return self.fisher_dict
else:
self.directions = []
for fit_num in range(0, len(getattr(self, fit_name))):
self.directions.append([list(getattr(self, fit_name).specimen_dec)[fit_num],
list(getattr(self, fit_name).specimen_inc)[fit_num], 1.])
#fish_mean = pmag.fisher_mean(directions)
self.fisher_dict = pmag.fisher_mean(self.directions)
# setattr(self,fisher_dict,fish_mean)
#self.fisher_dict = getattr(self,mean_name)
return self.fisher_dict
def get_lat(self):
return self.lat
def get_lon(self):
return self.lon
def get_site_coor(self):
return [self.lat, self.lon]
def get_name(self):
return self.name
def eq_plot_everything(self, title=None, clrs=None, size=(5, 5), **kwargs):
fignum = 0
plt.figure(num=fignum, figsize=size, dpi=200)
plot_net(fignum)
clr_idx = 0
for fits in self.fit_types:
mean_code = str(fits) + "_mean"
print(mean_code)
if clrs is not None:
plot_di(getattr(self, fits).specimen_dec,
getattr(self, fits).specimen_inc, color=clrs[clr_idx], label=fits + ' directions')
print(float(getattr(self, mean_code).site_dec),
float(getattr(self, mean_code).site_inc))
plot_di_mean(float(getattr(self, mean_code).site_dec),
float(getattr(self, mean_code).site_inc),
float(getattr(self, mean_code).site_alpha95),
color=clrs[clr_idx], marker='s', label=fits + ' mean')
clr_idx += 1
else:
self.random_color = np.random.rand(3)
plot_di(getattr(self, fits).specimen_dec,
getattr(self, fits).specimen_inc, color=self.random_color, label=fits + ' directions')
print(float(getattr(self, mean_code).site_dec),
float(getattr(self, mean_code).site_inc))
plot_di_mean(float(getattr(self, mean_code).site_dec),
float(getattr(self, mean_code).site_inc),
float(getattr(self, mean_code).site_alpha95),
color=self.random_color, marker='s', label=fits + ' mean')
plt.legend(**kwargs)
if title is not None:
plt.title(title)
plt.show()
def eq_plot(self, fit_name, title=None, clr=None, size=(5, 5), **kwargs):
fignum = 0
plt.figure(num=fignum, figsize=size, dpi=200)
plot_net(fignum)
mean_code = str(fit_name) + "_mean"
if clr is not None:
self.random_color = clr
else:
self.random_color = np.random.rand(3)
plot_di(getattr(self, fit_name).specimen_dec,
getattr(self, fit_name).specimen_inc,
color=self.random_color, label=fit_name + ' directions')
plot_di_mean(float(getattr(self, mean_code).site_dec),
float(getattr(self, mean_code).site_inc),
float(getattr(self, mean_code).site_alpha95), marker='s', label=fit_name + ' mean')
plt.legend(**kwargs)
if title is not None:
plt.title(title)
plt.show()
# def eq_plot_sidebyside(self, fit_name):
# fig,ax = plt.subplots(1,2)
# ax[0].plot(self.eq_plot_everything())
# ax[1].plot(self.eq_plot(fit_name))
# plt.show()
def get_site_data(self, description, fit_name, demag_type='Thermal', cong_test_result=None):
self.site_data = pd.Series({'site_type': str(description),
'site_lat': self.get_lat(),
'site_lon': self.get_lon(),
'demag_type': demag_type,
'dec': float(self.get_fisher_mean(fit_name)['dec']),
'inc': float(self.get_fisher_mean(fit_name)['inc']),
'a_95': float(self.get_fisher_mean(fit_name)['alpha95']),
'n': int(self.get_fisher_mean(fit_name)['n']),
'kappa': float(self.get_fisher_mean(fit_name)['k']),
'R': float(self.get_fisher_mean(fit_name)['r']),
'cong_test_result': cong_test_result},
name=str(self.name))
return self.site_data
def dayplot_magic(path_to_file='.', hyst_file="specimens.txt", rem_file='',
save=True, save_folder='.', fmt='svg', data_model=3,
interactive=False, contribution=None, image_records=False):
"""
Makes 'day plots' (Day et al. 1977) and squareness/coercivity plots
(Neel, 1955; plots after Tauxe et al., 2002); plots 'linear mixing'
curve from Dunlop and Carter-Stiglitz (2006).
Parameters:
path_to_file : path to directory that contains files (default is current directory, '.')
the default input file is 'specimens.txt' (data_model=3)
if data_model=2, these are the defaults instead:
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
rem_file : remanence file (default is 'rmag_remanence.txt')
save : boolean argument to save plots (default is True)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
interactive : if True, draw the figures and prompt before saving (default is False)
contribution : optional existing cb.Contribution to use instead of reading files (default is None)
image_records (boolean): generate and return a record for each image in a list of dicts which can be ingested by pmag.magic_write, default is False
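Examples:
A minimal usage sketch (assumes a MagIC 3 'specimens.txt' with hysteresis
columns in the current directory):
>>> ipmag.dayplot_magic(path_to_file='.', hyst_file='specimens.txt',
...     save=True, save_folder='.', fmt='svg')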
"""
hyst_path = os.path.join(path_to_file, hyst_file)
if data_model == 2 and rem_file != '':
rem_path = os.path.join(path_to_file, rem_file)
# hyst_file,rem_file="rmag_hysteresis.txt","rmag_remanence.txt"
dir_path = path_to_file
verbose = pmagplotlib.verbose
# initialize some variables
# define figure numbers for Day,S-Bc,S-Bcr
DSC = {}
DSC['day'], DSC['S-Bc'], DSC['S-Bcr'], DSC['bcr1-bcr2'] = 1, 2, 3, 4
hyst_data, file_type = pmag.magic_read(hyst_path)
rem_data = []
if data_model == 2 and rem_file != "":
rem_data, file_type = pmag.magic_read(rem_path)
S, BcrBc, Bcr2, Bc, hsids, Bcr = [], [], [], [], [], []
Ms, Bcr1, Bcr1Bc, S1 = [], [], [], []
locations = ''
image_recs = []
if data_model == 2:
for rec in hyst_data:
if 'er_location_name' in list(rec.keys()) and rec['er_location_name'] not in locations:
locations = locations + rec['er_location_name'] + '_'
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
S.append(float(rec['hysteresis_mr_moment'])/float(
rec['hysteresis_ms_moment']))
Bcr.append(float(rec['hysteresis_bcr']))
Bc.append(float(rec['hysteresis_bc']))
BcrBc.append(Bcr[-1]/Bc[-1])
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
hsids.append(rec['er_specimen_name'])
if len(rem_data) > 0:
for rec in rem_data:
if rec['remanence_bcr'] != "" and float(rec['remanence_bcr']) > 0:
try:
ind = hsids.index(rec['er_specimen_name'])
Bcr1.append(float(rec['remanence_bcr']))
Bcr1Bc.append(Bcr1[-1]/Bc[ind])
S1.append(S[ind])
Bcr2.append(Bcr[ind])
except ValueError:
if verbose:
print('hysteresis data for ',
rec['er_specimen_name'], ' not found')
else:
fnames = {'specimens': hyst_file}
if contribution:
con = contribution
else:
con = cb.Contribution(dir_path, read_tables=['specimens'],
custom_filenames=fnames)
if 'specimens' not in con.tables:
print('-E- No specimen file found in {}'.format(os.path.realpath(dir_path)))
if image_records:
return False, [], []
return False, []
spec_container = con.tables['specimens']
spec_df = spec_container.df
# get as much data as possible for naming plots
#if pmagplotlib.isServer:
con.propagate_location_to_specimens()
loc_list = []
if 'location' in spec_df.columns:
loc_list = spec_df['location'].unique()
do_rem = bool('rem_bcr' in spec_df.columns)
for ind, row in spec_df.iterrows():
if row['hyst_bcr'] and row['hyst_mr_moment']:
S.append(float(row['hyst_mr_moment'])/float(row['hyst_ms_moment']))
Bcr.append(float(row['hyst_bcr']))
Bc.append(float(row['hyst_bc']))
BcrBc.append(Bcr[-1]/Bc[-1])
hsids.append(row['specimen'])
if do_rem and Bc:
if row['rem_bcr'] and float(row['rem_bcr']) > 0:
try:
Bcr1.append(float(row['rem_bcr']))
Bcr1Bc.append(Bcr1[-1]/Bc[-1])
S1.append(S[-1])
Bcr2.append(Bcr[-1])
except ValueError:
if verbose:
print('hysteresis data for ',
row['specimen'], end=' ')
print(' not found')
#
# now plot the day and S-Bc, S-Bcr plots
#
fnames = {'day': os.path.join(save_folder, "_".join(loc_list) + '_Day.' + fmt),
'S-Bcr': os.path.join(save_folder, "_".join(loc_list) + '_S-Bcr.' + fmt),
'S-Bc': os.path.join(save_folder, "_".join(loc_list) + '_S-Bc.' + fmt)}
if len(Bcr1) > 0:
plt.figure(num=DSC['day'], figsize=(5, 5))
#plt.figure(num=DSC['S-Bc'], figsize=(5, 5))
plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
plt.figure(num=DSC['bcr1-bcr2'], figsize=(5, 5))
pmagplotlib.plot_day(DSC['day'], Bcr1Bc, S1, 'ro')
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr1, S1, 'ro')
#pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
pmagplotlib.plot_bcr(DSC['bcr1-bcr2'], Bcr1, Bcr2)
fnames.pop('S-Bc')
fnames['bcr1-bcr2'] = os.path.join(save_folder, 'bcr1-bcr2.png')
DSC.pop('S-Bc')
if pmagplotlib.isServer:
for key in list(DSC.keys()):
fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
if image_records:
for ftype, fname in fnames.items():
image_rec = {'file': fname, 'type': ftype, 'title': " ".join(set(loc_list)) + " " + ftype,
'timestamp': date.today().isoformat(), 'software_packages': version.version}
image_recs.append(image_rec)
if save:
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
if image_records:
return True, fnames.values(), image_recs
return True, fnames.values()
if interactive:
pmagplotlib.draw_figs(DSC)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
if image_records:
return True, fnames.values(), image_recs
return True, fnames.values()
else:
plt.figure(num=DSC['day'], figsize=(5, 5))
plt.figure(num=DSC['S-Bc'], figsize=(5, 5))
plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
#plt.figure(num=DSC['bcr1-bcr2'], figsize=(5, 5))
del DSC['bcr1-bcr2']
# do other plots instead
pmagplotlib.plot_day(DSC['day'], BcrBc, S, 'bs')
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr, S, 'bs')
pmagplotlib.plot_s_bc(DSC['S-Bc'], Bc, S, 'bs')
if pmagplotlib.isServer:
for key in list(DSC.keys()):
fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
if image_records:
for ftype, fname in fnames.items():
image_rec = {'file': fname, 'type': ftype, 'title': " ".join(set(loc_list)) + " " + ftype,
'timestamp': date.today().isoformat(), 'software_packages': version.version}
image_recs.append(image_rec)
if save:
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
if image_records:
return True, fnames.values(), image_recs
return True, fnames.values()
elif interactive:
pmagplotlib.draw_figs(DSC)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
if image_records:
return True, fnames.values(), image_recs
return True, fnames.values()
if image_records:
return True, [], []
return True, []
def smooth(x, window_len, window='bartlett'):
"""
Smooth the data using a sliding window with requested size - meant to be
used with the ipmag function curie().
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by padding the beginning and the end of the signal
with the average of the first (last) ten values of the signal, to avoid jumps
at the beginning/end. Output is an array of the smoothed signal.
Required Parameters
----------
x : the input signal, equally spaced!
window_len : the dimension of the smoothing window
Optional Parameters (defaults are used if not specified)
----------
window : type of window from numpy library ['flat','hanning','hamming','bartlett','blackman']
(default is Bartlett)
-flat window will produce a moving average smoothing.
-Bartlett window is very similar to triangular window,
but always ends with zeros at points 1 and n.
-hanning,hamming,blackman are used for smoothing the Fourier transform
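Examples:
A minimal sketch on synthetic data; the window must be shorter than the signal:
>>> import numpy as np
>>> x = np.sin(np.linspace(0, 10, 100))
>>> y = ipmag.smooth(x, 5)
>>> len(y) == len(x)
True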
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
# numpy available windows
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError(
"Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
# pad the beginning and the end of the signal with an average value to
# avoid edge effects
start = [np.average(x[0:10])] * window_len
end = [np.average(x[-10:])] * window_len
s = start + list(x) + end
# s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
if window == 'flat':  # moving average
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)  # look up the numpy window function by name
y = np.convolve(w/w.sum(), s, mode='same')
return np.array(y[window_len:-window_len])
def curie(path_to_file='.', file_name='', magic=False,
window_length=3, save=False, save_folder='.', fmt='svg', t_begin="", t_end=""):
"""
Plots and interprets curie temperature data.
The 1st derivative is calculated from the smoothed M-T curve (convolution
with a triangular window of width = window_length degrees).
The 2nd derivative is calculated from the smoothed 1st derivative curve
(using the same sliding window width).
The estimated Curie temperature is at the maximum of the 2nd derivative.
Temperature steps should be in multiples of 1.0 degrees.
Parameters:
file_name : name of file to be opened
path_to_file : path to directory that contains file (default is current directory, '.')
window_length : dimension of smoothing window (input to smooth() function)
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is svg)
t_begin: start of truncated window for search (default is beginning of data)
t_end: end of truncated window for search (default is end of data)
magic : True if MagIC formatted measurements.txt file
Returns:
A plot is shown and saved if save=True.
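Examples:
A minimal call sketch (the file name is hypothetical; the file holds two
columns, temperature and magnetization):
>>> ipmag.curie(path_to_file='.', file_name='curie_example.dat',
...     window_length=5, save=True, save_folder='.', fmt='svg')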
"""
plot = 0
window_len = window_length
# read data from file
complete_path = os.path.join(path_to_file, file_name)
if magic:
data_df = pd.read_csv(complete_path, sep='\t', header=1)
T = data_df['meas_temp'].values-273
magn_key = cb.get_intensity_col(data_df)
M = data_df[magn_key].values
else:
Data = np.loadtxt(complete_path, dtype=float)
T = Data.transpose()[0]
M = Data.transpose()[1]
T = list(T)
M = list(M)
# truncate the data if t_begin/t_end were given
if t_begin != "":
while T[0] < t_begin:
M.pop(0)
T.pop(0)
if t_end != "":
while T[-1] > t_end:
M.pop(-1)
T.pop(-1)
# prepare the signal:
# from M(T) array with unequal deltaT
# to M(T) array with deltaT=(1 degree).
# if deltaT is larger, then points are added using a linear fit between
# consecutive data points.
# exit if deltaT is not integer
i = 0
while i < (len(T) - 1):
if (T[i + 1] - T[i]) % 1 > 0.001:
print("delta T should be integer, this program will not work!")
print("temperature range:", T[i], T[i + 1])
sys.exit()
if (T[i + 1] - T[i]) == 0.:
M[i] = np.average([M[i], M[i + 1]])
M.pop(i + 1)
T.pop(i + 1)
elif (T[i + 1] - T[i]) < 0.:
M.pop(i + 1)
T.pop(i + 1)
print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
elif (T[i + 1] - T[i]) > 1.:
slope, b = np.polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
for j in range(int(T[i + 1]) - int(T[i]) - 1):
M.insert(i + 1, slope * (T[i] + 1.) + b)
T.insert(i + 1, (T[i] + 1.))
i = i + 1
i = i + 1
# calculate the smoothed signal
M = np.array(M, 'f')
T = np.array(T, 'f')
M_smooth = []
M_smooth = smooth(M, window_len)
# plot the original data and the smooth data
PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
plt.figure(num=PLT['M_T'], figsize=(5, 5))
string = 'M-T (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['M_T'], T, M_smooth, sym='-')
pmagplotlib.plot_xy(PLT['M_T'], T, M, sym='--',
xlab='Temperature C', ylab='Magnetization', title=string)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(Dy/Dx)
T_d1 = T[1:]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, window_len)
# plot the first derivative
plt.figure(num=PLT['der1'], figsize=(5, 5))
string = '1st derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1_smooth,
sym='-', xlab='Temperature C', title=string)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1, sym='b--')
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d2.append(Dy/Dx)
T_d2 = T[2:]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, window_len)
# plot the second derivative
plt.figure(num=PLT['der2'], figsize=(5, 5))
string = '2nd derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der2'], T_d2, d2, sym='-',
xlab='Temperature C', title=string)
d2 = list(d2)
print('second derivative maximum is at T=%i' %
int(T_d2[d2.index(max(d2))]))
# calculate Curie temperature for different width of sliding windows
curie, curie_1 = [], []
wn = list(range(5, 50, 1))
for win in wn:
# calculate the smoothed signal
M_smooth = []
M_smooth = smooth(M, win)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(Dy/Dx)
T_d1 = T[1:]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, win)
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d2.append(Dy/Dx)
T_d2 = T[2:]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, win)
d2 = list(d2)
d2_smooth = list(d2_smooth)
curie.append(T_d2[d2.index(max(d2))])
curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
# plot Curie temp for different sliding window length
plt.figure(num=PLT['Curie'], figsize=(5, 5))
pmagplotlib.plot_xy(PLT['Curie'], wn, curie, sym='.',
xlab='Sliding Window Width (degrees)', ylab='Curie Temp', title='Curie Statistics')
files = {}
for key in list(PLT.keys()):
files[key] = str(key) + '.' + fmt
if save:
for key in list(PLT.keys()):
try:
plt.figure(num=PLT[key])
plt.savefig(save_folder + '/' + files[key].replace('/', '-'))
except Exception:
print('could not save: ', PLT[key], files[key])
print("output file format not supported ")
plt.show()
[docs]
def chi_magic2(path_to_file='.', file_name='magic_measurements.txt',
save=False, save_folder='.', fmt='svg'):
"""
Generates plots that compare susceptibility to temperature at different
frequencies.
Parameters:
(defaults are used if not specified)
path_to_file : path to directory that contains file (default is current directory, '.')
file_name : name of file to be opened (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
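Examples:
Hypothetical usage (assumes a MagIC 2.x formatted measurements file with
susceptibility (LP-X) experiments in the current directory):
>>> ipmag.chi_magic2(path_to_file='.', file_name='magic_measurements.txt')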
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
complete_path = os.path.join(path_to_file, file_name)
Tind, cont = 0, ""
EXP = ""
#
meas_data, file_type = pmag.magic_read(complete_path)
#
# get list of unique experiment names
#
# initialize some variables (a continuation flag, plot initialization
# flags and the experiment counter
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except ValueError:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k + 1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and
# field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
X.append(float(rec['measurement_x']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names) + 1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
plt.figure(num=plotnum, figsize=(5, 5)) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
pmagplotlib.show_fig(plotnum)
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
XTB = []  # initialize list of chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
plt.figure(num=plotnum, figsize=(5, 5)) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
pmagplotlib.show_fig(plotnum)
plotnum += 1 # increment plot number
if save:
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e + '_' + key + '.' + fmt
PLTS[key] = p
for key in list(PLTS.keys()):
try:
plt.figure(num=PLTS[key])
plt.savefig(save_folder + '/' +
files[key].replace('/', '-'))
except Exception:
print('could not save: ', PLTS[key], files[key])
print("output file format not supported ")
[docs]
def demag_magic(path_to_file='.', file_name='magic_measurements.txt',
save=False, save_folder='.', fmt='svg', plot_by='loc',
treat=None, XLP="", individual=None, average_measurements=False,
single_plot=False):
'''
Takes demagnetization data (from magic_measurements file) and outputs
intensity plots (with optional save).
Parameters:
path_to_file : path to directory that contains files (default is current directory, '.')
file_name : name of measurements file (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plot_by : specifies what sampling level you wish to plot the data at
('loc' -- plots all samples of the same location on the same plot
'exp' -- plots all samples of the same expedition on the same plot
'site' -- plots all samples of the same site on the same plot
'sample' -- plots all measurements of the same sample on the same plot
'spc' -- plots each specimen individually)
treat : treatment step
'T' = thermal demagnetization
'AF' = alternating field demagnetization
'M' = microwave radiation demagnetization
(default is 'AF')
XLP : reject data whose method codes contain this lab protocol (default is '', i.e. no filtering)
individual : This function outputs all plots by default. If plotting by sample
or specimen, you may not wish to see (or wait for) every single plot. You can
therefore specify a particular plot by setting this keyword argument to
a string of the site/sample/specimen name.
average_measurements : Option to average demagnetization measurements by
the grouping specified with the 'plot_by' keyword argument (default is False)
single_plot : Option to output a single plot with all measurements (default is False)
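Examples:
Hypothetical usage (assumes a MagIC 2.x measurements file; plots AF
demagnetization data site by site):
>>> ipmag.demag_magic(path_to_file='.', file_name='magic_measurements.txt',
... plot_by='site', treat='AF')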
'''
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
in_file, plot_key, LT = os.path.join(
path_to_file, file_name), 'er_location_name', "LT-AF-Z"
norm = 1
units, dmag_key = 'T', 'treatment_ac_field'
plot_num = 0
if plot_by == 'loc':
plot_key = 'er_location_name'
elif plot_by == 'exp':
plot_key = 'er_expedition_name'
elif plot_by == 'site':
plot_key = 'er_site_name'
elif plot_by == 'sample' or plot_by == 'sam':
plot_key = 'er_sample_name'
elif plot_by == 'spc':
plot_key = 'er_specimen_name'
if treat is not None:
LT = 'LT-' + treat + '-Z' # get lab treatment for plotting
if LT == 'LT-T-Z':
units, dmag_key = 'K', 'treatment_temp'
elif LT == 'LT-AF-Z':
units, dmag_key = 'T', 'treatment_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treatment_mw_energy'
else:
units = 'U'
else:
LT = 'LT-AF-Z'
plot_dict = {}
data, file_type = pmag.magic_read(in_file)
sids = pmag.get_specs(data)
plt.figure(num=FIG['demag'], figsize=(5, 5))
print(len(data), ' records read from ', in_file)
#
#
# find desired intensity data
#
# get plotlist
#
plotlist, intlist = [], ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
IntMeths = []
FixData = []
for rec in data:
meths = []
methcodes = rec['magic_method_codes'].split(':')
for meth in methcodes:
meths.append(meth.strip())
for key in list(rec.keys()):
if key in intlist and rec[key] != "":
if key not in IntMeths:
IntMeths.append(key)
if rec[plot_key] not in plotlist and LT in meths:
plotlist.append(rec[plot_key])
if 'measurement_flag' not in list(rec.keys()):
rec['measurement_flag'] = 'g'
FixData.append(rec)
plotlist.sort()
if len(IntMeths) == 0:
print('No intensity information found')
return
data = FixData
# plot first intensity method found - normalized to initial value anyway -
# doesn't matter which used
int_key = IntMeths[0]
# print plotlist
if individual is not None:
if type(individual) == list or type(individual) == tuple:
plotlist = list(individual)
else:
plotlist = []
plotlist.append(individual)
for plot in plotlist:
print(plot, 'plotting by: ', plot_key)
# fish out all the data for this type of plot
PLTblock = pmag.get_dictitem(data, plot_key, plot, 'T')
# fish out all the dmag for this experiment type
PLTblock = pmag.get_dictitem(PLTblock, 'magic_method_codes', LT, 'has')
# get all with this intensity key non-blank
PLTblock = pmag.get_dictitem(PLTblock, int_key, '', 'F')
if XLP != "":
# reject data with XLP in method_code
PLTblock = pmag.get_dictitem(
PLTblock, 'magic_method_codes', XLP, 'not')
# for plot in plotlist:
if len(PLTblock) > 2:
title = PLTblock[0][plot_key]
spcs = []
for rec in PLTblock:
if rec['er_specimen_name'] not in spcs:
spcs.append(rec['er_specimen_name'])
if average_measurements is False:
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
INTblock = []
for rec in SPCblock:
INTblock.append([float(rec[dmag_key]), 0, 0, float(
rec[int_key]), 1, rec['measurement_flag']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(
FIG['demag'], INTblock, title, 0, units, norm)
else:
AVGblock = {}
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
for rec in SPCblock:
if rec['measurement_flag'] == 'g':
if float(rec[dmag_key]) not in list(AVGblock.keys()):
AVGblock[float(rec[dmag_key])] = [
float(rec[int_key])]
else:
AVGblock[float(rec[dmag_key])].append(
float(rec[int_key]))
INTblock = []
for step in sorted(AVGblock.keys()):
INTblock.append([float(step), 0, 0,
float(sum(AVGblock[step]))/float(len(AVGblock[step])), 1, 'g'])
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save:
plt.savefig(os.path.join(save_folder, title) + '.' + fmt)
if single_plot is False:
plt.show()
if single_plot is True:
plt.show()
[docs]
def iplot_hys(fignum, B, M, s):
"""
function to plot hysteresis data
This function has been adapted from pmagplotlib.iplot_hys for specific use
within a Jupyter notebook.
Parameters:
fignum : reference number for matplotlib figure being created
B : list of B (flux density) values of hysteresis experiment
M : list of M (magnetization) values of hysteresis experiment
s : specimen name
Returns:
hpars : dictionary of hysteresis parameters
deltaM : average difference between the upper and lower loop branches (normalized by Ms)
Bdm : field values at which deltaM was evaluated
B : list of field values
Mnorm : magnetization values normalized by saturation magnetization
MadjN : slope-corrected magnetization values normalized by saturation magnetization
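Examples:
Illustrative sketch (B and M are assumed to be lists from a hysteresis
experiment, starting at the maximum field):
>>> hpars, deltaM, Bdm, B, Mnorm, MadjN = ipmag.iplot_hys(1, B, M, 'spec1')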
"""
if fignum != 0:
plt.figure(num=fignum)
plt.clf()
hpars = {}
# close up loop
Npts = len(M)
B70 = 0.7 * B[0] # 70 percent of maximum field
for b in B:
if b < B70:
break
Nint = B.index(b) - 1
if Nint > 30:
Nint = 30
if Nint < 10:
Nint = 10
Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], []
Mazero = ""
m_init = 0.5 * (M[0] + M[1])
m_fin = 0.5 * (M[-1] + M[-2])
diff = m_fin - m_init
Bmin = 0.
for k in range(Npts):
frac = float(k)/float(Npts - 1)
Mfix.append((M[k] - diff * frac))
if Bzero == "" and B[k] < 0:
Bzero = k
if B[k] < Bmin:
Bmin = B[k]
kmin = k
# adjust slope with first 30 data points (throwing out first 3)
Bslop = B[2:Nint + 2]
Mslop = Mfix[2:Nint + 2]
polyU = polyfit(Bslop, Mslop, 1) # best fit line to high field points
# adjust slope with first 30 points of ascending branch
Bslop = B[kmin:kmin + (Nint + 1)]
Mslop = Mfix[kmin:kmin + (Nint + 1)]
polyL = polyfit(Bslop, Mslop, 1) # best fit line to high field points
xhf = 0.5 * (polyU[0] + polyL[0]) # mean of two slopes
# convert B to A/m, high field slope in m^3
hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7)
meanint = 0.5 * (polyU[1] + polyL[1]) # mean of two intercepts
Msat = 0.5 * (polyU[1] - polyL[1]) # mean of saturation remanence
Moff = []
for k in range(Npts):
# take out linear slope and offset (makes symmetric about origin)
Moff.append((Mfix[k] - xhf * B[k] - meanint))
if Mzero == "" and Moff[k] < 0:
Mzero = k
if Mzero != "" and Mazero == "" and Moff[k] > 0:
Mazero = k
hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat) # Ms in Am^2
#
# split into upper and lower loops for splining
Mupper, Bupper, Mlower, Blower = [], [], [], []
deltaM, Bdm = [], [] # diff between upper and lower curves at Bdm
for k in range(kmin - 2, 0, -2):
Mupper.append(Moff[k]/Msat)
Bupper.append(B[k])
for k in range(kmin + 2, len(B)-1):
Mlower.append(Moff[k] / Msat)
Blower.append(B[k])
Iupper = spline.Spline(Bupper, Mupper) # get splines for upper up and down
Ilower = spline.Spline(Blower, Mlower) # get splines for lower
for b in np.arange(B[0]): # get range of field values
Mpos = ((Iupper(b) - Ilower(b))) # evaluate on both sides of B
Mneg = ((Iupper(-b) - Ilower(-b)))
Bdm.append(b)
deltaM.append(0.5 * (Mpos + Mneg)) # take average delta M
for k in range(Npts):
MadjN.append(Moff[k]/Msat)
Mnorm.append(M[k]/Msat)
# find Mr : average of two spline fits evaluated at B=0 (times Msat)
Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.))
hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr)
# find Bc (x intercept), interpolate between two bounding points
Bz = B[Mzero - 1:Mzero + 1]
Mz = Moff[Mzero - 1:Mzero + 1]
Baz = B[Mazero - 1:Mazero + 1]
Maz = Moff[Mazero - 1:Mazero + 1]
try:
poly = polyfit(Bz, Mz, 1) # best fit line through two bounding points
Bc = -poly[1]/poly[0] # x intercept
# best fit line through two bounding points
poly = polyfit(Baz, Maz, 1)
Bac = -poly[1]/poly[0] # x intercept
hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac)))
except Exception:
hpars['hysteresis_bc'] = '0'
return hpars, deltaM, Bdm, B, Mnorm, MadjN
[docs]
def hysteresis_magic2(path_to_file='.', hyst_file="rmag_hysteresis.txt",
save=False, save_folder='.',
fmt="svg", plots=True):
"""
Calculates hysteresis parameters, saves them in rmag_hysteresis format file.
If selected, this function also plots hysteresis loops, delta M curves,
d (Delta M)/dB curves, and IRM backfield curves.
Parameters:
path_to_file : path to directory that contains files (default is current directory, '.')
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plots : whether or not to display the plots (default is True)
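Examples:
Hypothetical usage (assumes a MagIC 2.x formatted measurements file that
contains hysteresis (LP-HYS) experiments):
>>> ipmag.hysteresis_magic2(path_to_file='.',
... hyst_file='agm_measurements.txt', save=False)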
"""
user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
pltspec = ""
dir_path = save_folder
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
rmag_out = save_folder + '/' + rmag_out
meas_file = path_to_file + '/' + hyst_file
rmag_rem = save_folder + "/rmag_remanence.txt"
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(hysteresis_magic2.__doc__)
print('bad file')
return
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
experiment_names, sids = [], []
for rec in meas_data:
meths = rec['magic_method_codes'].split(':')
methods = []
for meth in meths:
methods.append(meth.strip())
if 'LP-HYS' in methods:
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
if rec['er_specimen_name'] not in sids:
sids.append(rec['er_specimen_name'])
#
fignum = 1
sample_num = 0
# initialize variables to record some bulk info in first loop
first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
while sample_num < len(sids):
sample = sids[sample_num]
print(sample, sample_num + 1, 'out of ', len(sids))
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
for rec in meas_data:
methcodes = rec['magic_method_codes'].split(':')
meths = []
for meth in methcodes:
meths.append(meth.strip())
if rec['er_specimen_name'] == sample and "LP-HYS" in meths:
B.append(float(rec['measurement_lab_field_dc']))
M.append(float(rec['measurement_magn_moment']))
if first_rec == 1:
e = rec['magic_experiment_name']
HystRec = {}
first_rec = 0
if "er_location_name" in list(rec.keys()):
HystRec["er_location_name"] = rec["er_location_name"]
locname = rec['er_location_name'].replace('/', '-')
if "er_sample_name" in list(rec.keys()):
HystRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
HystRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
HystRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == sample and "LP-IRM-DCD" in meths:
Bdcd.append(float(rec['treatment_dc_field']))
Mdcd.append(float(rec['measurement_magn_moment']))
if first_dcd_rec == 1:
RemRec = {}
irm_exp = rec['magic_experiment_name']
first_dcd_rec = 0
if "er_location_name" in list(rec.keys()):
RemRec["er_location_name"] = rec["er_location_name"]
if "er_sample_name" in list(rec.keys()):
RemRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
RemRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
RemRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == sample and "LP-IMAG" in meths:
if first_imag_rec == 1:
imag_exp = rec['magic_experiment_name']
first_imag_rec = 0
Bimag.append(float(rec['measurement_lab_field_dc']))
Mimag.append(float(rec['measurement_magn_moment']))
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
# fignum = 1
fig = plt.figure(figsize=(8, 8))
hpars, deltaM, Bdm, B, Mnorm, MadjN = iplot_hys(1, B, M, sample)
ax1 = fig.add_subplot(2, 2, 1)
ax1.axhline(0, color='k')
ax1.axvline(0, color='k')
ax1.plot(B, Mnorm, 'r')
ax1.plot(B, MadjN, 'b')
ax1.set_xlabel('B (T)')
ax1.set_ylabel("M/Msat")
# ax1.set_title(sample)
ax1.set_xlim(-1, 1)
ax1.set_ylim(-1, 1)
bounds = ax1.axis()
n4 = 'Ms: ' + \
'%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2'
ax1.text(bounds[1] - .9 * bounds[1], -.9, n4, fontsize=9)
n1 = 'Mr: ' + \
'%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2'
ax1.text(bounds[1] - .9 * bounds[1], -.7, n1, fontsize=9)
n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T'
ax1.text(bounds[1] - .9 * bounds[1], -.5, n2, fontsize=9)
if 'hysteresis_xhf' in list(hpars.keys()):
n3 = r'Xhf: ' + \
'%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3'
ax1.text(bounds[1] - .9 * bounds[1], -.3, n3, fontsize=9)
# plt.subplot(1,2,2)
# plt.subplot(1,3,3)
DdeltaM = []
Mhalf = ""
for k in range(2, len(Bdm)):
# differential
DdeltaM.append(
abs(deltaM[k] - deltaM[k - 2])/(Bdm[k] - Bdm[k - 2]))
for k in range(len(deltaM)):
if deltaM[k]/deltaM[0] < 0.5:
Mhalf = k
break
try:
Bhf = Bdm[Mhalf - 1:Mhalf + 1]
Mhf = deltaM[Mhalf - 1:Mhalf + 1]
# best fit line through two bounding points
poly = polyfit(Bhf, Mhf, 1)
Bcr = (.5 * deltaM[0] - poly[1])/poly[0]
hpars['hysteresis_bcr'] = '%8.3e' % (Bcr)
hpars['magic_method_codes'] = "LP-BCR-HDM"
if HDD['deltaM'] != 0:
ax2 = fig.add_subplot(2, 2, 2)
ax2.plot(Bdm, deltaM, 'b')
ax2.set_xlabel('B (T)')
ax2.set_ylabel('Delta M')
linex = [0, Bcr, Bcr]
liney = [deltaM[0]/2.0, deltaM[0]/2.0, 0]
ax2.plot(linex, liney, 'r')
# ax2.set_title(sample)
ax3 = fig.add_subplot(2, 2, 3)
ax3.plot(Bdm[(len(Bdm) - len(DdeltaM)):], DdeltaM, 'b')
ax3.set_xlabel('B (T)')
ax3.set_ylabel('d (Delta M)/dB')
# ax3.set_title(sample)
ax4 = fig.add_subplot(2, 2, 4)
ax4.plot(Bdcd, Mdcd)
ax4.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax4.axhline(0, color='k')
ax4.axvline(0, color='k')
ax4.set_xlabel('B (T)')
ax4.set_ylabel('M/Mr')
except Exception:
print("could not compute Bcr; skipping delta M plots")
hpars['hysteresis_bcr'] = '0'
hpars['magic_method_codes'] = ""
plt.tight_layout()
if save:
plt.savefig(save_folder + '/' + sample + '_hysteresis.' + fmt)
plt.show()
sample_num += 1
[docs]
def find_ei(data, nb=1000, save=False, save_folder='.', fmt='svg',
site_correction=False, return_new_dirs=False, figprefix='EI',
return_values=False, num_resample_to_plot=1000, data_color='k', EI_color='r', resample_EI_color='grey', resample_EI_alpha=0.05, tight_axes=False):
"""
Applies a series of assumed flattening factors and "unsquishes" inclinations assuming the tangent function.
Finds flattening factor that gives elongation/inclination pair consistent with TK03;
or, if correcting by site instead for study-level secular variation,
finds flattening factor that minimizes elongation and most resembles a
Fisherian distribution.
Finds bootstrap confidence bounds
Parameters:
data: a nested list of dec/inc pairs
nb: number of bootstrapped pseudo-samples (default is 1000)
save: Boolean argument to save plots (default is False)
save_folder: path to folder in which plots should be saved (default is current directory)
fmt: specify format of saved plots (default is 'svg')
figprefix : prefix string for the names of saved figures (default is 'EI')
site_correction: Boolean argument to specify whether to "unsquish" data to
1) the elongation/inclination pair consistent with TK03 secular variation model
(site_correction = False)
or
2) a Fisherian distribution (site_correction = True). Default is False.
Note that many directions (~ 100) are needed for this correction to be reliable.
return_new_dirs: optional return of newly "unflattened" directions as di_block (default is False)
return_values: optional return of all bootstrap result inclinations, elongations, and
f factors (default is False)
if both return_new_dirs=True and return_values=True, the function will return:
di_block of new directions, inclinations, elongations, and f factors
num_resample_to_plot: number of bootstrap resample elongation/inclination curves to plot (default is 1000, i.e. all resamples when nb=1000)
data_color: the color of the direction equal area plot data (default is black)
EI_color: the color of the EI curve associated with the most frequent f value (rounded to 2 decimal points, default is red)
resample_EI_color: the color of the EI curves for all f values except for the most frequent f (default is grey)
resample_EI_alpha: the transparency of the EI curves for all f values except for the most frequent f (default is 0.05)
tight_axes: optional argument to tighten up the axes limits for the inclination-elongation figure
Returns:
- equal area plot of original directions
- Elongation/inclination pairs as a function of f, for the data plus bootstrap resamples
- Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
- Orientation of principal direction through unflattening
NOTE:
If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot.
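Examples:
Illustrative sketch: draw directions from a Fisher distribution, flatten
their inclinations with f=0.6, and try to recover f (fishrot, squish and
make_di_block are the pmagpy helpers assumed here):
>>> decs, incs = ipmag.fishrot(k=20, n=100, dec=10, inc=45, di_block=False)
>>> flattened = ipmag.make_di_block(decs, ipmag.squish(incs, 0.6))
>>> ipmag.find_ei(flattened, nb=1000)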
"""
print("Bootstrapping.... be patient")
print("")
sys.stdout.flush()
upper, lower = int(round(.975 * nb)), int(round(.025 * nb))
E, I = [], []
plt.figure(num=1, figsize=(4, 4))
plot_net(1)
plot_di(di_block=data, color=data_color)
plt.title('Original')
ppars = pmag.doprinc(data)
Io = ppars['inc']
n = ppars["N"]
Es, Is, Fs, V2s = pmag.find_f(data)
if site_correction:
Inc, Elong = Is[Es.index(min(Es))], Es[Es.index(min(Es))]
flat_f = Fs[Es.index(min(Es))]
else:
Inc, Elong = Is[-1], Es[-1]
flat_f = Fs[-1]
plt.figure(num=2, figsize=(4, 4))
plt.plot(Is, Es, EI_color, zorder = nb+1, lw=3)
plt.xlabel(r"inclination ($^\circ$)", fontsize=12)
plt.ylabel("elongation", fontsize=12)
plt.ylim(.9,5)
plt.text(Inc, Elong, ' %4.2f' % (flat_f), fontsize=12)
# plt.text(Is[0] - 2, Es[0], ' %s' % ('f=1'), fontsize=12)
b = 0
while b < nb:
bdata = pmag.pseudo(data)
Esb, Isb, Fsb, V2sb = pmag.find_f(bdata)
if b < num_resample_to_plot:
plt.plot(Isb, Esb, resample_EI_color, alpha=resample_EI_alpha)
if Esb[-1] != 0:
ppars = pmag.doprinc(bdata)
if site_correction:
I.append(abs(Isb[Esb.index(min(Esb))]))
E.append(Esb[Esb.index(min(Esb))])
else:
I.append(abs(Isb[-1]))
E.append(Esb[-1])
b += 1
I.sort()
E.sort()
if tight_axes:
plt.xlim(min(I)*0.8, max(I)*1.05)
plt.ylim(min(E)*0.5, max(E)*1.05)
Eexp = []
for i in I:
Eexp.append(pmag.EI(i))
plt.plot(I, Eexp, 'k')
if Inc == 0:
title = 'Pathological Distribution: ' + \
'[%7.1f, %7.1f]' % (I[lower], I[upper])
else:
title = '%7.1f [%7.1f, %7.1f]' % (Inc, I[lower], I[upper])
if save:
plt.savefig(save_folder+'/'+figprefix+'_EI_bootstraps'+'.'+fmt, bbox_inches='tight', dpi=300)
cdf_fig_num = 3
plt.figure(num=cdf_fig_num, figsize=(4, 4))
pmagplotlib.plot_cdf(cdf_fig_num, I, r'inclination ($^\circ$)', 'r', title)
pmagplotlib.plot_vs(cdf_fig_num, [I[lower], I[upper]], 'b', '--')
pmagplotlib.plot_vs(cdf_fig_num, [Inc], 'g', '-')
pmagplotlib.plot_vs(cdf_fig_num, [Io], 'k', '-')
if save:
plt.savefig(save_folder+'/'+figprefix+'_inc_CDF'+'.'+fmt, bbox_inches='tight', dpi=300)
# plot corrected directional data
di_lists = unpack_di_block(data)
if len(di_lists) == 3:
decs, incs, intensity = di_lists
if len(di_lists) == 2:
decs, incs = di_lists
if flat_f:
unsquished_incs = unsquish(incs, flat_f)
plt.figure(num=4, figsize=(4, 4))
plot_net(4)
plot_di(decs, unsquished_incs, color=data_color)
plt.title('Corrected for flattening')
else:
plt.figure(num=4, figsize=(4, 4))
plot_net(4)
plot_di(decs, incs, color=data_color)
plt.title('Corrected for flattening')
if save:
plt.savefig(save_folder+'/'+figprefix+'_corrected_directions'+'.'+fmt, bbox_inches='tight', dpi=300)
if (Inc, Elong, flat_f) == (0, 0, 0):
print("PATHOLOGICAL DISTRIBUTION")
print("The original inclination was: " + str(np.round(Io,2)))
print("")
print("The corrected inclination is: " + str(np.round(Inc,2)))
print("with bootstrapped confidence bounds of: " +
str(np.round(I[lower],2)) + ' to ' + str(np.round(I[upper],2)))
print("and elongation parameter of: " + str(np.round(Elong,2)))
print("The flattening factor is: " + str(np.round(flat_f,2)))
f_lower = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I[lower]))
f_upper = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I[upper]))
print("with bootstrapped confidence bounds of: " +
str(np.round(f_lower,2)) + ' to ' + str(np.round(f_upper,2)))
F = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I))
if return_new_dirs and return_values :
return make_di_block(decs, unsquished_incs), I, E, F
elif return_new_dirs:
return make_di_block(decs, unsquished_incs)
elif return_values:
return flat_f, I, E, F
else:
return
[docs]
def find_ei_kent(data, site_latitude, site_longitude, kent_color='k', nb=1000, save=False, save_folder='.', fmt='svg',
return_new_dirs=False, return_values=False, figprefix='EI',
num_resample_to_plot=1000, EI_color='r', resample_EI_color='grey', resample_EI_alpha=0.05,
vgp_nb=100, cmap='viridis_r', central_longitude=0, central_latitude=0):
"""
Applies a series of assumed flattening factors and "unsquishes" inclinations assuming the tangent function.
Finds the flattening factor that gives an elongation/inclination pair consistent with TK03.
Finds bootstrap confidence bounds.
Based on all flattening factors from the E/I bootstrap results, finds the distribution of paleolatitudes and fits it with a normal distribution.
Based on all flattening factors from the E/I bootstrap results, calculates the corresponding VGP positions and the mean pole associated with each factor.
Performs a Monte Carlo resample of the mean poles associated with each flattening factor.
Finds the Kent distribution statistics: mean, major and minor axes and their associated angles of dispersion.
Parameters:
data: a nested list of dec/inc pairs
site_latitude, site_longitude: location of the paleomagnetic site
kent_color : color of the Kent ellipse to plot (default is black)
nb : number of bootstrapped pseudo-samples (default is 1000)
save : Boolean argument to save plots (default is False)
save_folder : path to folder in which plots should be saved (default is current directory)
fmt : specify format of saved plots (default is 'svg')
return_new_dirs : optional return of newly "unflattened" directions as di_block (default is False)
return_values : optional return of all bootstrap result inclinations, elongations, and
f factors (default is False)
if both return_new_dirs=True and return_values=True, the function will return
di_block of new directions, inclinations, elongations, and f factors
figprefix : prefix string for the names of the figures to be saved (default is 'EI')
EI_color : the color of the EI curve associated with the most frequent f value (rounded to 2 decimal points, default is red)
resample_EI_color : the color of the EI curves for all f values except for the most frequent f (default is grey)
resample_EI_alpha : the transparency of the EI curves for all f values except for the most frequent f (default is 0.05)
num_resample_to_plot : number of bootstrap resample elongation/inclination curves to plot (default is 1000, i.e. all resamples when nb=1000)
vgp_nb : number of virtual geomagnetic poles to resample for each f factor using a Monte Carlo approach (the total number of VGPs resampled will be vgp_nb*nb)
cmap : matplotlib color map for color-coding the corrected directions and mean poles based on the f factor (default is 'viridis_r')
central_longitude, central_latitude : central point of the pole projection (defaults are 0)
Returns:
five plots :
1) Elongation/inclination pairs as a function of f, for the data plus bootstrap resamples
2) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties. Estimate from original data set plotted as solid line
3) Equal area plot of directions corrected using all bootstrap f factors, color-coded by f
4) Distribution of paleolatitudes fit with a normal distribution
5) Resampled mean poles with the Kent mean and its 95% confidence ellipse
NOTE:
If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot.
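Examples:
Hypothetical usage (data is assumed to be a nested list of dec/inc pairs
from a sedimentary unit at 30 N, 250 E):
>>> kent_stats = ipmag.find_ei_kent(data, site_latitude=30,
... site_longitude=250, nb=1000)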
"""
print("Bootstrapping.... be patient")
print("")
sys.stdout.flush()
upper, lower = int(round(.975 * nb)), int(round(.025 * nb))
E, I = [], []
ppars = pmag.doprinc(data)
Io = ppars['inc']
n = ppars["N"]
Es, Is, Fs, V2s = pmag.find_f(data)
Inc, Elong = Is[-1], Es[-1]
flat_f = Fs[-1]
# plot E/I figure
plt.figure(num=1, figsize=(4, 4))
plt.plot(Is, Es, EI_color, zorder = nb+1, lw=3)
plt.xlabel(r"inclination ($^\circ$)", fontsize=12)
plt.ylabel("elongation", fontsize=12)
plt.text(Inc, Elong, ' %4.2f' % (flat_f), fontsize=12)
b = 0
while b < nb:
bdata = pmag.pseudo(data)
Esb, Isb, Fsb, V2sb = pmag.find_f(bdata)
if b < num_resample_to_plot:
plt.plot(Isb, Esb, resample_EI_color, alpha=resample_EI_alpha)
if Esb[-1] != 0:
ppars = pmag.doprinc(bdata)
I.append(abs(Isb[-1]))
E.append(Esb[-1])
b += 1
I.sort()
E.sort()
plt.xlim(min(I)*0.8, max(I)*1.05)
plt.ylim(min(E)*0.5, max(E)*1.05)
Eexp = []
for i in I:
Eexp.append(pmag.EI(i))
plt.plot(I, Eexp, 'k')
if Inc == 0:
title = 'Pathological Distribution: ' + \
'[%7.1f, %7.1f]' % (I[lower], I[upper])
else:
title = '%7.1f [%7.1f, %7.1f]' % (Inc, I[lower], I[upper])
if save:
plt.savefig(save_folder+'/'+figprefix+'_bootstraps'+'.'+fmt, bbox_inches='tight', dpi=300)
plt.figure(figsize=(4, 4))
pmagplotlib.plot_cdf(2, I, r'inclination ($^\circ$)', 'r', title)
pmagplotlib.plot_vs(2, [I[lower], I[upper]], 'b', '--')
pmagplotlib.plot_vs(2, [Inc], 'g', '-')
pmagplotlib.plot_vs(2, [Io], 'k', '-')
if save:
plt.savefig(save_folder+'/'+figprefix+'_inc_CDF'+'.'+fmt, bbox_inches='tight', dpi=300)
# plot directional data corrected by all f
F = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I))
plt.figure(figsize=(4,4))
plot_net()
cNorm = colors.Normalize(vmin=min(F), vmax=max(F))
f_scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
di_lists = unpack_di_block(data)
if len(di_lists) == 3:
decs, incs, intensity = di_lists
if len(di_lists) == 2:
decs, incs = di_lists
mean_lons = []
mean_lats = []
for f in F:
unsquish_incs = unsquish(incs, f)
unsquish_VGPs = pmag.dia_vgp(np.array([decs, unsquish_incs, np.zeros(len(decs)), np.full(len(decs), site_latitude), np.full(len(decs),site_longitude)]).T)
unsquish_lons, unsquish_lats = unsquish_VGPs[0], unsquish_VGPs[1]
unsquish_VGPs_mean = fisher_mean(unsquish_lons, unsquish_lats)
resampled_lons, resampled_lats = fisher_mean_resample(alpha95=unsquish_VGPs_mean['alpha95'], n=vgp_nb,
dec=unsquish_VGPs_mean['dec'], inc=unsquish_VGPs_mean['inc'], di_block=0)
mean_lons.extend(resampled_lons)
mean_lats.extend(resampled_lats)
rgba = f_scalarMap.to_rgba(f)
hex_color = colors.rgb2hex(rgba)
plot_di(decs, unsquish_incs, color = hex_color, alpha=0.02)
cbar = plt.colorbar(f_scalarMap, orientation='horizontal', fraction=0.05, pad=0.05)
cbar.ax.tick_params(labelsize=14)
cbar.ax.set_title(label='$f$ values', fontsize=14)
if save:
plt.savefig(save_folder+'/'+figprefix+'_corrected_directions'+'.'+fmt, bbox_inches='tight', dpi=300)
# plot paleolatitudes distribution
EI_plats = np.degrees(np.arctan(np.tan(np.radians(I))/2))
plat_mode = stats.mode(np.round(EI_plats, 1))[0][0]
plat_lower, plat_upper = np.round(np.percentile(EI_plats, [2.5, 97.5]), 1)
mu, std = stats.norm.fit(EI_plats)
x = np.linspace(min(EI_plats), max(EI_plats), 100)
p = stats.norm.pdf(x, mu, std)
plt.figure(figsize=(4, 4))
plt.hist(EI_plats, bins=15, alpha=0.6, density=1)
plt.plot(x, p, 'k', linewidth=1)
plt.axvline(x=plat_lower, color = 'gray', ls='--')
plt.axvline(x=plat_upper, color = 'gray', ls='--')
plt.title('%7.1f [%7.1f, %7.1f]' % (plat_mode, plat_lower, plat_upper) + '\nFit result: mu='+str(round(mu,2))+'\nstd='+str(round(std, 2)), fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'paleolatitude ($^\circ$)', fontsize=16)
plt.ylabel('density', fontsize=16)
if save:
plt.savefig(save_folder+'/'+figprefix+'_paleolatitudes'+'.'+fmt, bbox_inches='tight', dpi=300)
plt.show()
# plot resampled mean poles
m = make_orthographic_map(central_longitude, central_latitude)
plot_vgp(m, mean_lons, mean_lats, color='lightgrey', edge='none', markersize=5, alpha=0.02)
kent_stats = kent_distribution_95(dec=mean_lons,inc=mean_lats)
plot_pole_ellipse(m,kent_stats, color=kent_color)
plt.show()
if (Inc, Elong, flat_f) == (0, 0, 0):
print("PATHOLOGICAL DISTRIBUTION")
print("The original inclination was: " + str(np.round(Io,2)))
print("")
print("The corrected inclination is: " + str(np.round(Inc,2)))
print("with bootstrapped confidence bounds of: " +
str(np.round(I[lower],2)) + ' to ' + str(np.round(I[upper],2)))
print("and elongation parameter of: " + str(np.round(Elong,2)))
print("The flattening factor is: " + str(np.round(flat_f,2)))
f_lower = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I[lower]))
f_upper = np.tan(np.deg2rad(Io))/np.tan(np.deg2rad(I[upper]))
print("with bootstrapped confidence bounds of: " +
str(np.round(f_lower,2)) + ' to ' + str(np.round(f_upper,2)))
print("")
print('The Kent mean incorporating inclination flattening uncertainty is:')
print_kent_mean(kent_stats)
if return_new_dirs and return_values :
unsquished_incs = unsquish(incs, flat_f)
return make_di_block(decs, unsquished_incs), kent_stats, I, E, F
elif return_new_dirs:
unsquished_incs = unsquish(incs, flat_f)
return make_di_block(decs, unsquished_incs)
elif return_values:
return kent_stats, I, E, F
else:
return kent_stats
[docs]
def find_compilation_kent(plon, plat, A95, slon, slat,
f_from_compilation=None, n=10000, n_fish=100,
return_poles=False, return_kent_stats=True,
return_paleolats=False, map_central_longitude=0,
map_central_latitude=0):
"""
Applies flattening factors from a compilation to a sedimentary paleomagnetic pole where only
pole longitude, pole latitude, A95, site longitude, and site latitude are available.
First, calculate the paleomagnetic direction at the site of the mean pole using plon, plat via
pmag.vgp_di. Then draw n resamples from the compiled f values in the compilation. The default
compilation of Pierce et al., 2022 can be used or the user can provide their own compilation.
Unsquish the directions with the resampled f factors, then convert the mean directions back to pole
space (making the simplifying assumption that A95 stays the same as the directions are unflattened).
Resample n_fish mean poles from the Fisher distribution given the unsquished plon, plat, and A95.
This will result in a total of n*n_fish number of resampled mean poles. Summarize the distribution
of the mean poles using a Kent distribution.
Parameters:
plon: legacy mean pole longitude
plat: legacy mean pole latitude
A95: legacy mean pole A95
slon: site longitude
slat: site latitude
f_from_compilation: list of f factors (default is None in which case the compilation of
Pierce et al., 2022 Table S1 will be used)
n: number of resamples from compilation (default is 10000)
n_fish: number of resamples from each Fisher mean pole position (default is 100)
return_poles: whether or not to return the resampled mean pole positions (default is False)
return_kent_stats: whether or not to return the calculated Kent distribution statistics of
the resampled mean poles (default is True)
return_paleolats: whether or not to return the computed compilation paleolatitudes (default is
False)
map_central_longitude: central longitude for the orthographic map (default is 0)
map_central_latitude: central latitude for the orthographic map (default is 0)
Returns:
Depending on the combination of boolean flags provided, returns one or more of:
- compilation_mean_lons, compilation_mean_lats: resampled mean pole positions
- f_compilation_kent_distribution_95: Kent distribution statistics
- compilation_paleolats: computed compilation paleolatitudes
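Examples:
Hypothetical usage (the pole and site values below are made up for
illustration):
>>> kent_stats = ipmag.find_compilation_kent(plon=320., plat=65., A95=3.,
... slon=250., slat=35.)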
"""
# get the uncorrected declination and inclination from a given paleomagnetic pole
original_dec, original_inc = pmag.vgp_di(plat, plon, slat, slon)
if f_from_compilation is None:
# compilation f value list
f_from_compilation = pd.Series(
[
0.49, 0.77, 0.63, 0.59, 0.57, 0.4, 0.63, 0.66, 0.63, 0.49, 0.49,
0.58, 0.54, 0.73, 0.97, 0.59, 0.84, 0.9, 0.78, 0.83, 0.58, 0.94,
0.78, 0.9, 0.68, 0.48, 0.67, 0.66, 0.7, 0.43, 0.45, 0.58, 0.58,
0.53, 0.42, 0.51, 0.61, 0.52, 0.62, 0.73, 0.66, 0.55, 0.47, 0.77,
0.62, 0.54, 0.46, 0.56, 0.64, 0.47, 0.48, 0.44, 0.52, 0.65, 0.81,
0.64, 0.71, 0.79, 0.65, 0.56, 0.69, 0.43, 0.7, 0.67, 0.65, 0.49,
0.54, 0.64, 0.83, 0.68
]
)
else:
f_from_compilation = pd.Series(f_from_compilation)
f_resample = f_from_compilation.sample(n=n, replace=True).tolist()
plt.figure(figsize=(6, 6))
plt.hist(f_resample, alpha=0.6, density=1)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("resample of compilation f factors", fontsize=16)
plt.ylabel("density", fontsize=16)
# calculate corrected inclinations
compilation_incs = [pmag.unsquish(original_inc, f) for f in f_resample]
# calculate corrected paleolatitudes
compilation_paleolats = np.degrees(np.arctan(np.tan(np.radians(compilation_incs)) / 2))
plt.figure(figsize=(6, 6))
plt.hist(compilation_paleolats, alpha=0.6, density=1)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r"compilation paleolatitudes ($^\circ$)", fontsize=16)
plt.ylabel("density", fontsize=16)
compilation_mean_lons = []
compilation_mean_lats = []
# loop over all resampled f values (n * n_fish mean poles in total, as documented above)
for i in range(len(compilation_incs)):
unsquish_plon, unsquish_plats, _, _ = pmag.dia_vgp(
original_dec, compilation_incs[i], A95, slat, slon)
resampled_lons, resampled_lats = fisher_mean_resample(
alpha95=A95,
n=n_fish,
dec=unsquish_plon,
inc=unsquish_plats,
di_block=0)
compilation_mean_lons.extend(resampled_lons)
compilation_mean_lats.extend(resampled_lats)
m = make_orthographic_map(map_central_longitude, map_central_latitude)
plot_vgp(
m,
compilation_mean_lons,
compilation_mean_lats,
color="lightgrey",
edge="none",
markersize=5,
alpha=0.002)
f_compilation_kent_distribution_95 = kent_distribution_95(
dec=compilation_mean_lons, inc=compilation_mean_lats)
print_kent_mean(f_compilation_kent_distribution_95)
plot_pole_ellipse(
m, f_compilation_kent_distribution_95, color="darkred", label="Kent mean pole")
plot_pole(
m, plon, plat, A95, label="uncorrected pole position", color="C0")
plt.legend(loc=8, fontsize=14)
results = []
if return_poles:
results.extend([compilation_mean_lons, compilation_mean_lats])
if return_kent_stats:
results.append(f_compilation_kent_distribution_95)
if return_paleolats:
results.append(compilation_paleolats)
if len(results) == 1:
return results[0]
return tuple(results)
[docs]
def pole_comparison_H2019(lon_1,lat_1,k_1,r_1,lon_2,lat_2,k_2,r_2):
'''
Calculate the Bhattacharyya Coefficient, Bayes error and the
Kullback-Leibler divergence associated with the comparison of
paleomagnetic poles following Heslop and Roberts (2019). The divergence
parameter is asymmetric such that the pole that is the reference pole
should be (lon_1, lat_1, k_1, r_1) and the pole of interest being compared
to that reference pole should be (lon_2, lat_2, k_2, r_2).
Parameters:
lon_1 : longitude of pole 1 (reference pole)
lat_1 : latitude of pole 1
k_1 : Fisher concentration parameter of pole 1
r_1 : resultant vector length of pole 1
lon_2 : longitude of pole 2 (pole of interest)
lat_2: latitude of pole 2
k_2 : Fisher concentration parameter of pole 2
r_2 : resultant vector length of pole 2
Returns:
- bhattacharyya, Bhattacharyya coefficient
- bayes, Bayes error
- kld, Kullback-Leibler divergence
Notes:
This function utilizes code developed by D. Heslop
https://github.com/dave-heslop74/kld
https://github.com/dave-heslop74/bhattacharyya
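Examples:
Illustrative sketch (the pole parameters below are made up; pole 1 is
the reference pole):
>>> bc, bayes, kld = ipmag.pole_comparison_H2019(
... lon_1=320., lat_1=65., k_1=30., r_1=19.5,
... lon_2=340., lat_2=60., k_2=25., r_2=14.6)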
'''
I1 = np.deg2rad(lat_1)
D1 = np.deg2rad(lon_1)
I2 = np.deg2rad(lat_2)
D2 = np.deg2rad(lon_2)
mu1 = np.column_stack((np.cos(D1)*np.cos(I1),np.sin(D1)*np.cos(I1),np.sin(I1)))
mu2 = np.column_stack((np.cos(D2)*np.cos(I2),np.sin(D2)*np.cos(I2),np.sin(I2)))
def log_sinh(k):
if k>700:
s=k-np.log(2.0)
else:
s=np.log(np.sinh(k))
return s
def chernoff_H2019(a,MU1,K1,MU2,K2):
K12=np.linalg.norm(a*MU1*K1+(1-a)*MU2*K2)
JF=a*(log_sinh(K1)-np.log(K1))+(1-a)*(log_sinh(K2)-np.log(K2))-(log_sinh(K12)-np.log(K12))
return np.exp(-JF)
bhattacharyya = chernoff_H2019(0.5,mu1,k_1*r_1,mu2,k_2*r_2)
alpha0 = fminbound(lambda alpha: chernoff_H2019(alpha,mu1,k_1*r_1,mu2,k_2*r_2),0,1)
bayes = chernoff_H2019(alpha0,mu1,k_1*r_1,mu2,k_2*r_2)/2
#Calculate the Kullback-Leibler divergence
K1 = k_1*r_1
K2 = k_2*r_2
term1 = np.log(K1)+log_sinh(K2)-np.log(K2)-log_sinh(K1)
term2 = 1.0 / np.tanh(K1) - 1.0 / K1
term3 = np.dot(mu2,(K2*mu1 - K1*mu2).T)
kld = float(np.squeeze(term1-term2*term3))
return bhattacharyya, bayes, kld
[docs]
def plate_rate_mc(pole1_plon, pole1_plat, pole1_kappa, pole1_N, pole1_age, pole1_age_error,
pole2_plon, pole2_plat, pole2_kappa, pole2_N, pole2_age, pole2_age_error,
ref_loc_lon, ref_loc_lat, samplesize=10000, random_seed=None, plot=True,
savefig=True, save_directory='./', figure_name=''):
"""
Determine the latitudinal motion implied by a pair of poles and utilize
the Monte Carlo sampling method of Swanson-Hysell (2014) to determine the
associated uncertainty.
Parameters:
plon : longitude of pole (pole1_ and pole2_ prefixes refer to the two poles throughout)
plat : latitude of pole
kappa : Fisher precision parameter for VGPs in pole
N : number of VGPs in pole
age : age assigned to pole in Ma
age_error : 1 sigma age uncertainty in million years
ref_loc_lon : longitude of reference location
ref_loc_lat : latitude of reference location
samplesize : number of draws from pole and age distributions (default is 10000)
random_seed : set random seed for reproducible number generation (default is None)
plot : whether to make figures (default is True, optional)
savefig : whether to save figures (default is True, optional)
save_directory : directory in which to save figures (default is local directory, optional)
figure_name : prefix for file names (optional)
Returns:
rate of latitudinal motion in cm/yr along with estimated 2.5 and 97.5
percentile rate estimates
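Examples:
Hypothetical usage (pole positions, kappas, N values and ages below are
illustrative only):
>>> rate, lower, upper = ipmag.plate_rate_mc(
... pole1_plon=200., pole1_plat=70., pole1_kappa=50., pole1_N=20,
... pole1_age=100., pole1_age_error=2.,
... pole2_plon=180., pole2_plat=50., pole2_kappa=40., pole2_N=15,
... pole2_age=120., pole2_age_error=3.,
... ref_loc_lon=240., ref_loc_lat=45., samplesize=1000)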
"""
ref_loc = [ref_loc_lon, ref_loc_lat]
pole1 = (pole1_plon, pole1_plat)
pole1_paleolat = 90 - pmag.angle(pole1, ref_loc)
pole2 = (pole2_plon, pole2_plat)
pole2_paleolat = 90 - pmag.angle(pole2, ref_loc)
print("The paleolatitude for ref_loc resulting from pole 1 is:" +
str(pole1_paleolat))
print("The paleolatitude for ref_loc resulting from pole 2 is:" +
str(pole2_paleolat))
rate = ((pole1_paleolat - pole2_paleolat) * 111 *
100000)/((pole1_age - pole2_age) * 1000000)
print("The rate of paleolatitudinal change implied by the poles pairs in cm/yr is:" + str(rate))
if random_seed is not None:
np.random.seed(random_seed)
pole1_MCages = np.random.normal(pole1_age, pole1_age_error, samplesize)
pole2_MCages = np.random.normal(pole2_age, pole2_age_error, samplesize)
plt.hist(pole1_MCages, 100, histtype='stepfilled',
color='darkred', label='Pole 1 ages')
plt.hist(pole2_MCages, 100, histtype='stepfilled',
color='darkblue', label='Pole 2 ages')
plt.xlabel('Age (Ma)')
plt.ylabel('n')
plt.legend(loc=3)
if savefig:
plot_extension = '_1.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
pole1_MCpoles = []
pole1_MCpole_lat = []
pole1_MCpole_long = []
pole1_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole1_N):
# pmag.fshdev returns a direction from a Fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole1_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole1_plon
tilt_amount = 90 - pole1_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole1_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole1_MCpole_lat.append(mean['inc'])
pole1_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole1_MCpaleolat.append(paleolat[0])
pole2_MCpoles = []
pole2_MCpole_lat = []
pole2_MCpole_long = []
pole2_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole2_N):
# pmag.fshdev returns a direction from a Fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole2_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole2_plon
tilt_amount = 90 - pole2_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole2_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole2_MCpole_lat.append(mean['inc'])
pole2_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole2_MCpaleolat.append(paleolat[0])
if plot is True:
plt.figure(figsize=(5, 5))
map_axis = make_mollweide_map()
plot_vgp(map_axis, pole1_MCpole_long, pole1_MCpole_lat, color='b')
plot_vgp(map_axis, pole2_MCpole_long, pole2_MCpole_lat, color='g')
if savefig:
plot_extension = '_2.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
# calculating the change in paleolatitude between the Monte Carlo pairs
pole1_pole2_Delta_degrees = []
pole1_pole2_Delta_kilometers = []
pole1_pole2_Delta_myr = []
pole1_pole2_degrees_per_myr = []
pole1_pole2_cm_per_yr = []
for n in range(samplesize):
Delta_degrees = pole1_MCpaleolat[n] - pole2_MCpaleolat[n]
Delta_Myr = pole1_MCages[n] - pole2_MCages[n]
pole1_pole2_Delta_degrees.append(Delta_degrees)
degrees_per_myr = Delta_degrees/Delta_Myr
cm_per_yr = ((Delta_degrees * 111) * 100000)/(Delta_Myr * 1000000)
pole1_pole2_degrees_per_myr.append(degrees_per_myr)
pole1_pole2_cm_per_yr.append(cm_per_yr)
if plot is True:
plotnumber = 100
plt.figure(num=None, figsize=(10, 4))
plt.subplot(1, 2, 1)
for n in range(plotnumber):
plt.plot([pole1_MCpaleolat[n], pole2_MCpaleolat[n]],
[pole1_MCages[n], pole2_MCages[n]], 'k-', linewidth=0.1, alpha=0.3)
plt.scatter(pole1_MCpaleolat[:plotnumber],
pole1_MCages[:plotnumber], color='b', s=3)
plt.scatter(pole1_paleolat, pole1_age, color='lightblue',
s=100, edgecolor='w', zorder=10000)
plt.scatter(pole2_MCpaleolat[:plotnumber],
pole2_MCages[:plotnumber], color='g', s=3)
plt.scatter(pole2_paleolat, pole2_age, color='lightgreen',
s=100, edgecolor='w', zorder=10000)
plt.plot([pole1_paleolat, pole2_paleolat], [
pole1_age, pole2_age], 'w-', linewidth=2)
plt.gca().invert_yaxis()
plt.xlabel('paleolatitude (degrees)', size=14)
plt.ylabel('time (Ma)', size=14)
plt.subplot(1, 2, 2)
plt.hist(pole1_pole2_cm_per_yr, bins=600)
plt.ylabel('n', size=14)
plt.xlabel('latitudinal drift rate (cm/yr)', size=14)
# plt.xlim([0,90])
if savefig:
plot_extension = '_3.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
twopointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 2.5)
fifty_percentile = stats.scoreatpercentile(pole1_pole2_cm_per_yr, 50)
ninetysevenpointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 97.5)
print("2.5th percentile is: " +
str(round(twopointfive_percentile, 2)) + " cm/yr")
print("50th percentile is: " + str(round(fifty_percentile, 2)) + " cm/yr")
print("97.5th percentile is: " +
str(round(ninetysevenpointfive_percentile, 2)) + " cm/yr")
return rate[0], twopointfive_percentile, ninetysevenpointfive_percentile
[docs]
def zeq(path_to_file='.', file='', data="", units='U', calculation_type="DE-BFL",
save=False, save_folder='.', fmt='svg', begin_pca="", end_pca="", angle=0,make_plots=True,show_data=True):
"""
NAME
zeq.py
DESCRIPTION
plots demagnetization data for a single specimen:
- The solid (open) symbols in the Zijderveld diagram are X,Y (X,Z) pairs. If the principal
direction is desired, specify begin_pca and end_pca steps as bounds for the calculation.
- The equal area projection has the X direction (usually North in geographic coordinates)
to the top. The red line is the X axis of the Zijderveld diagram. Solid symbols are lower hemisphere.
- In the demagnetization diagram, the red dots and blue line are the fractional remanence
remaining after each step; the green line is the fraction of the total remanence removed
between each step.
INPUT FORMAT
reads from file_name or takes a Pandas DataFrame data with specimen treatment intensity declination inclination as columns
Keywords:
file= FILE a space or tab delimited file with
specimen treatment declination inclination intensity
units= [mT,C] specify units of mT OR C, default is unscaled
save=[True,False] save figure and quit, default is False
fmt [svg,jpg,png,pdf] set figure format [default is svg]
begin_pca [step number] treatment step for beginning of PCA calculation, first step is default
end_pca [step number] treatment step for end of PCA calculation, last step is default
calculation_type [DE-BFL,DE-BFP,DE-FM] Calculation Type: best-fit line, plane or fisher mean; line is default
angle=[0-360]: angle to subtract from declination to rotate in horizontal plane, default is 0
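EXAMPLE
Hypothetical usage ('zeq_example.txt' is an assumed space-delimited file
with specimen, treatment, intensity, declination, inclination columns):
>>> ipmag.zeq(path_to_file='.', file='zeq_example.txt', units='mT',
... calculation_type='DE-BFL', begin_pca=2, end_pca=8)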
"""
if units == "C":
SIunits = "K"
if units == "mT":
SIunits = "T"
if units == "U":
SIunits = "U"
if file != "":
f = pd.read_csv(os.path.join(path_to_file, file),
delim_whitespace=True, header=None)
f.columns = ['specimen', 'treatment',
'intensity', 'declination', 'inclination']
# adjust for angle rotation
f['declination'] = (f['declination']-angle) % 360
#
else:
f=data
f['quality'] = 'g'
f['type'] = ''
s = f['specimen'].tolist()[0]
if units == 'mT':
f['treatment'] = f['treatment']*1e-3
if units == 'C':
f['treatment'] = f['treatment']+273
data = f[['treatment', 'declination',
'inclination', 'intensity', 'type','quality']]
#print(s)
datablock = data.values.tolist()
# define figure numbers in a dictionary for equal area, zijderveld,
# and intensity vs. demagnetization step respectively
if make_plots:
ZED = {}
ZED['eqarea'], ZED['zijd'], ZED['demag'] = 2, 1, 3
plt.figure(num=ZED['zijd'], figsize=(5, 5))
plt.figure(num=ZED['eqarea'], figsize=(5, 5))
plt.figure(num=ZED['demag'], figsize=(5, 5))
#
#
pmagplotlib.plot_zed(ZED, datablock, angle, s, SIunits) # plot the data
#
# print out data for this sample to screen
#
recnum = 0
if show_data:
print('step treat intensity dec inc')
for plotrec in datablock:
if units == 'mT':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0]*1e3, plotrec[3], plotrec[1], plotrec[2]))
if units == 'C':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0]-273., plotrec[3], plotrec[1], plotrec[2]))
if units == 'U':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0], plotrec[3], plotrec[1], plotrec[2]))
recnum += 1
#pmagplotlib.draw_figs(ZED)
if begin_pca != "" and end_pca != "" and calculation_type != "":
if make_plots:
pmagplotlib.plot_zed(ZED, datablock, angle, s, SIunits)  # plot the data
# get best-fit direction/great circle
mpars = pmag.domean(datablock, begin_pca, end_pca, calculation_type)
# plot the best-fit direction/great circle
if make_plots:
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
print('Specimen, calc_type, N, min, max, MAD, dec, inc')
if units == 'mT':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"]*1e3, mpars["measurement_step_max"]*1e3, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if units == 'C':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"]-273, mpars["measurement_step_max"]-273, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if units == 'U':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"], mpars["measurement_step_max"], mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if save and make_plots:
files = {}
for key in list(ZED.keys()):
files[key] = s+'_'+key+'.'+fmt
pmagplotlib.save_plots(ZED, files)
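# -- Editor's usage sketch (not part of the original source) --
# Minimal call of the demagnetization plotting routine documented above,
# assumed here to be ipmag.zeq; 'my_specimen.txt' is a hypothetical file
# with columns: specimen treatment intensity declination inclination.
def _zeq_example():
"""Sketch: plot one specimen and fit a best-fit line to steps 2-8."""
zeq(file='my_specimen.txt', units='mT', calculation_type='DE-BFL',
begin_pca=2, end_pca=8, save=True, fmt='svg')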
def aniso_magic_old(infile='specimens.txt', samp_file='samples.txt', site_file='sites.txt',
ipar=1, ihext=1, ivec=1, iplot=0, isite=1, iboot=1, vec=0,
Dir=[], PDir=[], comp=0, user="",
fmt="png", crd="s", verbose=True, plots=0,
num_bootstraps=1000, dir_path=".", input_dir_path=""):
def save(ANIS, fmt, title, con_id=""):
files = {}
for key in list(ANIS.keys()):
if pmagplotlib.isServer:
files[key] = title + '_TY:_aniso_' + key + '_.' + fmt
else:
files[key] = title.replace(
'__', '_') + "_aniso-" + key + "." + fmt
if pmagplotlib.isServer:
titles = {}
titles['data'] = "Eigenvectors"
titles['tcdf'] = "Eigenvalue Confidence"
titles['conf'] = "Confidence Ellipses"
for key in files:
if key not in titles:
titles[key] = key
pmagplotlib.add_borders(ANIS, titles, con_id=con_id)
pmagplotlib.save_plots(ANIS, files)
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# initialize some variables
version_num = pmag.get_version()
hpars, bpars = [], []
ResRecs = []
if crd == 'g':
CS = 0
elif crd == 't':
CS = 100
else:
CS = -1 # specimen
#
# set up plots
#
ANIS = {}
initcdf, inittcdf = 0, 0
ANIS['data'], ANIS['conf'] = 1, 2
if iboot == 1:
ANIS['tcdf'] = 3
if iplot == 1:
inittcdf = 1
pmagplotlib.plot_init(ANIS['tcdf'], 5, 5)
if comp == 1 and iplot == 1:
initcdf = 1
ANIS['vxcdf'], ANIS['vycdf'], ANIS['vzcdf'] = 4, 5, 6
pmagplotlib.plot_init(ANIS['vxcdf'], 5, 5)
pmagplotlib.plot_init(ANIS['vycdf'], 5, 5)
pmagplotlib.plot_init(ANIS['vzcdf'], 5, 5)
if iplot == 1:
pmagplotlib.plot_init(ANIS['conf'], 5, 5)
pmagplotlib.plot_init(ANIS['data'], 5, 5)
# read in the data
fnames = {'specimens': infile, 'samples': samp_file, 'sites': site_file}
con = cb.Contribution(input_dir_path, read_tables=['specimens', 'samples', 'sites', 'contribution'],
custom_filenames=fnames)
con_id = ""
if 'contribution' in con.tables:
# try to get contribution id
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
con.propagate_location_to_specimens()
if isite:
con.propagate_name_down('site', 'specimens')
spec_container = con.tables['specimens']
#spec_df = spec_container.get_records_for_code('AE-', strict_match=False)
spec_df = spec_container.df
# get only anisotropy records
spec_df = spec_df.dropna(subset=['aniso_s']).copy()
if 'aniso_tilt_correction' not in spec_df.columns:
spec_df['aniso_tilt_correction'] = -1 # assume specimen coordinates
orlist = spec_df['aniso_tilt_correction'].dropna().unique()
if CS not in orlist:
if len(orlist) > 0:
CS = orlist[0]
else:
CS = -1
if CS == -1:
crd = 's'
if CS == 0:
crd = 'g'
if CS == 100:
crd = 't'
if verbose:
print("desired coordinate system not available, using available: ", crd)
if isite == 1:
if 'site' not in spec_df.columns:
print(
"cannot plot by site -- make sure you have a samples and site table available")
print("plotting all data instead")
isite = 0
plot = 1
else:
sitelist = spec_df['site'].unique()
sitelist.sort()
plot = len(sitelist)
else:
plot = 1
k = 0
while k < plot:
site = ""
loc_name = ""
sdata, Ss = [], [] # list of S format data
if isite == 0:
sdata = spec_df
if 'location' in sdata.columns:
try:
loc_name = ':'.join(sdata['location'].unique())
except TypeError:
loc_name = ""
else:
site = sitelist[k]
sdata = spec_df[spec_df['site'] == site]
if 'location' in sdata.columns:
loc_name = sdata['location'].tolist()[0]
csrecs = sdata[sdata['aniso_tilt_correction'] == CS]
anitypes = csrecs['aniso_type'].unique()
for name in ['citations', 'location', 'site', 'sample']:
if name not in csrecs:
csrecs[name] = ""
Locs = csrecs['location'].unique()
#Sites = csrecs['site'].unique()
#Samples = csrecs['sample'].unique()
#Specimens = csrecs['specimen'].unique()
#Cits = csrecs['citations'].unique()
for ind, rec in csrecs.iterrows():
s = [float(i.strip()) for i in rec['aniso_s'].split(':')]
if s[0] <= 1.0:
Ss.append(s) # protect against bad (non-normalized) tensor data
ResRec = {}
ResRec['specimen'] = rec['specimen']
ResRec['sample'] = rec['sample']
ResRec['analysts'] = user
ResRec['citations'] = rec['citations']
ResRec['software_packages'] = version_num
ResRec['dir_tilt_correction'] = CS
ResRec["aniso_type"] = rec["aniso_type"]
# tau,Vdirs=pmag.doseigs(s)
if "aniso_s_n_measurements" not in rec.keys():
rec["aniso_s_n_measurements"] = "6"
if "aniso_s_sigma" not in rec.keys():
rec["aniso_s_sigma"] = "0"
fpars = pmag.dohext(
int(rec["aniso_s_n_measurements"]) - 6, float(rec["aniso_s_sigma"]), s)
aniso_v1 = " : ".join(
[str(i) for i in [fpars['t1'], fpars['v1_dec'], fpars['v1_inc']]])
aniso_v2 = " : ".join(
[str(i) for i in [fpars['t2'], fpars['v2_dec'], fpars['v2_inc']]])
aniso_v3 = " : ".join(
[str(i) for i in [fpars['t3'], fpars['v3_dec'], fpars['v3_inc']]])
ResRec['aniso_v1'] = aniso_v1
ResRec['aniso_v2'] = aniso_v2
ResRec['aniso_v3'] = aniso_v3
ResRec['aniso_ftest'] = fpars['F']
ResRec['aniso_ftest12'] = fpars['F12']
ResRec['aniso_ftest23'] = fpars['F23']
ResRec['description'] = 'F_crit: '+fpars['F_crit'] + \
'; F12,F23_crit: '+fpars['F12_crit']
ResRec['aniso_type'] = pmag.makelist(anitypes)
ResRecs.append(ResRec)
if len(Ss) > 1:
if pmagplotlib.isServer: # use server plot naming convention
plot_name = "LO:_" + loc_name + '_SI:_' + site + '_SA:__SP:__CO:_' + crd
title = loc_name
else: # use more readable plot naming convention
plot_name = "{}_{}_{}".format(loc_name, site, crd)
title = "{}_{}_{}".format(loc_name, site, crd)
bpars, hpars = pmagplotlib.plot_anis(ANIS, Ss, iboot, ihext, ivec, ipar,
title, iplot, comp, vec, Dir, num_bootstraps)
if len(PDir) > 0:
pmagplotlib.plot_circ(ANIS['data'], PDir, 90., 'g')
pmagplotlib.plot_circ(ANIS['conf'], PDir, 90., 'g')
if verbose and not plots:
pmagplotlib.draw_figs(ANIS)
if plots:
save(ANIS, fmt, plot_name, con_id)
if hpars != [] and ihext == 1:
HextRec = {}
for key in ResRec.keys():
HextRec[key] = ResRec[key]
# copy over stuff
# group these into HextRec['aniso_v1']
anisotropy_t1 = hpars["t1"]
anisotropy_v1_dec = hpars["v1_dec"]
anisotropy_v1_inc = hpars["v1_inc"]
eta_zeta = "eta/zeta"
anisotropy_v1_eta_dec = hpars["v2_dec"]
anisotropy_v1_eta_inc = hpars["v2_inc"]
anisotropy_v1_eta_semi_angle = hpars["e12"]
anisotropy_v1_zeta_dec = hpars["v3_dec"]
anisotropy_v1_zeta_inc = hpars["v3_inc"]
anisotropy_v1_zeta_semi_angle = hpars["e13"]
aniso_v1_list = [anisotropy_t1, anisotropy_v1_dec, anisotropy_v1_inc, eta_zeta,
anisotropy_v1_eta_dec, anisotropy_v1_eta_inc, anisotropy_v1_eta_semi_angle,
anisotropy_v1_zeta_dec, anisotropy_v1_zeta_inc, anisotropy_v1_zeta_semi_angle]
aniso_v1 = " : ".join([str(i) for i in aniso_v1_list])
HextRec['aniso_v1'] = aniso_v1
# for printing
HextRec["anisotropy_t1"] = '%10.8f' % (hpars["t1"])
HextRec["anisotropy_v1_dec"] = '%7.1f' % (hpars["v1_dec"])
HextRec["anisotropy_v1_inc"] = '%7.1f' % (hpars["v1_inc"])
HextRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars["e12"])
HextRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (hpars["v2_dec"])
HextRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (hpars["v2_inc"])
HextRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars["e13"])
HextRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars["v3_dec"])
HextRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars["v3_inc"])
# group these into HextRec['aniso_v2']
aniso_v2 = " : ".join(
[str(i) for i in [hpars["t2"], hpars["v2_dec"], hpars["v2_inc"]]])
aniso_v2 += " : eta/zeta : "
aniso_v2 += " : ".join([str(i) for i in [hpars['v1_dec'], hpars['v1_inc'], hpars['e12'],
hpars['v3_dec'], hpars['v3_inc'], hpars['e23']]])
HextRec["aniso_v2"] = aniso_v2
# for printing
HextRec["anisotropy_v2_dec"] = '%7.1f' % (hpars["v2_dec"])
HextRec["anisotropy_v2_inc"] = '%7.1f' % (hpars["v2_inc"])
HextRec["anisotropy_t2"] = '%10.8f' % (hpars["t2"])
HextRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars["e12"])
HextRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (hpars["v1_dec"])
HextRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (hpars["v1_inc"])
HextRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars["e23"])
HextRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars["v3_dec"])
HextRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars["v3_inc"])
# group these into HextRec['aniso_v3']
aniso_v3 = " : ".join(
[str(i) for i in [hpars["t3"], hpars["v3_dec"], hpars["v3_inc"]]])
aniso_v3 += " : eta/zeta : "
aniso_v3 += " : ".join([str(i) for i in [hpars["v1_dec"],
hpars["v1_inc"],
hpars["e12"],
hpars["v2_dec"],
hpars["v2_inc"],
hpars["e23"]]])
HextRec["aniso_v3"] = aniso_v3
# for printing
HextRec["anisotropy_v3_dec"] = '%7.1f' % (hpars["v3_dec"])
HextRec["anisotropy_v3_inc"] = '%7.1f' % (hpars["v3_inc"])
HextRec["anisotropy_t3"] = '%10.8f' % (hpars["t3"])
HextRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars["e12"])
HextRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (hpars["v1_dec"])
HextRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (hpars["v1_inc"])
HextRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars["e23"])
HextRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars["v2_dec"])
HextRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars["v2_inc"])
# not valid MagIC columns (2.5 or 3)
HextRec["anisotropy_hext_F"] = '%7.1f ' % (hpars["F"])
HextRec["anisotropy_hext_F12"] = '%7.1f ' % (hpars["F12"])
HextRec["anisotropy_hext_F23"] = '%7.1f ' % (hpars["F23"])
#
HextRec["method_codes"] = 'LP-AN:AE-H'
if verbose:
print("Hext Statistics: ")
print(
" tau_i, V_i_D, V_i_I, V_i_zeta, V_i_zeta_D, V_i_zeta_I, V_i_eta, V_i_eta_D, V_i_eta_I")
print(HextRec["anisotropy_t1"],
HextRec["anisotropy_v1_dec"], end=' ')
print(HextRec["anisotropy_v1_inc"],
HextRec["anisotropy_v1_eta_semi_angle"], end=' ')
print(HextRec["anisotropy_v1_eta_dec"],
HextRec["anisotropy_v1_eta_inc"], end=' ')
print(HextRec["anisotropy_v1_zeta_semi_angle"],
HextRec["anisotropy_v1_zeta_dec"], end=' ')
print(HextRec["anisotropy_v1_zeta_inc"])
#
print(HextRec["anisotropy_t2"],
HextRec["anisotropy_v2_dec"], end=' ')
print(HextRec["anisotropy_v2_inc"],
HextRec["anisotropy_v2_eta_semi_angle"], end=' ')
print(HextRec["anisotropy_v2_eta_dec"],
HextRec["anisotropy_v2_eta_inc"], end=' ')
print(HextRec["anisotropy_v2_zeta_semi_angle"],
HextRec["anisotropy_v2_zeta_dec"], end=' ')
print(HextRec["anisotropy_v2_zeta_inc"])
#
print(HextRec["anisotropy_t3"],
HextRec["anisotropy_v3_dec"], end=' ')
print(HextRec["anisotropy_v3_inc"],
HextRec["anisotropy_v3_eta_semi_angle"], end=' ')
print(HextRec["anisotropy_v3_eta_dec"],
HextRec["anisotropy_v3_eta_inc"], end=' ')
print(HextRec["anisotropy_v3_zeta_semi_angle"],
HextRec["anisotropy_v3_zeta_dec"], end=' ')
print(HextRec["anisotropy_v3_zeta_inc"])
HextRec['software_packages'] = version_num
# strip out invalid keys
for key in HextRec.copy():
if key.startswith('anisotropy_'): # and 'hext' not in key:
HextRec.pop(key)
ResRecs.append(HextRec)
if bpars != []:
BootRec = {}
for key in ResRec.keys():
BootRec[key] = ResRec[key]
# copy over stuff
aniso_v1 = " : ".join([str(i) for i in [bpars['t1'], bpars['v1_dec'],
bpars['v1_inc']]])
aniso_v1 += " : eta/zeta : "
aniso_v1 += " : ".join([str(i) for i in [bpars['v1_eta_dec'],
bpars['v1_eta_inc'],
bpars['v1_eta'],
bpars['v1_zeta_dec'],
bpars['v1_zeta_inc'],
bpars['v1_zeta']]])
BootRec['aniso_v1'] = aniso_v1
# for printing
BootRec["anisotropy_v1_dec"] = '%7.1f' % (bpars["v1_dec"])
BootRec["anisotropy_v2_dec"] = '%7.1f' % (bpars["v2_dec"])
BootRec["anisotropy_v3_dec"] = '%7.1f' % (bpars["v3_dec"])
BootRec["anisotropy_v1_inc"] = '%7.1f' % (bpars["v1_inc"])
BootRec["anisotropy_v2_inc"] = '%7.1f' % (bpars["v2_inc"])
BootRec["anisotropy_v3_inc"] = '%7.1f' % (bpars["v3_inc"])
BootRec["anisotropy_t1"] = '%10.8f' % (bpars["t1"])
BootRec["anisotropy_t2"] = '%10.8f' % (bpars["t2"])
BootRec["anisotropy_t3"] = '%10.8f' % (bpars["t3"])
BootRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
bpars["v1_eta_inc"])
BootRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
bpars["v1_eta_dec"])
BootRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
bpars["v1_eta"])
BootRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
bpars["v1_zeta_inc"])
BootRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
bpars["v1_zeta_dec"])
BootRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
bpars["v1_zeta"])
# group these into aniso_v2
aniso_v2 = " : ".join(
[str(i) for i in [bpars["t2"], bpars["v2_dec"], bpars["v2_inc"]]])
aniso_v2 += " : eta/zeta : "
aniso_v2 += " : ".join([str(i) for i in [bpars['v2_eta_dec'],
bpars['v2_eta_inc'],
bpars['v2_eta'],
bpars['v2_zeta_dec'],
bpars['v2_zeta_inc'],
bpars['v2_zeta']]])
BootRec['aniso_v2'] = aniso_v2
# for printing
BootRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
bpars["v2_eta_inc"])
BootRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
bpars["v2_eta_dec"])
BootRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
bpars["v2_eta"])
BootRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
bpars["v2_zeta_inc"])
BootRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
bpars["v2_zeta_dec"])
BootRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
bpars["v2_zeta"])
# group into aniso_v3
aniso_v3 = " : ".join(
[str(i) for i in [bpars["t3"], bpars["v3_dec"], bpars["v3_inc"]]])
aniso_v3 += " : eta/zeta : "
aniso_v3 += " : ".join([str(i) for i in [bpars["v3_eta_dec"],
bpars["v3_eta_inc"],
bpars["v3_eta"],
bpars["v3_zeta_dec"],
bpars["v3_zeta_inc"],
bpars["v3_zeta"]]])
BootRec["aniso_v3"] = aniso_v3
# for printing
BootRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
bpars["v3_eta_inc"])
BootRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
bpars["v3_eta_dec"])
BootRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
bpars["v3_eta"])
BootRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
bpars["v3_zeta_inc"])
BootRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
bpars["v3_zeta_dec"])
BootRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
bpars["v3_zeta"])
# not valid MagIC columns
BootRec["anisotropy_hext_F"] = ''
BootRec["anisotropy_hext_F12"] = ''
BootRec["anisotropy_hext_F23"] = ''
#
# regular bootstrap
BootRec["method_codes"] = 'LP-AN:AE-H:AE-BS'
if ipar == 1:
# parametric bootstrap
BootRec["method_codes"] = 'LP-AN:AE-H:AE-BS-P'
if verbose:
print("Bootstrap Statistics: ")
print(
" tau_i, V_i_D, V_i_I, V_i_zeta, V_i_zeta_D, V_i_zeta_I, V_i_eta, V_i_eta_D, V_i_eta_I")
print(BootRec["anisotropy_t1"],
BootRec["anisotropy_v1_dec"], end=' ')
print(BootRec["anisotropy_v1_inc"],
BootRec["anisotropy_v1_eta_semi_angle"], end=' ')
print(BootRec["anisotropy_v1_eta_dec"],
BootRec["anisotropy_v1_eta_inc"], end=' ')
print(BootRec["anisotropy_v1_zeta_semi_angle"],
BootRec["anisotropy_v1_zeta_dec"], end=' ')
print(BootRec["anisotropy_v1_zeta_inc"])
#
print(BootRec["anisotropy_t2"], BootRec["anisotropy_v2_dec"],
BootRec["anisotropy_v2_inc"], end=' ')
print(BootRec["anisotropy_v2_eta_semi_angle"],
BootRec["anisotropy_v2_eta_dec"], end=' ')
print(BootRec["anisotropy_v2_eta_inc"],
BootRec["anisotropy_v2_zeta_semi_angle"], end=' ')
print(BootRec["anisotropy_v2_zeta_dec"],
BootRec["anisotropy_v2_zeta_inc"])
#
print(BootRec["anisotropy_t3"], BootRec["anisotropy_v3_dec"],
BootRec["anisotropy_v3_inc"], end=' ')
print(BootRec["anisotropy_v3_eta_semi_angle"],
BootRec["anisotropy_v3_eta_dec"], end=' ')
print(BootRec["anisotropy_v3_eta_inc"],
BootRec["anisotropy_v3_zeta_semi_angle"], end=' ')
print(BootRec["anisotropy_v3_zeta_dec"],
BootRec["anisotropy_v3_zeta_inc"])
BootRec['software_packages'] = version_num
# strip out invalid keys
for key in BootRec.copy():
if key.startswith('anisotropy_'): # and 'hext' not in key:
BootRec.pop(key)
# TODO: these records may belong at a different level (e.g., site)
ResRecs.append(BootRec)
k += 1
goon = 1
while goon == 1 and iplot == 1 and verbose:
if iboot == 1:
print("compare with [d]irection ")
print(
" plot [g]reat circle, change [c]oord. system, change [e]llipse calculation, s[a]ve plots, [q]uit ")
if isite == 1:
print(" [p]revious, [s]ite, [q]uit, <return> for next ")
ans = input("")
if ans == "q":
sys.exit()
if ans == "e":
iboot, ipar, ihext, ivec = 1, 0, 0, 0
e = input("Do Hext Statistics 1/[0]: ")
if e == "1":
ihext = 1
e = input("Suppress bootstrap 1/[0]: ")
if e == "1":
iboot = 0
if iboot == 1:
e = input("Parametric bootstrap 1/[0]: ")
if e == "1":
ipar = 1
e = input("Plot bootstrap eigenvectors: 1/[0]: ")
if e == "1":
ivec = 1
if iplot == 1:
if inittcdf == 0:
ANIS['tcdf'] = 3
pmagplotlib.plot_init(ANIS['tcdf'], 5, 5)
inittcdf = 1
bpars, hpars = pmagplotlib.plot_anis(ANIS, Ss, iboot, ihext, ivec, ipar, title, iplot,
comp, vec, Dir, num_bootstraps)
if verbose and not plots:
pmagplotlib.draw_figs(ANIS)
if ans == "c":
print("Current Coordinate system is: ")
if CS == -1:
print(" Specimen")
if CS == 0:
print(" Geographic")
if CS == 100:
print(" Tilt corrected")
key = input(
" Enter desired coordinate system: [s]pecimen, [g]eographic, [t]ilt corrected ")
if key == 's':
CS = -1
if key == 'g':
CS = 0
if key == 't':
CS = 100
if CS not in orlist:
if len(orlist) > 0:
CS = orlist[0]
else:
CS = -1
if CS == -1:
crd = 's'
if CS == 0:
crd = 'g'
if CS == 100:
crd = 't'
print(
"desired coordinate system not available, using available: ", crd)
k -= 1
goon = 0
if ans == "":
if isite == 1:
goon = 0
else:
print("Good bye ")
sys.exit()
if ans == 'd':
if initcdf == 0:
initcdf = 1
ANIS['vxcdf'], ANIS['vycdf'], ANIS['vzcdf'] = 4, 5, 6
pmagplotlib.plot_init(ANIS['vxcdf'], 5, 5)
pmagplotlib.plot_init(ANIS['vycdf'], 5, 5)
pmagplotlib.plot_init(ANIS['vzcdf'], 5, 5)
Dir, comp = [], 1
print("""
Input: Vi D I to compare eigenvector Vi with direction D/I
where Vi=1: principal
Vi=2: major
Vi=3: minor
D= declination of comparison direction
I= inclination of comparison direction
example input: 1 15 20""")
con = 1
while con == 1:
try:
vdi = input("Vi D I: ").split()
vec = int(vdi[0])-1
Dir = [float(vdi[1]), float(vdi[2])]
con = 0
except IndexError:
print(" Incorrect entry, try again ")
bpars, hpars = pmagplotlib.plot_anis(ANIS, Ss, iboot, ihext, ivec, ipar, title,
iplot, comp, vec, Dir, num_bootstraps)
Dir, comp = [], 0
if verbose and not plots:
pmagplotlib.draw_figs(ANIS)
if ans == 'g':
con, cnt = 1, 0
while con == 1:
try:
print(
" Input: input pole to great circle ( D I) to plot a great circle: ")
di = input(" D I: ").split()
PDir.append(float(di[0]))
PDir.append(float(di[1]))
con = 0
except:
cnt += 1
if cnt < 10:
print(
" enter the dec and inc of the pole on one line ")
else:
print(
"too many invalid entries - giving up")
sys.exit()
if set_env.IS_WIN:
# if windows, must re-draw everything
pmagplotlib.plot_anis(ANIS, Ss, iboot, ihext, ivec, ipar,
title, iplot, comp, vec, Dir, num_bootstraps)
pmagplotlib.plot_circ(ANIS['data'], PDir, 90., 'g')
pmagplotlib.plot_circ(ANIS['conf'], PDir, 90., 'g')
if verbose and not plots:
pmagplotlib.draw_figs(ANIS)
if ans == "p":
k -= 2
goon = 0
if ans == "q":
k = plot
goon = 0
if ans == "s":
keepon = 1
site = input(" print site or part of site desired: ")
while keepon == 1:
try:
k = sitelist.index(site)
keepon = 0
except:
tmplist = []
for qq in range(len(sitelist)):
if site in sitelist[qq]:
tmplist.append(sitelist[qq])
print(site, " not found, but this was: ")
print(tmplist)
site = input('Select one or try again\n ')
k = sitelist.index(site)
goon, ans = 0, ""
if ans == "a":
locs = pmag.makelist(Locs)
site_name = "_"
if isite:
site_name = site
if pmagplotlib.isServer: # use server plot naming convention
title = "LO:_" + locs + '_SI:_' + site_name + '_SA:__SP:__CO:_' + crd
else: # use more readable plot naming convention
title = "{}_{}_{}".format(locs, site_name, crd)
save(ANIS, fmt, title)
goon = 0
else:
if verbose:
print('skipping plot - not enough data points')
k += 1
# put rmag_results stuff here
if len(ResRecs) > 0:
# for rec in ResRecs:
# con.add_item('specimens', rec, rec['specimen'])
# sort records so that they are grouped by specimen ?
#con.write_table_to_file('specimens', 'custom_specimens.txt')
# ResOut,keylist=pmag.fillkeys(ResRecs)
# just make a fresh one
con.add_magic_table_from_data('specimens', ResRecs)
# con.write_table_to_file('specimens', 'anisotropy_specimens.txt')
# pmag.magic_write(outfile,ResOut,'rmag_results')
if verbose:
print(" Good bye ")
def aniso_magic_nb(infile='specimens.txt', samp_file='samples.txt', site_file='sites.txt', verbose=True,
ipar=False, ihext=True, ivec=False, isite=False, iboot=False, vec=0,
Dir=[], PDir=[], crd="s", num_bootstraps=1000, dir_path=".", fignum=1,
save_plots=True, interactive=False, fmt="png", contribution=None):
"""
Wrapper for aniso_magic
"""
return aniso_magic(infile, samp_file, site_file, verbose, ipar, ihext, ivec,
isite, iboot, vec, Dir, PDir, crd, num_bootstraps,
dir_path, fignum, save_plots, interactive, fmt, contribution)
def aniso_magic(infile='specimens.txt', samp_file='samples.txt', site_file='sites.txt', verbose=True,
ipar=False, ihext=True, ivec=False, isite=False, iboot=False, vec=0,
Dir=[], PDir=[], crd="s", num_bootstraps=1000, dir_path=".", fignum=1,
save_plots=True, interactive=False, fmt="png", contribution=None, image_records=False):
"""
Makes plots of anisotropy eigenvectors, eigenvalues and confidence bounds
All directions are on the lower hemisphere.
Parameters:
infile : specimens formatted file with aniso_s data
samp_file : samples formatted file with sample => site relationship
site_file : sites formatted file with site => location relationship
verbose : if True, print messages to output
ipar (confidence bound parameter): if True - perform parametric bootstrap - requires non-blank aniso_s_sigma
ihext (confidence bound parameter): if True - Hext ellipses
ivec (confidence bound parameter): if True - plot bootstrapped eigenvectors instead of ellipses
isite : if True, plot by site; requires non-blank samp_file
iboot (confidence bound parameter): if True - bootstrap ellipses
vec : eigenvector for comparison with Dir
Dir : [Dec,Inc] list for comparison direction
PDir : [Pole_dec, Pole_Inc] for pole to plane for comparison
green dots are on the lower hemisphere, cyan are on the upper hemisphere
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tilt_correction = -1 or unspecified
g : geographic coordinates, aniso_tilt_correction = 0
t : tilt corrected coordinates, aniso_tilt_correction = 100
num_bootstraps : how many bootstraps to do, default 1000
dir_path : directory path
fignum : matplotlib figure number, default 1
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
fmt : str, default "png"
format for figures, [svg, jpg, pdf, png]
contribution : pmagpy contribution_builder.Contribution object, if not provided will be created
in directory (default None). (if provided, infile/samp_file/dir_path may be left blank)
"""
figs = {}
saved = []
image_recs = []
# make sure boolean values are in integer form
# for backwards compatibility
ipar = int(ipar)
ihext = int(ihext)
ivec = int(ivec)
isite = int(isite)
#iloc = int(iloc) # NOT USED
iboot = int(iboot)
# initialize some variables
version_num = pmag.get_version()
hpars, bpars = [], []
# set aniso_tilt_correction value
CS = -1 # specimen
if crd == 'g':
CS = 0
if crd == 't':
CS = 100
#
#
# read in the data
if contribution is None:
# fix directory
input_dir_path = os.path.realpath(dir_path)
fnames = {'specimens': infile, 'samples': samp_file, 'sites': site_file}
con = cb.Contribution(input_dir_path, read_tables=['specimens', 'samples', 'sites', 'contribution'],
custom_filenames=fnames)
else:
con = contribution
dir_path = con.directory
# get contribution id if available
con_id = ""
if 'contribution' in con.tables:
if 'id' in con.tables['contribution'].df.columns:
con_id = str(con.tables['contribution'].df['id'].values[0])
# get other data
con.propagate_location_to_specimens()
spec_container = con.tables['specimens']
spec_df = spec_container.df
# use only anisotropy records
spec_df = spec_df.dropna(subset=['aniso_s']).copy()
if 'aniso_tilt_correction' not in spec_df.columns:
spec_df['aniso_tilt_correction'] = -1 # assume specimen coordinates
if "aniso_s_n_measurements" not in spec_df.columns:
spec_df["aniso_s_n_measurements"] = "6"
if "aniso_s_sigma" not in spec_df.columns:
spec_df["aniso_sigma"] = "0"
orlist = spec_df['aniso_tilt_correction'].dropna().unique()
if CS not in orlist:
if len(orlist) > 0:
CS = orlist[0]
else:
CS = -1
if CS == -1:
crd = 's'
if CS == 0:
crd = 'g'
if CS == 100:
crd = 't'
if verbose:
print("desired coordinate system not available, using available: ", crd)
cs_df = spec_df[spec_df['aniso_tilt_correction'] == CS]
if isite:
sites = cs_df['site'].unique()
for site in list(sites):
site_df = cs_df[cs_df.site == site]
loc = ""
if 'sites' in con.tables:
if 'location' in con.tables['sites'].df.columns:
locs = con.tables['sites'].df.loc[site, 'location']
# .loc returns a scalar when one row matches and a Series when several do
if isinstance(locs, str):
loc = locs
elif hasattr(locs, 'iloc'):
loc = locs.iloc[0]
figs = plot_aniso(fignum, site_df, Dir=Dir, PDir=PDir, ipar=ipar,
ihext=ihext, ivec=ivec, iboot=iboot,
vec=vec, num_bootstraps=num_bootstraps, title=site)
files = {key: loc + "_" + site +"_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()}
if pmagplotlib.isServer:
for key in figs.keys():
files[key] = "LO:_" + loc + "_SI:_" + site + '_TY:_aniso_' + key + '_.' + fmt
titles = {}
titles['data'] = "Eigenvectors"
titles['tcdf'] = "Eigenvalue Confidence"
titles['conf'] = "Confidence Ellipses"
for key in figs:
if key not in titles:
titles[key] = key
pmagplotlib.add_borders(figs, titles, con_id=con_id)
if image_records:
for plot_type, fname in files.items():
image_rec = {'site': site, 'file': fname, 'type': PLOT_TYPES[plot_type],
'title': "{} {}".format(site, PLOT_TYPES[plot_type]),
'timestamp': date.today().isoformat(), 'software_packages': version.version}
image_recs.append(image_rec)
if save_plots:
saved.extend(pmagplotlib.save_plots(figs, files))
elif interactive:
pmagplotlib.draw_figs(figs)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, files))
else:
continue
else:
fignum += 2
if iboot:
fignum += 1
if len(Dir) > 0:
fignum += 1
else:
figs = plot_aniso(fignum, cs_df, Dir=Dir, PDir=PDir, ipar=ipar, ihext=ihext,
ivec=ivec, iboot=iboot, vec=vec, num_bootstraps=num_bootstraps)
try:
locs = cs_df['location'].unique()
except:
locs = [""]
locs = "-".join(locs)
files = {key: locs + "_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()}
if pmagplotlib.isServer:
for key in figs.keys():
files[key] = 'MC:_' + con_id + '_TY:_aniso_' + key + '_.' + fmt
titles = {}
titles['data'] = "Eigenvectors"
titles['tcdf'] = "Eigenvalue Confidence"
titles['conf'] = "Confidence Ellipses"
for key in figs:
if key not in titles:
titles[key] = key
pmagplotlib.add_borders(figs, titles, con_id=con_id)
if image_records:
for plot_type, fname in files.items():
image_rec = {'location': locs, 'file': fname, 'type': PLOT_TYPES[plot_type],
'title': "{} {}".format(locs, PLOT_TYPES[plot_type]),
'timestamp': date.today().isoformat(), 'software_packages': version.version}
image_recs.append(image_rec)
if save_plots:
saved.extend(pmagplotlib.save_plots(figs, files))
elif interactive:
pmagplotlib.draw_figs(figs)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, files))
if image_records:
return True, saved, image_recs
return True, saved
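# -- Editor's usage sketch (not part of the original source) --
# Non-interactive aniso_magic run on a hypothetical MagIC project
# directory ('my_project'); plots per-site bootstrap ellipses and saves
# the figures as png files.
def _aniso_magic_example():
"""Sketch: site-by-site anisotropy plots with bootstrap ellipses."""
aniso_magic(infile='specimens.txt', samp_file='samples.txt',
site_file='sites.txt', isite=True, iboot=True, ihext=False,
crd='g', save_plots=True, fmt='png', dir_path='my_project')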
def plot_dmag(data="", title="", fignum=1, norm=1,dmag_key='treat_ac_field',intensity='',
quality=False):
"""
plots demagnetization data versus treatment step for all specimens in the pandas DataFrame data
Parameters:
data : pandas DataFrame with MagIC data model 3 columns, including specimen,
the treatment (dmag_key) and intensity columns, and quality
title : title for plot
fignum : figure number
norm : if True, normalize data to the first step
dmag_key : one of ['treat_temp','treat_ac_field','treat_mw_energy'],
selected using method_codes ['LT-T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
Returns:
matplotlib plot
"""
plt.figure(num=fignum, figsize=(5, 5))
if intensity:
int_key=intensity
else:
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
# get which key we have
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
int_key = IntMeths[0]
data = data[data[int_key].notnull()] # fish out all data with this key
units = "U" # this sets the units for plotting to undefined
if not dmag_key:
if 'treat_temp' in data.columns: units = "K" # kelvin
elif 'treat_ac_field' in data.columns: units = "T" # tesla
elif 'treat_mw_energy' in data.columns: units = "J" # joules
if dmag_key=='treat_temp': units='K'
if dmag_key=='treat_ac_field': units='T'
if dmag_key=='treat_mw_energy': units='J'
spcs = data.specimen.unique() # get a list of all specimens in DataFrame data
if len(spcs)==0:
print('no data for plotting')
return
# step through specimens to put on plot
for spc in spcs:
spec_data = data[data.specimen.str.contains(spc)]
INTblock = []
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm)
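# -- Editor's usage sketch (not part of the original source) --
# Build a tiny MagIC-style DataFrame by hand and plot an AF
# demagnetization curve with plot_dmag; all values below are invented
# for illustration.
def _plot_dmag_example():
"""Sketch: normalized intensity decay for one hypothetical specimen."""
demo = pd.DataFrame({'specimen': ['s1'] * 4,
'treat_ac_field': [0., 0.01, 0.02, 0.04],
'magn_moment': [1e-5, 8e-6, 5e-6, 2e-6],
'quality': ['g'] * 4})
plot_dmag(data=demo, title='s1 AF demag', fignum=1, norm=1,
dmag_key='treat_ac_field')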
def eigs_s(infile="", dir_path='.'):
"""
Converts eigenparameter format data to s format
Parameters:
infile : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
dir_path : directory containing infile, default "."
Returns:
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....]
"""
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss
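# -- Editor's usage sketch (not part of the original source) --
# eigs_s expects 9 whitespace-separated values per line:
# tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
# The file name below is hypothetical.
def _eigs_s_example():
"""Sketch: convert an eigenparameter file to s-format tensor elements."""
Ss = eigs_s(infile='my_eigs.txt', dir_path='.')
for s in Ss:
print(s) # [x11, x22, x33, x12, x23, x13]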
def plot_gc(poles, color='g', fignum=1):
"""
plots a great circle on an equal area projection
Parameters:
fignum : matplotlib figure number
poles : nested list of [Dec,Inc] pairs of poles
color : color of lower hemisphere dots for great circle - must be in form: 'g','r','y','k',etc.
upper hemisphere is always cyan
"""
for pole in poles:
pmagplotlib.plot_circ(fignum, pole, 90., color)
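# -- Editor's usage sketch (not part of the original source) --
# plot_gc draws onto an existing equal-area figure, so open one with
# plot_net first; the poles below are arbitrary illustrative values.
def _plot_gc_example():
"""Sketch: two great circles on a single equal-area net."""
plot_net(1)
plot_gc([[350., 10.], [90., 45.]], color='g', fignum=1)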
def plot_aniso(fignum, aniso_df, Dir=[], PDir=[], ipar=False, ihext=True, ivec=False,
iboot=False, vec=0, num_bootstraps=1000, title=""):
figs = {}
ipar = int(ipar)
ihext = int(ihext)
ivec = int(ivec)
iboot = int(iboot)
Ss, V1, V2, V3 = [], [], [], []
for ind, rec in aniso_df.iterrows():
s = [float(i.strip()) for i in rec['aniso_s'].split(':')]
if s[0] <= 1.0:
Ss.append(s) # protect against bad (non-normalized) tensor data
tau, Vdir = pmag.doseigs(s)
V1.append([Vdir[0][0], Vdir[0][1]])
V2.append([Vdir[1][0], Vdir[1][1]])
V3.append([Vdir[2][0], Vdir[2][1]])
Ss = np.array(Ss)
if Ss.shape[0] > 1:
# plot the data
plot_net(fignum)
figs['data'] = fignum
if not pmagplotlib.isServer:
plt.title(title+':'+' V1=squares,V2=triangles,V3=circles')
plot_di(di_block=V1, color='r', marker='s', markersize=20)
plot_di(di_block=V2, color='b', marker='^', markersize=20)
plot_di(di_block=V3, color='k', marker='o', markersize=20)
# plot the confidence
nf, sigma, avs = pmag.sbar(Ss)
hpars = pmag.dohext(nf, sigma, avs) # get the Hext parameters
if len(PDir) > 0:
pmagplotlib.plot_circ(fignum+1, PDir, 90., 'g')
figs['conf'] = fignum + 1
plot_net(fignum+1)
if not pmagplotlib.isServer:
plt.title(title+':'+'Confidence Ellipses')
plot_di(dec=hpars['v1_dec'], inc=hpars['v1_inc'],
color='r', marker='s', markersize=30)
plot_di(dec=hpars['v2_dec'], inc=hpars['v2_inc'],
color='b', marker='^', markersize=30)
plot_di(dec=hpars['v3_dec'], inc=hpars['v3_inc'],
color='k', marker='o', markersize=30)
if len(PDir) > 0:
pmagplotlib.plot_circ(fignum+1, PDir, 90., 'g')
# plot the confidence ellipses or vectors as desired
if ihext: # plot the Hext ellipses
ellpars = [hpars["v1_dec"], hpars["v1_inc"], hpars["e12"], hpars["v2_dec"],
hpars["v2_inc"], hpars["e13"], hpars["v3_dec"], hpars["v3_inc"]]
v1_pts = pmagplotlib.plot_ell(fignum+1, ellpars, 'r,', 1, 1)
ellpars = [hpars["v2_dec"], hpars["v2_inc"], hpars["e23"], hpars["v3_dec"],
hpars["v3_inc"], hpars["e12"], hpars["v1_dec"], hpars["v1_inc"]]
v2_pts = pmagplotlib.plot_ell(fignum+1, ellpars, 'b,', 1, 1)
ellpars = [hpars["v3_dec"], hpars["v3_inc"], hpars["e13"], hpars["v1_dec"],
hpars["v1_inc"], hpars["e23"], hpars["v2_dec"], hpars["v2_inc"]]
v3_pts = pmagplotlib.plot_ell(fignum+1, ellpars, 'k,', 1, 1)
if len(Dir) > 0: # plot the comparison direction components
# put in dimap and plot as white symbol with axis color?
plot_di(di_block=[Dir], color='green',
marker='*', markersize=200)
if iboot: # put on the bootstrapped confidence bounds
Tmean, Vmean, Taus, BVs = pmag.s_boot(
Ss, ipar, num_bootstraps) # get eigenvectors of mean tensor
BVs_trans = np.array(BVs).transpose()
if ivec:
plot_di(dec=BVs_trans[0][0], inc=BVs_trans[1]
[0], color='r', marker='.')
plot_di(dec=BVs_trans[0][1], inc=BVs_trans[1]
[1], color='b', marker='.')
plot_di(dec=BVs_trans[0][2], inc=BVs_trans[1]
[2], color='k', marker='.')
# put in dimap and plot as white symbol with axis color?
if len(Dir) > 0: # plot the comparison direction components
plot_di(di_block=[Dir], color='green',
marker='*', markersize=200)
# do the eigenvalue cdfs
Taus = np.array(Taus).transpose()
colors = ['r', 'b', 'k']
styles = ['dotted', 'dashed', 'solid']
for t in range(3): # step through eigenvalues
# get a sorted list of this eigenvalue
ts = np.sort(Taus[t])
pmagplotlib.plot_cdf(
fignum+2, ts, "", colors[t], "") # plot the CDF
figs['tcdf'] = fignum + 2
# minimum 95% conf bound
plt.axvline(ts[int(0.025*len(ts))],
color=colors[t], linestyle=styles[t])
# max 95% conf bound
plt.axvline(ts[int(0.975*len(ts))],
color=colors[t], linestyle=styles[t])
plt.xlabel('Eigenvalues')
# compare cartesian coordinates of the selected eigenvector (vec) with Dir
if len(Dir) > 0:
V = [row[vec-1] for row in BVs]
X = pmag.dir2cart(V)
comp_X = pmag.dir2cart(Dir)
for i in range(3):
xs = np.sort(np.array([row[i] for row in X]))
pmagplotlib.plot_cdf(
fignum+i+3, xs, "", colors[i], "") # plot the CDF
figs['cdf_' + str(i)] = fignum + i + 3
# minimum 95% conf bound
plt.axvline(xs[int(0.025*len(xs))],
color=colors[vec-1], linestyle=styles[i])
# max 95% conf bound
plt.axvline(xs[int(0.975*len(xs))],
color=colors[vec-1], linestyle=styles[i])
# put on the comparison direction
plt.axvline(
comp_X[0][i], color='lightgreen', linewidth=3)
else:
bpars = pmag.sbootpars(Taus, BVs)
ellpars = [hpars["v1_dec"], hpars["v1_inc"], bpars["v1_zeta"], bpars["v1_zeta_dec"],
bpars["v1_zeta_inc"], bpars["v1_eta"], bpars["v1_eta_dec"], bpars["v1_eta_inc"]]
pmagplotlib.plot_ell(fignum+1, ellpars, 'r-,', 1, 1)
ellpars = [hpars["v2_dec"], hpars["v2_inc"], bpars["v2_zeta"], bpars["v2_zeta_dec"],
bpars["v2_zeta_inc"], bpars["v2_eta"], bpars["v2_eta_dec"], bpars["v2_eta_inc"]]
pmagplotlib.plot_ell(fignum+1, ellpars, 'b-,', 1, 1)
ellpars = [hpars["v3_dec"], hpars["v3_inc"], bpars["v3_zeta"], bpars["v3_zeta_dec"],
bpars["v3_zeta_inc"], bpars["v3_eta"], bpars["v3_eta_dec"], bpars["v3_eta_inc"]]
pmagplotlib.plot_ell(fignum+1, ellpars, 'k-,', 1, 1)
if len(Dir) > 0: # plot the comparison direction components
plot_di(di_block=[Dir], color='green',
marker='*', markersize=200)
return figs
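# -- Editor's usage sketch (not part of the original source) --
# plot_aniso expects a DataFrame whose 'aniso_s' column holds six
# colon-delimited, trace-normalized tensor elements per specimen; the
# two records below are invented for illustration.
def _plot_aniso_example():
"""Sketch: eigenvector plot plus Hext ellipses for two fake tensors."""
demo = pd.DataFrame({'aniso_s': [
'0.3500 : 0.3300 : 0.3200 : 0.0000 : 0.0000 : 0.0000',
'0.3480 : 0.3320 : 0.3200 : 0.0010 : 0.0000 : 0.0000']})
figs = plot_aniso(1, demo, ihext=True, iboot=False)
return figs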
def aarm_magic_dm2(infile, dir_path=".", input_dir_path="",
spec_file='specimens.txt', samp_file="samples.txt", data_model_num=3,
coord='s'):
"""
Converts AARM data to best-fit tensor (6 elements plus sigma)
Parameters:
infile (str):
input measurement file
dir_path (str):
output directory, default "."
input_dir_path (str):
input file directory IF different from dir_path, default ""
spec_file (str):
input/output specimen file name, default "specimens.txt"
samp_file (str):
input sample file name, default "samples.txt"
data_model_num (int):
MagIC data model [2, 3], default 3
coord (str):
coordinate system specimen/geographic/tilt-corrected,
['s', 'g', 't'], default 's'
Returns:
Tuple
True or False indicating if conversion was successful, output file name written
Info:
Input is a series of baseline, ARM pairs.
The baseline should be the AF demagnetized state (3 axis demag is
preferable) for the following ARM acquisition. The order of the
measurements is:
positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions)
positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions)
positions 1-15 (for 15 positions)
"""
data_model_num = int(float(data_model_num))
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# get full file names
meas_file = pmag.resolve_file_name(infile, input_dir_path)
spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
output_spec_file = os.path.join(dir_path, os.path.split(spec_file)[1])
# get coordinate system
coords = {'s': '-1', 'g': '0', 't': '100'}
if coord not in coords.values():
coord = coords.get(str(coord), '-1')
if data_model_num == 3:
meas_data = []
meas_data3, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(file_type, "This is not a valid MagIC 3.0. measurements file ")
return False, "{} is not a valid MagIC 3.0. measurements file ".format(meas_file)
# convert meas_data to 2.5
for rec in meas_data3:
meas_map = map_magic.meas_magic3_2_magic2_map
meas_data.append(map_magic.mapping(rec, meas_map))
spec_data = []
spec_data3, file_type = pmag.magic_read(spec_file)
for rec in spec_data3:
spec_map = map_magic.spec_magic3_2_magic2_map
spec_data.append(map_magic.mapping(rec, spec_map))
else: # data model 2
rmag_anis = "rmag_anisotropy.txt"
rmag_res = "rmag_results.txt"
rmag_anis = pmag.resolve_file_name(rmag_anis, input_dir_path)
rmag_res = pmag.resolve_file_name(rmag_res, input_dir_path)
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type, "This is not a valid MagIC 2.5 magic_measurements file ")
return False, "{} is not a valid MagIC 2.5. measurements file ".format(meas_file)
# fish out relevant data
meas_data = pmag.get_dictitem(
meas_data, 'magic_method_codes', 'LP-AN-ARM', 'has')
if coord != '-1': # need to read in sample data
if data_model_num == 3:
samp_data3, file_type = pmag.magic_read(samp_file)
if file_type != 'samples':
print(file_type, "This is not a valid samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
else:
# translate to 2
samp_data = []
samp_map = map_magic.samp_magic3_2_magic2_map
for rec in samp_data3:
samp_data.append(map_magic.mapping(rec, samp_map))
else:
samp_data, file_type = pmag.magic_read(samp_file)
if file_type != 'er_samples':
print(file_type, "This is not a valid er_samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
#
# sort the specimen names
#
ssort = []
for rec in meas_data:
spec = rec["er_specimen_name"]
if spec not in ssort:
ssort.append(spec)
if len(ssort) > 1:
sids = sorted(ssort)
else:
sids = ssort
#
# work on each specimen
#
specimen = 0
RmagSpecRecs, RmagResRecs = [], []
SpecRecs, SpecRecs3 = [], []
while specimen < len(sids):
s = sids[specimen]
RmagSpecRec = {}
RmagResRec = {}
# get old specrec here if applicable
if data_model_num == 3:
if spec_data:
try:
RmagResRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
RmagSpecRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
except IndexError:
pass
data = []
method_codes = []
#
# find the data from the meas_data file for this sample
#
data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
#
# find out the number of measurements (9, 12 or 15)
#
npos = int(len(data) / 2)
if npos == 9:
#
# get dec, inc, int and convert to x,y,z
#
# B matrix made from design matrix for positions
B, H, tmpH = pmag.designAARM(npos)
X = []
for rec in data:
Dir = []
Dir.append(float(rec["measurement_dec"]))
Dir.append(float(rec["measurement_inc"]))
Dir.append(float(rec["measurement_magn_moment"]))
X.append(pmag.dir2cart(Dir))
#
# subtract baseline and put in a work array
#
work = np.zeros((npos, 3), 'f')
for i in range(npos):
for j in range(3):
work[i][j] = X[2 * i + 1][j] - X[2 * i][j]
#
# calculate tensor elements
# first put ARM components in w vector
#
w = np.zeros((npos * 3), 'f')
index = 0
for i in range(npos):
for j in range(3):
w[index] = work[i][j]
index += 1
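# (Editor's note) The loop below applies the least-squares operator
# B = inv(A.T A) . A.T from pmag.designAARM to the 3*npos observations
# in w, recovering the six tensor elements s that best satisfy w = A . s.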
s = np.zeros((6), 'f') # initialize the s matrix
for i in range(6):
for j in range(len(w)):
s[i] += B[i][j] * w[j]
trace = s[0] + s[1] + s[2] # normalize by the trace
for i in range(6):
s[i] = s[i] / trace
a = pmag.s2a(s)
# ------------------------------------------------------------
# Calculating dels is different than in the Kappabridge
# routine. Use trace normalized tensor (a) and the applied
# unit field directions (tmpH) to generate model X,Y,Z
# components. Then compare these with the measured values.
# ------------------------------------------------------------
S = 0.
comp = np.zeros((npos * 3), 'f')
for i in range(npos):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(npos * 3):
d = (w[i] / trace) - comp[i] # del values
S += d * d
nf = float(npos * 3 - 6) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0
RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
RmagSpecRec["er_location_name"] = data[0].get(
"er_location_name", "")
RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
if not "er_sample_name" in RmagSpecRec:
RmagSpecRec["er_sample_name"] = data[0].get(
"er_sample_name", "")
RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagSpecRec["er_citation_names"] = "This study"
RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":AARM"
RmagResRec["er_location_names"] = data[0].get(
"er_location_name", "")
RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
if not "er_sample_name" not in RmagResRec:
RmagResRec["er_sample_names"] = data[0].get(
"er_sample_name", "")
RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagResRec["er_citation_names"] = "This study"
if "magic_instrument_codes" in list(data[0].keys()):
RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
else:
RmagSpecRec["magic_instrument_codes"] = ""
RmagSpecRec["anisotropy_type"] = "AARM"
RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to AARM"
if coord != '-1': # need to rotate s
# set orientation priorities
SO_methods = []
for rec in samp_data:
if "magic_method_codes" not in rec:
rec['magic_method_codes'] = 'SO-NO'
if "magic_method_codes" in rec:
methlist = rec["magic_method_codes"]
for meth in methlist.split(":"):
if "SO" in meth and "SO-POM" not in meth.strip():
if meth.strip() not in SO_methods:
SO_methods.append(meth.strip())
SO_priorities = pmag.set_priorities(SO_methods, 0)
# continue here
redo, p = 1, 0
if len(SO_methods) <= 1:
az_type = SO_methods[0]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
while redo == 1:
if p >= len(SO_priorities):
print("no orientation data for ", s)
orient["sample_azimuth"] = ""
orient["sample_dip"] = ""
method_codes.append("SO-NO")
redo = 0
else:
az_type = SO_methods[SO_methods.index(
SO_priorities[p])]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
p += 1
az, pl = orient['sample_azimuth'], orient['sample_dip']
s = pmag.dosgeo(s, az, pl) # rotate to geographic coordinates
if coord == '100':
sample_bed_dir, sample_bed_dip = orient['sample_bed_dip_direction'], orient['sample_bed_dip']
# rotate into tilt-corrected (stratigraphic) coordinates
s = pmag.dostilt(s, sample_bed_dir, sample_bed_dip)
hpars = pmag.dohext(nf, sigma, s)
#
# prepare for output
#
RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
RmagSpecRec["anisotropy_unit"] = "Am^2"
RmagSpecRec["anisotropy_n"] = '%i' % (npos)
RmagSpecRec["anisotropy_tilt_correction"] = coord
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
RmagResRec["result_description"] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
if hpars["e12"] > hpars["e13"]:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
if hpars["e23"] > hpars['e12']:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["tilt_correction"] = '-1'
RmagResRec["anisotropy_type"] = 'AARM'
RmagResRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagSpecRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagResRec["magic_software_packages"] = pmag.get_version()
RmagSpecRec["magic_software_packages"] = pmag.get_version()
specimen += 1
RmagSpecRecs.append(RmagSpecRec)
RmagResRecs.append(RmagResRec)
if data_model_num == 3:
SpecRec = RmagResRec.copy()
SpecRec.update(RmagSpecRec)
SpecRecs.append(SpecRec)
else:
print('skipping specimen ', s,
' only 9 positions supported', '; this has ', npos)
specimen += 1
if data_model_num == 3:
# translate records
for rec in SpecRecs:
rec3 = map_magic.convert_aniso('magic3', rec)
SpecRecs3.append(rec3)
# write output to 3.0 specimens file
res, ofile = pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
print("specimen data stored in {}".format(output_spec_file))
if not res:
return False, "Something went wrong and no records were created. Are you sure your measurement file has the method code 'LP-AN-ARM'?"
return True, output_spec_file
else:
if rmag_anis == "":
rmag_anis = "rmag_anisotropy.txt"
pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
print("specimen tensor elements stored in ", rmag_anis)
if rmag_res == "":
rmag_res = "rmag_results.txt"
pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
print("specimen statistics and eigenparameters stored in ", rmag_res)
return True, rmag_anis
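# -- Editor's usage sketch (not part of the original source) --
# Legacy entry point: reduce LP-AN-ARM measurement records to best-fit
# tensors; 'my_project' and the file names are hypothetical.
def _aarm_magic_dm2_example():
"""Sketch: data-model-3 run writing an updated specimens.txt."""
ok, outfile = aarm_magic_dm2('measurements.txt', dir_path='my_project',
spec_file='specimens.txt', samp_file='samples.txt',
data_model_num=3, coord='s')
return ok, outfile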
def get_matrix(n_pos=6):
"""
returns design matrix for anisotropy experiments
Parameters:
n_pos: anisotropy experiment positions (default is 6, can be 6, 9 or 15)
Returns:
matrix for n_pos of 6,9, or 15
Matrix definitions:
A design matrix
B np.dot(inv(np.dot(A.transpose(),A)),A.transpose())
tmpH is used for sigma calculation (9,15 measurements only)
Anisotropy tensor:
|Mx| |s1 s4 s6| |Bx|
|My| = |s4 s2 s5| . |By|
|Mz| |s6 s5 s3| |Bz|
A matrix (measurement matrix):
Each measurement yields three lines in the "A" matrix
|Mi | |Bx 0 0 By 0 Bz| |s1|
|Mi+1| = |0 By 0 Bx Bz 0 | . |s2|
|Mi+2| |0 0 Bz 0 By Bx| |s3|
|s4|
|s5|
|s6|
"""
if n_pos not in [6,9,15]:
print ('n_pos ',n_pos,' not available')
return False
Matrices = {}
A = np.zeros((n_pos * 3, 6), 'f')
if n_pos == 6:
positions = [[0., 0., 1.], [90., 0., 1.], [0., 90., 1.],
[180., 0., 1.], [270., 0., 1.], [0., -90., 1.]]
if n_pos == 15:
positions = [[315., 0., 1.], [225., 0., 1.], [180., 0., 1.], [135., 0., 1.], [45., 0., 1.],
[90., -45., 1.], [270., -45., 1.], [270., 0., 1.], [270., 45., 1.], [90., 45., 1.],
[180., 45., 1.], [180., -45., 1.], [0., -90., 1.], [0, -45., 1.], [0, 45., 1.]]
if n_pos == 9:
positions = [[315., 0., 1.], [225., 0., 1.], [180., 0., 1.],
[90., -45., 1.], [270., -45., 1.], [270., 0., 1.],
[180., 45., 1.], [180., -45., 1.], [0., -90., 1.]]
tmpH = np.zeros((n_pos, 3), 'f') # define tmpH
for i in range(len(positions)):
CART = pmag.dir2cart(positions[i])
a = CART[0]
b = CART[1]
c = CART[2]
A[3 * i][0] = a
A[3 * i][3] = b
A[3 * i][5] = c
A[3 * i + 1][1] = b
A[3 * i + 1][3] = a
A[3 * i + 1][4] = c
A[3 * i + 2][2] = c
A[3 * i + 2][4] = b
A[3 * i + 2][5] = a
tmpH[i][0] = CART[0]
tmpH[i][1] = CART[1]
tmpH[i][2] = CART[2]
B = np.dot(inv(np.dot(A.transpose(), A)), A.transpose())
Matrices['A'] = A
Matrices['B'] = B
Matrices['tmpH'] = tmpH
return Matrices
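# -- Editor's self-check sketch (not part of the original source) --
# Because B = inv(A.T A) . A.T is the least-squares pseudo-inverse of the
# design matrix A, the product B . A should recover the 6x6 identity.
def _get_matrix_check(n_pos=9):
"""Sketch: verify the design/inversion matrix pair for n_pos positions."""
M = get_matrix(n_pos)
assert np.allclose(np.dot(M['B'], M['A']), np.eye(6), atol=1e-4)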
def aarm_magic(meas_file, dir_path=".", input_dir_path="",
input_spec_file='specimens.txt', output_spec_file='specimens.txt'):
"""
Converts AARM data to best-fit tensor (6 elements plus sigma)
Parameters:
meas_file (str):
input measurement file
dir_path (str):
output directory, default "."
input_dir_path (str):
input file directory IF different from dir_path, default ""
input_spec_file (str):
input specimen file name, default "specimens.txt"
output_spec_file (str):
output specimen file name, default "specimens.txt"
Returns:
True or False indicating if conversion was successful and output file name written
Info:
Input is a series of baseline, ARM pairs.
The baseline should be the AF demagnetized state (3 axis demag is
preferable) for the following ARM acquisition. The order of the
measurements is:
for 6 positions (AF demag before each step):
1) labfield parallel to X
2) labfield parallel to Y
3) labfield parallel to Z
4) labfield anti-parallel to X
5) labfield anti-parallel to Y
6) labfield anti-parallel to Z
for 9 positions (AF demag before each step):
positions 1,2,3,6,7,8,11,12,13 (from Figure D.2 in Essentials, earthref.org/MagIC/books/Tauxe/Essentials, Appendix D)
for 15 positions (AF demag before each step):
positions 1-15 (for 15 positions)
"""
# fix up file names
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
input_spec_file = pmag.resolve_file_name(input_spec_file, input_dir_path)
output_spec_file = pmag.resolve_file_name(output_spec_file, dir_path)
aniso_spec_columns=['aniso_alt','aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_ftest_quality','aniso_p',
'aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_tilt_correction','aniso_type',
'aniso_v1','aniso_v2','aniso_v3','citations','description','method_codes','sample','software_packages','specimen']
# read in data
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(
"-E- {} is not a valid measurements file, {}".format(meas_file, file_type))
print('for data model 2.5 please use aarm_magic_dm2')
return False
old_specs=False
old_spec_recs, file_type = pmag.magic_read(input_spec_file)
if file_type != 'specimens':
print("-W- {} is not a valid specimens file ".format(input_spec_file))
old_spec_df=pd.DataFrame(columns=aniso_spec_columns)
print ('creating new specimens.txt file')
else:
old_specs=True
old_spec_df=pd.DataFrame.from_dict(old_spec_recs)
# check format of output specimens table
for col in aniso_spec_columns:
if col not in old_spec_df.columns:old_spec_df[col]=""
df=pd.DataFrame.from_dict(meas_data)
df=df[df['method_codes'].str.contains('LP-AN-ARM')]
if not len(df):
print("-E- No measurement records found with code LP-AN-ARM")
return False, "No measurement records found with code LP-AN-ARM"
#
#
# get sorted list of unique specimen names
sids=np.sort(df['specimen'].unique())
#
# work on each specimen
#
for spec in sids:
meas_df=df[df['specimen']==spec]
# first determine n_pos
aarm_df=meas_df[meas_df['method_codes'].str.contains('LT-AF-I')]
n_pos=len(aarm_df)
if n_pos not in [6, 9, 15]:
print('aarm_magic is only available for n_pos = 6, 9, or 15')
return
else:
aarm_df=meas_df[meas_df['method_codes'].str.contains('LP-AN-ARM')]
aarm_df['treat_step_num']=range(len(aarm_df))
# subtract baseline
M=[]
aarm_dirs=aarm_df[['dir_dec','dir_inc','magn_moment']].astype('float').values
M_with_base=pmag.dir2cart(aarm_dirs)
for i in range(1,len(aarm_df),2):
M.append(M_with_base[i]-M_with_base[i-1]) # subtract baseline
K = np.zeros(3 * n_pos, 'f')
for i in range(n_pos):
K[i * 3] = M[i][0]
K[i * 3 + 1] = M[i][1]
K[i * 3 + 2] = M[i][2]
if True:
aniso_parameters=calculate_aniso_parameters(K,n_pos=n_pos)
#alt_check_df=meas_df[meas_df['method_codes'].str.contains('LT-PTRM-I')]
#if len(alt_check_df)>0:
# anis_alt_phi=alt_check_df['treat_dc_field_phi'].astype('float').values[-1]
# anis_alt_theta=alt_check_df['treat_dc_field_theta'].astype('float').values[-1]
# base1=atrm_df[atrm_df['treat_dc_field_phi'].astype('float')==anis_alt_phi]
# base1=base1[base1['treat_dc_field_theta']==anis_alt_theta]
# if len(base1)>0:
# base1_M=base1[['magn_moment']].astype('float').values[0]
# base2_M=alt_check_df[['magn_moment']].astype('float').values[0]
# aniso_alt=100*np.abs(base1_M-base2_M)/np.mean([base1_M,base2_M]) # anisotropy alteration percent
new_spec_df=pd.DataFrame.from_dict([aniso_parameters])
new_spec_df['specimen']=spec
new_spec_df['citations']='This study'
new_spec_df['method_codes']='LP-AN-ARM'
new_spec_df['aniso_type']='AARM'
new_spec_df['software_packages']=pmag.get_version()
if old_specs and 'aniso_s' in old_spec_df.columns and old_spec_df.loc[(old_spec_df['specimen']==spec)&
(old_spec_df['aniso_type']=='AARM')].empty==False: # there is a previous record of AARM for this specimen
print ('replacing existing AARM data for ',spec)
for col in ['aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_p','aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_type','aniso_v1','aniso_v2','aniso_v3','aniso_ftest_quality','aniso_tilt_correction','description','software_packages','citations']:
old_spec_df.loc[(old_spec_df['specimen']==spec)&(old_spec_df['aniso_type']=='AARM')&
(old_spec_df[col].notnull()),col]=new_spec_df[col].values[0] # replace existing AARM data for this specimen
elif old_specs and 'aniso_s' in old_spec_df.columns and old_spec_df.loc[old_spec_df['specimen']==spec].empty==False: # there is no previous record of AARM for this specimen
print ('adding AARM data for ',spec)
for col in ['aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_p','aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_type','aniso_v1','aniso_v2','aniso_v3','aniso_ftest_quality','aniso_tilt_correction','description','software_packages','citations']:
old_spec_df.loc[old_spec_df['specimen']==spec,col]=new_spec_df[col].values[0] # add AARM data for this specimen
else: # no record of this specimen, just append to the end of the existing data frame
print ('creating new record for specimen ',spec)
old_spec_df=pd.concat([old_spec_df,new_spec_df]) # add in new record
else:
print ('something wrong with measurements for: ',spec)
old_spec_df.fillna("",inplace=True)
spec_dicts=old_spec_df.to_dict('records')
pmag.magic_write(output_spec_file,spec_dicts,'specimens')
[docs]
def atrm_magic_dm2(meas_file, dir_path=".", input_dir_path="",
input_spec_file='specimens.txt', output_spec_file='specimens.txt',
data_model_num=2):
"""
Converts ATRM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
meas_file : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
input_spec_file : str
input specimen file name, default "specimens.txt"
output_spec_file : str
output specimen file name, default "specimens.txt"
data_model_num : number
MagIC data model [2, 3], default 2
Returns
---------
Tuple : (True or False indicating if conversion was successful, output file name written)
"""
# fix up file names
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
rmag_anis = os.path.join(dir_path, 'rmag_anisotropy.txt')
rmag_res = os.path.join(dir_path, 'rmag_results.txt')
input_spec_file = pmag.resolve_file_name(input_spec_file, input_dir_path)
output_spec_file = pmag.resolve_file_name(output_spec_file, dir_path)
# read in data
if data_model_num == 3:
meas_data = []
meas_data3, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(
"-E- {} is not a valid measurements file, {}".format(meas_file, file_type))
return False
# convert meas_data to 2.5
for rec in meas_data3:
meas_map = map_magic.meas_magic3_2_magic2_map
meas_data.append(map_magic.mapping(rec, meas_map))
old_spec_recs, file_type = pmag.magic_read(input_spec_file)
if file_type != 'specimens':
print("-W- {} is not a valid specimens file ".format(input_spec_file))
old_spec_recs = []
spec_recs = []
for rec in old_spec_recs:
spec_map = map_magic.spec_magic3_2_magic2_map
spec_recs.append(map_magic.mapping(rec, spec_map))
else:
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print("-E- {} is is not a valid magic_measurements file ".format(file_type))
return False, "{} is not a valid magic_measurements file, {}".format(meas_file, file_type)
meas_data = pmag.get_dictitem(
meas_data, 'magic_method_codes', 'LP-AN-TRM', 'has')
if not len(meas_data):
print("-E- No measurement records found with code LP-AN-TRM")
return False, "No measurement records found with code LP-AN-TRM"
#
#
# get sorted list of unique specimen names
ssort = []
for rec in meas_data:
spec = rec["er_specimen_name"]
if spec not in ssort:
ssort.append(spec)
sids = sorted(ssort)
#
#
# work on each specimen
#
specimen, npos = 0, 6
RmagSpecRecs, RmagResRecs = [], []
SpecRecs, SpecRecs3 = [], []
while specimen < len(sids):
nmeas = 0
s = sids[specimen]
RmagSpecRec = {}
RmagResRec = {}
# get old specrec here if applicable
if data_model_num == 3:
if spec_recs:
try:
RmagResRec = pmag.get_dictitem(
spec_recs, 'er_specimen_name', s, 'T')[0]
RmagSpecRec = pmag.get_dictitem(
spec_recs, 'er_specimen_name', s, 'T')[0]
except IndexError:
pass
BX, X = [], []
method_codes = []
Spec0 = ""
#
# find the data from the meas_data file for this sample
# and get dec, inc, int and convert to x,y,z
#
# fish out data for this specimen name
data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
if len(data) > 5:
RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
RmagSpecRec["er_location_name"] = data[0].get(
"er_location_name", "")
RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
if not "er_sample_name" in RmagSpecRec:
RmagSpecRec["er_sample_name"] = data[0].get(
"er_sample_name", "")
RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
RmagSpecRec["er_citation_names"] = "This study"
RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":ATRM"
RmagResRec["er_location_names"] = data[0].get(
"er_location_names", "")
RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
if data_model_num == 2:
RmagResRec["er_sample_names"] = data[0].get(
"er_sample_name", "")
RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
RmagResRec["er_citation_names"] = "This study"
RmagSpecRec["anisotropy_type"] = "ATRM"
if "magic_instrument_codes" in list(data[0].keys()):
RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
else:
RmagSpecRec["magic_instrument_codes"] = ""
RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to ATRM"
for rec in data:
meths = rec['magic_method_codes'].strip().split(':')
Dir = []
Dir.append(float(rec["measurement_dec"]))
Dir.append(float(rec["measurement_inc"]))
Dir.append(float(rec["measurement_magn_moment"]))
if "LT-T-Z" in meths:
BX.append(pmag.dir2cart(Dir)) # append baseline steps
elif "LT-T-I" in meths:
X.append(pmag.dir2cart(Dir))
nmeas += 1
#
if len(BX) == 1:
for i in range(len(X) - 1):
BX.append(BX[0]) # assume first 0 field step as baseline
elif len(BX) == 0: # assume baseline is zero
for i in range(len(X)):
BX.append([0., 0., 0.]) # assume baseline of 0
elif len(BX) != len(X): # if BX isn't just one measurement or one in between every infield step, just assume it is zero
print('something odd about the baselines - just assuming zero')
for i in range(len(X)):
BX.append([0., 0., 0.]) # assume baseline of 0
if nmeas < 6: # must have at least 6 measurements
print('skipping specimen ', s, ' too few measurements')
specimen += 1
else:
# B matrix made from design matrix for positions
B, H, tmpH = pmag.designATRM(npos)
#
# subtract optional baseline and put in a work array
#
work = np.zeros((nmeas, 3), 'f')
for i in range(nmeas):
for j in range(3):
# subtract baseline, if available
work[i][j] = X[i][j] - BX[i][j]
#
# calculate tensor elements
# first put ARM components in w vector
#
w = np.zeros((npos * 3), 'f')
index = 0
for i in range(npos):
for j in range(3):
w[index] = work[i][j]
index += 1
s = np.zeros((6), 'f') # initialize the s matrix
for i in range(6):
for j in range(len(w)):
s[i] += B[i][j] * w[j]
trace = s[0] + s[1] + s[2] # normalize by the trace
for i in range(6):
s[i] = s[i] / trace
a = pmag.s2a(s)
# ------------------------------------------------------------
# Calculating dels is different than in the Kappabridge
# routine. Use trace normalized tensor (a) and the applied
# unit field directions (tmpH) to generate model X,Y,Z
# components. Then compare these with the measured values.
# ------------------------------------------------------------
S = 0.
comp = np.zeros((npos * 3), 'f')
for i in range(npos):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(npos * 3):
d = (w[i] / trace) - comp[i] # del values
S += d * d
nf = float(npos * 3. - 6.) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0
hpars = pmag.dohext(nf, sigma, s)
#
# prepare for output
#
RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
RmagSpecRec["anisotropy_unit"] = "Am^2"
RmagSpecRec["anisotropy_n"] = '%i' % (npos)
RmagSpecRec["anisotropy_tilt_correction"] = '-1'
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
RmagResRec["result_description"] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
if hpars["e12"] > hpars["e13"]:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
if hpars["e23"] > hpars['e12']:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["tilt_correction"] = '-1'
RmagResRec["anisotropy_type"] = 'ATRM'
RmagResRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
RmagSpecRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
RmagResRec["magic_software_packages"] = pmag.get_version()
RmagSpecRec["magic_software_packages"] = pmag.get_version()
RmagSpecRecs.append(RmagSpecRec)
RmagResRecs.append(RmagResRec)
specimen += 1
if data_model_num == 3:
SpecRec = RmagResRec.copy()
SpecRec.update(RmagSpecRec)
SpecRecs.append(SpecRec)
# finished iterating through specimens,
# now we need to write out the data to files
if data_model_num == 3:
# translate records
for rec in SpecRecs:
rec3 = map_magic.convert_aniso('magic3', rec)
SpecRecs3.append(rec3)
# write output to 3.0 specimens file
pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
print("specimen data stored in {}".format(output_spec_file))
return True, output_spec_file
else:
# write output to 2.5 rmag_ files
pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
print("specimen tensor elements stored in ", rmag_anis)
pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
print("specimen statistics and eigenparameters stored in ", rmag_res)
return True, rmag_anis
[docs]
def calculate_aniso_parameters(K,n_pos=6):
"""
Calculate anisotropy parameters (best-fit tensor, eigenparameters, and Hext statistics) from measurements in n_pos field positions.
"""
aniso_parameters = {}
Matrices=get_matrix(n_pos)
tmpH=Matrices['tmpH']
B = Matrices['B']
S_bs = np.dot(B, K)
# normalize by trace
trace = S_bs[0] + S_bs[1] + S_bs[2]
S_bs = S_bs / trace
s1, s2, s3, s4, s5, s6 = S_bs[0], S_bs[1], S_bs[2], S_bs[3], S_bs[4], S_bs[5]
s_matrix = [[s1, s4, s6], [s4, s2, s5], [s6, s5, s3]]
s_vec=[s1,s2,s3,s4,s5,s6]
# calculate eigenvalues and eigenvectors
t, evectors = eig(s_matrix)
# sort vectors
t = list(t)
t1 = max(t)
ix_1 = t.index(t1)
t3 = min(t)
ix_3 = t.index(t3)
for tt in range(3):
if t[tt] != t1 and t[tt] != t3:
t2 = t[tt]
ix_2 = t.index(t2)
v1 = [evectors[0][ix_1], evectors[1][ix_1], evectors[2][ix_1]]
v2 = [evectors[0][ix_2], evectors[1][ix_2], evectors[2][ix_2]]
v3 = [evectors[0][ix_3], evectors[1][ix_3], evectors[2][ix_3]]
DIR_v1 = pmag.cart2dir(v1)
DIR_v2 = pmag.cart2dir(v2)
DIR_v3 = pmag.cart2dir(v3)
# package up aniso_s, the eigenvalues (tau), and eigenvector directions (v)
aniso_parameters['aniso_s']=s1.astype('str')+':'+ s2.astype('str')+':'+s3.astype('str')+':'+\
s4.astype('str')+':'+ s5.astype('str')+':'+ s6.astype('str')
aniso_parameters['aniso_v1']="%f" % t1+":"+"%.1f" % DIR_v1[0]+":"+"%.1f" % DIR_v1[1]
aniso_parameters['aniso_v2']="%f" % t2+":"+"%.1f" % DIR_v2[0]+":"+"%.1f" % DIR_v2[1]
aniso_parameters['aniso_v3']="%f" % t3+":"+"%.1f" % DIR_v3[0]+":"+"%.1f" % DIR_v3[1]
aniso_parameters['aniso_p'] = "%f" % (t1 / t3)
if len(K) / 3 in (6, 9, 15):
n_pos = len(K) // 3
tmpH = Matrices['tmpH']
a = s_matrix
S = 0.
comp = np.zeros((int(n_pos) * 3), 'f')
for i in range(int(n_pos)):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(int(n_pos * 3)):
d = K[i] / trace - comp[i] # del values
S += d * d
nf = float(n_pos * 3 - 6) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0 # perfect fit; keep sigma defined for dohext below
hpars = pmag.dohext(nf, sigma, [s1, s2, s3, s4, s5, s6])
aniso_parameters['aniso_tilt_correction']=-1
aniso_parameters['aniso_s_sigma'] = "%f" % sigma
aniso_parameters['aniso_ftest'] = "%f" % hpars["F"]
aniso_parameters['aniso_ftest12'] = "%f" % hpars["F12"]
aniso_parameters['aniso_ftest23'] = "%f" % hpars["F23"]
aniso_parameters['description'] = "Critical F: %s" % (hpars['F_crit'])
aniso_parameters['aniso_s_n_measurements'] = '%i' % (n_pos)
if float(hpars["F"]) > float(hpars['F_crit']): # significant anisotropy
aniso_parameters['aniso_ftest_quality'] = 'g'
else:
aniso_parameters['aniso_ftest_quality'] = 'b'
return aniso_parameters
[docs]
def atrm_magic(meas_file, dir_path=".", input_dir_path="",
input_spec_file='specimens.txt', output_spec_file='specimens.txt'):
"""
Converts ATRM data to best-fit tensor (6 elements plus sigma)
Parameters:
meas_file (str):
input measurement file
dir_path (str):
output directory, default "."
input_dir_path (str):
input file directory IF different from dir_path, default ""
input_spec_file (str):
input specimen file name, default "specimens.txt"
output_spec_file (str):
output specimen file name, default "specimens.txt"
Returns:
Tuple : (True or False indicating if conversion was successful, output file name written)
Info:
Input is a series of ATRM measurements with an optional alteration check
The order of the measurements is:
positions:
- labfield parallel to X
- labfield parallel to Y
- labfield parallel to Z
- labfield anti-parallel to X
- labfield anti-parallel to Y
- labfield anti-parallel to Z
- optional alteration check: labfield parallel to X (repeats the first position)
"""
# fix up file names
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
input_spec_file = pmag.resolve_file_name(input_spec_file, input_dir_path)
output_spec_file = pmag.resolve_file_name(output_spec_file, dir_path)
aniso_spec_columns=['aniso_alt','aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_ftest_quality','aniso_p',
'aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_tilt_correction','aniso_type',
'aniso_v1','aniso_v2','aniso_v3','citations','description','method_codes','sample','software_packages','specimen']
# read in data
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(
"-E- {} is not a valid measurements file, {}".format(meas_file, file_type))
print ('for data model 2.0 please use atrm_magic_dm2')
return False
old_specs=False
old_spec_recs, file_type = pmag.magic_read(input_spec_file)
if file_type != 'specimens':
print("-W- {} is not a valid specimens file ".format(input_spec_file))
old_spec_df=pd.DataFrame(columns=aniso_spec_columns)
print ('creating new specimens.txt file')
else:
old_specs=True
old_spec_df=pd.DataFrame.from_dict(old_spec_recs)
# check format of output specimens table
for col in aniso_spec_columns:
if col not in old_spec_df.columns: old_spec_df[col] = ""
df=pd.DataFrame.from_dict(meas_data)
df=df[df['method_codes'].str.contains('LP-AN-TRM')]
if not len(df):
print("-E- No measurement records found with code LP-AN-TRM")
return False, "No measurement records found with code LP-AN-TRM"
#
#
# get sorted list of unique specimen names
sids=np.sort(df['specimen'].unique())
#
# reorder measurements
n_pos=6
# work on each specimen
#
for spec in sids:
meas_df=df[df['specimen']==spec]
atrm_df=meas_df[meas_df['method_codes'].str.contains('LT-T-I')].copy() # copy to avoid pandas SettingWithCopyWarning
if len(atrm_df) > 5:
atrm_df['original_order']=range(len(atrm_df))
atrm_df['order']=np.nan
atrm_df['treat_dc_field_phi']=atrm_df['treat_dc_field_phi'].astype('float')
atrm_df['treat_dc_field_theta']=atrm_df['treat_dc_field_theta'].astype('float')
atrm_df.loc[(atrm_df['treat_dc_field_phi']==0) & (atrm_df['treat_dc_field_theta']==0),'order']=0
atrm_df.loc[(atrm_df['treat_dc_field_phi']==90) & (atrm_df['treat_dc_field_theta']==0),'order']=1
atrm_df.loc[(atrm_df['treat_dc_field_phi']==0) & (atrm_df['treat_dc_field_theta']==90),'order']=2
atrm_df.loc[(atrm_df['treat_dc_field_phi']==180) & (atrm_df['treat_dc_field_theta']==0),'order']=3
atrm_df.loc[(atrm_df['treat_dc_field_phi']==270) & (atrm_df['treat_dc_field_theta']==0),'order']=4
atrm_df.loc[(atrm_df['treat_dc_field_phi']==0) & (atrm_df['treat_dc_field_theta']==-90),'order']=5
atrm_df.sort_values(by=['order'],inplace=True)
if np.array_equal(atrm_df['order'].values[0:6],np.arange(6)):
atrm_dirs=atrm_df[['dir_dec','dir_inc','magn_moment']].astype('float').values
M=pmag.dir2cart(atrm_dirs)
K = np.zeros(3 * n_pos, 'f')
for i in range(n_pos):
K[i * 3] = M[i][0]
K[i * 3 + 1] = M[i][1]
K[i * 3 + 2] = M[i][2]
aniso_parameters=calculate_aniso_parameters(K,n_pos=6)
aniso_alt=0
alt_check_df=meas_df[meas_df['method_codes'].str.contains('LT-PTRM-I')]
if len(alt_check_df)>0:
anis_alt_phi=alt_check_df['treat_dc_field_phi'].astype('float').values[-1]
anis_alt_theta=alt_check_df['treat_dc_field_theta'].astype('float').values[-1]
base1=atrm_df[atrm_df['treat_dc_field_phi'].astype('float')==anis_alt_phi]
base1=base1[base1['treat_dc_field_theta']==anis_alt_theta]
if len(base1)>0:
base1_M=base1[['magn_moment']].astype('float').values[0]
base2_M=alt_check_df[['magn_moment']].astype('float').values[0]
aniso_alt=100*np.abs(base1_M-base2_M)/np.mean([base1_M,base2_M]) # anisotropy alteration percent
new_spec_df=pd.DataFrame.from_dict([aniso_parameters])
new_spec_df['specimen']=spec
new_spec_df['citations']='This study'
new_spec_df['method_codes']='LP-AN-TRM'
new_spec_df['aniso_alt']='%5.2f'%(aniso_alt)
new_spec_df['software_packages']=pmag.get_version()
new_spec_df['aniso_type']='ATRM'
if old_specs and 'aniso_s' in old_spec_df.columns and old_spec_df.loc[(old_spec_df['specimen']==spec)&
(old_spec_df['aniso_type']=='ATRM')].empty==False: # there is a previous record of ATRM for this specimen
print ('replacing existing ATRM data for ',spec)
for col in ['aniso_alt','aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_p','aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_type','aniso_v1','aniso_v2','aniso_v3','aniso_ftest_quality','aniso_tilt_correction','description','method_codes','software_packages','citations']:
old_spec_df.loc[(old_spec_df['specimen']==spec)&(old_spec_df['aniso_type']=='ATRM')&
(old_spec_df[col].notnull()),col]=new_spec_df[col].values[0] # replace existing ATRM data for this specimen
elif old_specs and 'aniso_s' in old_spec_df.columns and old_spec_df.loc[old_spec_df['specimen']==spec].empty==False: # there is no previous record of ATRM for this specimen
print ('adding ATRM data for ',spec)
for col in ['aniso_alt','aniso_ftest','aniso_ftest12','aniso_ftest23','aniso_p','aniso_s','aniso_s_n_measurements','aniso_s_sigma','aniso_type','aniso_v1','aniso_v2','aniso_v3','aniso_ftest_quality','aniso_tilt_correction','description','method_codes','software_packages','citations']:
old_spec_df.loc[old_spec_df['specimen']==spec,col]=new_spec_df[col].values[0] # add ATRM data for this specimen
else: # no record of this specimen, just append to the end of the existing data frame
print ('creating new record for specimen ',spec)
old_spec_df=pd.concat([old_spec_df,new_spec_df]) # add in new record
else:
print ('something wrong with measurements for: ',spec)
old_spec_df.fillna("",inplace=True)
spec_dicts=old_spec_df.to_dict('records')
pmag.magic_write(output_spec_file,spec_dicts,'specimens')
[docs]
def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s', dir_path = ".", input_dir_path="", angle=0,
n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
samp_file='samples.txt', contribution=None,fignum=1, image_records=False):
"""
zeq_magic makes Zijderveld and equal area plots for MagIC formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
dir_path : str
output directory for plots, default "."
input_dir_path : str
input directory, if different from dir_path, default ""
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
image_records : bool, default False
generate and return a record for each image in a list of dicts
which can be ingested by pmag.magic_write
Returns
---------
if image_records == False:
Tuple : (True or False indicating if conversion was successful, output file name written)
if image_records == True:
Tuple : (True or False indicating if conversion was successful, output file name written, list of image recs)
"""
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock, coord='s'):
interpretations = False
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED, False
if 'method_codes' not in spec_container.df.columns:
return ZED, False
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
if 'dir_tilt_correction' in prior_spec_data.columns:
cond = prior_spec_data['dir_tilt_correction'] == coord
cond2 = prior_spec_data['dir_tilt_correction'] == int(coord)
prior_spec_data = prior_spec_data[cond | cond2]
prior_specimen_interpretations=[]
if ('meas_step_min' not in prior_spec_data.columns) or \
('meas_step_max' not in prior_spec_data.columns):
return ZED, False
if not len(prior_spec_data):
return ZED, False
mpars = {"specimen_direction_type": "Error"}
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True]
if len(prior_specimen_interpretations):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
interpretations = False
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError as ex:
mpars['specimen_direction_type'] = "Error"
try:
if beg_pcas[ind] == 0:
start = 0
else:
start = treatments.index(beg_pcas[ind])
if end_pcas[ind] == 0:
end = 0
else:
end = treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
interpretations = True
#if interactive:
# pmagplotlib.draw_figs(ZED)
else:
print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
print('\n Measurement records:')
cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
print(this_specimen_measurements[cols])
print('\n Data will be plotted without interpretations\n')
interpretations = False
return ZED, interpretations
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
# get sample data for orientation
if spec_container:
try:
samps = spec_container.df.loc[spec, 'sample']
except KeyError:
samps = ""
samp_df = []
if isinstance(samps, (int, float, np.int64)):
if np.isnan(samps):
samp = ""
samp_df = []
else:
samp = str(samps)
samp_container.df.index = samp_container.df.index.astype(str)
samp_df = samp_container.df[samp_container.df.index == samp]
elif isinstance(samps, type(None)):
samp = ""
samp_df = []
elif len(samps):
if isinstance(samps, str):
samp = samps
else:
samp = samps.iloc[0]
try:
samp_df = samp_container.df[samp_container.df.index == samp]
except AttributeError:
samp_df = []
else:
samp_df = []
# we can make the figure dictionary that pmagplotlib likes:
ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
# get the relevant data
spec_df = meas_df[meas_df.specimen == s]
# remove ARM data
spec_df = spec_df[~spec_df.method_codes.str.contains(
r'LP-*[\w]*-ARM')]
# split data into NRM, thermal, and af dataframes
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
'LT-NO')] # get the NRM data
spec_df_th = spec_df[spec_df.method_codes.str.contains(
'LT-T-Z')] # zero field thermal demag steps
try:
cond = spec_df.method_codes.str.contains(r'(^|[\s\:])LT-PTRM')
spec_df_th = spec_df_th[~cond] # get rid of some pTRM steps
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
this_spec_meas_df = None
datablock = None
if len(spec_df_th.index) <= 1 and len(spec_df_af.index) <= 1:
return False, False
if len(spec_df_th.index) > 1: # this is a thermal run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'K' # units are kelvin
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
except Exception:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return False, False
datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
if len(spec_df_af.index) > 1: # this is an af run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'T' # these are AF data
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
except Exception:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return False, False
datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock, coord)
# beginning of zeq_magic
if interactive:
save_plots = False
# read in MagIC formatted data if contribution object not provided
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
# read in magic formatted data
if not os.path.exists(file_path):
print('No such file:', file_path)
if image_records:
return False, [], []
return False, []
custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=['measurements', 'specimens',
'contribution', 'samples'])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_container = contribution.tables['measurements']
meas_df = contribution.tables['measurements'].df #
#meas_df=pd.read_csv(file_path, sep='\t', header=1)
spec_container = contribution.tables.get('specimens', None)
samp_container = contribution.tables.get('samples', None)
#if not spec_file:
# spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
#if os.path.exists(spec_file):
# spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
#else:
# spec_container = None
meas_df['blank'] = "" # this is a dummy variable expected by plot_zed
if 'treat_ac_field' in meas_df.columns:
# create 'treatment' column.
# uses treat_temp if treat_ac_field is missing OR zero.
# (have to take this into account for plotting later)
if 'treat_temp' in meas_df.columns:
meas_df['treatment'] = meas_df['treat_ac_field'].where(
cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
else:
meas_df['treatment'] = meas_df['treat_ac_field']
else:
meas_df['treatment'] = meas_df['treat_temp']
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
if image_records:
return False, [], []
return False, []
# check measurement table for req'd fields
missing = []
reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
for col in ['dir_dec', 'dir_inc', 'magn_moment']:
if col not in reqd_cols_present:
missing.append(col)
if missing:
print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
if image_records:
return False, [], []
return False, []
cnt = fignum
if n_plots != "all":
if len(specimens) > n_plots:
specimens = specimens[:n_plots]
saved = []
image_recs = []
if specimen:
specimens = [specimen]
for s in specimens:
s = str(s)
ZED, interpretations = make_plots(s, cnt, meas_df, spec_container, samp_container)
if not ZED:
if pmagplotlib.verbose:
print('No plots could be created for specimen:', s)
continue
titles = {key: s + "_" + key + "." + fmt for key in ZED}
# try to get the full hierarchy for plot names
df_slice = meas_container.df[meas_container.df['specimen'] == s]
location = str(meas_container.get_name('location', df_slice))
site = str(meas_container.get_name('site', df_slice))
sample = str(meas_container.get_name('sample', df_slice))
if pmagplotlib.isServer:
titles = {}
titles['eqarea'] = 'Equal Area Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['demag'] = 'Demagnetization Plot'
con_id = ""
if 'contribution' in contribution.tables:
if 'id' in contribution.tables['contribution'].df.columns:
con_id = contribution.tables['contribution'].df['id'].values[0]
pmagplotlib.add_borders(ZED, titles, con_id=con_id)
for title in titles:
int_str = ""
if interpretations and title == "eqarea":
int_str = "_interpretations"
filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(s)+'_CO:_'+crd+'_TY:_'+title+int_str+'_.png'
titles[title] = filename
if image_records:
for title, filename in titles.items():
image_rec = {'location': location, 'site': site, 'sample': sample, 'specimen': s,
'file': filename, 'type': PLOT_TYPES[title],
'title': " ".join([s, PLOT_TYPES[title]]), 'timestamp': time.time(),
'software_packages': version.version}
image_recs.append(image_rec)
if save_plots:
saved.extend(pmagplotlib.save_plots(ZED, titles, dir_path=dir_path))
elif interactive:
pmagplotlib.draw_figs(ZED)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
if image_records:
return True, saved, image_recs
return True, saved
[docs]
def thellier_magic(meas_file="measurements.txt", dir_path=".", input_dir_path="",
spec="", n_specs=5, save_plots=True, fmt="svg", interactive=False,
contribution=None, image_records=False):
"""
thellier_magic plots arai and other useful plots for Thellier-type experimental data
Parameters
----------
meas_file : str
input measurement file, default "measurements.txt"
dir_path : str
output directory, default "."
Note: if using Windows, all figures will be saved to the working directory,
*not* dir_path
input_dir_path : str
input file directory IF different from dir_path, default ""
spec : str
default "", specimen to plot
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
save_plots : bool, default True
True, create and save all requested plots
fmt : str
format of saved figures (default is 'svg')
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
image_records : bool, default False
generate and return a record for each image in a list of dicts
which can be ingested by pmag.magic_write
Returns
---------
status : True or False
saved : list of figures saved
if image_records == True:
image_recs : list of image records
"""
def make_plots(this_specimen, thel_data, cnt=1):
"""
Take specimen name and measurement data
and produce plots.
Return a dictionary of plots created, or False if
no plots could be created.
"""
zed = False
if pmagplotlib.verbose:
print(this_specimen)
# make the figure dictionary that pmagplotlib likes:
#AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
#if save_plots:
# AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
#else:
AZD = {'arai': cnt, 'zijd': cnt+1, 'eqarea': cnt +
2, 'deremag': cnt+3} # make datablock
#cnt += 4 # increment the figure counter
spec_df = thel_data[thel_data.specimen ==
this_specimen] # get data for this specimen
# get the data block for Arai plot
if len(spec_df) >= 3:
# just skip specimen if arai data is malformed
try:
araiblock, field = pmag.sortarai(spec_df, this_specimen, 0, version=3)
except Exception as ex:
print('-W-', ex)
return zed
if not save_plots:
for key, val in AZD.items():
pmagplotlib.plot_init(val, 5, 5)
# get the datablock for Zijderveld plot
zijdblock, units = pmag.find_dmag_rec(
this_specimen, spec_df, version=3)
if not len(units):
unit_string = ""
else:
unit_string = units[-1]
zed = pmagplotlib.plot_arai_zij(
AZD, araiblock, zijdblock, this_specimen, unit_string) # make the plots
return zed
# format some things
if interactive:
save_plots = False
image_recs = []
if not isinstance(contribution, cb.Contribution):
# get proper paths
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
input_dir_path = os.path.split(file_path)[0]
# read in magic formatted data
contribution = cb.Contribution(input_dir_path)
if not contribution.tables.get('measurements'):
print('-W- No measurements table found')
if image_records:
return False, [], []
return False, []
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_df = contribution.tables['measurements'].df
# try to get contribution id for server plotting
if pmagplotlib.isServer:
con_id = contribution.get_con_id()
# get key for intensity records
int_key = cb.get_intensity_col(meas_df)
# list for saved figs
saved = []
# get all the records with measurement data
meas_data = meas_df[meas_df[int_key].notnull()]
# get all the Thellier data
thel_data = meas_data.dropna(subset=['method_codes'])
thel_data = thel_data[thel_data['method_codes'].str.contains('LP-PI-TRM')]
specimens = meas_data.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
if image_records:
return False, [], []
return False, []
if spec:
if spec not in specimens:
print('could not find specimen {}'.format(spec))
if image_records:
return False, [], []
return False, []
specimens = [spec]
elif n_specs != "all":
try:
specimens = specimens[:n_specs]
except Exception as ex:
pass
cnt = 1 # set the figure counter to 1
for this_specimen in specimens: # step through the specimens list
zed = make_plots(this_specimen, thel_data, cnt)
# if plots were produced
if zed:
files = {}
if interactive:
# draw and save interactively
pmagplotlib.draw_figs(zed)
ans = input(
"S[a]ve plots, [q]uit, <return> to continue\n ")
if ans == 'q':
if image_records:
return True, [], []
return True, []
if ans == 'a':
files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
if not set_env.IS_WIN:
files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
incl_directory = True
saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
elif save_plots:
# don't draw, just save figures
files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
incl_directory = False
if not pmagplotlib.isServer:
# not server
if not set_env.IS_WIN:
files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
incl_directory = True
else:
# isServer, fix plot titles, formatting, and file names for server
for key, value in files.copy().items():
files[key] = "SP:_{}_TY:_{}_.{}".format(this_specimen, key, fmt)
titles = {}
titles['deremag'] = 'DeReMag Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['arai'] = 'Arai Plot'
titles['TRM'] = 'TRM Acquisition data'
titles['eqarea'] = 'Equal Area Plot'
zed = pmagplotlib.add_borders(
zed, titles, con_id=con_id)
saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
# just let the plots appear (notebook)
else:
cnt += len(zed)
# don't even need to draw 'em! They just appear.
#pmagplotlib.draw_figs(zed)
if image_records:
for plot_type, filename in files.items():
image_rec = {'specimen': this_specimen,
'file': os.path.split(filename)[1], 'type': PLOT_TYPES[plot_type],
'title': " ".join([this_specimen, PLOT_TYPES[plot_type]]),
'timestamp': time.time(), 'software_packages': version.version}
image_recs.append(image_rec)
# no plots were produced
else:
print ('no data for ',this_specimen)
print ('skipping')
if image_records:
return True, saved, image_recs
return True, saved
[docs]
def hysteresis_magic(output_dir_path=".", input_dir_path="", spec_file="specimens.txt",
meas_file="measurements.txt", fmt="svg",
save_plots=True, make_plots=True, pltspec="", n_specs=5, interactive=False):
"""
Calculate hysteresis parameters and plot hysteresis data.
Plotting may be called interactively with save_plots==False,
or be suppressed entirely with make_plots==False.
Parameters:
output_dir_path : str, default "."
Note: if using Windows, all figures will be saved to the working directory,
*not* output_dir_path
input_dir_path : str
path for input file if different from output_dir_path (default is same)
spec_file : str, default "specimens.txt"
output file to save hysteresis data
meas_file : str, default "measurements.txt"
input measurement file
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
save_plots : bool, default True
if True, generate and save all requested plots
make_plots : bool, default True
if False, skip making plots and just save hysteresis data
(if False, save_plots will be set to False also)
pltspec : str, default ""
specimen name to plot, otherwise will plot all specimens
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns:
Tuple
(True or False indicating if conversion was successful, output file names written)
"""
# put plots in output_dir_path, unless isServer
incl_directory = True
if pmagplotlib.isServer or set_env.IS_WIN:
incl_directory = False
# figure out directory/file paths
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
# format and initialize variables
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
if not make_plots:
irm_init, imag_init = -1, -1
save_plots = False
if save_plots:
verbose = False
if interactive:
save_plots = False
SpecRecs = []
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print('-E- {} is not a valid measurements file'.format(meas_file))
return False, []
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
if verbose and make_plots:
print("Plots may be on top of each other - use mouse to place ")
if make_plots:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
imag_init = 0
irm_init = 0
else:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
#
if spec_file:
prior_data, file_type = pmag.magic_read(spec_file)
else:
prior_data = [] # keep prior_data defined when no specimen file is given
#
# get list of unique experiment names and specimen names
#
experiment_names, sids = [], []
hys_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-HYS', 'has')
dcd_data = pmag.get_dictitem(
meas_data, 'method_codes', 'LP-IRM-DCD', 'has')
imag_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-IMAG', 'has')
for rec in hys_data:
if rec['experiment'] not in experiment_names:
experiment_names.append(rec['experiment'])
if rec['specimen'] not in sids:
sids.append(rec['specimen'])
#
k = 0
# if plotting only one specimen, find it
if pltspec:
k = sids.index(pltspec)
# if plotting only n specimens, remove others from the list
elif n_specs != "all":
try:
sids = sids[:n_specs]
except:
pass
cnt = 0
while k < len(sids):
specimen = sids[k]
if pltspec:
if specimen != pltspec:
k += 1
continue
else:
for key, value in HDD.items():
cnt += 1
HDD[key] = cnt
#HDD = {key: value + len(HDD) + k for (key, value) in HDD.items()}
# initialize a new specimen hysteresis record
HystRec = {'specimen': specimen, 'experiments': ""}
if verbose and make_plots:
print(specimen, k+1, 'out of ', len(sids))
#
#
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
# fish out all the LP-HYS data for this specimen
spec_data = pmag.get_dictitem(hys_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
meths = spec_data[0]['method_codes'].split(':')
e = spec_data[0]['experiment']
HystRec['meas_orient_phi'],HystRec['meas_orient_theta']='0','0'
if 'treat_dc_field_phi' in spec_data[0].keys():
HystRec['meas_orient_phi']=spec_data[0]['treat_dc_field_phi']
if 'treat_dc_field_theta' in spec_data[0].keys():
HystRec['meas_orient_theta']=spec_data[0]['treat_dc_field_theta']
HystRec['experiments'] = spec_data[0]['experiment']
for rec in spec_data:
B.append(float(rec['meas_field_dc']))
M.append(float(rec['magn_moment']))
# fish out all the data for this specimen
spec_data = pmag.get_dictitem(dcd_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
HystRec['experiments'] = HystRec['experiments'] + \
':'+spec_data[0]['experiment']
irm_exp = spec_data[0]['experiment']
for rec in spec_data:
Bdcd.append(float(rec['treat_dc_field']))
Mdcd.append(float(rec['magn_moment']))
# fish out all the data for this specimen
spec_data = pmag.get_dictitem(imag_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
imag_exp = spec_data[0]['experiment']
for rec in spec_data:
Bimag.append(float(rec['meas_field_dc']))
Mimag.append(float(rec['magn_moment']))
#
# now plot the hysteresis curve
#
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
if interactive:
if not set_env.IS_WIN:
pmagplotlib.draw_figs(HDD)
#
if make_plots:
pmagplotlib.plot_hpars(HDD, hpars, 'bs')
HystRec['hyst_mr_moment'] = hpars['hysteresis_mr_moment']
HystRec['hyst_ms_moment'] = hpars['hysteresis_ms_moment']
HystRec['hyst_bc'] = hpars['hysteresis_bc']
HystRec['hyst_bcr'] = hpars['hysteresis_bcr']
HystRec['hyst_xhf'] = hpars['hysteresis_xhf']
HystRec['experiments'] = e
HystRec['software_packages'] = version_num
if hpars["magic_method_codes"] not in hmeths:
hmeths.append(hpars["magic_method_codes"])
methods = ""
for meth in hmeths:
methods = methods+meth.strip()+":"
HystRec["method_codes"] = methods[:-1]
HystRec["citations"] = "This study"
#
if len(Bdcd) > 0:
rmeths = []
for meth in meths:
rmeths.append(meth)
if verbose and make_plots:
print('plotting IRM')
if irm_init == 0:
cnt += 1
HDD['irm'] = cnt # next available figure number
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['irm'], 5, 5)
irm_init = 1
rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
HystRec['rem_mr_moment'] = rpars['remanence_mr_moment']
HystRec['rem_bcr'] = rpars['remanence_bcr']
HystRec['experiments'] = specimen+':'+irm_exp
if rpars["magic_method_codes"] not in meths:
meths.append(rpars["magic_method_codes"])
methods = ""
for meth in rmeths:
methods = methods+meth.strip()+":"
HystRec["method_codes"] = HystRec['method_codes']+':'+methods[:-1]
HystRec["citations"] = "This study"
else:
if irm_init:
pmagplotlib.clearFIG(HDD['irm'])
if len(Bimag) > 0:
if verbose and make_plots:
print('plotting initial magnetization curve')
# first normalize by Ms
Mnorm = []
for m in Mimag:
Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
if imag_init == 0:
HDD['imag'] = 4
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['imag'], 5, 5)
imag_init = 1
pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
else:
if imag_init:
pmagplotlib.clearFIG(HDD['imag'])
if len(list(HystRec.keys())) > 0:
HystRecs.append(HystRec)
#
files = {}
if save_plots and make_plots:
if pltspec:
s = pltspec
else:
s = specimen
files = {}
for key in list(HDD.keys()):
if incl_directory:
files[key] = os.path.join(output_dir_path, s+'_'+key+'.'+fmt)
else:
files[key] = s+'_'+key+'.'+fmt
if make_plots and save_plots:
pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
#if pltspec:
# return True, []
if interactive:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
if incl_directory:
files[key] = os.path.join(output_dir_path, specimen+'_'+key+'.'+fmt)
else:
files[key] = specimen+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
return True, []
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = sids.index(specimen)
keepon = 0
except:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = sids.index(specimen)
else:
k += 1
if len(B) == 0 and len(Bdcd) == 0:
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if k < len(sids):
# must re-init figs for Windows to keep size
if make_plots and set_env.IS_WIN:
if not save_plots:
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
if len(Bimag) > 0:
HDD['imag'] = 4
if not save_plots:
pmagplotlib.plot_init(HDD['imag'], 5, 5)
if len(Bdcd) > 0:
HDD['irm'] = 5 if 'imag' in HDD else 4
if not save_plots:
pmagplotlib.plot_init(HDD['irm'], 5, 5)
elif not make_plots and set_env.IS_WIN:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
if len(HystRecs) > 0:
# go through prior_data, clean out prior results and save combined file as spec_file
SpecRecs, keys = [], list(HystRecs[0].keys())
if len(prior_data) > 0:
prior_keys = list(prior_data[0].keys())
else:
prior_keys = []
for rec in prior_data:
for key in keys:
if key not in list(rec.keys()):
rec[key] = ""
if 'LP-HYS' not in rec['method_codes']:
SpecRecs.append(rec)
for rec in HystRecs:
for key in prior_keys:
if key not in list(rec.keys()):
rec[key] = ""
prior = pmag.get_dictitem(
prior_data, 'specimen', rec['specimen'], 'T')
if len(prior) > 0 and 'sample' in list(prior[0].keys()):
# pull sample name from prior specimens table
rec['sample'] = prior[0]['sample']
SpecRecs.append(rec)
# drop unnecessary/duplicate rows
#dir_path = os.path.split(spec_file)[0]
con = cb.Contribution(input_dir_path, read_tables=[])
con.add_magic_table_from_data('specimens', SpecRecs)
con.tables['specimens'].drop_duplicate_rows(
ignore_cols=['specimen', 'sample', 'citations', 'software_packages'])
con.tables['specimens'].df = con.tables['specimens'].df.drop_duplicates()
spec_file = os.path.join(output_dir_path, os.path.split(spec_file)[1])
con.write_table_to_file('specimens', custom_name=spec_file)
if verbose:
print("hysteresis parameters saved in ", spec_file)
return True, [spec_file]
[docs]
def eqarea_magic(in_file='sites.txt', dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="all", crd="g", ignore_tilt=False,
save_plots=True, fmt="svg", contour=False, color_map="coolwarm",
plot_ell="", n_plots=5, interactive=False, contribution=None,
source_table="sites", image_records=False):
"""
makes equal area projections from declination/inclination data
Parameters:
in_file : str
input file name, default "sites.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc, all] (specimen, sample, site, location, all), default "all"
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, dir_tilt_correction = -1
g : geographic coordinates, dir_tilt_correction = 0 (default)
t : tilt corrected coordinates, dir_tilt_correction = 100
ignore_tilt : bool
default False. If True, data are unoriented (allows plotting of measurement dec/inc)
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
contour : bool
plot as color contour
    color_map : str
        color map for contour plotting, default "coolwarm"
        see matplotlib documentation for more options
plot_ell : str
[F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Bootstrap eigenvectors
default "" plots none
n_plots : int
maximum number of plots to make, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
source_table : table to get plot data from (only needed with contribution argument)
for example, you could specify source_table="measurements" and plot_by="sites"
to plot measurement data by site.
default "sites"
image_records : generate and return a record for each image in a list of dicts
which can be ingested by pmag.magic_write
bool, default False
Returns:
if image_records == False
type - Tuple : (True or False indicating if conversion was successful, file name(s) written)
if image_records == True
True or False indicating if conversion was successful, output file name written, list of image recs
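    Examples:
        A minimal usage sketch, assuming a MagIC-format sites.txt in the
        working directory (file and output names are illustrative):
        >>> status, saved = ipmag.eqarea_magic(in_file='sites.txt', save_plots=True, fmt='png')
        >>> status
        True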
"""
image_recs = []
saved = []
# parse out input/out directories
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# initialize some variables
verbose = pmagplotlib.verbose
FIG = {} # plot dictionary
FIG['eqarea'] = 1 # eqarea is figure 1
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
# get coordinate system
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
# get item to plot by
if plot_by == 'all':
plot_key = 'all'
elif plot_by == 'sit' or plot_by == "site":
plot_key = 'site'
elif plot_by == 'sam' or plot_by == 'sample':
plot_key = 'sample'
elif plot_by == 'spc' or plot_by == "specimen":
plot_key = 'specimen'
else:
plot_by = 'all'
plot_key = 'all'
# get distribution to plot ellipses/eigenvectors if desired
if save_plots:
verbose = False
# set keys
dec_key = 'dir_dec'
inc_key = 'dir_inc'
tilt_key = 'dir_tilt_correction'
# create/access contribution
if contribution is not None:
# need to get source table
table_name = source_table
input_dir_path = contribution.directory
else:
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
if image_records:
return False, [], []
return False, []
contribution = cb.Contribution(input_dir_path, custom_filenames=fnames,
single_file=in_file)
table_name = list(contribution.tables.keys())[0]
contribution.add_magic_table("contribution")
# get contribution id if available for server plots
if pmagplotlib.isServer:
con_id = contribution.get_con_id()
# try to propagate all names to measurement level
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
    except KeyError:
        pass
# the object that contains the DataFrame + useful helper methods:
data_container = contribution.tables[table_name]
# the actual DataFrame:
data = data_container.df
plot_type = data_container.dtype
if plot_key != "all" and plot_key not in data.columns:
print("-E- You can't plot by {} with the data provided".format(plot_key))
if image_records:
return False, [], []
return False, []
# add tilt key into DataFrame columns if it isn't there already
if tilt_key not in data.columns:
data.loc[:, tilt_key] = None
print(len(data), ' {} records read in'.format(plot_type))
# find desired dec,inc data:
dir_type_key = ''
#
# get plotlist if not plotting all records
#
plotlist = []
if plot_key != "all":
# return all where plot_key is not blank
if plot_key not in data.columns:
print('-E- Can\'t plot by "{}". That header is not in infile: {}'.format(
plot_key, in_file))
if image_records:
return False, [], []
return False, []
plots = data[data[plot_key].notnull()]
plotlist = plots[plot_key].unique() # grab unique values
else:
plotlist.append('All')
if n_plots != "all":
if len(plotlist) > n_plots:
plotlist = plotlist[:n_plots]
fignum = 0
for plot in plotlist:
fignum += 1
FIG['eqarea'] = fignum
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
if plot_ell:
dist = plot_ell.upper()
# if dist type is unrecognized, use Fisher
if dist not in ['F', 'K', 'B', 'BE', 'BV']:
dist = 'F'
if dist == "BV":
fignum += 1
FIG['bdirs'] = fignum
pmagplotlib.plot_init(FIG['bdirs'], 5, 5)
if verbose:
print(plot)
if plot == 'All':
# plot everything at once
plot_data = data
else:
# pull out only partial data
plot_data = data[data[plot_key] == plot]
# get location names for the data
locs = []
if 'location' in plot_data.columns:
locs = list(plot_data['location'].dropna().unique())
DIblock = []
GCblock = []
# SLblock, SPblock = [], []
title = plot
mode = 1
if dec_key not in plot_data.columns:
print("-W- No dec/inc data")
continue
# get all records where dec & inc values exist
plot_data = plot_data[plot_data[dec_key].notnull()
& plot_data[inc_key].notnull()]
if plot_data.empty:
print("-W- No dec/inc data")
continue
# get metadata for naming the plot file
locations = str(data_container.get_name('location', df_slice=plot_data))
site = str(data_container.get_name('site', df_slice=plot_data))
sample = str(data_container.get_name('sample', df_slice=plot_data))
specimen = str(data_container.get_name('specimen', df_slice=plot_data))
# make sure method_codes is in plot_data
if 'method_codes' not in plot_data.columns:
plot_data['method_codes'] = ''
# get data blocks
# would have to ignore tilt to use measurement level data
DIblock = data_container.get_di_block(df_slice=plot_data,
tilt_corr=coord, excl=['DE-BFP'], ignore_tilt=ignore_tilt)
if title == 'All':
if len(locs):
title = " ,".join(locs) + " - {} {} plotted".format(str(len(DIblock)), plot_type)
else:
title = "{} {} plotted".format(str(len(DIblock)), plot_type)
#SLblock = [[ind, row['method_codes']] for ind, row in plot_data.iterrows()]
# get great circles
great_circle_data = data_container.get_records_for_code('DE-BFP', incl=True,
use_slice=True, sli=plot_data)
if len(great_circle_data) > 0:
gc_cond = great_circle_data[tilt_key] == coord
GCblock = [[float(row[dec_key]), float(row[inc_key])]
for ind, row in great_circle_data[gc_cond].iterrows()]
#SPblock = [[ind, row['method_codes']] for ind, row in great_circle_data[gc_cond].iterrows()]
if len(DIblock) > 0:
if not contour:
pmagplotlib.plot_eq(FIG['eqarea'], DIblock, title)
else:
pmagplotlib.plot_eq_cont(
FIG['eqarea'], DIblock, color_map=color_map)
else:
pmagplotlib.plot_net(FIG['eqarea'])
if len(GCblock) > 0:
for rec in GCblock:
pmagplotlib.plot_circ(FIG['eqarea'], rec, 90., 'g')
#if len(DIblock) == 0 and len(GCblock) == 0:
        if len(DIblock) + len(GCblock) < 5:
if verbose:
print("insufficient records for plotting")
fignum -= 1
if 'bdirs' in FIG:
fignum -= 1
continue
# sys.exit()
if plot_ell:
ppars = pmag.doprinc(DIblock) # get principal directions
nDIs, rDIs, npars, rpars = [], [], [], []
for rec in DIblock:
angle = pmag.angle([rec[0], rec[1]], [
ppars['dec'], ppars['inc']])
if angle > 90.:
rDIs.append(rec)
else:
nDIs.append(rec)
if dist == 'B': # do on whole dataset
etitle = "Bingham confidence ellipse"
bpars = pmag.dobingham(DIblock)
for key in list(bpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (bpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (bpars[key]))
npars.append(bpars['dec'])
npars.append(bpars['inc'])
npars.append(bpars['Zeta'])
npars.append(bpars['Zdec'])
npars.append(bpars['Zinc'])
npars.append(bpars['Eta'])
npars.append(bpars['Edec'])
npars.append(bpars['Einc'])
if dist == 'F':
etitle = "Fisher confidence cone"
if len(nDIs) > 2:
fpars = pmag.fisher_mean(nDIs)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
npars.append(fpars['dec'])
npars.append(fpars['inc'])
npars.append(fpars['alpha95']) # Beta
npars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
npars.append(fpars['inc']-isign*90.) # Beta inc
npars.append(fpars['alpha95']) # gamma
npars.append(fpars['dec']+90.) # Beta dec
npars.append(0.) # Beta inc
if len(rDIs) > 2:
fpars = pmag.fisher_mean(rDIs)
if verbose:
print("mode ", mode)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
rpars.append(fpars['dec'])
rpars.append(fpars['inc'])
rpars.append(fpars['alpha95']) # Beta
rpars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
rpars.append(fpars['inc']-isign*90.) # Beta inc
rpars.append(fpars['alpha95']) # gamma
rpars.append(fpars['dec']+90.) # Beta dec
rpars.append(0.) # Beta inc
if dist == 'K':
etitle = "Kent confidence ellipse"
if len(nDIs) > 3:
kpars = pmag.dokent(nDIs, len(nDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
npars.append(kpars['dec'])
npars.append(kpars['inc'])
npars.append(kpars['Zeta'])
npars.append(kpars['Zdec'])
npars.append(kpars['Zinc'])
npars.append(kpars['Eta'])
npars.append(kpars['Edec'])
npars.append(kpars['Einc'])
if len(rDIs) > 3:
kpars = pmag.dokent(rDIs, len(rDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
rpars.append(kpars['dec'])
rpars.append(kpars['inc'])
rpars.append(kpars['Zeta'])
rpars.append(kpars['Zdec'])
rpars.append(kpars['Zinc'])
rpars.append(kpars['Eta'])
rpars.append(kpars['Edec'])
rpars.append(kpars['Einc'])
else: # assume bootstrap
if dist == 'BE':
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
Bkpars = pmag.dokent(BnDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
npars.append(Bkpars['dec'])
npars.append(Bkpars['inc'])
npars.append(Bkpars['Zeta'])
npars.append(Bkpars['Zdec'])
npars.append(Bkpars['Zinc'])
npars.append(Bkpars['Eta'])
npars.append(Bkpars['Edec'])
npars.append(Bkpars['Einc'])
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
Bkpars = pmag.dokent(BrDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
rpars.append(Bkpars['dec'])
rpars.append(Bkpars['inc'])
rpars.append(Bkpars['Zeta'])
rpars.append(Bkpars['Zdec'])
rpars.append(Bkpars['Zinc'])
rpars.append(Bkpars['Eta'])
rpars.append(Bkpars['Edec'])
rpars.append(Bkpars['Einc'])
etitle = "Bootstrapped confidence ellipse"
elif dist == 'BV':
sym = {'lower': ['o', 'c'], 'upper': [
'o', 'g'], 'size': 3, 'edgecolor': 'face'}
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
pmagplotlib.plot_eq_sym(
FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', sym)
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
if len(nDIs) > 5: # plot on existing plots
pmagplotlib.plot_di_sym(FIG['bdirs'], BrDIs, sym)
else:
pmagplotlib.plot_eq(
FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors')
if dist == 'B':
if len(nDIs) > 3 or len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
elif len(nDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
if len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
elif len(rDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
for key in list(FIG.keys()):
files = {}
#if filename: # use provided filename
# filename += '.' + fmt
if pmagplotlib.isServer: # use server plot naming convention
if plot_key == 'all':
filename = 'LO:_'+locations+'_SI:__SA:__SP:__CO:_'+crd+'_TY:_'+key+'_.'+fmt
else:
filename = 'LO:_'+locations+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(specimen)+'_CO:_'+crd+'_TY:_'+key+'_.'+fmt
elif plot_key == 'all':
filename = 'all'
if locs:
loc_string = "_".join(
[str(loc).replace(' ', '_') for loc in locs])
filename += "_" + loc_string
filename += "_" + crd + "_" + key
filename += ".{}".format(fmt)
else: # use more readable naming convention
filename = ''
            # fix this if plot_by is location, for example
use_names = {'location': [locations], 'site': [locations, site],
'sample': [locations, site, sample],
'specimen': [locations, site, sample, specimen]}
use = use_names[plot_key]
use.extend([crd, key])
# [locations, site, sample, specimen, crd, key]:
for item in use:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
if not pmagplotlib.isServer:
filename = os.path.join(dir_path, filename)
files[key] = filename
if image_records:
for file_type, filename in files.items():
name = specimen or sample or site or locations or "unknown"
image_rec = {'location': locations, 'site': site,
'sample': sample, 'specimen': specimen,
'file': filename, 'type': PLOT_TYPES[file_type],
'title': " ".join([name, PLOT_TYPES[file_type]]),
'timestamp': time.time(),
'software_packages': version.version}
image_recs.append(image_rec)
saved_figs = []
if pmagplotlib.isServer:
titles = {'eqarea': 'Equal Area Plot'}
FIG = pmagplotlib.add_borders(FIG, titles, con_id=con_id)
saved_figs = pmagplotlib.save_plots(FIG, files)
elif save_plots:
            saved_figs = pmagplotlib.save_plots(FIG, files, dir_path=dir_path, incl_directory=True)
#continue
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans == "q":
if image_records:
return True, saved, image_recs
return True, saved
if ans == "a":
saved_figs = pmagplotlib.save_plots(FIG, files, incl_directory=True)
saved.extend(saved_figs)
continue
if image_records:
return True, saved, image_recs
return True, saved
[docs]
def polemap_magic(loc_file="locations.txt", dir_path=".", interactive=False, crd="",
sym='ro', symsize=40, rsym='g^', rsymsize=40,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=90., lon_0=0., save_plots=True,
contribution=None, image_records=False):
"""
Use a MagIC format locations table to plot poles.
Parameters:
loc_file : str, default "locations.txt"
dir_path : str, default "."
directory name to find loc_file in (if not included in loc_file)
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more options)
symsize : int, default 40
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
rsymsize : int, default 40
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
        moll = mollweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
        if True, plot topography (not yet implemented)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 90.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    image_records : bool, default False
        generate and return a record for each image in a list of dicts
        which can be ingested by pmag.magic_write
Returns:
if image_records == False
True or False indicating if conversion was successful, file name(s) written
if image_records == True
True or False indicating if conversion was successful, output file name written, list of image recs
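    Examples:
        A minimal usage sketch, assuming a MagIC locations.txt with pole_lat
        and pole_lon columns (file name is illustrative):
        >>> status, saved = ipmag.polemap_magic(loc_file='locations.txt', flip=True, fmt='png')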
"""
anti, ell = int(anti), int(ell)
# initialize and format variables
saved = []
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
polarities = []
image_recs = []
if interactive:
save_plots = False
full_path = pmag.resolve_file_name(loc_file, dir_path)
dir_path, loc_file = os.path.split(full_path)
# create or access MagIC contribution
if contribution is not None:
dir_path = contribution.directory
con = contribution
else:
con = cb.Contribution(dir_path, single_file=loc_file)
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
if image_records:
return False, "Couldn't read in data", []
return False, "Couldn't read in data"
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
pole_container = con.tables['locations']
pole_df = pole_container.df
if 'pole_lat' not in pole_df.columns or 'pole_lon' not in pole_df.columns:
print("-W- pole_lat and pole_lon are required columns to run polemap_magic.py")
if image_records:
return False, "pole_lat and pole_lon are required columns to run polemap_magic.py", []
return False, "pole_lat and pole_lon are required columns to run polemap_magic.py"
# use records with pole_lat and pole_lon
cond1, cond2 = pole_df['pole_lat'].notnull(), pole_df['pole_lon'].notnull()
Results = pole_df[cond1 & cond2]
# don't plot identical poles twice
    Results = Results.drop_duplicates(subset=['pole_lat', 'pole_lon', 'location'])  # avoid inplace on a slice
# use tilt correction if available
# prioritize tilt-corrected poles
if 'dir_tilt_correction' in Results.columns:
if not crd:
coords = Results['dir_tilt_correction'].unique()
if 100. in coords:
crd = 't'
elif 0. in coords:
crd = 'g'
else:
crd = ''
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
# filter results by dir_tilt_correction if available
if (coord or coord == 0) and 'dir_tilt_correction' in Results.columns:
cond = Results['dir_tilt_correction'] == coord
cond2 = Results['dir_tilt_correction'] == str(coord)
Results = Results[cond | cond2]
# get location name and average ages
loc_list = Results['location'].values
locations = ":".join(Results['location'].unique())
if 'age' not in Results.columns and 'age_low' in Results.columns and 'age_high' in Results.columns:
Results['age'] = Results['age_low']+0.5 * \
(Results['age_high']-Results['age_low'])
if 'age' in Results.columns and ages:
dates = Results['age'].unique()
if not any(Results.index):
print("-W- No poles could be plotted")
if image_records:
return False, "No poles could be plotted", []
return False, "No poles could be plotted"
# go through rows and extract data
for ind, row in Results.iterrows():
lat, lon = float(row['pole_lat']), float(row['pole_lon'])
if 'dir_polarity' in row:
polarities.append(row['dir_polarity'])
if anti:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif not flip:
lats.append(lat)
lons.append(lon)
elif flip:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'pole_dm' in list(row.keys()) and row['pole_dm']:
ell1 = float(row['pole_dm'])
if 'pole_dp' in list(row.keys()) and row['pole_dp']:
ell2 = float(row['pole_dp'])
if 'pole_alpha95' in list(row.keys()) and row['pole_alpha95']:
ell1, ell2 = float(row['pole_alpha95']), float(row['pole_alpha95'])
if ell1 and ell2 and lons:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
locations = locations.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'b+',
'symsize': 40, 'pltgrid': 0, 'res': res, 'boundinglat': 0.,
'edgecolor': 'face'}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
base_Opts = Opts.copy()
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
    if proj == 'merc':
        Opts['pltgrid'] = 1
Opts['sym'] = sym
Opts['symsize'] = symsize
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
pole_lats = []
pole_lons = []
for num, lat in enumerate(lats):
lon = lons[num]
if lat > 0:
pole_lats.append(lat)
pole_lons.append(lon)
# plot the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], pole_lats, pole_lons, Opts)
# do reverse poles
if len(rlats) > 0:
reverse_Opts = Opts.copy()
reverse_Opts['sym'] = rsym
reverse_Opts['symsize'] = rsymsize
reverse_Opts['edgecolor'] = 'black'
# plot the lats and lons of the reverse poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, reverse_Opts)
Opts['names'] = []
titles = {}
files = {}
if pmagplotlib.isServer:
# plot each individual pole for the server
for ind in range(len(lats)):
lat = lats[ind]
lon = lons[ind]
polarity = ""
            if polarities:  # true if the list is not empty
                polarity = polarities[ind]
polarity = "_" + polarity if polarity else ""
location = loc_list[ind]
if location == "nan":
location = "unknown"
FIG["map_{}".format(ind)] = ind+2
pmagplotlib.plot_init(FIG['map_{}'.format(ind)], 6, 6)
pmagplotlib.plot_map(FIG['map_{}'.format(ind)], [90.], [0.], base_Opts)
pmagplotlib.plot_map(ind+2, [lat], [lon], Opts)
titles["map_{}".format(ind)] = location
if crd:
fname = "LO:_{}{}_TY:_POLE_map_{}.{}".format(location, polarity, crd, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map_{}".format(location, polarity, crd)
else:
fname = "LO:_{}{}_TY:_POLE_map.{}".format(location, polarity, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map".format(location, polarity)
if image_records:
image_rec = {'location': location, 'file': fname, 'type': 'Pole Map',
'title': 'Pole map ' + location,
'timestamp': date.today().isoformat(),
'software_packages': version.version}
image_recs.append(image_rec)
# don't allow identically named files
if files:
file_values = files.values()
file_values_short = [fname.rsplit('.')[0] for fname in file_values]
if fname_short in file_values_short:
for val in [str(n) for n in range(1, 15)]:
fname = fname_short + "_{}.".format(val) + fmt
if fname not in file_values:
break
files["map_{}".format(ind)] = fname
# truncate location names so that ultra long filenames are not created
if len(locations) > 50:
locations = locations[:50]
if pmagplotlib.isServer:
# use server plot naming convention
con_id = ''
if 'contribution' in con.tables:
# try to get contribution id
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
files['map'] = 'MC:_{}_TY:_POLE_map_{}.{}'.format(con_id, crd, fmt)
else:
# no contribution id available
files['map'] = 'LO:_' + locations + '_TY:_POLE_map_{}.{}'.format(crd, fmt)
else:
# use readable naming convention for non-database use
files['map'] = '{}_POLE_map_{}.{}'.format(locations, crd, fmt)
if image_records:
image_rec = {'location': locations, 'file': files['map'], 'type': 'Pole Map',
'title': 'Pole map ' + locations,
'timestamp': date.today().isoformat(),
'software_packages': version.version}
image_recs.append(image_rec)
#
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if ell: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles['map'] = 'LO:_' + locations + '_POLE_map'
con_id = ''
if 'contribution' in con.tables:
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
loc_string = ""
if 'locations' in con.tables:
num_locs = len(con.tables['locations'].df.index.unique())
loc_string = "{} location{}".format(num_locs, 's' if num_locs > 1 else '')
num_lats = len([lat for lat in lats if lat > 0])
num_rlats = len(rlats)
npole_string = ""
rpole_string = ""
if num_lats:
npole_string = "{} normal ".format(num_lats) #, 's' if num_lats > 1 else '')
if num_rlats:
rpole_string = "{} reverse".format(num_rlats)
if num_lats + num_rlats > 1:
pole_string = "poles"
elif num_lats + num_rlats == 0:
pole_string = ""
else:
pole_string = "pole"
title = "MagIC contribution {}\n {} {}{} {}".format(con_id, loc_string, npole_string, rpole_string, pole_string)
        titles['map'] = title.replace('  ', ' ')  # collapse double spaces
FIG = pmagplotlib.add_borders(FIG, titles, black, purple, con_id)
save_plots = True
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
save_plots = True
        else:
            print("Good bye")
            if image_records:
                return True, [], image_recs
            return True, []
if save_plots:
saved = pmagplotlib.save_plots(FIG, files)
if image_records:
return True, saved, image_recs
return True, saved
[docs]
def chi_magic(infile="measurements.txt", dir_path=".", experiments="",
fmt="svg", save_plots=True, interactive=False, contribution=None):
"""
Parameters:
infile : str, default "measurements.txt"
measurement infile
dir_path : str, default "."
input directory
experiments : str, default ""
experiment name to plot
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns:
(status, output_files) - Tuple:
True or False indicating if conversion was successful, file name(s) written
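    Examples:
        A minimal usage sketch, assuming a MagIC measurements file with AC
        susceptibility data (susc_chi_volume) measured at several
        temperatures and frequencies:
        >>> status, saved = ipmag.chi_magic(infile='measurements.txt', fmt='png')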
"""
saved = []
if contribution:
chi_data_all = contribution.tables['measurements'].df
else:
infile = pmag.resolve_file_name(infile, dir_path)
chi_data_all = pd.read_csv(infile, sep='\t', header=1)
if not experiments:
try:
experiments = chi_data_all.experiment.unique()
except Exception as ex:
print(ex)
experiments = ["all"]
else:
experiments = [experiments]
plotnum = 0
figs = {}
fnames = {}
for exp in experiments:
if exp == "all":
chi_data = chi_data_all
chi_data = chi_data_all[chi_data_all.experiment == exp]
if len(chi_data) <= 1:
print('Not enough data to plot {}'.format(exp))
continue
plotnum += 1
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_temperature.{}'.format(fmt)
# get arrays of available temps, frequencies and fields
Ts = np.sort(chi_data.meas_temp.unique())
Fs = np.sort(chi_data.meas_freq.unique())
Bs = np.sort(chi_data.meas_field_ac.unique())
# plot chi versus temperature at constant field
b = Bs.max()
for num, f in enumerate(Fs):
this_f = chi_data[chi_data.meas_freq == f]
this_f = this_f[this_f.meas_field_ac == b]
plt.plot(this_f.meas_temp, 1e6*this_f.susc_chi_volume,
label='%i' % (f)+' Hz')
plt.legend()
plt.xlabel('Temperature (K)')
plt.ylabel(r'$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
plotnum += 1
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_frequency.{}'.format(fmt)
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
## plot chi versus frequency at constant B
b = Bs.max()
t = Ts.min()
this_t = chi_data[chi_data.meas_temp == t]
this_t = this_t[this_t.meas_field_ac == b]
plt.semilogx(this_t.meas_freq, 1e6 *
this_t.susc_chi_volume, label='%i' % (t)+' K')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel(r'$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
if interactive:
pmagplotlib.draw_figs(figs)
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, fnames))
else:
return True, []
elif save_plots:
saved.extend(pmagplotlib.save_plots(figs, fnames))
return True, saved
[docs]
def quick_hyst(dir_path=".", meas_file="measurements.txt", save_plots=True,
interactive=False, fmt="png", specimen="", verbose=True, n_plots=10,
contribution=None, image_records=False):
"""
makes specimen plots of hysteresis data
Parameters
----------
dir_path : str, default "."
input directory
meas_file : str, default "measurements.txt"
name of MagIC measurement file
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
specimen : str, default ""
specific specimen to plot
verbose : bool, default True
if True, print more verbose output
    n_plots : int, default 10
        maximum number of plots to make
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    image_records : bool, default False
        if True, return a list of created images
Returns
---------
if image_records == False:
Tuple : (True or False indicating if conversion was successful, output file name(s) written)
if image_records == True:
Tuple : (True or False indicating if conversion was successful, output file name(s) written, list of images)
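    Examples
    --------
    A minimal usage sketch, assuming a measurements.txt containing LP-HYS
    hysteresis experiments (file name is illustrative):
    >>> status, saved = ipmag.quick_hyst(meas_file='measurements.txt', save_plots=True)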
"""
image_recs = []
if contribution is None:
con = cb.Contribution(dir_path, read_tables=['measurements'],
custom_filenames={'measurements': meas_file})
else:
con = contribution
# get as much name data as possible (used for naming plots)
if 'measurements' not in con.tables:
print("-W- No measurement file found")
if image_records:
return False, [], []
return False, []
con.propagate_location_to_measurements()
if 'measurements' not in con.tables:
        print('-W- Bad or missing measurement file')
if image_records:
return False, [], []
return False, []
meas_container = con.tables['measurements']
#meas_df = meas_container.df
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
saved = []
HystRecs = []
HDD = {}
HDD['hyst'] = 1
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
#
# get list of unique experiment names and specimen names
#
sids = []
hyst_data = meas_container.get_records_for_code('LP-HYS')
#experiment_names = hyst_data['experiment_name'].unique()
if not len(hyst_data):
print("-W- No hysteresis data found")
if image_records:
return False, [], []
return False, []
if 'specimen' not in hyst_data.columns:
print('-W- No specimen names in measurements data, cannot complete quick_hyst.py')
if image_records:
return False, [], []
return False, []
sids = hyst_data['specimen'].unique()
# if 'treat_temp' is provided, use that value, otherwise assume 300
    hyst_data['treat_temp'] = hyst_data['treat_temp'].where(
        hyst_data['treat_temp'].notnull(), '300')
# start at first specimen, or at provided specimen ('-spc')
k = 0
if specimen:
try:
print(sids)
k = list(sids).index(specimen)
except ValueError:
print('-W- No specimen named: {}.'.format(specimen))
print('-W- Please provide a valid specimen name')
if image_records:
return False, [], []
return False, []
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
if n_plots != "all":
if len(sids) > n_plots:
sids = sids[:n_plots]
while k < len(sids):
locname, site, sample, synth = '', '', '', ''
s = sids[k]
if verbose:
print(s, k + 1, 'out of ', len(sids))
# B, M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M = [], []
# get all measurements for this specimen
spec = hyst_data[hyst_data['specimen'] == s]
# get names
if 'location' in spec:
locname = spec['location'].iloc[0]
        if 'site' in spec:
            site = spec['site'].iloc[0]
if 'sample' in spec:
sample = spec['sample'].iloc[0]
# get all records with non-blank values in any intlist column
# find intensity data
        int_col = ''
        for int_column in intlist:
            if int_column in spec.columns:
                int_col = int_column
                break
        if not int_col:  # no intensity column for this specimen
            k += 1
            continue
        meas_data = spec[spec[int_col].notnull()]
        if len(meas_data) == 0:
            k += 1
            continue
#
c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
cnum = 0
Temps = []
xlab, ylab, title = '', '', ''
Temps = meas_data['treat_temp'].unique()
for t in Temps:
print('working on t: ', t)
t_data = meas_data[meas_data['treat_temp'] == t]
m = int_col
B = t_data['meas_field_dc'].astype(float).values
M = t_data[m].astype(float).values
# now plot the hysteresis curve(s)
#
if len(B) > 0:
B = np.array(B)
M = np.array(M)
if t == Temps[-1]:
xlab = 'Field (T)'
ylab = m
title = 'Hysteresis: ' + s
if t == Temps[0]:
pmagplotlib.clearFIG(HDD['hyst'])
pmagplotlib.plot_xy(
HDD['hyst'], B, M, sym=c[cnum], xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [
1.1*B.min(), 1.1*B.max()], [0, 0], sym='k-', xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [0, 0], [
1.1*M.min(), 1.1*M.max()], sym='k-', xlab=xlab, ylab=ylab, title=title)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(HDD)
cnum += 1
if cnum == len(c):
cnum = 0
#
files = {}
if save_plots:
if specimen != "":
s = specimen
for key in list(HDD.keys()):
if pmagplotlib.isServer:
if synth == '':
files[key] = "LO:_"+locname+'_SI:_'+site + \
'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else:
files[key] = 'SY:_'+synth+'_TY:_'+key+'_.'+fmt
else:
if synth == '':
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
else:
files[key] = "{}_{}.{}".format(synth, key, fmt)
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
if image_records:
for plot_type, filename in files.items():
image_rec = {'location': locname, 'site': site, 'sample': sample, 'specimen': s,
'file': filename, 'type': PLOT_TYPES[plot_type], 'title' : " ".join(["Hysteresis", s]),
'timestamp': time.time(), 'software_packages': version.version}
image_recs.append(image_rec)
if specimen:
if image_records:
return True, saved, image_recs
return True, saved
if interactive:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
if image_records:
for plot_type, filename in files.items():
image_rec = {'location': locname, 'site': site, 'sample': sample, 'specimen': s,
'file': filename, 'type': PLOT_TYPES[plot_type], 'title' : " ".join(["Hysteresis", s]),
'timestamp': time.time(), 'software_packages': version.version}
image_recs.append(image_rec)
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
k += 1
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
if image_records:
                return True, saved, image_recs
return True, saved
if ans == 's':
keepon = 1
            specimen = input(
                'Enter desired specimen name (or first part thereof): ')
while keepon == 1:
try:
k = list(sids).index(specimen)
keepon = 0
except ValueError:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = list(sids).index(specimen)
else:
k += 1
if not len(B):
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if image_records:
return True, saved, image_recs
return True, saved
[docs]
def vgpmap_magic(dir_path=".", results_file="sites.txt", crd="",
sym='ro', size=8, rsym="g^", rsize=8,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=0, lon_0=0,
save_plots=True, interactive=False, contribution=None,
image_records=False):
"""
makes a map of vgps and a95/dp,dm for site means in a sites table
Parameters
----------
dir_path : str, default "."
input directory path
results_file : str, default "sites.txt"
name of MagIC format sites file
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more color/shape options)
size : int, default 8
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
(see matplotlib documentation for more color/shape options)
rsize : int, default 8
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
        moll = mollweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implemented)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 0.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    image_records : bool, default False
        if True, return a list of created images
Returns
---------
if image_records == False:
type - Tuple : (True or False indicating if conversion was successful, file name(s) written)
if image_records == True:
Tuple : (True or False indicating if conversion was successful, output file name written, list of image recs)
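    Examples
    --------
    A minimal usage sketch, assuming a sites.txt with vgp_lat and vgp_lon
    columns (file name is illustrative):
    >>> status, saved = ipmag.vgpmap_magic(results_file='sites.txt', flip=True, fmt='png')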
"""
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
if contribution is None:
con = cb.Contribution(dir_path, single_file=results_file)
else:
con = contribution
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
if image_records:
return False, [], []
return False, []
if 'sites' not in con.tables:
print("-W - No sites data")
if image_records:
return False, [], []
return False, []
con.add_magic_table('contribution')
con_id = con.get_con_id()
image_recs = []
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in sites file
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
site_container = con.tables['sites']
site_container.front_and_backfill(['location'])
site_df = site_container.df
# use records with vgp_lat and vgp_lon
if 'vgp_lat' in site_df.columns and 'vgp_lon' in site_df.columns:
cond1, cond2 = site_df['vgp_lat'].notnull(), site_df['vgp_lon'].notnull()
    else:
        print('-W- No vgp_lat/vgp_lon data found, nothing to plot')
        if image_records:
            return False, [], []
        return False, []
Results = site_df[cond1 & cond2]
# use tilt correction
if coord and 'dir_tilt_correction' in Results.columns:
Results = Results[Results['dir_tilt_correction'] == coord]
# get location name and average ages
locs = Results['location'].dropna().unique()
if len(locs):
location = ":".join(Results['location'].unique())
else:
location = ""
    if 'age' in Results.columns and ages:
dates = Results['age'].unique()
# go through rows and extract data
for ind, row in Results.iterrows():
try:
lat, lon = float(row['vgp_lat']), float(row['vgp_lon'])
except ValueError:
lat = float(str(row['vgp_lat']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
lon = float(str(row['vgp_lon']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
if anti:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif not flip:
lats.append(lat)
lons.append(lon)
elif flip:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'vgp_dm' in list(row.keys()) and row['vgp_dm']:
ell1 = float(row['vgp_dm'])
if 'vgp_dp' in list(row.keys()) and row['vgp_dp']:
ell2 = float(row['vgp_dp'])
if 'vgp_alpha95' in list(row.keys()) and (row['vgp_alpha95'] or row['vgp_alpha95'] == 0):
ell1, ell2 = float(row['vgp_alpha95']), float(row['vgp_alpha95'])
if ell1 and ell2:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
location = location.strip(':')
    Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
            'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'bs',
            'symsize': 3, 'pltgrid': 0, 'res': res, 'boundinglat': 0.,
            'edgecolor': 'face'}  # 'edgecolor' is read when scattering reverse poles below
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
Opts['pltgrid'] = -1
Opts['sym'] = sym
Opts['symsize'] = size
Opts['names'] = []
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
if len(rlats) > 0:
Opts['sym'] = rsym
Opts['symsize'] = rsize
if len(lats) > 0:
color, symbol = Opts['sym'][0], Opts['sym'][1]
plt.gca().scatter(rlons, rlats, s=Opts['symsize'], c=color, marker=symbol,
transform=ccrs.PlateCarree(), edgecolors=Opts['edgecolor'])
else:
pmagplotlib.plot_map(FIG['map'], rlats, rlons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
    if ell:  # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer: # use server plot naming convention
if con_id:
files[key] = 'MC:_' + con_id + '_TY:_VGP_map.' + fmt
else:
files[key] = 'LO:_' + location + '_TY:_VGP_map.' + fmt
else: # use more readable naming convention
files[key] = '{}_VGP_map.{}'.format(location, fmt)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['map'] = location + ' VGP map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
save_plots = True
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
save_plots = True
else:
print("Good bye")
            if image_records:
return True, [], image_recs
return True, []
if save_plots:
version_num = pmag.get_version()
pmagplotlib.save_plots(FIG, files)
if image_records:
for file_type, filename in files.items():
image_rec = {'file': filename, 'title': "map of VGPs", 'type': 'VGP Map',
'keywords': "", 'software_packages': version_num,
'timestamp': date.today().isoformat()}
image_recs.append(image_rec)
            return True, list(files.values()), image_recs
    return True, list(files.values())
[docs]
def histplot(infile="", data=(), outfile="",
xlab='x', binsize=False, norm=1,
fmt='svg', save_plots=True, interactive=False):
"""
makes histograms for data
Parameters:
infile (str): default ""
input file name
format: single variable
data (tuple): list-like, default ()
list/array of values to plot if infile is not provided
outfile (str): default ""
name for plot, if not provided defaults to hist.FMT
xlab (str): default 'x'
label for x axis
binsize (int): default False
desired binsize. if not specified, an appropriate binsize will be calculated.
norm (int): default 1
1: norm, 0: don't norm, -1: show normed and non-normed axes
fmt (str): default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots (bool): default True
if True, create and save all requested plots
    interactive (bool): default False
        interactively plot and display
        (this is best used on the command line only)
    Returns:
        (status, output_files) - Tuple:
            True or False indicating if the plot was successful, output file name in a list
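    Examples:
        A minimal sketch with synthetic data (values are illustrative):
        >>> import numpy as np
        >>> status, saved = ipmag.histplot(data=np.random.normal(0, 1, 100), xlab='value', norm=0)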
"""
# set outfile name
if outfile:
fmt = ""
else:
outfile = 'hist.'+fmt
# read in data from infile or use data argument
if os.path.exists(infile):
D = np.loadtxt(infile)
else:
D = np.array(data)
try:
if not len(D):
print('-W- No data found')
return False, []
except ValueError:
pass
except TypeError:
print('-W- Not enough data found')
return False, []
fig = pmagplotlib.plot_init(1, 8, 7)
try:
len(D)
except TypeError:
D = np.array([D])
if len(D) < 5:
print("-W- Not enough points to plot histogram ({} point(s) provided, 5 required)".format(len(D)))
return False, []
# if binsize not provided, calculate reasonable binsize
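    # note: binsize acts as a points-per-bin count (Nbins = N / binsize below),
    # estimated here with a Sturges-style 1 + 3.22*ln(N) heuristic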
if not binsize:
binsize = int(np.around(1 + 3.22 * np.log(len(D))))
binsize = int(binsize)
Nbins = int(len(D) / binsize)
ax = fig.add_subplot(111)
if norm == 1:
print('normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
elif norm == 0:
print('not normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax.set_ylabel('Number')
elif norm == -1:
#print('trying twin')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
ax2 = ax.twinx()
n, bins, patches = ax2.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax2.set_ylabel('Number', rotation=-90)
plt.axis([D.min(), D.max(), 0, n.max()+.1*n.max()])
ax.set_xlabel(xlab)
name = 'N = ' + str(len(D))
plt.title(name)
if interactive:
pmagplotlib.draw_figs({1: 'hist'})
p = input('s[a]ve to save plot, [q]uit to exit without saving ')
if p != 'a':
return True, []
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile]
if pmagplotlib.isServer:
pmagplotlib.add_borders({'hist': 1}, {'hist': 'Intensity Histogram'})
if save_plots:
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile]
[docs]
def dmag_magic(in_file="measurements.txt", dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="loc", LT="AF", norm=True, XLP="",
save_plots=True, fmt="svg", interactive=False,
n_plots=5, contribution=None):
"""
plots intensity decay curves for demagnetization experiments
Parameters:
in_file (str): default "measurements.txt"
dir_path (str):
output directory, default "."
input_dir_path (str):
input file directory (if different from dir_path), default ""
spec_file (str):
input specimen file name, default "specimens.txt"
samp_file (str):
input sample file name, default "samples.txt"
site_file (str):
input site file name, default "sites.txt"
loc_file (str):
input location file name, default "locations.txt"
plot_by (str):
[spc, sam, sit, loc] (specimen, sample, site, location), default "loc"
LT (str):
lab treatment [T, AF, M], default AF
norm (bool):
normalize by NRM magnetization, default True
XLP (str):
exclude specific lab protocols, (for example, method codes like LP-PI)
default ""
save_plots (bool):
plot and save non-interactively, default True
fmt (str): str
["png", "svg", "pdf", "jpg"], default "svg"
interactive (bool): default False
interactively plot and display for each specimen
(this is best used on the command line only)
n_plots (int): default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns:
True or False indicating if conversion was successful, file name(s) written
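    Examples:
        A minimal usage sketch, assuming a measurements.txt with AF
        demagnetization (LT-AF-Z) data (file name is illustrative):
        >>> status, saved = ipmag.dmag_magic(in_file='measurements.txt', plot_by='spc', LT='AF', fmt='png')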
"""
dir_path = os.path.realpath(dir_path)
if not input_dir_path:
input_dir_path = dir_path
input_dir_path = os.path.realpath(input_dir_path)
# format plot_key
name_dict = {'loc': 'location', 'sit': 'site',
'sam': 'sample', 'spc': 'specimen'}
if plot_by not in name_dict.values():
try:
plot_key = name_dict[plot_by]
except KeyError:
print(
'Unrecognized plot_by {}, falling back to plot by location'.format(plot_by))
plot_key = "loc"
else:
plot_key = plot_by
# figure out what kind of experiment
LT = "LT-" + LT + "-Z"
if LT == "LT-T-Z":
units, dmag_key = 'K', 'treat_temp'
elif LT == "LT-AF-Z":
units, dmag_key = 'T', 'treat_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treat_mw_energy'
else:
units = 'U'
# init
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
# create or access contribution
if contribution is None:
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
return False, []
contribution = cb.Contribution(input_dir_path, single_file=in_file,
custom_filenames=fnames)
file_type = list(contribution.tables.keys())[0]
print(len(contribution.tables['measurements'].df),
' records read from ', in_file)
# add plot_key into measurements table
if plot_key not in contribution.tables['measurements'].df.columns:
#contribution.propagate_name_down(plot_key, 'measurements')
contribution.propagate_location_to_measurements()
data_container = contribution.tables[file_type]
# pare down to only records with useful data
# grab records that have the requested code
data_slice = data_container.get_records_for_code(LT)
# and don't have the offending code
data = data_container.get_records_for_code(XLP, incl=False, use_slice=True,
sli=data_slice, strict_match=False)
# make sure quality is in the dataframe
if 'quality' not in data.columns:
data['quality'] = 'g'
# get intensity key and make sure intensity data is not blank
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
# get rid of any entirely blank intensity columns
for col_name in IntMeths:
if not data[col_name].any():
data.drop(col_name, axis=1, inplace=True)
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
if len(IntMeths) == 0:
print('-E- No intensity headers found')
return False, []
# plot first intensity method found - normalized to initial value anyway - doesn't matter which used
int_key = IntMeths[0]
data = data[data[int_key].notnull()]
# make list of individual plots
# by default, will be by location_name
plotlist = data[plot_key].unique()
plotlist.sort()
pmagplotlib.plot_init(FIG['demag'], 5, 5)
last_plot = False
saved = []
# iterate through and plot the data
if n_plots != "all":
if len(plotlist) > n_plots:
plotlist = plotlist[:n_plots]
for plot in plotlist:
if plot == plotlist[-1]:
last_plot = True
plot_data = data[data[plot_key] == plot].copy()
if not save_plots:
print(plot, 'plotting by: ', plot_key)
if len(plot_data) > 2:
title = plot
spcs = []
spcs = plot_data['specimen'].unique()
for spc in spcs:
INTblock = []
spec_data = plot_data[plot_data['specimen'] == spc]
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0, float(
rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save_plots:
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(
dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
saved.extend(pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir))
elif interactive:
pmagplotlib.draw_figs(FIG)
prompt = " S[a]ve to save plot, [q]uit, Return to continue: "
ans = input(prompt)
if ans == 'q':
return True, []
if ans == "a":
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(
dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
saved.extend(pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir))
else:
pmagplotlib.draw_figs(FIG)
pmagplotlib.clearFIG(FIG['demag'])
if last_plot:
return True, saved
PLOT_TYPES = {'eqarea': "Equal Area Plot", "arai": "ARAI plot",
"zijd": "Zijderveld Plot", "demag": "Demagnetization Plot",
"deremag": "De-Remagnetization Plot", 'hyst': 'Hysteresis Loop',
"data": "Anisotropy Data Plot", "conf": "Anisotropy Confidence Plot",
"tcdf": "tcdf", "cdf_0": "cdf_0", "cdf_1": "cdf_1", "cdf_2": "cdf_2",
"day": "Day Plot", "S-Bcr": "Day Plot", "S-Bc": "Day Plot",
"bcr1-bcr2": "Day Plot"}
[docs]
def df_depthplot(df,d_key='core_depth',fmt='png',location='unknown',save=False):
"""
Makes depth (or height) plots of various columns in the dataframe
Parameters:
df : pandas dataframe
d_key (str):
name of column for plotting against ['core_depth','composite_depth','height']
fmt (str):
format of saved figure, default is 'png'
location (str):
name of location
save (boolean):
if True, save plot to location.fmt
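    Examples:
        A minimal sketch, assuming a MagIC-style sites table with a
        core_depth column (file and location names are illustrative):
        >>> import pandas as pd
        >>> df = pd.read_csv('sites.txt', sep='\t', header=1)
        >>> ipmag.df_depthplot(df, d_key='core_depth', location='my_core', save=True)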
"""
plot_keys=['dir_dec','dir_inc','int_rel','int_rel_ARM','int_rel_IRM','int_rel_chi',
'int_abs','vgp_lat','vgp_lon','magn_moment','magn_volume','magn_mass',
'susc_chi_volume','susc_chi_mass']
use_keys=plot_keys[:]
for key in plot_keys:
if key not in df.columns:
use_keys.remove(key)
else:
test_df=df.dropna(subset=[key])
if len(test_df)==0:
use_keys.remove(key)
print('plotting these columns: ',use_keys)
cols=len(use_keys)
height=11
width=cols*2
plt_num=1
pmagplotlib.clearFIG(1)
fig=plt.figure(1,figsize=(width,height))
for key in use_keys:
fig.add_subplot(1,cols,plt_num)
plt.plot(df[key],df[d_key],'k.')
plt.xlabel(key)
if plt_num==1:
            plt.ylabel(d_key)
plt_num+=1
#plt.title(location)
if save:
plt.savefig(location+'.'+fmt)
[docs]
def validate_magic(top_dir,doi=False,private_key=False,contribution_id=False):
"""
download and validate a magic contribution
Parameters
-----------
top_dir: str
name of project
doi: str
DOI of paper to download
contribution_id: str
id of contribution
    private_key : str
        private key of contribution in a private workspace
        (must be supplied together with contribution_id)
    Returns
    -------
    (magic_dir, upload_file) on success; (False, False) on failure
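    Examples
    --------
    A minimal usage sketch, downloading and validating a published
    contribution by DOI (the DOI shown is a placeholder):
    >>> magic_dir, upload_file = ipmag.validate_magic('MyProject', doi='10.xxxx/example')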
"""
# set up directories
magic_dir=top_dir+'/MagIC'
dirs=os.listdir()
if top_dir not in dirs:
os.makedirs(top_dir)
dirs=os.listdir(top_dir)
if 'MagIC' not in dirs:
os.makedirs(magic_dir)
if doi:
magic_contribution='magic_contribution.txt' # set the file name string
download_magic_from_doi(doi) # download the contribution from MagIC
os.rename(magic_contribution, magic_dir+'/'+magic_contribution) # move the contribution to the directory
download_magic(magic_contribution,dir_path=magic_dir,print_progress=False) # unpack the file
elif contribution_id:
magic_contribution='magic_contribution_'+str(contribution_id)+'.txt' # set the file name string
download_magic_from_id(contribution_id) # download the contribution from MagIC
os.rename(magic_contribution, magic_dir+'/'+magic_contribution) # move the contribution to the directory
download_magic(magic_contribution,dir_path=magic_dir,print_progress=False) # unpack the file
elif private_key:
shared_contribution_response = requests.get(api.format('data'), params={'id': contribution_id, 'key': private_key})
if (shared_contribution_response.status_code == 200):
shared_contribution_text = shared_contribution_response.text
print(shared_contribution_text[0:200], '\n')
        elif (shared_contribution_response.status_code == 204):  # no matching contribution
            print('Contribution ID and/or private key do not match any contributions in MagIC.', '\n')
            return False, False
else:
print('Error:', shared_contribution_response.json()['err'][0]['message'], '\n')
return False,False
# save and unpack downloaded data
magic_contribution='magic_contribution_'+str(contribution_id)+'.txt'
        with open(magic_dir+'/'+magic_contribution, 'w', errors="backslashreplace") as magic_out:
            magic_out.write(shared_contribution_text)
download_magic(magic_contribution,dir_path=magic_dir,print_progress=False) # unpack the file
validation=upload_magic(dir_path=magic_dir,input_dir_path=magic_dir,concat=True)
upload_file=validation[0].split('/')[-1]
return magic_dir,upload_file
[docs]
def simul_correlation_prob(alpha, k1, k2, trials=10000, print_result=False):
"""
The function runs an algorithm from Bogue and Coe (1981; doi: 10.1029/JB086iB12p11883)
for probabilistic correlation, evaluating the probability that the similarity between
two paleomagnetic directions is due to simultaneous sampling of the ancient magnetic
field. Original written in Python by S. Bogue, translated to PmagPy functionality by AFP.
Parameters:
alpha : angle between paleomagnetic directions (site means)
k1 (float): kappa estimate for first direction
k2 (float): kappa estimate for second direction
trials (int): the number of simulations [default = 10,000]
print_result (boolean): the probability value returned in a sentence [default = False]
Returns:
float
number indicating probability value
Example:
Provide an angle and two precision parameter estimates to get the probability of
simultaneity, compare to RC / 11 comparison from Table 2 of the original publication
(exact value may differ due to RNG):
>>> ipmag.simul_correlation_prob(3.6, 391, 146)
0.8127
"""
# set initial values for the hit and miss counters
hit = 0
miss = 0
# trial loop
for i in range(trials):
# generates two synthetic directions, using the estimated kappas
lontp1,lattp1 = fishrot(k1, 1, 0, 90, di_block=False)
lontp2,lattp2 = fishrot(k2, 1, 0, 90, di_block=False)
# determines the angle between the generated directions
angle=pmag.angle([lontp1[0], lattp1[0]], [lontp2[0], lattp2[0]])
# checks if angle between synthetic directions meets or exceeds 'known' angle from directions to be tested
if angle >= alpha:
hit = hit + 1
else:
miss = miss + 1
# the probability is the fraction of trials in which the synthetic angle
# meets or exceeds the observed angle
simul_prob = 1.0 * hit / trials
if print_result:
print('The probability that directions represent simultaneous samples of the geomagnetic field is: {0:5.3f}'.format(simul_prob))
return simul_prob
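# Note on precision (added sketch): simul_prob is a binomial proportion, so
# its one-sigma Monte Carlo uncertainty is roughly sqrt(p * (1 - p) / trials).
# With the default 10,000 trials and p near 0.81 that is about 0.004, which is
# why repeated runs of the docstring example scatter in the third decimal:
# >>> p = 0.8127
# >>> np.sqrt(p * (1 - p) / 10000)
# np.float64(0.0039...)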
def rand_correlation_prob(sec_var, delta1, delta2, alpha, trials=10000, print_result=False):
"""
The function runs an algorithm from Bogue and Coe (1981; doi: 10.1029/JB086iB12p11883)
for probabilistic correlation, evaluating the probability that the similarity between
two paleomagnetic directions is due to random sampling of the ancient magnetic
field. Original written in Python by S. Bogue, translated to PmagPy functionality by AFP.
Parameters:
sec_var: kappa estimate of regional secular variation (probably 30 or 40)
alpha: angle between paleomagnetic directions (or poles)
delta1: distance of direction 1 from mean direction
delta2: distance of direction 2 from mean direction
trials: the number of simulations, default=10,000
print_result: the probability value printed as a sentence, default=False
Returns:
float
number indicating probability value
Example:
Provide estimate of regional secular variation, angle between directions,
distance of each direction from a mean direction (like GAD) to return probability
of random field sampling, compare to RC / 11 comparison from Table 2 of the original
publication (exact value may differ due to RNG):
>>> ipmag.rand_correlation_prob(40, 17.2, 20, 3.6)
np.float64(0.0103)
"""
# probability of getting a VGP within alpha of vgp1
hit = 0
miss = 0
for i in range(trials):
dec, inc = fishrot(sec_var, 1, 0, 0, di_block=False)
angle = pmag.angle([dec[0], inc[0]], [0, delta1])
if angle <= alpha:
hit = hit + 1
else:
miss = miss + 1
prand1 = (1.0 * hit) / trials
# probability of getting a VGP within alpha of vgp2
hit = 0
miss = 0
for i in range(trials):
dec, inc = fishrot(sec_var, 1, 0, 0, di_block=False)
angle = pmag.angle([dec[0], inc[0]], [0, delta2])
if angle <= alpha:
hit = hit + 1
else:
miss = miss + 1
prand2 = (1.0 * hit) / trials
# the average of the two probabilities is the probability of the "random" hypothesis
rand_prob = np.round((prand1 + prand2) / 2, 4)
if print_result:
print('The probability (average of P1 and P2) that directions represent random samples of the geomagnetic field is: {0:5.3f}'.format(rand_prob))
return rand_prob
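# Combined sketch (an assumption about usage, in the spirit of Bogue and Coe,
# 1981): the two probabilities can be weighed against each other, e.g. as the
# ratio P(simultaneous) / P(random), where a ratio well above 1 favors
# correlation. The inputs echo the RC / 11 docstring examples above, and the
# exact numbers are RNG-dependent:
# >>> p_sim = ipmag.simul_correlation_prob(3.6, 391, 146)
# >>> p_rand = ipmag.rand_correlation_prob(40, 17.2, 20, 3.6)
# >>> p_sim / p_rand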