Skip to content

Commit 9b352bc

Browse files
Merge branch 'dev' of https://github.com/gimli-org/gimli into dev
2 parents fb56b29 + 13c26dd commit 9b352bc

30 files changed

+407
-1117
lines changed

.github/workflows/main.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ jobs:
3939
OPENBLAS_CORETYPE: "ARMV8" # current bug in OpenBlas core detection
4040
steps:
4141
- name: Run pg.test()
42-
run: python3 -c "import pygimli; pygimli.test(show=False)"
42+
run: python3 -c "import pygimli; pygimli.test(show=False, abort=True)"
4343
working-directory: source
4444
- name: Install as development package
4545
working-directory: source

INSTALLATION.rst

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -21,14 +21,13 @@ Installation
2121
</p><br>
2222

2323

24-
On all platforms, we recommend to install pyGIMLi via the conda package manager
25-
contained in the Anaconda Python distribution. For details on how to install
26-
Anaconda, see `this page <https://docs.anaconda.com/anaconda/install/>`_.
27-
28-
Note that Anaconda comes with many (great) packages, many of which you likely
29-
will not use. If you want to save space, you can install the `light-weight
30-
version Miniconda
31-
<https://docs.anaconda.com/free/miniconda/miniconda-install/>`_.
24+
On all platforms, we recommend installing pyGIMLi via the conda package manager
25+
and the cleanest way is to create a new environment.
26+
Besides large installers like `Anaconda <https://anaconda.org/>`_
27+
that come with many (great) packages, you can save space and install a
28+
light-weight installer like
29+
`Miniconda <https://docs.anaconda.com/free/miniconda/miniconda-install/>`_
30+
or `Miniforge <https://conda-forge.org/download/>`_.
3231

3332
.. note::
3433

@@ -91,12 +90,19 @@ follow these steps:
9190
9291
pyGIMLi on Google Colab
9392
-----------------------
94-
Even though still experimental, pyGIMLi can be run on Google Colab without any
95-
installation on your own computer. Just create a new Notebook and install the
96-
pyGIMLi package via pip:
93+
pyGIMLi can be run on Google Colab without any installation on your own
94+
computer. Just create a new Notebook and install pyGIMLi via pip:
95+
96+
.. code:: python
97+
98+
!pip install pygimli tetgen
99+
100+
It turns out that there are some packages preinstalled that lead to some
101+
incompatible numpy versions, so you might have to uninstall them first.
97102

98103
.. code:: python
99104
105+
!pip uninstall -y numba tensorflow pytensor thinc
100106
!pip install pygimli tetgen
101107
102108
Staying up-to-date

core/src/inversion.cpp

Lines changed: 8 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,6 @@ const RVector & RInversion::run(){ ALLOW_PYTHON_THREADS
248248

249249
bool RInversion::oneStep() {
250250
iter_++;
251-
252251
deltaModelIter_.resize(model_.size());
253252
deltaModelIter_ *= 0.0;
254253
deltaDataIter_ = (tD_->trans(data_) - tD_->trans(response_));
@@ -257,19 +256,19 @@ bool RInversion::oneStep() {
257256
if (verbose_) std::cout << "sum(abs(deltaDataIter_)) == Zero" << std::endl;
258257
return false;
259258
}
260-
// Vec deltaModel0(model_.size());
259+
// Vec deltaModel0(model_.size());
261260
Vec modelNew( model_.size());
262261
Vec responseNew( data_.size());
263262
Vec roughness(this->constraintsCount(), 0.0);
264263

265264
this->checkJacobian((recalcJacobian_ && iter_ > 1) || jacobiNeedRecalc_);
266265

267266
// if ((recalcJacobian_ && iter_ > 1) || jacobiNeedRecalc_ ) {
268-
// Stopwatch swatch(true);
269-
// if (verbose_) std::cout << "Recalculating Jacobian matrix ...";
270-
// forward_->createJacobian(model_);
271-
// if (verbose_) std::cout << swatch.duration(true) << " s" << std::endl;
272-
// }
267+
// Stopwatch swatch(true);
268+
// if (verbose_) std::cout << "Recalculating Jacobian matrix ...";
269+
// forward_->createJacobian(model_);
270+
// if (verbose_) std::cout << swatch.duration(true) << " s" << std::endl;
271+
// }
273272

274273
if (!localRegularization_) {
275274
DOSAVE echoMinMax(model_, "model: ");
@@ -304,10 +303,7 @@ bool RInversion::oneStep() {
304303
DOSAVE save(tM_->deriv(model_), "modelTrans");
305304
DOSAVE save(tD_->deriv(response_), "responseTrans");
306305

307-
308-
309306
if (verbose_) std::cout << "solve CGLSCDWWtrans with lambda = " << lambda_ << std::endl;
310-
311307
try{
312308
solveCGLSCDWWhtrans(*forward_->jacobian(), *forward_->constraints(),
313309
dataWeight_,
@@ -322,19 +318,18 @@ bool RInversion::oneStep() {
322318
// __MS("Debug halt! 1700")
323319
// forward_->mesh()->save("S1700.bms");
324320
// forward_->regionManager().mesh().save("R1700.bms");
325-
321+
326322
// __MS("Debug halt! 17708")
327323
// forward_->mesh()->save("S17708.bms");
328324
// forward_->regionManager().mesh().save("R17708.bms");
329-
325+
330326
// std::cout<<forward_->mesh()->boundary(2121).marker() << std::endl;
331327
// std::cout<<forward_->mesh()->boundary(2122).marker() << std::endl;
332328

333329
// exit(1);
334330
log(Error, "solveCGLSCDWWhtrans throws unknown exception.");
335331
}
336332
} // else no optimization
337-
338333
//restrictMax(deltaModelIter_, 50.0); // only work for log trans
339334
DOSAVE echoMinMax(deltaModelIter_, "dm");
340335
modelNew = tM_->update(model_, deltaModelIter_);
@@ -394,7 +389,6 @@ bool RInversion::oneStep() {
394389
DOSAVE forward_->mesh()->exportVTK("fop-model" + str(iter_));
395390
DOSAVE forward_->mesh()->clearData();
396391
}
397-
398392
return true;
399393
}
400394

core/src/meshgenerators.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,7 @@ Mesh createMesh3D(const Mesh & mesh, const RVector & z, int topMarker, int botto
246246
}
247247
}
248248
}
249-
249+
mesh3.createNeighborInfos();
250250
return mesh3;
251251
}
252252

core/src/ttdijkstramodelling.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -608,8 +608,8 @@ void TravelTimeDijkstraModelling::createJacobian(RSparseMapMatrix & jacobian,
608608
}
609609
if (this->verbose()){
610610
std::cout << "/" << swatch.duration(true) << " ";
611+
std::cout << std::endl;
611612
}
612-
std::cout << std::endl;
613613
}
614614

615615
TTModellingWithOffset::TTModellingWithOffset(Mesh & mesh, DataContainer & dataContainer, bool verbose)

doc/examples/3_ert/plot_02_ert_field_data.py

Lines changed: 32 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,11 @@
99
- 2D inversion with topography
1010
- geometric factor generation
1111
- topography effect
12+
13+
The data is the profile 11 already shown by Günther et al. (2006, Fig. 11).
1214
"""
13-
# sphinx_gallery_thumbnail_number = 5
15+
# sphinx_gallery_thumbnail_number = 7
16+
import matplotlib.pyplot as plt
1417
import pygimli as pg
1518
from pygimli.physics import ert
1619

@@ -21,36 +24,46 @@
2124
data = pg.getExampleData('ert/slagdump.ohm', verbose=True)
2225
print(data)
2326

27+
###############################################################################
28+
# Let us first have a look at the topography contained in the data
29+
plt.plot(pg.x(data), pg.z(data), 'x-')
30+
2431
###############################################################################
2532
# The data file does not contain geometric factors (token field 'k'),
2633
# so we create them based on the given topography.
34+
k0 = ert.createGeometricFactors(data) # the analytical one
2735
data['k'] = ert.createGeometricFactors(data, numerical=True)
2836

29-
###############################################################################
30-
# We initialize the ERTManager for further steps and eventually inversion.
31-
mgr = ert.ERTManager(sr=False)
32-
3337
###############################################################################
3438
# It might be interesting to see the topography effect, i.e the ratio between
35-
# the numerically computed geometry factor and the analytical formula
36-
k0 = ert.createGeometricFactors(data)
37-
_ = ert.showData(data, vals=k0/data['k'], label='Topography effect')
39+
# the numerically computed geometry factor and the analytical formula after
40+
# Rücker et al. (2006). We display it using a colormap with neutral white.
41+
_ = ert.showData(data, vals=k0/ data['k'], label='Topography effect',
42+
cMin=2/3, cMax=3/2, logScale=True, cMap="bwr")
3843

3944
###############################################################################
40-
# The data container has no apparent resistivities (token field 'rhoa') yet.
41-
# We can let the Manager fix this later for us (as we now have the 'k' field),
42-
# or we do it manually.
43-
mgr.checkData(data)
44-
print(data)
45+
# We can now compute the apparent resistivity and display it, once with the
46+
# wrong analytical formula and once with the numerical values in data['k']
47+
data['rhoa'] = data['r'] * data['k']
48+
kw = dict(cMin=6, cMax=33)
49+
fig, ax = plt.subplots(ncols=2)
50+
data.show(data['r']*k0, ax=ax[0], **kw);
51+
data.show(ax=ax[1], **kw)
52+
ax[0].set_title('Uncorrected')
53+
ax[1].set_title('Corrected');
4554

4655
###############################################################################
4756
# The data container does not necessarily contain data errors
4857
# (token field 'err'), requiring us to enter data errors. We can let the
4958
# manager guess some defaults for us automatically or set them manually
50-
data['err'] = ert.estimateError(data, relativeError=0.03, absoluteUError=5e-5)
51-
# or manually:
52-
# data['err'] = data_errors # somehow
53-
_ = ert.show(data, data['err']*100)
59+
data.estimateError(relativeError=0.03, absoluteUError=5e-5)
60+
# which internally calls
61+
# data['err'] = ert.estimateError(data, ...) # can also set manually
62+
_ = data.show(data['err']*100, label='error estimate (%)')
63+
64+
###############################################################################
65+
# We initialize the ERTManager for further steps and eventually inversion.
66+
mgr = ert.ERTManager(data)
5467

5568
###############################################################################
5669
# Now the data have all necessary fields ('rhoa', 'err' and 'k') so we can run
@@ -59,8 +72,7 @@
5972
#
6073
mod = mgr.invert(data, lam=10, verbose=True,
6174
paraDX=0.3, paraMaxCellSize=10, paraDepth=20, quality=33.6)
62-
63-
_ = mgr.showResult()
75+
ax, cb = mgr.showResult()
6476

6577
###############################################################################
6678
# We can view the resulting model in the usual way.
@@ -69,4 +81,4 @@
6981

7082
###############################################################################
7183
# Or just plot the model only using your own options.
72-
_ = mgr.showResult(mod, cMin=5, cMax=30, cMap="Spectral_r", logScale=True)
84+
ax, cb = mgr.showResult(mod, cMin=5, cMax=30, cMap="Spectral_r", logScale=True)

doc/tutorials/2_modelling/plot_3-mod-fem-BC.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,9 @@ def uDirichlet(boundary):
7272
# Alternatively we can define the gradients of the solution on the boundary,
7373
# i.e., Neumann type BC. This is done with another dictionary {marker: value} and passed by the bc dictionary.
7474
neumannBC = {1: -0.5, # left
75-
4: 2.5} # bottom
75+
3: 2.5} # bottom
7676

77-
dirichletBC = {3: 1.0} # top
77+
dirichletBC = {4: 1.0} # top
7878

7979
u = solve(grid, f=0., bc={'Dirichlet': dirichletBC, 'Neumann': neumannBC})
8080

doc/tutorials/2_modelling/plot_5-mod-fem-heat-2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
###############################################################################
3939
# Call :py:func:`pygimli.solver.solveFiniteElements` to solve the heat
4040
# diffusion equation :math:`\nabla\cdot(a\nabla T)=0` with :math:`T(bottom)=0`
41-
# (boundary marker 8) and :math:`T(top)=1` (boundary marker 4), where :math:`a`
41+
# (boundary marker 4) and :math:`T(top)=1` (boundary marker 8), where :math:`a`
4242
# is the thermal diffusivity and :math:`T` is the temperature distribution.
4343
# We assign thermal diffusivities to the four regions using their marker
4444
# numbers in a dictionary (a) and the fixed temperatures at the boundaries

pygimli/CMakeLists.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ else()
4949
add_custom_target(whl${TARGET_NAME}PackageBuild DEPENDS whlpgcore
5050
COMMAND "${Python_EXECUTABLE}" -m pip wheel . --wheel-dir=${WHEELHOUSE} --find-links ${WHEELHOUSE}
5151
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
52-
COMMENT "Building python wheel package for ${TARGET_NAME}"
52+
COMMENT "Building python wheel package (${WHEELFILE}) for ${TARGET_NAME}"
5353
)
5454

5555
add_custom_target(whl${TARGET_NAME} DEPENDS whl${TARGET_NAME}PackageBuild)
@@ -59,12 +59,12 @@ else()
5959
add_custom_target(whl${TARGET_NAME}TestInstall DEPENDS whl${TARGET_NAME} DEPENDS whlpgcoreTestRun
6060
COMMAND "${Python3_EXECUTABLE}" -m pip uninstall -y ${TARGET_NAME}
6161
COMMAND "${Python3_EXECUTABLE}" -m pip install ${WHEELHOUSE}/${PY_WHEELFILE} --quiet --find-links ${WHEELHOUSE}
62-
COMMENT "Installing ${TARGET_NAME} in virtual test environment"
62+
COMMENT "Installing ${TARGET_NAME} (${WHEELFILE}) in virtual test environment"
6363
)
6464
add_custom_target(whl${TARGET_NAME}TestRun DEPENDS whl${TARGET_NAME}TestInstall
6565
COMMAND ${Python3_EXECUTABLE} -c "import pygimli as pg; pg.version()"
6666
VERBATIM
67-
COMMENT "Testing ${TARGET_NAME} installation in virtual test environment. ${Python3_EXECUTABLE} -c \'import pygimli as pg; pg.version()\'"
67+
COMMENT "Testing ${TARGET_NAME} installation (${WHEELFILE}) in virtual test environment. ${Python3_EXECUTABLE} -c \'import pygimli as pg; pg.version()\'"
6868
)
6969
add_custom_target(whl${TARGET_NAME}Test DEPENDS whl${TARGET_NAME}TestRun)
7070

pygimli/core/datacontainer.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,3 +220,28 @@ def __DataContainer_subset(self, **kwargs):
220220
return new
221221

222222
DataContainer.subset = __DataContainer_subset
223+
224+
def __DataContainer_markSensorInvalid(self, idx):
225+
"""Mark all measurements using a specific sensor invalid."""
226+
for tok in self.dataMap().keys():
227+
if self.isSensorIndex(tok):
228+
self.markInvalid(self[tok] == idx)
229+
230+
DataContainer.markSensorInvalid = __DataContainer_markSensorInvalid
231+
232+
def __DataContainer_removeSensorData(self, idx):
233+
"""Remove all measurements using a specific sensor."""
234+
self.markSensorInvalid(idx)
235+
self.removeInvalid() # TODO4Nico: do it correctly
236+
## It will also remove any previously invalid measurements
237+
## So you better use only remove or markValid
238+
239+
DataContainer.removeSensorData = __DataContainer_removeSensorData
240+
241+
def __DataContainer_removeSensor(self, idx):
242+
"""Remove a specific sensor."""
243+
self.removeSensorData(idx)
244+
self.removeUnusedSensors()
245+
## Same argument as above, there might be other unused ones
246+
247+
DataContainer.removeSensor = __DataContainer_removeSensor

0 commit comments

Comments
 (0)