1 |
commit: 021c7068ab1f61ef171029cbb243d13f1b33237e |
2 |
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru> |
3 |
AuthorDate: Mon May 11 22:56:45 2020 +0000 |
4 |
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru> |
5 |
CommitDate: Mon May 11 22:56:45 2020 +0000 |
6 |
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=021c7068 |
7 |
|
8 |
sci-physics/SU2: new package |
9 |
|
10 |
The SU2 package contains several bundled libraries that currently |
11 |
aren't unbundled in the ebuild. |
12 |
|
13 |
CGNS: |
14 |
Tried to unbundle CGNS but it failed to compile against the system |
15 |
cgnslib-3.3.0; it successfully compiled against cgnslib-3.4.0, |
16 |
but the related tests failed. |
17 |
|
18 |
Metis, Parmetis: |
19 |
These packages couldn't be installed simultaneously in Gentoo, |
20 |
and they are required by the build system if SU2 is compiled via the meson build system |
21 |
with the mpi option enabled. They could be optionally chosen |
22 |
if the autotools build system is used (the ebuild uses meson). |
23 |
|
24 |
Some additional features are disabled due to their experimental status |
25 |
or due to the requirement to download additional third-party libraries. |
26 |
At this moment the ebuild doesn't provide such features to be built. |
27 |
They will also be bundled and compiled statically if implemented. |
28 |
|
29 |
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru> |
30 |
|
31 |
sci-physics/SU2/Manifest | 3 + |
32 |
sci-physics/SU2/SU2-7.0.4.ebuild | 112 + |
33 |
sci-physics/SU2/files/SU2-7.0.4-fix-env.patch | 24 + |
34 |
.../SU2/files/SU2-7.0.4-fix-python-optimize.patch | 2302 ++++++++++++++++++++ |
35 |
.../SU2/files/SU2-7.0.4-unbundle_boost.patch | 31 + |
36 |
sci-physics/SU2/metadata.xml | 28 + |
37 |
6 files changed, 2500 insertions(+) |
38 |
|
39 |
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest |
40 |
new file mode 100644 |
41 |
index 0000000..6ebe00f |
42 |
--- /dev/null |
43 |
+++ b/sci-physics/SU2/Manifest |
44 |
@@ -0,0 +1,3 @@ |
45 |
+DIST SU2-7.0.4-TestCases.tar.gz 437960103 BLAKE2B 2469edc23f62589fa18be5fff6e036965f6b5f6e2be207642d318aac4d2044c07f0891568f86c1a3ab065e79afce50cc73ad0857b82093d79ac28a4d0451a4ad SHA512 f21d963815e024582e99647a21ebae0b17fc69f75bc34bb72cc3a86cc9ff8502342b31755b5da73e7088b4d0ce430bdd6b4efefc03583cbfcf5156c1849328e1 |
46 |
+DIST SU2-7.0.4-Tutorials.tar.gz 64282233 BLAKE2B b0d13a0988d5617868fad6098fe8110e3600415f05784ff04416cb23162fadc8c1d06d50c5200b14f65afb3e97ee766b21dfdcd4ec8ded9026baf510ca829e48 SHA512 604a05e15a8eae1c7255016261a6576a97fc364f66004ecaccaae932e3a97624c2599d354dd874562824caa8f8ea3dac2f03e0105b1c27d66ec0bf59e3a27105 |
47 |
+DIST SU2-7.0.4.tar.gz 20516147 BLAKE2B 21f45e4918bbc6a72bf47ad61d3301abed50a7cf569e9e8d4040201ff653e583d50a547853365302671922f023d0cc6f3735c1afcd0f3b6bf3c3fc92dc807787 SHA512 8e69f0e1d335adef0bd98666c98e29bc15ee0d7a0fcbbbc91a1ba02275ca52fda7f8f47434547f7982ce0e73a6ff78bd2ed57ca328d1e87b8afdd3b0a698d262 |
48 |
|
49 |
diff --git a/sci-physics/SU2/SU2-7.0.4.ebuild b/sci-physics/SU2/SU2-7.0.4.ebuild |
50 |
new file mode 100644 |
51 |
index 0000000..1884b11 |
52 |
--- /dev/null |
53 |
+++ b/sci-physics/SU2/SU2-7.0.4.ebuild |
54 |
@@ -0,0 +1,112 @@ |
55 |
+# Copyright 1999-2020 Gentoo Authors |
56 |
+# Distributed under the terms of the GNU General Public License v2 |
57 |
+ |
58 |
+EAPI=7 |
59 |
+ |
60 |
+PYTHON_COMPAT=( python3_{6,7,8} ) |
61 |
+ |
62 |
+inherit meson python-single-r1 |
63 |
+ |
64 |
+DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design" |
65 |
+HOMEPAGE="https://su2code.github.io/" |
66 |
+SRC_URI=" |
67 |
+ https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz |
68 |
+ test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz ) |
69 |
+ tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz ) |
70 |
+" |
71 |
+ |
72 |
+LICENSE="LGPL-2.1" |
73 |
+SLOT="0" |
74 |
+KEYWORDS="~amd64" |
75 |
+ |
76 |
+IUSE="cgns -mkl +mpi openblas tecio test tutorials" |
77 |
+RESTRICT="!test? ( test )" |
78 |
+REQUIRED_USE=" |
79 |
+ ${PYTHON_REQUIRED_USE} |
80 |
+ mkl? ( !openblas ) |
81 |
+" |
82 |
+ |
83 |
+RDEPEND=" |
84 |
+ ${PYTHON_DEPS} |
85 |
+ mpi? ( virtual/mpi[cxx] ) |
86 |
+ mkl? ( sci-libs/mkl ) |
87 |
+ openblas? ( sci-libs/openblas ) |
88 |
+" |
89 |
+DEPEND=" |
90 |
+ ${RDEPEND} |
91 |
+ tecio? ( dev-libs/boost:= ) |
92 |
+" |
93 |
+BDEPEND="virtual/pkgconfig" |
94 |
+ |
95 |
+PATCHES=( |
96 |
+ "${FILESDIR}/${P}-fix-env.patch" |
97 |
+ "${FILESDIR}/${P}-unbundle_boost.patch" |
98 |
+ "${FILESDIR}/${P}-fix-python-optimize.patch" |
99 |
+) |
100 |
+ |
101 |
+DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" ) |
102 |
+ |
103 |
+src_unpack() { |
104 |
+ unpack "${P}.tar.gz" |
105 |
+ if use test ; then |
106 |
+ einfo "Unpacking ${P}-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases" |
107 |
+ tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die |
108 |
+ fi |
109 |
+ if use tutorials ; then |
110 |
+ einfo "Unpacking ${P}-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}" |
111 |
+ mkdir "${P}"/Tutorials |
112 |
+ tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die |
113 |
+ fi |
114 |
+} |
115 |
+ |
116 |
+src_configure() { |
117 |
+ local emesonargs=( |
118 |
+ -Denable-autodiff=false |
119 |
+ -Denable-directdiff=false |
120 |
+ -Denable-pastix=false |
121 |
+ -Denable-pywrapper=false |
122 |
+ -Dwith-omp=false |
123 |
+ $(meson_feature mpi with-mpi) |
124 |
+ $(meson_use cgns enable-cgns) |
125 |
+ $(meson_use mkl enable-mkl) |
126 |
+ $(meson_use openblas enable-openblas) |
127 |
+ $(meson_use tecio enable-tecio) |
128 |
+ $(meson_use test enable-tests) |
129 |
+ ) |
130 |
+ meson_src_configure |
131 |
+} |
132 |
+ |
133 |
+src_test() { |
134 |
+ ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD |
135 |
+ ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF |
136 |
+ ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT |
137 |
+ ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO |
138 |
+ ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH |
139 |
+ ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL |
140 |
+ |
141 |
+ export SU2_RUN="${S}/SU2_PY" |
142 |
+ export SU2_HOME="${S}" |
143 |
+ export PATH=$PATH:$SU2_RUN |
144 |
+ export PYTHONPATH=$PYTHONPATH:$SU2_RUN |
145 |
+ |
146 |
+ einfo "Running UnitTests ..." |
147 |
+ ../${P}-build/UnitTests/test_driver |
148 |
+ |
149 |
+ pushd TestCases/ |
150 |
+ use mpi && python parallel_regression.py |
151 |
+ use mpi || python serial_regression.py |
152 |
+ use tutorials && use mpi && python tutorials.py |
153 |
+ popd |
154 |
+} |
155 |
+ |
156 |
+src_install() { |
157 |
+ meson_src_install |
158 |
+ mkdir -p "${ED}$(python_get_sitedir)" |
159 |
+ mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)" |
160 |
+ python_optimize "${D}/$(python_get_sitedir)" |
161 |
+ |
162 |
+ if use tutorials ; then |
163 |
+ insinto "/usr/share/${P}" |
164 |
+ doins -r Tutorials |
165 |
+ fi |
166 |
+} |
167 |
|
168 |
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch |
169 |
new file mode 100644 |
170 |
index 0000000..3f65764 |
171 |
--- /dev/null |
172 |
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch |
173 |
@@ -0,0 +1,24 @@ |
174 |
+diff -Naur old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp |
175 |
+--- old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-03-31 12:26:03.000000000 +0300 |
176 |
++++ new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-05-10 17:04:24.000000000 +0300 |
177 |
+@@ -28,7 +28,7 @@ |
178 |
+ #pragma once |
179 |
+ |
180 |
+ #include <vector> |
181 |
+-#include "../../../Common/include/option_structure.hpp" |
182 |
++#include "../../../../Common/include/option_structure.hpp" |
183 |
+ |
184 |
+ class CWindowingTools{ |
185 |
+ public: |
186 |
+diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build |
187 |
+--- old_env/UnitTests/meson.build 2020-05-10 17:03:43.000000000 +0300 |
188 |
++++ new_env/UnitTests/meson.build 2020-05-10 17:04:35.000000000 +0300 |
189 |
+@@ -24,7 +24,7 @@ |
190 |
+ test_driver = executable( |
191 |
+ 'test_driver', |
192 |
+ unit_test_files, |
193 |
+- install : true, |
194 |
++ install : false, |
195 |
+ dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep], |
196 |
+ cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args] |
197 |
+ ) |
198 |
|
199 |
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch |
200 |
new file mode 100644 |
201 |
index 0000000..6ad7387 |
202 |
--- /dev/null |
203 |
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch |
204 |
@@ -0,0 +1,2302 @@ |
205 |
+diff -Naur old/SU2_PY/FSI/FSIInterface.py new/SU2_PY/FSI/FSIInterface.py |
206 |
+--- old/SU2_PY/FSI/FSIInterface.py 2020-05-01 19:09:18.000000000 +0300 |
207 |
++++ new/SU2_PY/FSI/FSIInterface.py 2020-05-10 16:17:07.000000000 +0300 |
208 |
+@@ -6,8 +6,8 @@ |
209 |
+ # \version 7.0.4 "Blackbird" |
210 |
+ # |
211 |
+ # SU2 Project Website: https://su2code.github.io |
212 |
+-# |
213 |
+-# The SU2 Project is maintained by the SU2 Foundation |
214 |
++# |
215 |
++# The SU2 Project is maintained by the SU2 Foundation |
216 |
+ # (http://su2foundation.org) |
217 |
+ # |
218 |
+ # Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md) |
219 |
+@@ -16,7 +16,7 @@ |
220 |
+ # modify it under the terms of the GNU Lesser General Public |
221 |
+ # License as published by the Free Software Foundation; either |
222 |
+ # version 2.1 of the License, or (at your option) any later version. |
223 |
+-# |
224 |
++# |
225 |
+ # SU2 is distributed in the hope that it will be useful, |
226 |
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of |
227 |
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
228 |
+@@ -42,19 +42,19 @@ |
229 |
+ # ---------------------------------------------------------------------- |
230 |
+ |
231 |
+ class Interface: |
232 |
+- """ |
233 |
++ """ |
234 |
+ FSI interface class that handles fluid/solid solvers synchronisation and communication |
235 |
+ """ |
236 |
+- |
237 |
++ |
238 |
+ def __init__(self, FSI_config, FluidSolver, SolidSolver, have_MPI): |
239 |
+- """ |
240 |
+- Class constructor. Declare some variables and do some screen outputs. |
241 |
+- """ |
242 |
+- |
243 |
++ """ |
244 |
++ Class constructor. Declare some variables and do some screen outputs. |
245 |
++ """ |
246 |
++ |
247 |
+ if have_MPI == True: |
248 |
+ from mpi4py import MPI |
249 |
+ self.MPI = MPI |
250 |
+- self.comm = MPI.COMM_WORLD #MPI World communicator |
251 |
++ self.comm = MPI.COMM_WORLD #MPI World communicator |
252 |
+ self.have_MPI = True |
253 |
+ myid = self.comm.Get_rank() |
254 |
+ else: |
255 |
+@@ -62,42 +62,42 @@ |
256 |
+ self.have_MPI = False |
257 |
+ myid = 0 |
258 |
+ |
259 |
+- self.rootProcess = 0 #the root process is chosen to be MPI rank = 0 |
260 |
++ self.rootProcess = 0 #the root process is chosen to be MPI rank = 0 |
261 |
+ |
262 |
+- self.nDim = FSI_config['NDIM'] #problem dimension |
263 |
++ self.nDim = FSI_config['NDIM'] #problem dimension |
264 |
+ |
265 |
+- self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank |
266 |
+- self.haveSolidSolver = False #True if the solid solver is initialized on the current rank |
267 |
+- self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node |
268 |
+- self.haveSolidInterface = False #True if the current rank owns at least one solid interface node |
269 |
++ self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank |
270 |
++ self.haveSolidSolver = False #True if the solid solver is initialized on the current rank |
271 |
++ self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node |
272 |
++ self.haveSolidInterface = False #True if the current rank owns at least one solid interface node |
273 |
+ |
274 |
+- self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized |
275 |
+- self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized |
276 |
++ self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized |
277 |
++ self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized |
278 |
+ self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes |
279 |
+- self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes |
280 |
++ self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes |
281 |
+ |
282 |
+- self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver |
283 |
+- self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver |
284 |
++ self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver |
285 |
++ self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver |
286 |
+ |
287 |
+- self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions |
288 |
+- self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions |
289 |
++ self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions |
290 |
++ self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions |
291 |
+ |
292 |
+- self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition |
293 |
+- self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes |
294 |
+- self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition |
295 |
+- self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes |
296 |
+- |
297 |
+- self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition |
298 |
+- self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition |
299 |
+- self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition |
300 |
+- self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions |
301 |
+- self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions |
302 |
+- |
303 |
+- self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition |
304 |
+- self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition |
305 |
+- self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition |
306 |
+- self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions |
307 |
+- self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions |
308 |
++ self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition |
309 |
++ self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes |
310 |
++ self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition |
311 |
++ self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes |
312 |
++ |
313 |
++ self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition |
314 |
++ self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition |
315 |
++ self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition |
316 |
++ self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions |
317 |
++ self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions |
318 |
++ |
319 |
++ self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition |
320 |
++ self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition |
321 |
++ self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition |
322 |
++ self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions |
323 |
++ self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions |
324 |
+ |
325 |
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'): |
326 |
+ self.MappingMatrixA = None |
327 |
+@@ -106,83 +106,83 @@ |
328 |
+ self.MappingMatrixB_T = None |
329 |
+ self.d_RBF = self.nDim+1 |
330 |
+ else: |
331 |
+- self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping |
332 |
+- self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping |
333 |
++ self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping |
334 |
++ self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping |
335 |
+ self.d_RBF = 0 |
336 |
+ |
337 |
+- self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping) |
338 |
++ self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping) |
339 |
+ self.localFluidInterface_array_Y_init = None |
340 |
+ self.localFluidInterface_array_Z_init = None |
341 |
+ |
342 |
+- self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only) |
343 |
++ self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only) |
344 |
+ |
345 |
+- self.solidInterface_array_DispX = None #solid interface displacement |
346 |
++ self.solidInterface_array_DispX = None #solid interface displacement |
347 |
+ self.solidInterface_array_DispY = None |
348 |
+ self.solidInterface_array_DispZ = None |
349 |
+ |
350 |
+- self.solidInterfaceResidual_array_X = None #solid interface position residual |
351 |
++ self.solidInterfaceResidual_array_X = None #solid interface position residual |
352 |
+ self.solidInterfaceResidual_array_Y = None |
353 |
+ self.solidInterfaceResidual_array_Z = None |
354 |
+ |
355 |
+- self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration |
356 |
++ self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration |
357 |
+ self.solidInterfaceResidualnM1_array_Y = None |
358 |
+ self.solidInterfaceResidualnM1_array_Z = None |
359 |
+- |
360 |
+- self.fluidInterface_array_DispX = None #fluid interface displacement |
361 |
++ |
362 |
++ self.fluidInterface_array_DispX = None #fluid interface displacement |
363 |
+ self.fluidInterface_array_DispY = None |
364 |
+ self.fluidInterface_array_DispZ = None |
365 |
+ |
366 |
+- self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface |
367 |
++ self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface |
368 |
+ self.fluidLoads_array_Y = None |
369 |
+ self.fluidLoads_array_Z = None |
370 |
+ |
371 |
+- self.solidLoads_array_X = None #loads on the solid side of the f/s interface |
372 |
++ self.solidLoads_array_X = None #loads on the solid side of the f/s interface |
373 |
+ self.solidLoads_array_Y = None |
374 |
+ self.solidLoads_array_Z = None |
375 |
+ |
376 |
+- self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method |
377 |
+- self.FSIIter = 0 #current FSI iteration |
378 |
+- self.unsteady = False #flag for steady or unsteady simulation (default is steady) |
379 |
+- |
380 |
+- # ---Some screen output --- |
381 |
+- self.MPIPrint('Fluid solver : SU2_CFD') |
382 |
+- self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER'])) |
383 |
++ self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method |
384 |
++ self.FSIIter = 0 #current FSI iteration |
385 |
++ self.unsteady = False #flag for steady or unsteady simulation (default is steady) |
386 |
++ |
387 |
++ # ---Some screen output --- |
388 |
++ self.MPIPrint('Fluid solver : SU2_CFD') |
389 |
++ self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER'])) |
390 |
+ |
391 |
+- if FSI_config['TIME_MARCHING'] == 'YES': |
392 |
++ if FSI_config['TIME_MARCHING'] == 'YES': |
393 |
+ self.MPIPrint('Unsteady coupled simulation with physical time step : {} s'.format(FSI_config['UNST_TIMESTEP'])) |
394 |
+ self.unsteady = True |
395 |
+- else: |
396 |
+- self.MPIPrint('Steady coupled simulation') |
397 |
++ else: |
398 |
++ self.MPIPrint('Steady coupled simulation') |
399 |
+ |
400 |
+- if FSI_config['MATCHING_MESH'] == 'YES': |
401 |
+- self.MPIPrint('Matching fluid-solid interface') |
402 |
+- else: |
403 |
++ if FSI_config['MATCHING_MESH'] == 'YES': |
404 |
++ self.MPIPrint('Matching fluid-solid interface') |
405 |
++ else: |
406 |
+ if FSI_config['MESH_INTERP_METHOD'] == 'TPS': |
407 |
+- self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation') |
408 |
++ self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation') |
409 |
+ elif FSI_config['MESH_INTERP_METHOD'] == 'RBF': |
410 |
+ self.MPIPrint('Non matching fluid-solid interface with Radial Basis Function interpolation') |
411 |
+ self.RBF_rad = FSI_config['RBF_RADIUS'] |
412 |
+- self.MPIPrint('Radius value : {}'.format(self.RBF_rad)) |
413 |
++ self.MPIPrint('Radius value : {}'.format(self.RBF_rad)) |
414 |
+ else: |
415 |
+- self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation') |
416 |
++ self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation') |
417 |
+ |
418 |
+- self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED'])) |
419 |
++ self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED'])) |
420 |
+ |
421 |
+- self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER'])) |
422 |
++ self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER'])) |
423 |
+ |
424 |
+- self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE'])) |
425 |
++ self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE'])) |
426 |
+ |
427 |
+- if FSI_config['AITKEN_RELAX'] == 'STATIC': |
428 |
+- self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM'])) |
429 |
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC': |
430 |
+- self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM'])) |
431 |
+- else: |
432 |
+- self.MPIPrint('No Aitken under-relaxation') |
433 |
++ if FSI_config['AITKEN_RELAX'] == 'STATIC': |
434 |
++ self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM'])) |
435 |
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC': |
436 |
++ self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM'])) |
437 |
++ else: |
438 |
++ self.MPIPrint('No Aitken under-relaxation') |
439 |
+ |
440 |
+ self.MPIPrint('FSI interface is set') |
441 |
+ |
442 |
+ def MPIPrint(self, message): |
443 |
+- """ |
444 |
++ """ |
445 |
+ Print a message on screen only from the master process. |
446 |
+ """ |
447 |
+ |
448 |
+@@ -198,28 +198,28 @@ |
449 |
+ """ |
450 |
+ Perform a synchronization barrier in case of parallel run with MPI. |
451 |
+ """ |
452 |
+- |
453 |
++ |
454 |
+ if self.have_MPI == True: |
455 |
+ self.comm.barrier() |
456 |
+ |
457 |
+ def connect(self, FSI_config, FluidSolver, SolidSolver): |
458 |
+- """ |
459 |
+- Connection between solvers. |
460 |
+- Creates the communication support between the two solvers. |
461 |
+- Gets information about f/s interfaces from the two solvers. |
462 |
+- """ |
463 |
++ """ |
464 |
++ Connection between solvers. |
465 |
++ Creates the communication support between the two solvers. |
466 |
++ Gets information about f/s interfaces from the two solvers. |
467 |
++ """ |
468 |
+ if self.have_MPI == True: |
469 |
+ myid = self.comm.Get_rank() |
470 |
+- MPIsize = self.comm.Get_size() |
471 |
++ MPIsize = self.comm.Get_size() |
472 |
+ else: |
473 |
+ myid = 0 |
474 |
+ MPIsize = 1 |
475 |
+- |
476 |
+- # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) --- |
477 |
++ |
478 |
++ # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) --- |
479 |
+ self.fluidInterfaceIdentifier = None |
480 |
+ self.nLocalFluidInterfaceNodes = 0 |
481 |
+ if FluidSolver != None: |
482 |
+- print('Fluid solver is initialized on process {}'.format(myid)) |
483 |
++ print('Fluid solver is initialized on process {}'.format(myid)) |
484 |
+ self.haveFluidSolver = True |
485 |
+ allMovingMarkersTags = FluidSolver.GetAllMovingMarkersTag() |
486 |
+ allMarkersID = FluidSolver.GetAllBoundaryMarkers() |
487 |
+@@ -229,23 +229,23 @@ |
488 |
+ if allMovingMarkersTags[0] in allMarkersID.keys(): |
489 |
+ self.fluidInterfaceIdentifier = allMarkersID[allMovingMarkersTags[0]] |
490 |
+ if self.fluidInterfaceIdentifier != None: |
491 |
+- self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier) |
492 |
+- if self.nLocalFluidInterfaceNodes != 0: |
493 |
++ self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier) |
494 |
++ if self.nLocalFluidInterfaceNodes != 0: |
495 |
+ self.haveFluidInterface = True |
496 |
+- print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes)) |
497 |
+- else: |
498 |
+- pass |
499 |
++ print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes)) |
500 |
++ else: |
501 |
++ pass |
502 |
+ |
503 |
+- if SolidSolver != None: |
504 |
+- print('Solid solver is initialized on process {}'.format(myid)) |
505 |
++ if SolidSolver != None: |
506 |
++ print('Solid solver is initialized on process {}'.format(myid)) |
507 |
+ self.haveSolidSolver = True |
508 |
+- self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID() |
509 |
+- self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier) |
510 |
+- if self.nLocalSolidInterfaceNodes != 0: |
511 |
++ self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID() |
512 |
++ self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier) |
513 |
++ if self.nLocalSolidInterfaceNodes != 0: |
514 |
+ self.haveSolidInterface = True |
515 |
+ print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes)) |
516 |
+- else: |
517 |
+- pass |
518 |
++ else: |
519 |
++ pass |
520 |
+ |
521 |
+ # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying --- |
522 |
+ if self.have_MPI == True: |
523 |
+@@ -266,18 +266,18 @@ |
524 |
+ else: |
525 |
+ sendBufSolidInterface = np.array(int(0)) |
526 |
+ rcvBufFluid = np.zeros(MPIsize, dtype = int) |
527 |
+- rcvBufSolid = np.zeros(MPIsize, dtype = int) |
528 |
++ rcvBufSolid = np.zeros(MPIsize, dtype = int) |
529 |
+ rcvBufFluidInterface = np.zeros(MPIsize, dtype = int) |
530 |
+- rcvBufSolidInterface = np.zeros(MPIsize, dtype = int) |
531 |
++ rcvBufSolidInterface = np.zeros(MPIsize, dtype = int) |
532 |
+ self.comm.Allgather(sendBufFluid, rcvBufFluid) |
533 |
+ self.comm.Allgather(sendBufSolid, rcvBufSolid) |
534 |
+ self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface) |
535 |
+ self.comm.Allgather(sendBufSolidInterface, rcvBufSolidInterface) |
536 |
+ for iProc in range(MPIsize): |
537 |
+- if rcvBufFluid[iProc] == 1: |
538 |
++ if rcvBufFluid[iProc] == 1: |
539 |
+ self.fluidSolverProcessors.append(iProc) |
540 |
+ if rcvBufSolid[iProc] == 1: |
541 |
+- self.solidSolverProcessors.append(iProc) |
542 |
++ self.solidSolverProcessors.append(iProc) |
543 |
+ if rcvBufFluidInterface[iProc] == 1: |
544 |
+ self.fluidInterfaceProcessors.append(iProc) |
545 |
+ if rcvBufSolidInterface[iProc] == 1: |
546 |
+@@ -285,19 +285,19 @@ |
547 |
+ del sendBufFluid, sendBufSolid, rcvBufFluid, rcvBufSolid, sendBufFluidInterface, sendBufSolidInterface, rcvBufFluidInterface, rcvBufSolidInterface |
548 |
+ else: |
549 |
+ self.fluidSolverProcessors.append(0) |
550 |
+- self.solidSolverProcessors.append(0) |
551 |
++ self.solidSolverProcessors.append(0) |
552 |
+ self.fluidInterfaceProcessors.append(0) |
553 |
+ self.solidInterfaceProcessors.append(0) |
554 |
+ |
555 |
+- self.MPIBarrier() |
556 |
+- |
557 |
+- # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) --- |
558 |
++ self.MPIBarrier() |
559 |
++ |
560 |
++ # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) --- |
561 |
+ # Calculate the number of halo nodes on each partition |
562 |
+ self.nLocalFluidInterfaceHaloNode = 0 |
563 |
+- for iVertex in range(self.nLocalFluidInterfaceNodes): |
564 |
++ for iVertex in range(self.nLocalFluidInterfaceNodes): |
565 |
+ if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True: |
566 |
+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) |
567 |
+- self.FluidHaloNodeList[GlobalIndex] = iVertex |
568 |
++ self.FluidHaloNodeList[GlobalIndex] = iVertex |
569 |
+ self.nLocalFluidInterfaceHaloNode += 1 |
570 |
+ # Calculate the number of physical (= not halo) nodes on each partition |
571 |
+ self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode |
572 |
+@@ -308,10 +308,10 @@ |
573 |
+ |
574 |
+ # Same thing for the solid part |
575 |
+ self.nLocalSolidInterfaceHaloNode = 0 |
576 |
+- #for iVertex in range(self.nLocalSolidInterfaceNodes): |
577 |
++ #for iVertex in range(self.nLocalSolidInterfaceNodes): |
578 |
+ #if SoliddSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True: |
579 |
+ #GlobalIndex = SolidSolver.GetVertexGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
580 |
+- #self.SolidHaloNodeList[GlobalIndex] = iVertex |
581 |
++ #self.SolidHaloNodeList[GlobalIndex] = iVertex |
582 |
+ #self.nLocalSolidInterfaceHaloNode += 1 |
583 |
+ self.nLocalSolidInterfacePhysicalNodes = self.nLocalSolidInterfaceNodes - self.nLocalSolidInterfaceHaloNode |
584 |
+ if self.have_MPI == True: |
585 |
+@@ -323,11 +323,11 @@ |
586 |
+ # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors --- |
587 |
+ sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes)) |
588 |
+ sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes)) |
589 |
+- rcvBuffHalo = np.zeros(1, dtype=int) |
590 |
++ rcvBuffHalo = np.zeros(1, dtype=int) |
591 |
+ rcvBuffPhysical = np.zeros(1, dtype=int) |
592 |
+- if self.have_MPI == True: |
593 |
++ if self.have_MPI == True: |
594 |
+ self.comm.barrier() |
595 |
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM) |
596 |
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM) |
597 |
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM) |
598 |
+ self.nFluidInterfaceNodes = rcvBuffHalo[0] |
599 |
+ self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0] |
600 |
+@@ -339,11 +339,11 @@ |
601 |
+ # Same thing for the solid part |
602 |
+ sendBuffHalo = np.array(int(self.nLocalSolidInterfaceNodes)) |
603 |
+ sendBuffPhysical = np.array(int(self.nLocalSolidInterfacePhysicalNodes)) |
604 |
+- rcvBuffHalo = np.zeros(1, dtype=int) |
605 |
++ rcvBuffHalo = np.zeros(1, dtype=int) |
606 |
+ rcvBuffPhysical = np.zeros(1, dtype=int) |
607 |
+ if self.have_MPI == True: |
608 |
+- self.comm.barrier() |
609 |
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM) |
610 |
++ self.comm.barrier() |
611 |
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM) |
612 |
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM) |
613 |
+ self.nSolidInterfaceNodes = rcvBuffHalo[0] |
614 |
+ self.nSolidInterfacePhysicalNodes = rcvBuffPhysical[0] |
615 |
+@@ -375,7 +375,7 @@ |
616 |
+ if myid in self.fluidInterfaceProcessors: |
617 |
+ globalIndexStart = 0 |
618 |
+ for iProc in range(myid): |
619 |
+- globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc] |
620 |
++ globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc] |
621 |
+ globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1 |
622 |
+ else: |
623 |
+ globalIndexStart = 0 |
624 |
+@@ -387,8 +387,8 @@ |
625 |
+ temp[0] = [0,self.nLocalFluidInterfacePhysicalNodes-1] |
626 |
+ self.fluidGlobalIndexRange = list() |
627 |
+ self.fluidGlobalIndexRange.append(temp) |
628 |
+- |
629 |
+- # Same thing for the solid part |
630 |
++ |
631 |
++ # Same thing for the solid part |
632 |
+ if self.have_MPI == True: |
633 |
+ if myid in self.solidInterfaceProcessors: |
634 |
+ globalIndexStart = 0 |
635 |
+@@ -404,14 +404,14 @@ |
636 |
+ temp = {} |
637 |
+ temp[0] = [0,self.nSolidInterfacePhysicalNodes-1] |
638 |
+ self.solidGlobalIndexRange = list() |
639 |
+- self.solidGlobalIndexRange.append(temp) |
640 |
++ self.solidGlobalIndexRange.append(temp) |
641 |
+ |
642 |
+- self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes)) |
643 |
+- self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes)) |
644 |
++ self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes)) |
645 |
++ self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes)) |
646 |
+ self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes)) |
647 |
+ self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes)) |
648 |
+ |
649 |
+- self.MPIBarrier() |
650 |
++ self.MPIBarrier() |
651 |
+ |
652 |
+ # --- Create all the PETSc vectors required for parallel communication and parallel mesh mapping/interpolation (working for serial too) --- |
653 |
+ if self.have_MPI == True: |
654 |
+@@ -432,8 +432,8 @@ |
655 |
+ self.solidInterface_array_DispY.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
656 |
+ self.solidInterface_array_DispZ.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
657 |
+ self.solidInterface_array_DispX.set(0.0) |
658 |
+- self.solidInterface_array_DispY.set(0.0) |
659 |
+- self.solidInterface_array_DispZ.set(0.0) |
660 |
++ self.solidInterface_array_DispY.set(0.0) |
661 |
++ self.solidInterface_array_DispZ.set(0.0) |
662 |
+ |
663 |
+ if self.have_MPI == True: |
664 |
+ self.fluidInterface_array_DispX = PETSc.Vec().create(self.comm) |
665 |
+@@ -536,30 +536,30 @@ |
666 |
+ self.solidInterfaceResidualnM1_array_Z.set(0.0) |
667 |
+ |
668 |
+ def interfaceMapping(self,FluidSolver, SolidSolver, FSI_config): |
669 |
+- """ |
670 |
+- Creates the one-to-one mapping between interfaces in case of matching meshes. |
671 |
+- Creates the interpolation rules between interfaces in case of non-matching meshes. |
672 |
+- """ |
673 |
+- if self.have_MPI == True: |
674 |
++ """ |
675 |
++ Creates the one-to-one mapping between interfaces in case of matching meshes. |
676 |
++ Creates the interpolation rules between interfaces in case of non-matching meshes. |
677 |
++ """ |
678 |
++ if self.have_MPI == True: |
679 |
+ myid = self.comm.Get_rank() |
680 |
+- MPIsize = self.comm.Get_size() |
681 |
++ MPIsize = self.comm.Get_size() |
682 |
+ else: |
683 |
+ myid = 0 |
684 |
+ MPIsize = 1 |
685 |
+ |
686 |
+- # --- Get the fluid interface from fluid solver on each partition --- |
687 |
+- GlobalIndex = int() |
688 |
++ # --- Get the fluid interface from fluid solver on each partition --- |
689 |
++ GlobalIndex = int() |
690 |
+ localIndex = 0 |
691 |
+ fluidIndexing_temp = {} |
692 |
+ self.localFluidInterface_array_X_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes)) |
693 |
+ self.localFluidInterface_array_Y_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes)) |
694 |
+ self.localFluidInterface_array_Z_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes)) |
695 |
+ for iVertex in range(self.nLocalFluidInterfaceNodes): |
696 |
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) |
697 |
+- posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex) |
698 |
+- posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex) |
699 |
+- posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex) |
700 |
+- if GlobalIndex in self.FluidHaloNodeList[myid].keys(): |
701 |
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) |
702 |
++ posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex) |
703 |
++ posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex) |
704 |
++ posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex) |
705 |
++ if GlobalIndex in self.FluidHaloNodeList[myid].keys(): |
706 |
+ self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz) |
707 |
+ else: |
708 |
+ fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex) |
709 |
+@@ -576,17 +576,17 @@ |
710 |
+ self.fluidIndexing = fluidIndexing_temp.copy() |
711 |
+ del fluidIndexing_temp |
712 |
+ |
713 |
+- # --- Get the solid interface from solid solver on each partition --- |
714 |
++ # --- Get the solid interface from solid solver on each partition --- |
715 |
+ localIndex = 0 |
716 |
+ solidIndexing_temp = {} |
717 |
+- self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes) |
718 |
++ self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes) |
719 |
+ self.localSolidInterface_array_Y = np.zeros(self.nLocalSolidInterfaceNodes) |
720 |
+ self.localSolidInterface_array_Z = np.zeros(self.nLocalSolidInterfaceNodes) |
721 |
+ for iVertex in range(self.nLocalSolidInterfaceNodes): |
722 |
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
723 |
+- posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex) |
724 |
+- posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex) |
725 |
+- posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex) |
726 |
++ posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex) |
727 |
++ posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex) |
728 |
++ posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex) |
729 |
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys(): |
730 |
+ pass |
731 |
+ else: |
732 |
+@@ -605,14 +605,14 @@ |
733 |
+ del solidIndexing_temp |
734 |
+ |
735 |
+ |
736 |
+- # --- Create the PETSc parallel interpolation matrix --- |
737 |
++ # --- Create the PETSc parallel interpolation matrix --- |
738 |
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'): |
739 |
+ if self.have_MPI == True: |
740 |
+ self.MappingMatrixA = PETSc.Mat().create(self.comm) |
741 |
+ self.MappingMatrixB = PETSc.Mat().create(self.comm) |
742 |
+ self.MappingMatrixA_T = PETSc.Mat().create(self.comm) |
743 |
+ self.MappingMatrixB_T = PETSc.Mat().create(self.comm) |
744 |
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' : |
745 |
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' : |
746 |
+ self.MappingMatrixA.setType('mpiaij') |
747 |
+ self.MappingMatrixB.setType('mpiaij') |
748 |
+ self.MappingMatrixA_T.setType('mpiaij') |
749 |
+@@ -627,7 +627,7 @@ |
750 |
+ self.MappingMatrixB = PETSc.Mat().create() |
751 |
+ self.MappingMatrixA_T = PETSc.Mat().create() |
752 |
+ self.MappingMatrixB_T = PETSc.Mat().create() |
753 |
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' : |
754 |
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' : |
755 |
+ self.MappingMatrixA.setType('aij') |
756 |
+ self.MappingMatrixB.setType('aij') |
757 |
+ self.MappingMatrixA_T.setType('aij') |
758 |
+@@ -637,16 +637,16 @@ |
759 |
+ self.MappingMatrixB.setType('aij') |
760 |
+ self.MappingMatrixA_T.setType('aij') |
761 |
+ self.MappingMatrixB_T.setType('aij') |
762 |
+- self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
763 |
++ self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
764 |
+ self.MappingMatrixA.setUp() |
765 |
+ self.MappingMatrixA.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
766 |
+- self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
767 |
++ self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
768 |
+ self.MappingMatrixB.setUp() |
769 |
+ self.MappingMatrixB.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
770 |
+- self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
771 |
++ self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF)) |
772 |
+ self.MappingMatrixA_T.setUp() |
773 |
+ self.MappingMatrixA_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
774 |
+- self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes)) |
775 |
++ self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes)) |
776 |
+ self.MappingMatrixB_T.setUp() |
777 |
+ self.MappingMatrixB_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
778 |
+ else: |
779 |
+@@ -660,21 +660,21 @@ |
780 |
+ self.MappingMatrix_T = PETSc.Mat().create() |
781 |
+ self.MappingMatrix.setType('aij') |
782 |
+ self.MappingMatrix_T.setType('aij') |
783 |
+- self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes)) |
784 |
++ self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes)) |
785 |
+ self.MappingMatrix.setUp() |
786 |
+ self.MappingMatrix.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
787 |
+- self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes)) |
788 |
++ self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes)) |
789 |
+ self.MappingMatrix_T.setUp() |
790 |
+ self.MappingMatrix_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False) |
791 |
+- |
792 |
+- |
793 |
++ |
794 |
++ |
795 |
+ # --- Fill the interpolation matrix in parallel (working in serial too) --- |
796 |
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'): |
797 |
+ self.MPIPrint('Building interpolation matrices...') |
798 |
+ if self.have_MPI == True: |
799 |
+ for iProc in self.solidInterfaceProcessors: |
800 |
+ if myid == iProc: |
801 |
+- for jProc in self.solidInterfaceProcessors: |
802 |
++ for jProc in self.solidInterfaceProcessors: |
803 |
+ self.comm.Send(self.localSolidInterface_array_X, dest=jProc, tag=1) |
804 |
+ self.comm.Send(self.localSolidInterface_array_Y, dest=jProc, tag=2) |
805 |
+ self.comm.Send(self.localSolidInterface_array_Z, dest=jProc, tag=3) |
806 |
+@@ -726,7 +726,7 @@ |
807 |
+ self.TPSMeshMapping_B(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc) |
808 |
+ else: |
809 |
+ self.NearestNeighboorMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc) |
810 |
+- else: |
811 |
++ else: |
812 |
+ self.matchingMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc) |
813 |
+ else: |
814 |
+ if FSI_config['MATCHING_MESH'] == 'NO': |
815 |
+@@ -735,10 +735,10 @@ |
816 |
+ elif FSI_config['MESH_INTERP_METHOD'] == 'TPS' : |
817 |
+ self.TPSMeshMapping_B(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0) |
818 |
+ else: |
819 |
+- self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0) |
820 |
+- else: |
821 |
++ self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0) |
822 |
++ else: |
823 |
+ self.matchingMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0) |
824 |
+- |
825 |
++ |
826 |
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'): |
827 |
+ self.MappingMatrixB.assemblyBegin() |
828 |
+ self.MappingMatrixB.assemblyEnd() |
829 |
+@@ -751,9 +751,9 @@ |
830 |
+ self.MappingMatrix_T.assemblyBegin() |
831 |
+ self.MappingMatrix_T.assemblyEnd() |
832 |
+ self.MPIPrint("Interpolation matrix is built.") |
833 |
+- |
834 |
++ |
835 |
+ self.MPIBarrier() |
836 |
+- |
837 |
++ |
838 |
+ del self.localSolidInterface_array_X |
839 |
+ del self.localSolidInterface_array_Y |
840 |
+ del self.localSolidInterface_array_Z |
841 |
+@@ -768,20 +768,20 @@ |
842 |
+ myid = 0 |
843 |
+ |
844 |
+ # --- Instantiate the spatial indexing --- |
845 |
+- prop_index = index.Property() |
846 |
+- prop_index.dimension = self.nDim |
847 |
+- SolidSpatialTree = index.Index(properties=prop_index) |
848 |
+- |
849 |
++ prop_index = index.Property() |
850 |
++ prop_index.dimension = self.nDim |
851 |
++ SolidSpatialTree = index.Index(properties=prop_index) |
852 |
++ |
853 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
854 |
+ |
855 |
+ for jVertex in range(nSolidNodes): |
856 |
+ posX = solidInterfaceBuffRcv_X[jVertex] |
857 |
+ posY = solidInterfaceBuffRcv_Y[jVertex] |
858 |
+ posZ = solidInterfaceBuffRcv_Z[jVertex] |
859 |
+- if self.nDim == 2 : |
860 |
+- SolidSpatialTree.add(jVertex, (posX, posY)) |
861 |
+- else : |
862 |
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
863 |
++ if self.nDim == 2 : |
864 |
++ SolidSpatialTree.add(jVertex, (posX, posY)) |
865 |
++ else : |
866 |
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
867 |
+ |
868 |
+ if self.nFluidInterfacePhysicalNodes != self.nSolidInterfacePhysicalNodes: |
869 |
+ raise Exception("Fluid and solid interface must have the same number of nodes for matching meshes ! ") |
870 |
+@@ -822,20 +822,20 @@ |
871 |
+ myid = 0 |
872 |
+ |
873 |
+ # --- Instantiate the spatial indexing --- |
874 |
+- prop_index = index.Property() |
875 |
+- prop_index.dimension = self.nDim |
876 |
+- SolidSpatialTree = index.Index(properties=prop_index) |
877 |
+- |
878 |
++ prop_index = index.Property() |
879 |
++ prop_index.dimension = self.nDim |
880 |
++ SolidSpatialTree = index.Index(properties=prop_index) |
881 |
++ |
882 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
883 |
+ |
884 |
+ for jVertex in range(nSolidNodes): |
885 |
+ posX = solidInterfaceBuffRcv_X[jVertex] |
886 |
+ posY = solidInterfaceBuffRcv_Y[jVertex] |
887 |
+ posZ = solidInterfaceBuffRcv_Z[jVertex] |
888 |
+- if self.nDim == 2 : |
889 |
+- SolidSpatialTree.add(jVertex, (posX, posY)) |
890 |
+- else : |
891 |
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
892 |
++ if self.nDim == 2 : |
893 |
++ SolidSpatialTree.add(jVertex, (posX, posY)) |
894 |
++ else : |
895 |
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
896 |
+ |
897 |
+ # --- For each fluid interface node, find the nearest solid interface node and fill the boolean mapping matrix --- |
898 |
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes): |
899 |
+@@ -863,20 +863,20 @@ |
900 |
+ myid = 0 |
901 |
+ |
902 |
+ # --- Instantiate the spatial indexing --- |
903 |
+- prop_index = index.Property() |
904 |
+- prop_index.dimension = self.nDim |
905 |
+- SolidSpatialTree = index.Index(properties=prop_index) |
906 |
+- |
907 |
++ prop_index = index.Property() |
908 |
++ prop_index.dimension = self.nDim |
909 |
++ SolidSpatialTree = index.Index(properties=prop_index) |
910 |
++ |
911 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
912 |
+ |
913 |
+ for jVertex in range(nSolidNodes): |
914 |
+ posX = solidInterfaceBuffRcv_X[jVertex] |
915 |
+ posY = solidInterfaceBuffRcv_Y[jVertex] |
916 |
+ posZ = solidInterfaceBuffRcv_Z[jVertex] |
917 |
+- if self.nDim == 2 : |
918 |
+- SolidSpatialTree.add(jVertex, (posX, posY)) |
919 |
+- else : |
920 |
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
921 |
++ if self.nDim == 2 : |
922 |
++ SolidSpatialTree.add(jVertex, (posX, posY)) |
923 |
++ else : |
924 |
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
925 |
+ |
926 |
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes): |
927 |
+ posX = self.localSolidInterface_array_X[iVertexSolid] |
928 |
+@@ -915,20 +915,20 @@ |
929 |
+ myid = 0 |
930 |
+ |
931 |
+ # --- Instantiate the spatial indexing --- |
932 |
+- prop_index = index.Property() |
933 |
+- prop_index.dimension = self.nDim |
934 |
+- SolidSpatialTree = index.Index(properties=prop_index) |
935 |
+- |
936 |
++ prop_index = index.Property() |
937 |
++ prop_index.dimension = self.nDim |
938 |
++ SolidSpatialTree = index.Index(properties=prop_index) |
939 |
++ |
940 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
941 |
+ |
942 |
+ for jVertex in range(nSolidNodes): |
943 |
+ posX = solidInterfaceBuffRcv_X[jVertex] |
944 |
+ posY = solidInterfaceBuffRcv_Y[jVertex] |
945 |
+ posZ = solidInterfaceBuffRcv_Z[jVertex] |
946 |
+- if self.nDim == 2 : |
947 |
+- SolidSpatialTree.add(jVertex, (posX, posY)) |
948 |
+- else : |
949 |
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
950 |
++ if self.nDim == 2 : |
951 |
++ SolidSpatialTree.add(jVertex, (posX, posY)) |
952 |
++ else : |
953 |
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ)) |
954 |
+ |
955 |
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes): |
956 |
+ posX = self.localFluidInterface_array_X_init[iVertexFluid] |
957 |
+@@ -965,7 +965,7 @@ |
958 |
+ myid = self.comm.Get_rank() |
959 |
+ else: |
960 |
+ myid = 0 |
961 |
+- |
962 |
++ |
963 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
964 |
+ |
965 |
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes): |
966 |
+@@ -999,7 +999,7 @@ |
967 |
+ myid = self.comm.Get_rank() |
968 |
+ else: |
969 |
+ myid = 0 |
970 |
+- |
971 |
++ |
972 |
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0] |
973 |
+ |
974 |
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes): |
975 |
+@@ -1031,7 +1031,7 @@ |
976 |
+ """ |
977 |
+ phi = 0.0 |
978 |
+ eps = distance/rad |
979 |
+- |
980 |
++ |
981 |
+ if eps < 1: |
982 |
+ phi = ((1.0-eps)**4)*(4.0*eps+1.0) |
983 |
+ else: |
984 |
+@@ -1044,20 +1044,20 @@ |
985 |
+ Description |
986 |
+ """ |
987 |
+ phi = 0.0 |
988 |
+- |
989 |
++ |
990 |
+ if distance > 0.0: |
991 |
+ phi = (distance**2)*np.log10(distance) |
992 |
+ else: |
993 |
+ phi = 0.0 |
994 |
+ |
995 |
+- return phi |
996 |
++ return phi |
997 |
+ |
998 |
+ |
999 |
+ def interpolateSolidPositionOnFluidMesh(self, FSI_config): |
1000 |
+- """ |
1001 |
+- Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh. |
1002 |
+- """ |
1003 |
+- if self.have_MPI == True: |
1004 |
++ """ |
1005 |
++ Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh. |
1006 |
++ """ |
1007 |
++ if self.have_MPI == True: |
1008 |
+ myid = self.comm.Get_rank() |
1009 |
+ MPIsize = self.comm.Get_size() |
1010 |
+ else: |
1011 |
+@@ -1110,12 +1110,12 @@ |
1012 |
+ del gamma_array_DispY |
1013 |
+ del gamma_array_DispZ |
1014 |
+ del KSP_solver |
1015 |
+- else: |
1016 |
++ else: |
1017 |
+ self.MappingMatrix.mult(self.solidInterface_array_DispX, self.fluidInterface_array_DispX) |
1018 |
+ self.MappingMatrix.mult(self.solidInterface_array_DispY, self.fluidInterface_array_DispY) |
1019 |
+ self.MappingMatrix.mult(self.solidInterface_array_DispZ, self.fluidInterface_array_DispZ) |
1020 |
+ |
1021 |
+- # --- Checking conservation --- |
1022 |
++ # --- Checking conservation --- |
1023 |
+ WSX = self.solidLoads_array_X.dot(self.solidInterface_array_DispX) |
1024 |
+ WSY = self.solidLoads_array_Y.dot(self.solidInterface_array_DispY) |
1025 |
+ WSZ = self.solidLoads_array_Z.dot(self.solidInterface_array_DispZ) |
1026 |
+@@ -1124,11 +1124,11 @@ |
1027 |
+ WFY = self.fluidLoads_array_Y.dot(self.fluidInterface_array_DispY) |
1028 |
+ WFZ = self.fluidLoads_array_Z.dot(self.fluidInterface_array_DispZ) |
1029 |
+ |
1030 |
+- self.MPIPrint("Checking f/s interface conservation...") |
1031 |
+- self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ)) |
1032 |
+- self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ)) |
1033 |
++ self.MPIPrint("Checking f/s interface conservation...") |
1034 |
++ self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ)) |
1035 |
++ self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ)) |
1036 |
++ |
1037 |
+ |
1038 |
+- |
1039 |
+ # --- Redistribute the interpolated fluid interface according to the partitions that own the fluid interface --- |
1040 |
+ # Gather the fluid interface on the master process |
1041 |
+ if self.have_MPI == True: |
1042 |
+@@ -1156,7 +1156,7 @@ |
1043 |
+ displ = tuple(displ) |
1044 |
+ |
1045 |
+ del sendBuffNumber, rcvBuffNumber |
1046 |
+- |
1047 |
++ |
1048 |
+ #print("DEBUG MESSAGE From proc {}, counts = {}".format(myid, counts)) |
1049 |
+ #print("DEBUG MESSAGE From proc {}, displ = {}".format(myid, displ)) |
1050 |
+ |
1051 |
+@@ -1213,18 +1213,18 @@ |
1052 |
+ del sendBuff |
1053 |
+ |
1054 |
+ def interpolateFluidLoadsOnSolidMesh(self, FSI_config): |
1055 |
+- """ |
1056 |
+- Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh. |
1057 |
+- """ |
1058 |
+- if self.have_MPI == True: |
1059 |
++ """ |
1060 |
++ Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh. |
1061 |
++ """ |
1062 |
++ if self.have_MPI == True: |
1063 |
+ myid = self.comm.Get_rank() |
1064 |
+ MPIsize = self.comm.Get_size() |
1065 |
+ else: |
1066 |
+ myid = 0 |
1067 |
+ MPIsize = 1 |
1068 |
+- |
1069 |
++ |
1070 |
+ # --- Interpolate (or map) in parallel the fluid interface loads on the solid interface --- |
1071 |
+- #self.MappingMatrix.transpose() |
1072 |
++ #self.MappingMatrix.transpose() |
1073 |
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'): |
1074 |
+ if self.have_MPI == True: |
1075 |
+ gamma_array_LoadX = PETSc.Vec().create(self.comm) |
1076 |
+@@ -1280,10 +1280,10 @@ |
1077 |
+ self.solidLoads_array_X_recon = None |
1078 |
+ self.solidLoads_array_Y_recon = None |
1079 |
+ self.solidLoads_array_Z_recon = None |
1080 |
+- if myid == self.rootProcess: |
1081 |
+- self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1082 |
+- self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1083 |
+- self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1084 |
++ if myid == self.rootProcess: |
1085 |
++ self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1086 |
++ self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1087 |
++ self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1088 |
+ myNumberOfNodes = self.solidLoads_array_X.getArray().shape[0] |
1089 |
+ sendBuffNumber = np.array([myNumberOfNodes], dtype=int) |
1090 |
+ rcvBuffNumber = np.zeros(MPIsize, dtype=int) |
1091 |
+@@ -1293,9 +1293,9 @@ |
1092 |
+ displ = np.zeros(MPIsize, dtype=int) |
1093 |
+ for ii in range(rcvBuffNumber.shape[0]): |
1094 |
+ displ[ii] = rcvBuffNumber[0:ii].sum() |
1095 |
+- displ = tuple(displ) |
1096 |
++ displ = tuple(displ) |
1097 |
+ |
1098 |
+- del sendBuffNumber, rcvBuffNumber |
1099 |
++ del sendBuffNumber, rcvBuffNumber |
1100 |
+ |
1101 |
+ self.comm.Gatherv(self.solidLoads_array_X.getArray(), [self.solidLoads_array_X_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess) |
1102 |
+ self.comm.Gatherv(self.solidLoads_array_Y.getArray(), [self.solidLoads_array_Y_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess) |
1103 |
+@@ -1336,25 +1336,25 @@ |
1104 |
+ |
1105 |
+ |
1106 |
+ '''def getSolidInterfacePosition(self, SolidSolver): |
1107 |
+- """ |
1108 |
+- Gets the current solid interface position from the solid solver. |
1109 |
+- """ |
1110 |
++ """ |
1111 |
++ Gets the current solid interface position from the solid solver. |
1112 |
++ """ |
1113 |
+ if self.have_MPI == True: |
1114 |
+- myid = self.comm.Get_rank() |
1115 |
++ myid = self.comm.Get_rank() |
1116 |
+ else: |
1117 |
+ myid = 0 |
1118 |
+- |
1119 |
++ |
1120 |
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector --- |
1121 |
+ GlobalIndex = int() |
1122 |
+ localIndex = 0 |
1123 |
+- for iVertex in range(self.nLocalSolidInterfaceNodes): |
1124 |
++ for iVertex in range(self.nLocalSolidInterfaceNodes): |
1125 |
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
1126 |
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys(): |
1127 |
+ pass |
1128 |
+ else: |
1129 |
+- newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex) |
1130 |
+- newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex) |
1131 |
+- newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex) |
1132 |
++ newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex) |
1133 |
++ newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex) |
1134 |
++ newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex) |
1135 |
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex) |
1136 |
+ self.solidInterface_array_X.setValues([iGlobalVertex],newPosx) |
1137 |
+ self.solidInterface_array_Y.setValues([iGlobalVertex],newPosy) |
1138 |
+@@ -1375,25 +1375,25 @@ |
1139 |
+ #print("DEBUG MESSAGE From PROC {} : array_X = {}".format(myid, self.solidInterface_array_X.getArray()))''' |
1140 |
+ |
1141 |
+ def getSolidInterfaceDisplacement(self, SolidSolver): |
1142 |
+- """ |
1143 |
+- Gets the current solid interface position from the solid solver. |
1144 |
+- """ |
1145 |
++ """ |
1146 |
++ Gets the current solid interface position from the solid solver. |
1147 |
++ """ |
1148 |
+ if self.have_MPI == True: |
1149 |
+- myid = self.comm.Get_rank() |
1150 |
++ myid = self.comm.Get_rank() |
1151 |
+ else: |
1152 |
+ myid = 0 |
1153 |
+- |
1154 |
++ |
1155 |
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector --- |
1156 |
+ GlobalIndex = int() |
1157 |
+ localIndex = 0 |
1158 |
+- for iVertex in range(self.nLocalSolidInterfaceNodes): |
1159 |
++ for iVertex in range(self.nLocalSolidInterfaceNodes): |
1160 |
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
1161 |
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys(): |
1162 |
+ pass |
1163 |
+ else: |
1164 |
+- newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex) |
1165 |
+- newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex) |
1166 |
+- newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex) |
1167 |
++ newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex) |
1168 |
++ newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex) |
1169 |
++ newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex) |
1170 |
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex) |
1171 |
+ self.solidInterface_array_DispX.setValues([iGlobalVertex],newDispx) |
1172 |
+ self.solidInterface_array_DispY.setValues([iGlobalVertex],newDispy) |
1173 |
+@@ -1408,9 +1408,9 @@ |
1174 |
+ self.solidInterface_array_DispZ.assemblyEnd() |
1175 |
+ |
1176 |
+ def getFluidInterfaceNodalForce(self, FSI_config, FluidSolver): |
1177 |
+- """ |
1178 |
+- Gets the fluid interface loads from the fluid solver. |
1179 |
+- """ |
1180 |
++ """ |
1181 |
++ Gets the fluid interface loads from the fluid solver. |
1182 |
++ """ |
1183 |
+ if self.have_MPI == True: |
1184 |
+ myid = self.comm.Get_rank() |
1185 |
+ else: |
1186 |
+@@ -1422,17 +1422,17 @@ |
1187 |
+ FZ = 0.0 |
1188 |
+ |
1189 |
+ # --- Get the fluid interface loads from the fluid solver and directly fill the corresponding PETSc vector --- |
1190 |
+- for iVertex in range(self.nLocalFluidInterfaceNodes): |
1191 |
+- halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces |
1192 |
+- if halo==False: |
1193 |
+- if FSI_config['CSD_SOLVER'] == 'GETDP': |
1194 |
+- newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex) |
1195 |
+- newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex) |
1196 |
+- newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex) |
1197 |
+- else: |
1198 |
+- newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex) |
1199 |
+- newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex) |
1200 |
+- newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex) |
1201 |
++ for iVertex in range(self.nLocalFluidInterfaceNodes): |
1202 |
++ halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces |
1203 |
++ if halo==False: |
1204 |
++ if FSI_config['CSD_SOLVER'] == 'GETDP': |
1205 |
++ newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex) |
1206 |
++ newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex) |
1207 |
++ newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex) |
1208 |
++ else: |
1209 |
++ newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex) |
1210 |
++ newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex) |
1211 |
++ newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex) |
1212 |
+ iGlobalVertex = self.__getGlobalIndex('fluid', myid, localIndex) |
1213 |
+ self.fluidLoads_array_X.setValues([iGlobalVertex], newFx) |
1214 |
+ self.fluidLoads_array_Y.setValues([iGlobalVertex], newFy) |
1215 |
+@@ -1457,22 +1457,22 @@ |
1216 |
+ FX_b = self.fluidLoads_array_X.sum() |
1217 |
+ FY_b = self.fluidLoads_array_Y.sum() |
1218 |
+ FZ_b = self.fluidLoads_array_Z.sum() |
1219 |
+- |
1220 |
++ |
1221 |
+ |
1222 |
+ def setFluidInterfaceVarCoord(self, FluidSolver): |
1223 |
+- """ |
1224 |
+- Communicate the change of coordinates of the fluid interface to the fluid solver. |
1225 |
+- Prepare the fluid solver for mesh deformation. |
1226 |
+- """ |
1227 |
++ """ |
1228 |
++ Communicate the change of coordinates of the fluid interface to the fluid solver. |
1229 |
++ Prepare the fluid solver for mesh deformation. |
1230 |
++ """ |
1231 |
+ if self.have_MPI == True: |
1232 |
+- myid = self.comm.Get_rank() |
1233 |
++ myid = self.comm.Get_rank() |
1234 |
+ else: |
1235 |
+ myid = 0 |
1236 |
+- |
1237 |
++ |
1238 |
+ # --- Send the new fluid interface position to the fluid solver (on each partition, halo nodes included) --- |
1239 |
+ localIndex = 0 |
1240 |
+- for iVertex in range(self.nLocalFluidInterfaceNodes): |
1241 |
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) |
1242 |
++ for iVertex in range(self.nLocalFluidInterfaceNodes): |
1243 |
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) |
1244 |
+ if GlobalIndex in self.FluidHaloNodeList[myid].keys(): |
1245 |
+ posX0, posY0, posZ0 = self.haloNodesPositionsInit[GlobalIndex] |
1246 |
+ DispX, DispY, DispZ = self.haloNodesDisplacements[GlobalIndex] |
1247 |
+@@ -1491,32 +1491,32 @@ |
1248 |
+ FluidSolver.SetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex, posZ) |
1249 |
+ localIndex += 1 |
1250 |
+ # Prepares the mesh deformation in the fluid solver |
1251 |
+- nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex) |
1252 |
++ nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex) |
1253 |
++ |
1254 |
+ |
1255 |
+- |
1256 |
+ def setSolidInterfaceLoads(self, SolidSolver, FSI_config, time): |
1257 |
+- """ |
1258 |
+- Communicates the new solid interface loads to the solid solver. |
1259 |
+- In case of rigid body motion, calculates the new resultant forces (lift, drag, ...). |
1260 |
+- """ |
1261 |
++ """ |
1262 |
++ Communicates the new solid interface loads to the solid solver. |
1263 |
++ In case of rigid body motion, calculates the new resultant forces (lift, drag, ...). |
1264 |
++ """ |
1265 |
+ if self.have_MPI == True: |
1266 |
+- myid = self.comm.Get_rank() |
1267 |
++ myid = self.comm.Get_rank() |
1268 |
+ else: |
1269 |
+ myid = 0 |
1270 |
+ |
1271 |
+- FY = 0.0 # solid-side resultant forces |
1272 |
++ FY = 0.0 # solid-side resultant forces |
1273 |
+ FX = 0.0 |
1274 |
+ FZ = 0.0 |
1275 |
+- FFX = 0.0 # fluid-side resultant forces |
1276 |
+- FFY = 0.0 |
1277 |
+- FFZ = 0.0 |
1278 |
++ FFX = 0.0 # fluid-side resultant forces |
1279 |
++ FFY = 0.0 |
1280 |
++ FFZ = 0.0 |
1281 |
+ |
1282 |
+ # --- Check for total force conservation after interpolation |
1283 |
+ FFX = self.fluidLoads_array_X.sum() |
1284 |
+ FFY = self.fluidLoads_array_Y.sum() |
1285 |
+ FFZ = self.fluidLoads_array_Z.sum() |
1286 |
+ |
1287 |
+- |
1288 |
++ |
1289 |
+ for iVertex in range(self.nLocalSolidInterfaceNodes): |
1290 |
+ FX += self.localSolidLoads_array_X[iVertex] |
1291 |
+ FY += self.localSolidLoads_array_Y[iVertex] |
1292 |
+@@ -1527,9 +1527,9 @@ |
1293 |
+ FY = self.comm.allreduce(FY) |
1294 |
+ FZ = self.comm.allreduce(FZ) |
1295 |
+ |
1296 |
+- self.MPIPrint("Checking f/s interface total force...") |
1297 |
+- self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ)) |
1298 |
+- self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ)) |
1299 |
++ self.MPIPrint("Checking f/s interface total force...") |
1300 |
++ self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ)) |
1301 |
++ self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ)) |
1302 |
+ |
1303 |
+ # --- Send the new solid interface loads to the solid solver (on each partition, halo nodes included) --- |
1304 |
+ GlobalIndex = int() |
1305 |
+@@ -1541,25 +1541,25 @@ |
1306 |
+ pass |
1307 |
+ else: |
1308 |
+ Fx = self.localSolidLoads_array_X[localIndex] |
1309 |
+- Fy = self.localSolidLoads_array_Y[localIndex] |
1310 |
+- Fz = self.localSolidLoads_array_Z[localIndex] |
1311 |
++ Fy = self.localSolidLoads_array_Y[localIndex] |
1312 |
++ Fz = self.localSolidLoads_array_Z[localIndex] |
1313 |
+ SolidSolver.applyload(iVertex, Fx, Fy, Fz, time) |
1314 |
+ localIndex += 1 |
1315 |
+- if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1316 |
++ if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1317 |
+ SolidSolver.setGeneralisedForce() |
1318 |
+- SolidSolver.setGeneralisedMoment() |
1319 |
++ SolidSolver.setGeneralisedMoment() |
1320 |
+ |
1321 |
+ def computeSolidInterfaceResidual(self, SolidSolver): |
1322 |
+- """ |
1323 |
+- Computes the solid interface FSI displacement residual. |
1324 |
+- """ |
1325 |
++ """ |
1326 |
++ Computes the solid interface FSI displacement residual. |
1327 |
++ """ |
1328 |
+ |
1329 |
+ if self.have_MPI == True: |
1330 |
+- myid = self.comm.Get_rank() |
1331 |
++ myid = self.comm.Get_rank() |
1332 |
+ else: |
1333 |
+ myid = 0 |
1334 |
+ |
1335 |
+- normInterfaceResidualSquare = 0.0 |
1336 |
++ normInterfaceResidualSquare = 0.0 |
1337 |
+ |
1338 |
+ # --- Create and fill the PETSc vector for the predicted solid interface position (predicted by the solid computation) --- |
1339 |
+ if self.have_MPI == True: |
1340 |
+@@ -1575,27 +1575,27 @@ |
1341 |
+ predDisp_array_Y = PETSc.Vec().create() |
1342 |
+ predDisp_array_Y.setType('seq') |
1343 |
+ predDisp_array_Z = PETSc.Vec().create() |
1344 |
+- predDisp_array_Z.setType('seq') |
1345 |
++ predDisp_array_Z.setType('seq') |
1346 |
+ predDisp_array_X.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1347 |
+ predDisp_array_Y.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1348 |
+ predDisp_array_Z.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF) |
1349 |
+- |
1350 |
+- if myid in self.solidSolverProcessors: |
1351 |
+- for iVertex in range(self.nLocalSolidInterfaceNodes): |
1352 |
+- predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex) |
1353 |
+- predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex) |
1354 |
+- predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex) |
1355 |
++ |
1356 |
++ if myid in self.solidSolverProcessors: |
1357 |
++ for iVertex in range(self.nLocalSolidInterfaceNodes): |
1358 |
++ predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex) |
1359 |
++ predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex) |
1360 |
++ predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex) |
1361 |
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, iVertex) |
1362 |
+ predDisp_array_X.setValues([iGlobalVertex], predDispx) |
1363 |
+ predDisp_array_Y.setValues([iGlobalVertex], predDispy) |
1364 |
+ predDisp_array_Z.setValues([iGlobalVertex], predDispz) |
1365 |
+- |
1366 |
+- predDisp_array_X.assemblyBegin() |
1367 |
+- predDisp_array_X.assemblyEnd() |
1368 |
+- predDisp_array_Y.assemblyBegin() |
1369 |
+- predDisp_array_Y.assemblyEnd() |
1370 |
+- predDisp_array_Z.assemblyBegin() |
1371 |
+- predDisp_array_Z.assemblyEnd() |
1372 |
++ |
1373 |
++ predDisp_array_X.assemblyBegin() |
1374 |
++ predDisp_array_X.assemblyEnd() |
1375 |
++ predDisp_array_Y.assemblyBegin() |
1376 |
++ predDisp_array_Y.assemblyEnd() |
1377 |
++ predDisp_array_Z.assemblyBegin() |
1378 |
++ predDisp_array_Z.assemblyEnd() |
1379 |
+ |
1380 |
+ # --- Calculate the residual (vector and norm) --- |
1381 |
+ self.solidInterfaceResidual_array_X = predDisp_array_X - self.solidInterface_array_DispX |
1382 |
+@@ -1615,45 +1615,45 @@ |
1383 |
+ del predDisp_array_Y |
1384 |
+ del predDisp_array_Z |
1385 |
+ |
1386 |
+- return sqrt(normInterfaceResidualSquare) |
1387 |
++ return sqrt(normInterfaceResidualSquare) |
1388 |
+ |
1389 |
+ def relaxSolidPosition(self,FSI_config): |
1390 |
+- """ |
1391 |
+- Apply solid displacement under-relaxation. |
1392 |
+- """ |
1393 |
++ """ |
1394 |
++ Apply solid displacement under-relaxation. |
1395 |
++ """ |
1396 |
+ if self.have_MPI == True: |
1397 |
+- myid = self.comm.Get_rank() |
1398 |
++ myid = self.comm.Get_rank() |
1399 |
+ else: |
1400 |
+ myid = 0 |
1401 |
+ |
1402 |
+ # --- Set the Aitken coefficient for the relaxation --- |
1403 |
+- if FSI_config['AITKEN_RELAX'] == 'STATIC': |
1404 |
+- self.aitkenParam = FSI_config['AITKEN_PARAM'] |
1405 |
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC': |
1406 |
+- self.setAitkenCoefficient(FSI_config) |
1407 |
+- else: |
1408 |
+- self.aitkenParam = 1.0 |
1409 |
++ if FSI_config['AITKEN_RELAX'] == 'STATIC': |
1410 |
++ self.aitkenParam = FSI_config['AITKEN_PARAM'] |
1411 |
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC': |
1412 |
++ self.setAitkenCoefficient(FSI_config) |
1413 |
++ else: |
1414 |
++ self.aitkenParam = 1.0 |
1415 |
+ |
1416 |
+- self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam)) |
1417 |
++ self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam)) |
1418 |
+ |
1419 |
+ # --- Relax the solid interface position --- |
1420 |
+ self.solidInterface_array_DispX += self.aitkenParam*self.solidInterfaceResidual_array_X |
1421 |
+ self.solidInterface_array_DispY += self.aitkenParam*self.solidInterfaceResidual_array_Y |
1422 |
+ self.solidInterface_array_DispZ += self.aitkenParam*self.solidInterfaceResidual_array_Z |
1423 |
+- |
1424 |
++ |
1425 |
+ |
1426 |
+ def setAitkenCoefficient(self, FSI_config): |
1427 |
+- """ |
1428 |
+- Computes the Aitken coefficients for solid displacement under-relaxation. |
1429 |
+- """ |
1430 |
+- |
1431 |
+- deltaResNormSquare = 0.0 |
1432 |
+- prodScalRes = 0.0 |
1433 |
+- |
1434 |
++ """ |
1435 |
++ Computes the Aitken coefficients for solid displacement under-relaxation. |
1436 |
++ """ |
1437 |
++ |
1438 |
++ deltaResNormSquare = 0.0 |
1439 |
++ prodScalRes = 0.0 |
1440 |
++ |
1441 |
+ # --- Create the PETSc vector for the difference between the residuals (current and previous FSI iter) --- |
1442 |
+- if self.FSIIter == 0: |
1443 |
+- self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam) |
1444 |
+- else: |
1445 |
++ if self.FSIIter == 0: |
1446 |
++ self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam) |
1447 |
++ else: |
1448 |
+ if self.have_MPI: |
1449 |
+ deltaResx_array_X = PETSc.Vec().create(self.comm) |
1450 |
+ deltaResx_array_X.setType('mpi') |
1451 |
+@@ -1688,9 +1688,9 @@ |
1452 |
+ deltaResNormSquare_X = (deltaResx_array_X.norm())**2 |
1453 |
+ deltaResNormSquare_Y = (deltaResx_array_Y.norm())**2 |
1454 |
+ deltaResNormSquare_Z = (deltaResx_array_Z.norm())**2 |
1455 |
+- deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z |
1456 |
++ deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z |
1457 |
+ |
1458 |
+- self.aitkenParam *= -prodScalRes/deltaResNormSquare |
1459 |
++ self.aitkenParam *= -prodScalRes/deltaResNormSquare |
1460 |
+ |
1461 |
+ deltaResx_array_X.destroy() |
1462 |
+ deltaResx_array_Y.destroy() |
1463 |
+@@ -1708,27 +1708,27 @@ |
1464 |
+ self.solidInterfaceResidual_array_Z.copy(self.solidInterfaceResidualnM1_array_Z) |
1465 |
+ |
1466 |
+ def displacementPredictor(self, FSI_config , SolidSolver, deltaT): |
1467 |
+- """ |
1468 |
+- Calculates a prediciton for the solid interface position for the next time step. |
1469 |
+- """ |
1470 |
++ """ |
1471 |
++ Calculates a prediciton for the solid interface position for the next time step. |
1472 |
++ """ |
1473 |
+ |
1474 |
+ if self.have_MPI == True: |
1475 |
+- myid = self.comm.Get_rank() |
1476 |
++ myid = self.comm.Get_rank() |
1477 |
+ else: |
1478 |
+ myid = 0 |
1479 |
+ |
1480 |
+- if FSI_config['DISP_PRED'] == 'FIRST_ORDER': |
1481 |
+- self.MPIPrint("First order predictor") |
1482 |
+- alpha_0 = 1.0 |
1483 |
+- alpha_1 = 0.0 |
1484 |
+- elif FSI_config['DISP_PRED'] == 'SECOND_ORDER': |
1485 |
+- self.MPIPrint("Second order predictor") |
1486 |
+- alpha_0 = 1.0 |
1487 |
+- alpha_1 = 0.5 |
1488 |
+- else: |
1489 |
+- self.MPIPrint("No predictor") |
1490 |
+- alpha_0 = 0.0 |
1491 |
+- alpha_1 = 0.0 |
1492 |
++ if FSI_config['DISP_PRED'] == 'FIRST_ORDER': |
1493 |
++ self.MPIPrint("First order predictor") |
1494 |
++ alpha_0 = 1.0 |
1495 |
++ alpha_1 = 0.0 |
1496 |
++ elif FSI_config['DISP_PRED'] == 'SECOND_ORDER': |
1497 |
++ self.MPIPrint("Second order predictor") |
1498 |
++ alpha_0 = 1.0 |
1499 |
++ alpha_1 = 0.5 |
1500 |
++ else: |
1501 |
++ self.MPIPrint("No predictor") |
1502 |
++ alpha_0 = 0.0 |
1503 |
++ alpha_1 = 0.0 |
1504 |
+ |
1505 |
+ # --- Create the PETSc vectors to store the solid interface velocity --- |
1506 |
+ if self.have_MPI == True: |
1507 |
+@@ -1774,18 +1774,18 @@ |
1508 |
+ # --- Fill the PETSc vectors --- |
1509 |
+ GlobalIndex = int() |
1510 |
+ localIndex = 0 |
1511 |
+- for iVertex in range(self.nLocalSolidInterfaceNodes): |
1512 |
+- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
1513 |
++ for iVertex in range(self.nLocalSolidInterfaceNodes): |
1514 |
++ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex) |
1515 |
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys(): |
1516 |
+ pass |
1517 |
+ else: |
1518 |
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex) |
1519 |
+- velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex) |
1520 |
+- vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex) |
1521 |
+- velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex) |
1522 |
+- velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex) |
1523 |
+- velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex) |
1524 |
+- velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex) |
1525 |
++ velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex) |
1526 |
++ vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex) |
1527 |
++ velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex) |
1528 |
++ velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex) |
1529 |
++ velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex) |
1530 |
++ velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex) |
1531 |
+ Vel_array_X.setValues([iGlobalVertex],velx) |
1532 |
+ Vel_array_Y.setValues([iGlobalVertex],vely) |
1533 |
+ Vel_array_Z.setValues([iGlobalVertex],velz) |
1534 |
+@@ -1822,27 +1822,27 @@ |
1535 |
+ del VelnM1_array_X, VelnM1_array_Y, VelnM1_array_Z |
1536 |
+ |
1537 |
+ def writeFSIHistory(self, TimeIter, time, varCoordNorm, FSIConv): |
1538 |
+- """ |
1539 |
+- Write the FSI history file of the computaion. |
1540 |
+- """ |
1541 |
++ """ |
1542 |
++ Write the FSI history file of the computaion. |
1543 |
++ """ |
1544 |
+ |
1545 |
+ if self.have_MPI == True: |
1546 |
+ myid = self.comm.Get_rank() |
1547 |
+ else: |
1548 |
+ myid = 0 |
1549 |
+- |
1550 |
++ |
1551 |
+ if myid == self.rootProcess: |
1552 |
+- if self.unsteady: |
1553 |
+- if TimeIter == 0: |
1554 |
+- histFile = open('FSIhistory.dat', "w") |
1555 |
++ if self.unsteady: |
1556 |
++ if TimeIter == 0: |
1557 |
++ histFile = open('FSIhistory.dat', "w") |
1558 |
+ histFile.write("TimeIter\tTime\tFSIRes\tFSINbIter\n") |
1559 |
+- else: |
1560 |
+- histFile = open('FSIhistory.dat', "a") |
1561 |
+- if FSIConv: |
1562 |
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n') |
1563 |
+- else: |
1564 |
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n') |
1565 |
+- histFile.close() |
1566 |
++ else: |
1567 |
++ histFile = open('FSIhistory.dat', "a") |
1568 |
++ if FSIConv: |
1569 |
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n') |
1570 |
++ else: |
1571 |
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n') |
1572 |
++ histFile.close() |
1573 |
+ else: |
1574 |
+ if self.FSIIter == 0: |
1575 |
+ histFile = open('FSIhistory.dat', "w") |
1576 |
+@@ -1851,7 +1851,7 @@ |
1577 |
+ histFile = open('FSIhistory.dat', "a") |
1578 |
+ histFile.write(str(self.FSIIter) + '\t' + str(varCoordNorm) + '\n') |
1579 |
+ histFile.close() |
1580 |
+- |
1581 |
++ |
1582 |
+ |
1583 |
+ self.MPIBarrier() |
1584 |
+ |
1585 |
+@@ -1868,254 +1868,254 @@ |
1586 |
+ globalIndex = globalStartIndex + iLocalVertex |
1587 |
+ |
1588 |
+ return globalIndex |
1589 |
+- |
1590 |
++ |
1591 |
+ |
1592 |
+ def UnsteadyFSI(self,FSI_config, FluidSolver, SolidSolver): |
1593 |
+- """ |
1594 |
+- Run the unsteady FSI computation by synchronizing the fluid and solid solvers. |
1595 |
+- F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes). |
1596 |
+- """ |
1597 |
++ """ |
1598 |
++ Run the unsteady FSI computation by synchronizing the fluid and solid solvers. |
1599 |
++ F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes). |
1600 |
++ """ |
1601 |
+ |
1602 |
+ if self.have_MPI == True: |
1603 |
+- myid = self.comm.Get_rank() |
1604 |
+- numberPart = self.comm.Get_size() |
1605 |
++ myid = self.comm.Get_rank() |
1606 |
++ numberPart = self.comm.Get_size() |
1607 |
+ else: |
1608 |
+ myid = 0 |
1609 |
+ numberPart = 1 |
1610 |
+ |
1611 |
+- # --- Set some general variables for the unsteady computation --- # |
1612 |
+- deltaT = FSI_config['UNST_TIMESTEP'] # physical time step |
1613 |
+- totTime = FSI_config['UNST_TIME'] # physical simulation time |
1614 |
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step) |
1615 |
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance |
1616 |
+- TimeIterTreshold = 0 # time iteration from which we allow the solid to deform |
1617 |
+- |
1618 |
+- if FSI_config['RESTART_SOL'] == 'YES': |
1619 |
+- startTime = FSI_config['START_TIME'] |
1620 |
+- NbTimeIter = ((totTime)/deltaT)-1 |
1621 |
+- time = startTime |
1622 |
+- TimeIter = FSI_config['RESTART_ITER'] |
1623 |
+- else: |
1624 |
+- NbTimeIter = (totTime/deltaT)-1 # number of time iterations |
1625 |
+- time = 0.0 # initial time |
1626 |
+- TimeIter = 0 # initial time iteration |
1627 |
+- |
1628 |
+- NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer |
1629 |
+- |
1630 |
+- varCoordNorm = 0.0 # FSI residual |
1631 |
+- FSIConv = False # FSI convergence flag |
1632 |
+- |
1633 |
+- self.MPIPrint('\n**********************************') |
1634 |
+- self.MPIPrint('* Begin unsteady FSI computation *') |
1635 |
+- self.MPIPrint('**********************************\n') |
1636 |
+- |
1637 |
+- # --- Initialize the coupled solution --- # |
1638 |
+- #If restart (DOES NOT WORK YET) |
1639 |
+- if FSI_config['RESTART_SOL'] == 'YES': |
1640 |
+- TimeIterTreshold = -1 |
1641 |
+- FluidSolver.setTemporalIteration(TimeIter) |
1642 |
+- if myid == self.rootProcess: |
1643 |
+- SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True) |
1644 |
++ # --- Set some general variables for the unsteady computation --- # |
1645 |
++ deltaT = FSI_config['UNST_TIMESTEP'] # physical time step |
1646 |
++ totTime = FSI_config['UNST_TIME'] # physical simulation time |
1647 |
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step) |
1648 |
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance |
1649 |
++ TimeIterTreshold = 0 # time iteration from which we allow the solid to deform |
1650 |
++ |
1651 |
++ if FSI_config['RESTART_SOL'] == 'YES': |
1652 |
++ startTime = FSI_config['START_TIME'] |
1653 |
++ NbTimeIter = ((totTime)/deltaT)-1 |
1654 |
++ time = startTime |
1655 |
++ TimeIter = FSI_config['RESTART_ITER'] |
1656 |
++ else: |
1657 |
++ NbTimeIter = (totTime/deltaT)-1 # number of time iterations |
1658 |
++ time = 0.0 # initial time |
1659 |
++ TimeIter = 0 # initial time iteration |
1660 |
++ |
1661 |
++ NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer |
1662 |
++ |
1663 |
++ varCoordNorm = 0.0 # FSI residual |
1664 |
++ FSIConv = False # FSI convergence flag |
1665 |
++ |
1666 |
++ self.MPIPrint('\n**********************************') |
1667 |
++ self.MPIPrint('* Begin unsteady FSI computation *') |
1668 |
++ self.MPIPrint('**********************************\n') |
1669 |
++ |
1670 |
++ # --- Initialize the coupled solution --- # |
1671 |
++ #If restart (DOES NOT WORK YET) |
1672 |
++ if FSI_config['RESTART_SOL'] == 'YES': |
1673 |
++ TimeIterTreshold = -1 |
1674 |
++ FluidSolver.setTemporalIteration(TimeIter) |
1675 |
++ if myid == self.rootProcess: |
1676 |
++ SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True) |
1677 |
++ if self.have_MPI == True: |
1678 |
++ self.comm.barrier() |
1679 |
++ FluidSolver.setInitialMesh(True) |
1680 |
++ if myid == self.rootProcess: |
1681 |
++ SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray()) |
1682 |
+ if self.have_MPI == True: |
1683 |
+- self.comm.barrier() |
1684 |
+- FluidSolver.setInitialMesh(True) |
1685 |
+- if myid == self.rootProcess: |
1686 |
+- SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray()) |
1687 |
+- if self.have_MPI == True: |
1688 |
+- self.comm.barrier() |
1689 |
+- if myid == self.rootProcess: |
1690 |
+- SolidSolver.updateSolution() |
1691 |
+- #If no restart |
1692 |
+- else: |
1693 |
+- self.MPIPrint('Setting FSI initial conditions') |
1694 |
++ self.comm.barrier() |
1695 |
++ if myid == self.rootProcess: |
1696 |
++ SolidSolver.updateSolution() |
1697 |
++ #If no restart |
1698 |
++ else: |
1699 |
++ self.MPIPrint('Setting FSI initial conditions') |
1700 |
+ if myid in self.solidSolverProcessors: |
1701 |
+- SolidSolver.setInitialDisplacements() |
1702 |
++ SolidSolver.setInitialDisplacements() |
1703 |
+ self.getSolidInterfaceDisplacement(SolidSolver) |
1704 |
+- self.interpolateSolidPositionOnFluidMesh(FSI_config) |
1705 |
+- self.setFluidInterfaceVarCoord(FluidSolver) |
1706 |
+- FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver |
1707 |
+- self.MPIPrint('\nFSI initial conditions are set') |
1708 |
+- self.MPIPrint('Beginning time integration\n') |
1709 |
+- |
1710 |
+- # --- External temporal loop --- # |
1711 |
+- while TimeIter <= NbTimeIter: |
1712 |
+- |
1713 |
+- if TimeIter > TimeIterTreshold: |
1714 |
+- NbFSIIter = NbFSIIterMax |
1715 |
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter)) |
1716 |
+- else: |
1717 |
+- NbFSIIter = 1 |
1718 |
+- |
1719 |
+- self.FSIIter = 0 |
1720 |
+- FSIConv = False |
1721 |
+- FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration |
1722 |
+- |
1723 |
+- # --- Internal FSI loop --- # |
1724 |
+- while self.FSIIter <= (NbFSIIter-1): |
1725 |
++ self.interpolateSolidPositionOnFluidMesh(FSI_config) |
1726 |
++ self.setFluidInterfaceVarCoord(FluidSolver) |
1727 |
++ FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver |
1728 |
++ self.MPIPrint('\nFSI initial conditions are set') |
1729 |
++ self.MPIPrint('Beginning time integration\n') |
1730 |
++ |
1731 |
++ # --- External temporal loop --- # |
1732 |
++ while TimeIter <= NbTimeIter: |
1733 |
++ |
1734 |
++ if TimeIter > TimeIterTreshold: |
1735 |
++ NbFSIIter = NbFSIIterMax |
1736 |
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter)) |
1737 |
++ else: |
1738 |
++ NbFSIIter = 1 |
1739 |
++ |
1740 |
++ self.FSIIter = 0 |
1741 |
++ FSIConv = False |
1742 |
++ FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration |
1743 |
+ |
1744 |
+- self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter)) |
1745 |
++ # --- Internal FSI loop --- # |
1746 |
++ while self.FSIIter <= (NbFSIIter-1): |
1747 |
+ |
1748 |
+- # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- # |
1749 |
+- self.interpolateSolidPositionOnFluidMesh(FSI_config) |
1750 |
++ self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter)) |
1751 |
++ |
1752 |
++ # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- # |
1753 |
++ self.interpolateSolidPositionOnFluidMesh(FSI_config) |
1754 |
+ self.MPIPrint('\nPerforming dynamic mesh deformation (ALE)...\n') |
1755 |
+ self.setFluidInterfaceVarCoord(FluidSolver) |
1756 |
+ FluidSolver.DynamicMeshUpdate(TimeIter) |
1757 |
+- |
1758 |
+- # --- Fluid solver call for FSI subiteration --- # |
1759 |
+- self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...') |
1760 |
++ |
1761 |
++ # --- Fluid solver call for FSI subiteration --- # |
1762 |
++ self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...') |
1763 |
+ self.MPIBarrier() |
1764 |
+- FluidSolver.ResetConvergence() |
1765 |
+- FluidSolver.Run() |
1766 |
++ FluidSolver.ResetConvergence() |
1767 |
++ FluidSolver.Run() |
1768 |
+ self.MPIBarrier() |
1769 |
+ |
1770 |
+- # --- Surface fluid loads interpolation and communication --- # |
1771 |
+- self.MPIPrint('\nProcessing interface fluid loads...\n') |
1772 |
++ # --- Surface fluid loads interpolation and communication --- # |
1773 |
++ self.MPIPrint('\nProcessing interface fluid loads...\n') |
1774 |
+ self.MPIBarrier() |
1775 |
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver) |
1776 |
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver) |
1777 |
+ self.MPIBarrier() |
1778 |
+- if TimeIter > TimeIterTreshold: |
1779 |
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config) |
1780 |
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, time) |
1781 |
++ if TimeIter > TimeIterTreshold: |
1782 |
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config) |
1783 |
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, time) |
1784 |
+ |
1785 |
+- # --- Solid solver call for FSI subiteration --- # |
1786 |
+- self.MPIPrint('\nLaunching solid solver for a single time iteration...\n') |
1787 |
++ # --- Solid solver call for FSI subiteration --- # |
1788 |
++ self.MPIPrint('\nLaunching solid solver for a single time iteration...\n') |
1789 |
+ if myid in self.solidSolverProcessors: |
1790 |
+- if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1791 |
+- SolidSolver.timeIteration(time) |
1792 |
+- elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER': |
1793 |
+- SolidSolver.run(time-deltaT, time) |
1794 |
+- |
1795 |
+- # --- Compute and monitor the FSI residual --- # |
1796 |
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver) |
1797 |
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm)) |
1798 |
+- if varCoordNorm < FSITolerance: |
1799 |
+- FSIConv = True |
1800 |
+- break |
1801 |
++ if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1802 |
++ SolidSolver.timeIteration(time) |
1803 |
++ elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER': |
1804 |
++ SolidSolver.run(time-deltaT, time) |
1805 |
++ |
1806 |
++ # --- Compute and monitor the FSI residual --- # |
1807 |
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver) |
1808 |
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm)) |
1809 |
++ if varCoordNorm < FSITolerance: |
1810 |
++ FSIConv = True |
1811 |
++ break |
1812 |
+ |
1813 |
+- # --- Relaxe the solid position --- # |
1814 |
++ # --- Relaxe the solid position --- # |
1815 |
+ self.MPIPrint('\nProcessing interface displacements...\n') |
1816 |
+- self.relaxSolidPosition(FSI_config) |
1817 |
+- |
1818 |
+- self.FSIIter += 1 |
1819 |
+- # --- End OF FSI loop --- # |
1820 |
++ self.relaxSolidPosition(FSI_config) |
1821 |
++ |
1822 |
++ self.FSIIter += 1 |
1823 |
++ # --- End OF FSI loop --- # |
1824 |
+ |
1825 |
+ self.MPIBarrier() |
1826 |
+ |
1827 |
+- # --- Update the FSI history file --- # |
1828 |
+- if TimeIter > TimeIterTreshold: |
1829 |
+- self.MPIPrint('\nBGS is converged (strong coupling)') |
1830 |
+- self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv) |
1831 |
+- |
1832 |
+- # --- Update, monitor and output the fluid solution before the next time step ---# |
1833 |
+- FluidSolver.Update() |
1834 |
+- FluidSolver.Monitor(TimeIter) |
1835 |
+- FluidSolver.Output(TimeIter) |
1836 |
+- |
1837 |
+- if TimeIter >= TimeIterTreshold: |
1838 |
+- if myid in self.solidSolverProcessors: |
1839 |
+- # --- Output the solid solution before thr next time step --- # |
1840 |
+- SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter) |
1841 |
+- |
1842 |
+- # --- Displacement predictor for the next time step and update of the solid solution --- # |
1843 |
+- self.MPIPrint('\nSolid displacement prediction for next time step') |
1844 |
+- self.displacementPredictor(FSI_config, SolidSolver, deltaT) |
1845 |
++ # --- Update the FSI history file --- # |
1846 |
++ if TimeIter > TimeIterTreshold: |
1847 |
++ self.MPIPrint('\nBGS is converged (strong coupling)') |
1848 |
++ self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv) |
1849 |
++ |
1850 |
++ # --- Update, monitor and output the fluid solution before the next time step ---# |
1851 |
++ FluidSolver.Update() |
1852 |
++ FluidSolver.Monitor(TimeIter) |
1853 |
++ FluidSolver.Output(TimeIter) |
1854 |
++ |
1855 |
++ if TimeIter >= TimeIterTreshold: |
1856 |
++ if myid in self.solidSolverProcessors: |
1857 |
++ # --- Output the solid solution before thr next time step --- # |
1858 |
++ SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter) |
1859 |
++ |
1860 |
++ # --- Displacement predictor for the next time step and update of the solid solution --- # |
1861 |
++ self.MPIPrint('\nSolid displacement prediction for next time step') |
1862 |
++ self.displacementPredictor(FSI_config, SolidSolver, deltaT) |
1863 |
+ if myid in self.solidSolverProcessors: |
1864 |
+- SolidSolver.updateSolution() |
1865 |
+- |
1866 |
+- TimeIter += 1 |
1867 |
+- time += deltaT |
1868 |
+- #--- End of the temporal loop --- # |
1869 |
++ SolidSolver.updateSolution() |
1870 |
++ |
1871 |
++ TimeIter += 1 |
1872 |
++ time += deltaT |
1873 |
++ #--- End of the temporal loop --- # |
1874 |
+ |
1875 |
+ self.MPIBarrier() |
1876 |
+ |
1877 |
+- self.MPIPrint('\n*************************') |
1878 |
+- self.MPIPrint('* End FSI computation *') |
1879 |
+- self.MPIPrint('*************************\n') |
1880 |
++ self.MPIPrint('\n*************************') |
1881 |
++ self.MPIPrint('* End FSI computation *') |
1882 |
++ self.MPIPrint('*************************\n') |
1883 |
+ |
1884 |
+ def SteadyFSI(self, FSI_config,FluidSolver, SolidSolver): |
1885 |
+- """ |
1886 |
+- Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface. |
1887 |
+- """ |
1888 |
++ """ |
1889 |
++ Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface. |
1890 |
++ """ |
1891 |
+ |
1892 |
+ if self.have_MPI == True: |
1893 |
+- myid = self.comm.Get_rank() |
1894 |
+- numberPart = self.comm.Get_size() |
1895 |
++ myid = self.comm.Get_rank() |
1896 |
++ numberPart = self.comm.Get_size() |
1897 |
+ else: |
1898 |
+ myid = 0 |
1899 |
+ numberPart = 1 |
1900 |
+ |
1901 |
+- # --- Set some general variables for the steady computation --- # |
1902 |
+- NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step |
1903 |
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step) |
1904 |
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance |
1905 |
+- varCoordNorm = 0.0 |
1906 |
+- |
1907 |
+- self.MPIPrint('\n********************************') |
1908 |
+- self.MPIPrint('* Begin steady FSI computation *') |
1909 |
+- self.MPIPrint('********************************\n') |
1910 |
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************') |
1911 |
++ # --- Set some general variables for the steady computation --- # |
1912 |
++ NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step |
1913 |
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step) |
1914 |
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance |
1915 |
++ varCoordNorm = 0.0 |
1916 |
++ |
1917 |
++ self.MPIPrint('\n********************************') |
1918 |
++ self.MPIPrint('* Begin steady FSI computation *') |
1919 |
++ self.MPIPrint('********************************\n') |
1920 |
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************') |
1921 |
+ |
1922 |
+ self.getSolidInterfaceDisplacement(SolidSolver) |
1923 |
+ |
1924 |
+- # --- External FSI loop --- # |
1925 |
+- self.FSIIter = 0 |
1926 |
+- while self.FSIIter < NbFSIIterMax: |
1927 |
+- self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter)) |
1928 |
+- self.MPIPrint('\nLaunching fluid solver for a steady computation...') |
1929 |
+- # --- Fluid solver call for FSI subiteration ---# |
1930 |
+- Iter = 0 |
1931 |
+- FluidSolver.ResetConvergence() |
1932 |
+- while Iter < NbIter: |
1933 |
+- FluidSolver.PreprocessExtIter(Iter) |
1934 |
+- FluidSolver.Run() |
1935 |
+- StopIntegration = FluidSolver.Monitor(Iter) |
1936 |
+- FluidSolver.Output(Iter) |
1937 |
+- if StopIntegration: |
1938 |
+- break; |
1939 |
+- Iter += 1 |
1940 |
+- |
1941 |
+- # --- Surface fluid loads interpolation and communication ---# |
1942 |
+- self.MPIPrint('\nProcessing interface fluid loads...\n') |
1943 |
++ # --- External FSI loop --- # |
1944 |
++ self.FSIIter = 0 |
1945 |
++ while self.FSIIter < NbFSIIterMax: |
1946 |
++ self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter)) |
1947 |
++ self.MPIPrint('\nLaunching fluid solver for a steady computation...') |
1948 |
++ # --- Fluid solver call for FSI subiteration ---# |
1949 |
++ Iter = 0 |
1950 |
++ FluidSolver.ResetConvergence() |
1951 |
++ while Iter < NbIter: |
1952 |
++ FluidSolver.PreprocessExtIter(Iter) |
1953 |
++ FluidSolver.Run() |
1954 |
++ StopIntegration = FluidSolver.Monitor(Iter) |
1955 |
++ FluidSolver.Output(Iter) |
1956 |
++ if StopIntegration: |
1957 |
++ break; |
1958 |
++ Iter += 1 |
1959 |
++ |
1960 |
++ # --- Surface fluid loads interpolation and communication ---# |
1961 |
++ self.MPIPrint('\nProcessing interface fluid loads...\n') |
1962 |
+ self.MPIBarrier() |
1963 |
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver) |
1964 |
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver) |
1965 |
+ self.MPIBarrier() |
1966 |
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config) |
1967 |
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05) |
1968 |
+- |
1969 |
+- # --- Solid solver call for FSI subiteration --- # |
1970 |
+- self.MPIPrint('\nLaunching solid solver for a static computation...\n') |
1971 |
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config) |
1972 |
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05) |
1973 |
++ |
1974 |
++ # --- Solid solver call for FSI subiteration --- # |
1975 |
++ self.MPIPrint('\nLaunching solid solver for a static computation...\n') |
1976 |
+ if myid in self.solidSolverProcessors: |
1977 |
+- if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1978 |
+- SolidSolver.staticComputation() |
1979 |
++ if FSI_config['CSD_SOLVER'] == 'NATIVE': |
1980 |
++ SolidSolver.staticComputation() |
1981 |
+ else: |
1982 |
+ SolidSolver.run(0.0, 0.05) |
1983 |
+- SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter) |
1984 |
++ SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter) |
1985 |
+ |
1986 |
+- # --- Compute and monitor the FSI residual --- # |
1987 |
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver) |
1988 |
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm)) |
1989 |
++ # --- Compute and monitor the FSI residual --- # |
1990 |
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver) |
1991 |
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm)) |
1992 |
+ self.writeFSIHistory(0, 0.0, varCoordNorm, False) |
1993 |
+- if varCoordNorm < FSITolerance: |
1994 |
+- break |
1995 |
++ if varCoordNorm < FSITolerance: |
1996 |
++ break |
1997 |
+ |
1998 |
+ # --- Relaxe the solid displacement and update the solid solution --- # |
1999 |
+ self.MPIPrint('\nProcessing interface displacements...\n') |
2000 |
+- self.relaxSolidPosition(FSI_config) |
2001 |
++ self.relaxSolidPosition(FSI_config) |
2002 |
+ if myid in self.solidSolverProcessors: |
2003 |
+ SolidSolver.updateSolution() |
2004 |
+- |
2005 |
+- # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- # |
2006 |
+- self.interpolateSolidPositionOnFluidMesh(FSI_config) |
2007 |
+- self.MPIPrint('\nPerforming static mesh deformation...\n') |
2008 |
+- self.setFluidInterfaceVarCoord(FluidSolver) |
2009 |
+- FluidSolver.StaticMeshUpdate() |
2010 |
+- self.FSIIter += 1 |
2011 |
++ |
2012 |
++ # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- # |
2013 |
++ self.interpolateSolidPositionOnFluidMesh(FSI_config) |
2014 |
++ self.MPIPrint('\nPerforming static mesh deformation...\n') |
2015 |
++ self.setFluidInterfaceVarCoord(FluidSolver) |
2016 |
++ FluidSolver.StaticMeshUpdate() |
2017 |
++ self.FSIIter += 1 |
2018 |
+ |
2019 |
+ self.MPIBarrier() |
2020 |
+ |
2021 |
+- self.MPIPrint('\nBGS is converged (strong coupling)') |
2022 |
+- self.MPIPrint(' ') |
2023 |
+- self.MPIPrint('*************************') |
2024 |
+- self.MPIPrint('* End FSI computation *') |
2025 |
+- self.MPIPrint('*************************') |
2026 |
+- self.MPIPrint(' ') |
2027 |
++ self.MPIPrint('\nBGS is converged (strong coupling)') |
2028 |
++ self.MPIPrint(' ') |
2029 |
++ self.MPIPrint('*************************') |
2030 |
++ self.MPIPrint('* End FSI computation *') |
2031 |
++ self.MPIPrint('*************************') |
2032 |
++ self.MPIPrint(' ') |
2033 |
+diff -Naur old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py |
2034 |
+--- old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-01 19:09:18.000000000 +0300 |
2035 |
++++ new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-10 16:17:07.000000000 +0300 |
2036 |
+@@ -174,9 +174,9 @@ |
2037 |
+ |
2038 |
+ with open(self.Config_file) as configfile: |
2039 |
+ while 1: |
2040 |
+- line = configfile.readline() |
2041 |
+- if not line: |
2042 |
+- break |
2043 |
++ line = configfile.readline() |
2044 |
++ if not line: |
2045 |
++ break |
2046 |
+ |
2047 |
+ # remove line returns |
2048 |
+ line = line.strip('\r\n') |
2049 |
+@@ -189,41 +189,41 @@ |
2050 |
+ this_value = line[1].strip() |
2051 |
+ |
2052 |
+ for case in switch(this_param): |
2053 |
+- #integer values |
2054 |
+- #if case("NB_FSI_ITER") : |
2055 |
+- #self.Config[this_param] = int(this_value) |
2056 |
+- #break |
2057 |
+- |
2058 |
+- #float values |
2059 |
+- if case("DELTA_T") : pass |
2060 |
+- if case("START_TIME") : pass |
2061 |
+- if case("STOP_TIME") : pass |
2062 |
+- if case("SPRING_MASS") : pass |
2063 |
+- if case("INERTIA_FLEXURAL") : pass |
2064 |
+- if case("SPRING_STIFFNESS") : pass |
2065 |
+- if case("SPRING_DAMPING") : pass |
2066 |
+- if case("TORSIONAL_STIFFNESS") : pass |
2067 |
+- if case("TORSIONAL_DAMPING") : pass |
2068 |
+- if case("CORD") : pass |
2069 |
+- if case("FLEXURAL_AXIS") : pass |
2070 |
+- if case("GRAVITY_CENTER") : pass |
2071 |
+- if case("INITIAL_DISP") : pass |
2072 |
+- if case("INITIAL_ANGLE") : pass |
2073 |
+- if case("RHO") : |
2074 |
+- self.Config[this_param] = float(this_value) |
2075 |
+- break |
2076 |
+- |
2077 |
+- #string values |
2078 |
+- if case("TIME_MARCHING") : pass |
2079 |
+- if case("MESH_FILE") : pass |
2080 |
+- if case("CSD_SOLVER") : pass |
2081 |
+- if case("MOVING_MARKER") : pass |
2082 |
+- if case("STRUCT_TYPE") : |
2083 |
+- self.Config[this_param] = this_value |
2084 |
+- break |
2085 |
++ #integer values |
2086 |
++ #if case("NB_FSI_ITER") : |
2087 |
++ #self.Config[this_param] = int(this_value) |
2088 |
++ #break |
2089 |
++ |
2090 |
++ #float values |
2091 |
++ if case("DELTA_T") : pass |
2092 |
++ if case("START_TIME") : pass |
2093 |
++ if case("STOP_TIME") : pass |
2094 |
++ if case("SPRING_MASS") : pass |
2095 |
++ if case("INERTIA_FLEXURAL") : pass |
2096 |
++ if case("SPRING_STIFFNESS") : pass |
2097 |
++ if case("SPRING_DAMPING") : pass |
2098 |
++ if case("TORSIONAL_STIFFNESS") : pass |
2099 |
++ if case("TORSIONAL_DAMPING") : pass |
2100 |
++ if case("CORD") : pass |
2101 |
++ if case("FLEXURAL_AXIS") : pass |
2102 |
++ if case("GRAVITY_CENTER") : pass |
2103 |
++ if case("INITIAL_DISP") : pass |
2104 |
++ if case("INITIAL_ANGLE") : pass |
2105 |
++ if case("RHO") : |
2106 |
++ self.Config[this_param] = float(this_value) |
2107 |
++ break |
2108 |
++ |
2109 |
++ #string values |
2110 |
++ if case("TIME_MARCHING") : pass |
2111 |
++ if case("MESH_FILE") : pass |
2112 |
++ if case("CSD_SOLVER") : pass |
2113 |
++ if case("MOVING_MARKER") : pass |
2114 |
++ if case("STRUCT_TYPE") : |
2115 |
++ self.Config[this_param] = this_value |
2116 |
++ break |
2117 |
+ |
2118 |
+- if case(): |
2119 |
+- print(this_param + " is an invalid option !") |
2120 |
++ if case(): |
2121 |
++ print(this_param + " is an invalid option !") |
2122 |
+ break |
2123 |
+ |
2124 |
+ def __readSU2Mesh(self): |
2125 |
+@@ -233,78 +233,78 @@ |
2126 |
+ print('Opened mesh file ' + self.Mesh_file + '.') |
2127 |
+ while 1: |
2128 |
+ line = meshfile.readline() |
2129 |
+- if not line: |
2130 |
+- break |
2131 |
++ if not line: |
2132 |
++ break |
2133 |
+ |
2134 |
+- pos = line.find('NDIM') |
2135 |
+- if pos != -1: |
2136 |
+- line = line.strip('\r\n') |
2137 |
++ pos = line.find('NDIM') |
2138 |
++ if pos != -1: |
2139 |
++ line = line.strip('\r\n') |
2140 |
+ line = line.split("=",1) |
2141 |
+- self.nDim = int(line[1]) |
2142 |
+- continue |
2143 |
+- |
2144 |
+- pos = line.find('NELEM') |
2145 |
+- if pos != -1: |
2146 |
+- line = line.strip('\r\n') |
2147 |
++ self.nDim = int(line[1]) |
2148 |
++ continue |
2149 |
++ |
2150 |
++ pos = line.find('NELEM') |
2151 |
++ if pos != -1: |
2152 |
++ line = line.strip('\r\n') |
2153 |
+ line = line.split("=",1) |
2154 |
+- self.nElem = int(line[1]) |
2155 |
+- continue |
2156 |
++ self.nElem = int(line[1]) |
2157 |
++ continue |
2158 |
+ |
2159 |
+- pos = line.find('NPOIN') |
2160 |
+- if pos != -1: |
2161 |
+- line = line.strip('\r\n') |
2162 |
++ pos = line.find('NPOIN') |
2163 |
++ if pos != -1: |
2164 |
++ line = line.strip('\r\n') |
2165 |
+ line = line.split("=",1) |
2166 |
+- self.nPoint = int(line[1]) |
2167 |
++ self.nPoint = int(line[1]) |
2168 |
+ for iPoint in range(self.nPoint): |
2169 |
+- self.node.append(Point()) |
2170 |
+- line = meshfile.readline() |
2171 |
+- line = line.strip('\r\n') |
2172 |
+- line = line.split(' ',self.nDim) |
2173 |
+- x = float(line[0]) |
2174 |
+- y = float(line[1]) |
2175 |
++ self.node.append(Point()) |
2176 |
++ line = meshfile.readline() |
2177 |
++ line = line.strip('\r\n') |
2178 |
++ line = line.split(' ',self.nDim) |
2179 |
++ x = float(line[0]) |
2180 |
++ y = float(line[1]) |
2181 |
+ z = 0.0 |
2182 |
+- if self.nDim == 3: |
2183 |
+- z = float(line[2]) |
2184 |
+- self.node[iPoint].SetCoord((x,y,z)) |
2185 |
++ if self.nDim == 3: |
2186 |
++ z = float(line[2]) |
2187 |
++ self.node[iPoint].SetCoord((x,y,z)) |
2188 |
+ self.node[iPoint].SetCoord0((x,y,z)) |
2189 |
+- self.node[iPoint].SetCoord_n((x,y,z)) |
2190 |
+- continue |
2191 |
++ self.node[iPoint].SetCoord_n((x,y,z)) |
2192 |
++ continue |
2193 |
+ |
2194 |
+- pos = line.find('NMARK') |
2195 |
+- if pos != -1: |
2196 |
+- line = line.strip('\r\n') |
2197 |
++ pos = line.find('NMARK') |
2198 |
++ if pos != -1: |
2199 |
++ line = line.strip('\r\n') |
2200 |
+ line = line.split("=",1) |
2201 |
+- self.nMarker = int(line[1]) |
2202 |
+- continue |
2203 |
++ self.nMarker = int(line[1]) |
2204 |
++ continue |
2205 |
+ |
2206 |
+- pos = line.find('MARKER_TAG') |
2207 |
+- if pos != -1: |
2208 |
+- line = line.strip('\r\n') |
2209 |
+- line = line.replace(" ", "") |
2210 |
++ pos = line.find('MARKER_TAG') |
2211 |
++ if pos != -1: |
2212 |
++ line = line.strip('\r\n') |
2213 |
++ line = line.replace(" ", "") |
2214 |
+ line = line.split("=",1) |
2215 |
+- markerTag = line[1] |
2216 |
+- if markerTag == self.FSI_marker: |
2217 |
+- self.markers[markerTag] = [] |
2218 |
+- line = meshfile.readline() |
2219 |
+- line = line.strip('\r\n') |
2220 |
+- line = line.split("=",1) |
2221 |
+- nElem = int(line[1]) |
2222 |
+- for iElem in range(nElem): |
2223 |
+- line = meshfile.readline() |
2224 |
+- line = line.strip('\r\n') |
2225 |
+- line = line.split(' ',1) |
2226 |
+- elemType = int(line[0]) |
2227 |
+- if elemType == 3: |
2228 |
+- nodes = line[1].split(' ', 1) |
2229 |
+- if not int(nodes[0]) in self.markers[markerTag]: |
2230 |
+- self.markers[markerTag].append(int(nodes[0])) |
2231 |
+- if not int(nodes[1]) in self.markers[markerTag]: |
2232 |
+- self.markers[markerTag].append(int(nodes[1])) |
2233 |
+- else: |
2234 |
+- print("Element type {} is not recognized !!".format(elemType)) |
2235 |
+- continue |
2236 |
+- else: |
2237 |
+- continue |
2238 |
++ markerTag = line[1] |
2239 |
++ if markerTag == self.FSI_marker: |
2240 |
++ self.markers[markerTag] = [] |
2241 |
++ line = meshfile.readline() |
2242 |
++ line = line.strip('\r\n') |
2243 |
++ line = line.split("=",1) |
2244 |
++ nElem = int(line[1]) |
2245 |
++ for iElem in range(nElem): |
2246 |
++ line = meshfile.readline() |
2247 |
++ line = line.strip('\r\n') |
2248 |
++ line = line.split(' ',1) |
2249 |
++ elemType = int(line[0]) |
2250 |
++ if elemType == 3: |
2251 |
++ nodes = line[1].split(' ', 1) |
2252 |
++ if not int(nodes[0]) in self.markers[markerTag]: |
2253 |
++ self.markers[markerTag].append(int(nodes[0])) |
2254 |
++ if not int(nodes[1]) in self.markers[markerTag]: |
2255 |
++ self.markers[markerTag].append(int(nodes[1])) |
2256 |
++ else: |
2257 |
++ print("Element type {} is not recognized !!".format(elemType)) |
2258 |
++ continue |
2259 |
++ else: |
2260 |
++ continue |
2261 |
+ |
2262 |
+ print("Number of dimensions: {}".format(self.nDim)) |
2263 |
+ print("Number of elements: {}".format(self.nElem)) |
2264 |
+@@ -441,23 +441,23 @@ |
2265 |
+ Coord_n = self.node[iPoint].GetCoord_n() |
2266 |
+ |
2267 |
+ if self.Unsteady: |
2268 |
+- r = Coord_n - self.centerOfRotation_n |
2269 |
+- else: |
2270 |
+- r = Coord - self.centerOfRotation |
2271 |
++ r = Coord_n - self.centerOfRotation_n |
2272 |
++ else: |
2273 |
++ r = Coord - self.centerOfRotation |
2274 |
+ |
2275 |
+- rotCoord = rotMatrix.dot(r) |
2276 |
++ rotCoord = rotMatrix.dot(r) |
2277 |
+ |
2278 |
+ newCoord = newCenter + rotCoord |
2279 |
+ newVel[0] = Centerdot[0]+psidot*(newCoord[1]-newCenter[1]) |
2280 |
+- newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0]) |
2281 |
+- newVel[2] = Centerdot[2]+0.0 |
2282 |
++ newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0]) |
2283 |
++ newVel[2] = Centerdot[2]+0.0 |
2284 |
+ |
2285 |
+ self.node[iPoint].SetCoord((newCoord[0], newCoord[1], newCoord[2])) |
2286 |
+ self.node[iPoint].SetVel((newVel[0], newVel[1], newVel[2])) |
2287 |
+ |
2288 |
+- if initialize: |
2289 |
+- self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2])) |
2290 |
+- self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2])) |
2291 |
++ if initialize: |
2292 |
++ self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2])) |
2293 |
++ self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2])) |
2294 |
+ |
2295 |
+ self.centerOfRotation = np.copy(newCenter) |
2296 |
+ |
2297 |
+diff -Naur old/SU2_PY/FSI/io/FSI_config.py new/SU2_PY/FSI/io/FSI_config.py |
2298 |
+--- old/SU2_PY/FSI/io/FSI_config.py 2020-05-01 19:09:18.000000000 +0300 |
2299 |
++++ new/SU2_PY/FSI/io/FSI_config.py 2020-05-10 16:17:07.000000000 +0300 |
2300 |
+@@ -58,23 +58,23 @@ |
2301 |
+ self.readConfig() |
2302 |
+ |
2303 |
+ def __str__(self): |
2304 |
+- tempString = str() |
2305 |
+- for key, value in self._ConfigContent.items(): |
2306 |
+- tempString += "{} = {}\n".format(key,value) |
2307 |
+- return tempString |
2308 |
++ tempString = str() |
2309 |
++ for key, value in self._ConfigContent.items(): |
2310 |
++ tempString += "{} = {}\n".format(key,value) |
2311 |
++ return tempString |
2312 |
+ |
2313 |
+ def __getitem__(self,key): |
2314 |
+- return self._ConfigContent[key] |
2315 |
++ return self._ConfigContent[key] |
2316 |
+ |
2317 |
+ def __setitem__(self, key, value): |
2318 |
+- self._ConfigContent[key] = value |
2319 |
++ self._ConfigContent[key] = value |
2320 |
+ |
2321 |
+ def readConfig(self): |
2322 |
+ input_file = open(self.ConfigFileName) |
2323 |
+ while 1: |
2324 |
+- line = input_file.readline() |
2325 |
+- if not line: |
2326 |
+- break |
2327 |
++ line = input_file.readline() |
2328 |
++ if not line: |
2329 |
++ break |
2330 |
+ # remove line returns |
2331 |
+ line = line.strip('\r\n') |
2332 |
+ # make sure it has useful data |
2333 |
+@@ -86,46 +86,46 @@ |
2334 |
+ this_value = line[1].strip() |
2335 |
+ |
2336 |
+ for case in switch(this_param): |
2337 |
+- #integer values |
2338 |
+- if case("NDIM") : pass |
2339 |
+- #if case("MESH_DEF_LIN_ITER") : pass |
2340 |
+- #if case("MESH_DEF_NONLIN_ITER") : pass |
2341 |
+- if case("RESTART_ITER") : pass |
2342 |
+- if case("NB_EXT_ITER") : pass |
2343 |
+- if case("NB_FSI_ITER") : |
2344 |
+- self._ConfigContent[this_param] = int(this_value) |
2345 |
+- break |
2346 |
++ #integer values |
2347 |
++ if case("NDIM") : pass |
2348 |
++ #if case("MESH_DEF_LIN_ITER") : pass |
2349 |
++ #if case("MESH_DEF_NONLIN_ITER") : pass |
2350 |
++ if case("RESTART_ITER") : pass |
2351 |
++ if case("NB_EXT_ITER") : pass |
2352 |
++ if case("NB_FSI_ITER") : |
2353 |
++ self._ConfigContent[this_param] = int(this_value) |
2354 |
++ break |
2355 |
+ |
2356 |
+- #float values |
2357 |
++ #float values |
2358 |
+ if case("RBF_RADIUS") : pass |
2359 |
+- if case("AITKEN_PARAM") : pass |
2360 |
+- if case("START_TIME") : pass |
2361 |
+- if case("UNST_TIMESTEP") : pass |
2362 |
+- if case("UNST_TIME") : pass |
2363 |
+- if case("FSI_TOLERANCE") : |
2364 |
+- self._ConfigContent[this_param] = float(this_value) |
2365 |
+- break |
2366 |
+- |
2367 |
+- #string values |
2368 |
+- if case("CFD_CONFIG_FILE_NAME") : pass |
2369 |
+- if case("CSD_SOLVER") : pass |
2370 |
+- if case("CSD_CONFIG_FILE_NAME") : pass |
2371 |
+- if case("RESTART_SOL") : pass |
2372 |
+- if case("MATCHING_MESH") : pass |
2373 |
++ if case("AITKEN_PARAM") : pass |
2374 |
++ if case("START_TIME") : pass |
2375 |
++ if case("UNST_TIMESTEP") : pass |
2376 |
++ if case("UNST_TIME") : pass |
2377 |
++ if case("FSI_TOLERANCE") : |
2378 |
++ self._ConfigContent[this_param] = float(this_value) |
2379 |
++ break |
2380 |
++ |
2381 |
++ #string values |
2382 |
++ if case("CFD_CONFIG_FILE_NAME") : pass |
2383 |
++ if case("CSD_SOLVER") : pass |
2384 |
++ if case("CSD_CONFIG_FILE_NAME") : pass |
2385 |
++ if case("RESTART_SOL") : pass |
2386 |
++ if case("MATCHING_MESH") : pass |
2387 |
+ if case("MESH_INTERP_METHOD") : pass |
2388 |
+- if case("DISP_PRED") : pass |
2389 |
+- if case("AITKEN_RELAX") : pass |
2390 |
+- if case("TIME_MARCHING") : pass |
2391 |
+- if case("INTERNAL_FLOW") : |
2392 |
+- #if case("MESH_DEF_METHOD") : pass |
2393 |
+- self._ConfigContent[this_param] = this_value |
2394 |
+- break |
2395 |
+- |
2396 |
+- if case(): |
2397 |
+- print(this_param + " is an invalid option !") |
2398 |
+- break |
2399 |
+- #end for |
2400 |
+- |
2401 |
++ if case("DISP_PRED") : pass |
2402 |
++ if case("AITKEN_RELAX") : pass |
2403 |
++ if case("TIME_MARCHING") : pass |
2404 |
++ if case("INTERNAL_FLOW") : |
2405 |
++ #if case("MESH_DEF_METHOD") : pass |
2406 |
++ self._ConfigContent[this_param] = this_value |
2407 |
++ break |
2408 |
++ |
2409 |
++ if case(): |
2410 |
++ print(this_param + " is an invalid option !") |
2411 |
++ break |
2412 |
++ #end for |
2413 |
++ |
2414 |
+ |
2415 |
+ |
2416 |
+ #def dump() |
2417 |
+diff -Naur old/SU2_PY/SU2/util/filter_adjoint.py new/SU2_PY/SU2/util/filter_adjoint.py |
2418 |
+--- old/SU2_PY/SU2/util/filter_adjoint.py 2020-05-01 19:09:18.000000000 +0300 |
2419 |
++++ new/SU2_PY/SU2/util/filter_adjoint.py 2020-05-10 16:17:07.000000000 +0300 |
2420 |
+@@ -179,7 +179,7 @@ |
2421 |
+ Sens_smoother = smooth( S_clip, Sens_smooth, smth_len , 'blackman' ) |
2422 |
+ Sens_filter = Sens_smooth + (Sens_smooth - Sens_smoother) # sharpener |
2423 |
+ else: |
2424 |
+- raise Exception, 'unknown filter type' |
2425 |
++ raise Exception('unknown filter type') |
2426 |
+ |
2427 |
+ # -------------------------------------------- |
2428 |
+ # PLOTTING |
2429 |
+@@ -472,10 +472,10 @@ |
2430 |
+ """ |
2431 |
+ |
2432 |
+ if x.ndim != 1: |
2433 |
+- raise ValueError, "smooth only accepts 1 dimension arrays." |
2434 |
++ raise ValueError("smooth only accepts 1 dimension arrays.") |
2435 |
+ |
2436 |
+ if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: |
2437 |
+- raise ValueError, "Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" |
2438 |
++ raise ValueError("Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") |
2439 |
+ |
2440 |
+ # interpolate to constant time sample width |
2441 |
+ min_dt = np.min( np.diff(t) ) |
2442 |
+diff -Naur old/SU2_PY/compute_uncertainty.py new/SU2_PY/compute_uncertainty.py |
2443 |
+--- old/SU2_PY/compute_uncertainty.py 2020-05-01 19:09:18.000000000 +0300 |
2444 |
++++ new/SU2_PY/compute_uncertainty.py 2020-05-10 16:17:07.000000000 +0300 |
2445 |
+@@ -66,13 +66,13 @@ |
2446 |
+ |
2447 |
+ # perform eigenvalue perturbations |
2448 |
+ for comp in range(1,4): |
2449 |
+- print "\n\n =================== Performing " + str(comp) + " Component Perturbation =================== \n\n" |
2450 |
++ print('\n\n =================== Performing ' + str(comp) + ' Component Perturbation =================== \n\n') |
2451 |
+ |
2452 |
+ # make copies |
2453 |
+ konfig = copy.deepcopy(config) |
2454 |
+ ztate = copy.deepcopy(state) |
2455 |
+ |
2456 |
+- # set componentality |
2457 |
++ # set componentality |
2458 |
+ konfig.UQ_COMPONENT = comp |
2459 |
+ |
2460 |
+ # send output to a folder |
2461 |
+@@ -85,14 +85,14 @@ |
2462 |
+ # run su2 |
2463 |
+ info = SU2.run.CFD(konfig) |
2464 |
+ ztate.update(info) |
2465 |
+- |
2466 |
+- # Solution merging |
2467 |
+- konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME |
2468 |
+- info = SU2.run.merge(konfig) |
2469 |
+- ztate.update(info) |
2470 |
++ |
2471 |
++ # Solution merging |
2472 |
++ konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME |
2473 |
++ info = SU2.run.merge(konfig) |
2474 |
++ ztate.update(info) |
2475 |
+ |
2476 |
+ |
2477 |
+- print "\n\n =================== Performing p1c1 Component Perturbation =================== \n\n" |
2478 |
++ print('\n\n =================== Performing p1c1 Component Perturbation =================== \n\n') |
2479 |
+ |
2480 |
+ # make copies |
2481 |
+ konfig = copy.deepcopy(config) |
2482 |
+@@ -118,7 +118,7 @@ |
2483 |
+ info = SU2.run.merge(konfig) |
2484 |
+ state.update(info) |
2485 |
+ |
2486 |
+- print "\n\n =================== Performing p1c2 Component Perturbation =================== \n\n" |
2487 |
++ print('\n\n =================== Performing p1c2 Component Perturbation =================== \n\n') |
2488 |
+ |
2489 |
+ # make copies |
2490 |
+ konfig = copy.deepcopy(config) |
2491 |
+diff -Naur old/SU2_PY/fsi_computation.py new/SU2_PY/fsi_computation.py |
2492 |
+--- old/SU2_PY/fsi_computation.py 2020-05-01 19:09:18.000000000 +0300 |
2493 |
++++ new/SU2_PY/fsi_computation.py 2020-05-10 16:17:07.000000000 +0300 |
2494 |
+@@ -74,9 +74,9 @@ |
2495 |
+ if myid == rootProcess: |
2496 |
+ if os.getcwd() not in sys.path: |
2497 |
+ sys.path.append(os.getcwd()) |
2498 |
+- print("Setting working directory : {}".format(os.getcwd())) |
2499 |
+- else: |
2500 |
+- print("Working directory is set to {}".format(os.getcwd())) |
2501 |
++ print("Setting working directory : {}".format(os.getcwd())) |
2502 |
++ else: |
2503 |
++ print("Working directory is set to {}".format(os.getcwd())) |
2504 |
+ |
2505 |
+ # starts timer |
2506 |
+ start = timer.time() |
2507 |
|
2508 |
diff --git a/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch |
2509 |
new file mode 100644 |
2510 |
index 0000000..12acdfe |
2511 |
--- /dev/null |
2512 |
+++ b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch |
2513 |
@@ -0,0 +1,31 @@ |
2514 |
+diff -Naur old_static/externals/tecio/meson.build new_shared/externals/tecio/meson.build |
2515 |
+--- old_static/externals/tecio/meson.build 2020-05-09 16:35:10.000000000 +0300 |
2516 |
++++ new_shared/externals/tecio/meson.build 2020-05-10 11:52:36.000000000 +0300 |
2517 |
+@@ -1,15 +1,15 @@ |
2518 |
+-check_dir = run_command(python, |
2519 |
+- script_path / 'check_dir.py', |
2520 |
+- 'boost') |
2521 |
+-if check_dir.returncode() != 0 |
2522 |
+- message('Extracting boost ...') |
2523 |
+- extract_boost = run_command(python, |
2524 |
+- script_path / 'extract_file.py', |
2525 |
+- 'boost.tar.gz', |
2526 |
+- meson.current_source_dir(), check: true) |
2527 |
+-else |
2528 |
+- message('Boost sources found.') |
2529 |
+-endif |
2530 |
++#check_dir = run_command(python, |
2531 |
++# script_path / 'check_dir.py', |
2532 |
++# 'boost') |
2533 |
++#if check_dir.returncode() != 0 |
2534 |
++# message('Extracting boost ...') |
2535 |
++# extract_boost = run_command(python, |
2536 |
++# script_path / 'extract_file.py', |
2537 |
++# 'boost.tar.gz', |
2538 |
++# meson.current_source_dir(), check: true) |
2539 |
++#else |
2540 |
++# message('Boost sources found.') |
2541 |
++#endif |
2542 |
+ |
2543 |
+ if mpi |
2544 |
+ subdir('teciompisrc') |
2545 |
|
2546 |
diff --git a/sci-physics/SU2/metadata.xml b/sci-physics/SU2/metadata.xml |
2547 |
new file mode 100644 |
2548 |
index 0000000..636aded |
2549 |
--- /dev/null |
2550 |
+++ b/sci-physics/SU2/metadata.xml |
2551 |
@@ -0,0 +1,28 @@ |
2552 |
+<?xml version="1.0" encoding="UTF-8"?> |
2553 |
+<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd"> |
2554 |
+<pkgmetadata> |
2555 |
+ <maintainer type="person"> |
2556 |
+ <email>torokhov-s-a@××××××.ru</email> |
2557 |
+ <name>Sergey Torokhov</name> |
2558 |
+ </maintainer> |
2559 |
+ <use> |
2560 |
+ <flag name="cgns">Build with CGNS support (bundled)</flag> |
2561 |
+ <flag name="mkl">Enable Intel MKL support</flag> |
2562 |
+ <flag name="openblas">Enable OpenBLAS support</flag> |
2563 |
+ <flag name="tecio">Enable TECIO support</flag> |
2564 |
+ <flag name="tutorials">Install Tutorials files</flag> |
2565 |
+ </use> |
2566 |
+ <longdescription> |
2567 |
+ The SU2 suite is an open-source collection of C++ based software tools |
2568 |
+ for performing Partial Differential Equation (PDE) analysis and solving |
2569 |
+ PDE-constrained optimization problems. |
2570 |
+ |
2571 |
+ The toolset is designed with Computational Fluid Dynamics (CFD) |
2572 |
+ and aerodynamic shape optimization in mind, but is extensible |
2573 |
+ to treat arbitrary sets of governing equations such as potential flow, |
2574 |
+ elasticity, electrodynamics, chemically-reacting flows, and many others. |
2575 |
+ </longdescription> |
2576 |
+ <upstream> |
2577 |
+ <remote-id type="github">su2code/SU2</remote-id> |
2578 |
+ </upstream> |
2579 |
+</pkgmetadata> |