mpi4pyExample.py
You can view and download this file on GitHub: mpi4pyExample.py
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This is an EXUDYN example
#
# Details:  This is an example for mpi4py
#
# on linux/WSL run with:
# mpiexec -n 9 python3 -m mpi4py.futures mpi4pyExample.py
# here, n=9 means 8 worker processes plus 1 process running the main script;
# on a 4-core/8-thread CPU, the optimum is reached with n=9 (1 core running at 15%, all other cores at around 95%)
#
# troubleshooting: you need to install mpi4py with conda; if your code starts n times, uninstall
# all mpi4py versions (also versions installed with pip; remove them with python -m pip uninstall mpi4py)
# MAY NOT run with virtual environments (best results with conda base, Python 3.9 under linux/WSL)
#
# Author:   Johannes Gerstmayr
# Date:     2023-03-17
#
# Copyright: This file is part of Exudyn. Exudyn is free software. You can redistribute it and/or modify it under the terms of the Exudyn license. See 'LICENSE.txt' for more details.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


import exudyn as exu
from exudyn.utilities import * #includes itemInterface and rigidBodyUtilities
import exudyn.graphics as graphics #only import if it does not conflict
from exudyn.processing import *
import time

import numpy as np
import sys

useMPI = True #True requires mpi4py to be installed


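#optional sanity check (an addition, not part of the original example), assuming mpi4py
#is importable; it uses the same MPI calls as the manual implementation at the end of
#this file: if the world size prints as 1 although started with mpiexec -n 9, or the
#message appears several times, the mpi4py installation is broken (see header notes)
if useMPI:
    from mpi4py import MPI
    if MPI.COMM_WORLD.Get_rank() == 0: #only the main process prints
        print('MPI world size =', MPI.COMM_WORLD.Get_size())
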
#function which creates and runs the model; executed in parallel!
def TestExudyn(parameterDict):

    #create an environment for the mini example
    SC = exu.SystemContainer()
    mbs = SC.AddSystem()

    #default values; overwritten by the parameters passed in parameterDict:
    x = 1
    y = 1000
    computationIndex = 0
    x = parameterDict['mass']
    y = parameterDict['stiffness']

    oGround = mbs.AddObject(ObjectGround(referencePosition=[0,0,0]))
    nGround = mbs.AddNode(NodePointGround(referenceCoordinates=[0,0,0]))

    #single 1D mass point; initial coordinate and velocity depend on the parameters
    node = mbs.AddNode(Node1D(referenceCoordinates=[0],
                              initialCoordinates=[(x-0.5)**2],
                              initialVelocities=[(y-0.2)**2]))
    mass = mbs.AddObject(Mass1D(nodeNumber=node, physicsMass=1))

    #assemble and solve system for the given parameters
    mbs.Assemble()
    #exu.SolveDynamic(mbs, exu.SimulationSettings())

    h = 1e-3
    tEnd = 100 #nominal: 10
    #tEnd = 1000
    simulationSettings = exu.SimulationSettings()
    simulationSettings.timeIntegration.numberOfSteps = int(tEnd/h)
    simulationSettings.timeIntegration.endTime = tEnd
    simulationSettings.solutionSettings.writeSolutionToFile = False #no concurrent writing to files ...!
    #exu.StartRenderer() #don't do this in parallelization: will crash
    exu.SolveDynamic(mbs, simulationSettings)
    #exu.StopRenderer() #don't do this in parallelization: will crash

    #check result: get current mass position at local position [0,0,0]
    result = mbs.GetObjectOutputBody(mass, exu.OutputVariableType.Position, [0,0,0])[0]
    #print("result ",x, "=",result)

    del mbs #don't forget to delete variables, otherwise memory may leak significantly
    del SC
    #final x-coordinate of position shall be 2
    return result

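#a usage sketch (an addition, not part of the original example): evaluate the model once
#in serial to verify it before the parallel run; 'runSerialTest' is a hypothetical flag,
#mass and stiffness are arbitrary test values within the variation range used below
runSerialTest = False #set True for a quick serial check
if __name__ == '__main__' and runSerialTest:
    print('serial test:', TestExudyn({'mass':1., 'stiffness':1000.}))
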
#now run parallelized parameter variation;
#make sure that this only runs in the main process:
if __name__ == '__main__':
    n = 640
    start_time = time.time()
    print('parameter variation '+'with MPI'*useMPI)
    [p,v] = ParameterVariation(parameterFunction = TestExudyn,
                               parameters = {'mass':(1.,1.,1), 'stiffness':(1000,2000,n)},
                               # debugMode=True,
                               addComputationIndex = True,
                               useMultiProcessing = True,
                               showProgress = True,
                               #numberOfThreads=8, #automatically determined by mpi4py routines in ParameterVariationList(...)
                               resultsFile = 'solution/resultsMPI.txt',
                               useMPI = useMPI,
                               )
    print("--- %s seconds ---" % (time.time() - start_time))
    #print("values=",v)
    print('sum=',np.array(v).sum()) #gives sum= 14931163024.24202 with default values
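
    #a post-processing sketch (an addition, not part of the original example):
    #ParameterVariation returns the varied parameters p (here assumed to be a dict of
    #value lists) and the list of scalar results v; matplotlib is assumed available
    if False: #set to True to plot the results over the varied stiffness
        import matplotlib.pyplot as plt
        plt.plot(p['stiffness'], v)
        plt.xlabel('stiffness')
        plt.ylabel('final mass position')
        plt.show()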


# old, manual implementation of parameter variation with mpi
# if useMPI:
#     import mpi4py
#     from mpi4py import MPI
#
#     comm = MPI.COMM_WORLD
#     nprocs = comm.Get_size()
#     rank = comm.Get_rank()
#     print('rank=', rank, ', size=', nprocs)
#
#     from mpi4py.futures import MPIPoolExecutor
#
#
# if __name__ == '__main__' and useMPI:
#     #MPI.Init() # manual initialization of the MPI environment
#     print('mpi4py test program\n')
#     x=[]
#     y=np.arange(1,10)
#     #executor = MPIPoolExecutor(max_workers=8)
#     executor = MPIPoolExecutor()
#     #for result in executor.map(fmpi, [1,2,3,4]):
#     for i in range(n):
#         x+=[{'mass':1,
#              'stiffness':1000+1000*i/(n-1),
#              'computationIndex':i}]
#     #print('x=',x)
#     v=[]
#     if False:
#         start_time = time.time()
#         for result in executor.map(TestExudyn, x):
#             v.append(result)
#         print("--- %s seconds ---" % (time.time() - start_time))
#     else:
#         nVariations=n
#         import tqdm #progress bar
#         try: #_instances only available after first run!
#             tqdm.tqdm._instances.clear() #if open instances of tqdm, which leads to nasty newline
#         except:
#             pass
#         useTQDM = True

#         start_time = time.time()
#         #for v in (tqdm.tqdm(p.imap(parameterFunction, vInput), total=nVariations)):
#         for result in (tqdm.tqdm(executor.map(TestExudyn, x), total=nVariations)):
#             v.append(result)
#         print("--- %s seconds ---" % (time.time() - start_time))

#         #print('rank=',rank)
#         print('sum=',np.array(v).sum())
#         #MPI.Finalize() # manual finalization of the MPI environment