Rocstar  1.0
Rocstar multiphysics simulation application
v3d10_nl_arruda_boyce_damping.f90
1 !*********************************************************************
2 !* Illinois Open Source License *
3 !* *
4 !* University of Illinois/NCSA *
5 !* Open Source License *
6 !* *
7 !* Copyright@2008, University of Illinois. All rights reserved. *
8 !* *
9 !* Developed by: *
10 !* *
11 !* Center for Simulation of Advanced Rockets *
12 !* *
13 !* University of Illinois *
14 !* *
15 !* www.csar.uiuc.edu *
16 !* *
17 !* Permission is hereby granted, free of charge, to any person *
18 !* obtaining a copy of this software and associated documentation *
19 !* files (the "Software"), to deal with the Software without *
20 !* restriction, including without limitation the rights to use, *
21 !* copy, modify, merge, publish, distribute, sublicense, and/or *
22 !* sell copies of the Software, and to permit persons to whom the *
23 !* Software is furnished to do so, subject to the following *
24 !* conditions: *
25 !* *
26 !* *
27 !* @ Redistributions of source code must retain the above copyright *
28 !* notice, this list of conditions and the following disclaimers. *
29 !* *
30 !* @ Redistributions in binary form must reproduce the above *
31 !* copyright notice, this list of conditions and the following *
32 !* disclaimers in the documentation and/or other materials *
33 !* provided with the distribution. *
34 !* *
35 !* @ Neither the names of the Center for Simulation of Advanced *
36 !* Rockets, the University of Illinois, nor the names of its *
37 !* contributors may be used to endorse or promote products derived *
38 !* from this Software without specific prior written permission. *
39 !* *
40 !* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, *
41 !* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES *
42 !* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND *
43 !* NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR *
44 !* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
45 !* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
46 !* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE *
47 !* USE OR OTHER DEALINGS WITH THE SOFTWARE. *
48 !*********************************************************************
49 !* Please acknowledge The University of Illinois Center for *
50 !* Simulation of Advanced Rockets in works and publications *
51 !* resulting from this software or its derivatives. *
52 !*********************************************************************
53 SUBROUTINE v3d10_nl_arruda_boyce_damping(coor,matcstet,lmcstet,R_in,d, &
54  s11,s22,s33,s12,s23,s13, &
55  numnp,nstart,nend,numcstet,numat_vol, &
56  mu,kappa, rho, cd_fastest, detfold, velo, dt)
57 
58 !________________________________________________________________________
59 !
60 ! V3D10 - Performs displacement-based computations for Volumetric 3D,
61 ! 10-node tetrahedra with quadratic interpolation functions
62 ! (linear-strain tetrahedra) and the Arruda-Boyce material model.
63 ! Large Deformation. Returns the internal force vector R_in.
64 !
65 ! DATE: 10.2001 AUTHOR: SCOT BREITENFELD
66 !________________________________________________________________________
67 
68  IMPLICIT NONE
69 !-----Global variables
70  INTEGER :: numnp ! number of nodes
71  INTEGER :: numat_vol ! number of volumetric materials
72  INTEGER :: numcstet ! number of LSTets
73 !-- coordinate array
74  REAL*8, DIMENSION(1:3,1:numnp) :: coor
75 !-- internal force
76  REAL*8, DIMENSION(1:3*numnp) :: r_in
77 !-- displacement vector
78  REAL*8, DIMENSION(1:3*numnp) :: d
79 !-- CSTet stress
80  REAL*8, DIMENSION(1:4,1:numcstet) :: s11, s22, s33, s12, s23, s13
81 !-- connectivity table for CSTet
82  INTEGER, DIMENSION(1:10,1:numcstet) :: lmcstet
83 !-- mat number for CSTet element
84  INTEGER, DIMENSION(1:numcstet) :: matcstet
85 !---- Local variables
86 !-- node numbers
87  INTEGER :: n1,n2,n3,n4,n5,n6,n7,n8,n9,n10
88 !-- x, y, and z displacements of nodes
89  REAL*8 :: u1,u2,u3,u4,u5,u6,u7,u8,u9,u10
90  REAL*8 :: v1,v2,v3,v4,v5,v6,v7,v8,v9,v10
91  REAL*8 :: w1,w2,w3,w4,w5,w6,w7,w8,w9,w10
92 !-- 6*volume and its inverse
93  REAL*8 :: vx6,vx6inv
94 !-- spatial derivatives
95  REAL*8 :: b1,b2,b3,b4,b5,b6,b7,b8,b9,b10
96  REAL*8 :: b11,b12,b13,b14,b15,b16,b17,b18,b19,b20
97  REAL*8 :: b21,b22,b23,b24,b25,b26,b27,b28,b29,b30
98 !-- partial derivatives of the displacement
99  REAL*8 :: dudx,dvdy,dwdz,dudy,dvdx,dvdz,dwdy,dudz,dwdx
100 !-- strains
101  REAL*8 :: e11,e22,e33,e12,e23,e13
102 !-- coordinate holding variable
103  REAL*8 :: x1,x2,x3,x4,x5,x6,x7,x8,x9,x10
104  REAL*8 :: y1,y2,y3,y4,y5,y6,y7,y8,y9,y10
105  REAL*8 :: z1,z2,z3,z4,z5,z6,z7,z8,z9,z10
106 !-- dummy and counters
107  INTEGER :: i,j,nstart,nend
108  REAL*8 :: aux1,aux2,aux3,aux4,aux5,aux6,aux7,aux8,aux9,aux10,aux11,aux12
109 !-- partial internal force
110  REAL*8 :: r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17,r18
111  REAL*8 :: r19,r20,r21,r22,r23,r24,r25,r26,r27,r28,r29,r30
112  REAL*8 :: g1, g2, g3, g4
113  REAL*8 :: xn1, xn2, xn3, xn4
114 
115  INTEGER :: k1n1,k1n2,k1n3,k1n4,k1n5,k1n6,k1n7,k1n8,k1n9,k1n10
116  INTEGER :: k2n1,k2n2,k2n3,k2n4,k2n5,k2n6,k2n7,k2n8,k2n9,k2n10
117  INTEGER :: k3n1,k3n2,k3n3,k3n4,k3n5,k3n6,k3n7,k3n8,k3n9,k3n10
118 ! --
119  REAL*8 :: f11, f12, f13, f21, f22, f23, f31, f32, f33
120 
121 !-- Coordinate subtractions
122  REAL*8 :: x14, x24, x34, y14, y24, y34, z14, z24, z34
123 !-- Coordinate subtractions: These are to speed up B calculation
124  REAL*8 :: x12, x13, y12, y13, z12, z13
125  REAL*8 :: val11, val21, val31
126  REAL*8, DIMENSION(1:numat_vol) :: mu, kappa, rho
127  real*8 :: cd_fastest
128  real*8 , DIMENSION(1:4, 1:numcstet) :: detfold
129 !-- velocity vector
130  REAL*8, DIMENSION(1:3*numnp) :: velo
131  real*8 :: dt
132 
133  REAL*8 :: strssvisco(3,3)
134 
135 !-- x, y, and z velocities of nodes
136  REAL*8 :: vel_u1,vel_u2,vel_u3,vel_u4,vel_u5,vel_u6,vel_u7,vel_u8,vel_u9,vel_u10
137  REAL*8 :: vel_v1,vel_v2,vel_v3,vel_v4,vel_v5,vel_v6,vel_v7,vel_v8,vel_v9,vel_v10
138  REAL*8 :: vel_w1,vel_w2,vel_w3,vel_w4,vel_w5,vel_w6,vel_w7,vel_w8,vel_w9,vel_w10
139 
140  REAL*8 :: f(3,3), fdot(3,3)
141 
142  REAL*8,DIMENSION(1:4,1:4) :: gaussintpt = reshape( &
143  (/0.58541020d0,0.13819660d0,0.13819660d0,0.13819660d0, &
144  0.13819660d0,0.58541020d0,0.13819660d0,0.13819660d0, &
145  0.13819660d0,0.13819660d0,0.58541020d0,0.13819660d0, &
146  0.13819660d0,0.13819660d0,0.13819660d0,0.58541020d0/),(/4,4/) )
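! The rows of gaussintpt are the barycentric (volume) coordinates of the
! standard 4-point Gauss rule for a tetrahedron:
! 0.58541020d0 = (5 + 3*sqrt(5))/20 and 0.13819660d0 = (5 - sqrt(5))/20.
! Each point carries weight 1/4, and the rule is exact for quadratic integrands.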
147 
148  INTEGER :: igpt
149 
150  REAL*8, DIMENSION(1:3,1:3) :: fij, cij
151  INTEGER :: kk,ll,mm
152 
153  DO i = nstart, nend
154 
155  j = matcstet(i)
156 
157  n1 = lmcstet(1,i)
158  n2 = lmcstet(2,i)
159  n3 = lmcstet(3,i)
160  n4 = lmcstet(4,i)
161  n5 = lmcstet(5,i)
162  n6 = lmcstet(6,i)
163  n7 = lmcstet(7,i)
164  n8 = lmcstet(8,i)
165  n9 = lmcstet(9,i)
166  n10 = lmcstet(10,i)
167 
168  k3n1 = 3*n1
169  k3n2 = 3*n2
170  k3n3 = 3*n3
171  k3n4 = 3*n4
172  k3n5 = 3*n5
173  k3n6 = 3*n6
174  k3n7 = 3*n7
175  k3n8 = 3*n8
176  k3n9 = 3*n9
177  k3n10 = 3*n10
178 
179  k2n1 = k3n1 - 1
180  k2n2 = k3n2 - 1
181  k2n3 = k3n3 - 1
182  k2n4 = k3n4 - 1
183  k2n5 = k3n5 - 1
184  k2n6 = k3n6 - 1
185  k2n7 = k3n7 - 1
186  k2n8 = k3n8 - 1
187  k2n9 = k3n9 - 1
188  k2n10 = k3n10 - 1
189 
190  k1n1 = k3n1 - 2
191  k1n2 = k3n2 - 2
192  k1n3 = k3n3 - 2
193  k1n4 = k3n4 - 2
194  k1n5 = k3n5 - 2
195  k1n6 = k3n6 - 2
196  k1n7 = k3n7 - 2
197  k1n8 = k3n8 - 2
198  k1n9 = k3n9 - 2
199  k1n10 = k3n10 - 2
200  ! k#n# dummy variables replace:
201  u1 = d(k1n1) ! (3*n1 -2)
202  u2 = d(k1n2) ! (3*n2 -2)
203  u3 = d(k1n3) ! (3*n3 -2)
204  u4 = d(k1n4) ! (3*n4 -2)
205  u5 = d(k1n5) ! (3*n5 -2)
206  u6 = d(k1n6) ! (3*n6 -2)
207  u7 = d(k1n7) ! (3*n7 -2)
208  u8 = d(k1n8) ! (3*n8 -2)
209  u9 = d(k1n9) ! (3*n9 -2)
210  u10 = d(k1n10) ! (3*n10-2)
211  v1 = d(k2n1) ! (3*n1 -1)
212  v2 = d(k2n2) ! (3*n2 -1)
213  v3 = d(k2n3) ! (3*n3 -1)
214  v4 = d(k2n4) ! (3*n4 -1)
215  v5 = d(k2n5) ! (3*n5 -1)
216  v6 = d(k2n6) ! (3*n6 -1)
217  v7 = d(k2n7) ! (3*n7 -1)
218  v8 = d(k2n8) ! (3*n8 -1)
219  v9 = d(k2n9) ! (3*n9 -1)
220  v10 = d(k2n10) ! (3*n10-1)
221  w1 = d(k3n1) ! (3*n1)
222  w2 = d(k3n2) ! (3*n2)
223  w3 = d(k3n3) ! (3*n3)
224  w4 = d(k3n4) ! (3*n4)
225  w5 = d(k3n5) ! (3*n5)
226  w6 = d(k3n6) ! (3*n6)
227  w7 = d(k3n7) ! (3*n7)
228  w8 = d(k3n8) ! (3*n8)
229  w9 = d(k3n9) ! (3*n9)
230  w10 = d(k3n10) ! (3*n10)
231 
232  vel_u1 = velo(k1n1) ! (3*n1 -2)
233  vel_u2 = velo(k1n2) ! (3*n2 -2)
234  vel_u3 = velo(k1n3) ! (3*n3 -2)
235  vel_u4 = velo(k1n4) ! (3*n4 -2)
236  vel_u5 = velo(k1n5) ! (3*n5 -2)
237  vel_u6 = velo(k1n6) ! (3*n6 -2)
238  vel_u7 = velo(k1n7) ! (3*n7 -2)
239  vel_u8 = velo(k1n8) ! (3*n8 -2)
240  vel_u9 = velo(k1n9) ! (3*n9 -2)
241  vel_u10 = velo(k1n10) ! (3*n10-2)
242  vel_v1 = velo(k2n1) ! (3*n1 -1)
243  vel_v2 = velo(k2n2) ! (3*n2 -1)
244  vel_v3 = velo(k2n3) ! (3*n3 -1)
245  vel_v4 = velo(k2n4) ! (3*n4 -1)
246  vel_v5 = velo(k2n5) ! (3*n5 -1)
247  vel_v6 = velo(k2n6) ! (3*n6 -1)
248  vel_v7 = velo(k2n7) ! (3*n7 -1)
249  vel_v8 = velo(k2n8) ! (3*n8 -1)
250  vel_v9 = velo(k2n9) ! (3*n9 -1)
251  vel_v10 = velo(k2n10) ! (3*n10-1)
252  vel_w1 = velo(k3n1) ! (3*n1)
253  vel_w2 = velo(k3n2) ! (3*n2)
254  vel_w3 = velo(k3n3) ! (3*n3)
255  vel_w4 = velo(k3n4) ! (3*n4)
256  vel_w5 = velo(k3n5) ! (3*n5)
257  vel_w6 = velo(k3n6) ! (3*n6)
258  vel_w7 = velo(k3n7) ! (3*n7)
259  vel_w8 = velo(k3n8) ! (3*n8)
260  vel_w9 = velo(k3n9) ! (3*n9)
261  vel_w10 = velo(k3n10) ! (3*n10)
262 
263  x1 = coor(1,n1)
264  x2 = coor(1,n2)
265  x3 = coor(1,n3)
266  x4 = coor(1,n4)
267  y1 = coor(2,n1)
268  y2 = coor(2,n2)
269  y3 = coor(2,n3)
270  y4 = coor(2,n4)
271  z1 = coor(3,n1)
272  z2 = coor(3,n2)
273  z3 = coor(3,n3)
274  z4 = coor(3,n4)
275 
276  x12 = x1 - x2 ! not used in vol. calc
277  x13 = x1 - x3 ! not used in vol. calc
278  x14 = x1 - x4
279  x24 = x2 - x4
280  x34 = x3 - x4
281  y12 = y1 - y2 ! not used in vol. calc
282  y13 = y1 - y3 ! not used in vol. calc
283  y14 = y1 - y4
284  y24 = y2 - y4
285  y34 = y3 - y4
286  z12 = z1 - z2 ! not used in vol. calc
287  z13 = z1 - z3 ! not used in vol. calc
288  z14 = z1 - z4
289  z24 = z2 - z4
290  z34 = z3 - z4
291 
292  val11 = y24*z34 - z24*y34
293  val21 = -( x24*z34 - z24*x34 )
294  val31 = x24*y34 - y24*x34
295 
296  vx6 = -( x14*val11 + y14*val21 + z14*val31 )
297 
298  vx6inv = 1.d0 / vx6
299 
300  aux1 = -(y3*z4 - y4*z3 - y2*z4 + y2*z3 + z2*y4 - z2*y3)
301  aux2 = (x3*z4 - x4*z3 - x2*z4 + x2*z3 + z2*x4 - z2*x3)
302  aux3 = -(x3*y4 - x4*y3 - x2*y4 + x2*y3 + y2*x4 - y2*x3)
303  aux4 = (y3*z4 - y4*z3 - y1*z4 + y1*z3 + z1*y4 - z1*y3)
304  aux5 = -(x3*z4 - x4*z3 - x1*z4 + x1*z3 + z1*x4 - z1*x3)
305  aux6 = (x3*y4 - x4*y3 - x1*y4 + x1*y3 + y1*x4 - y1*x3)
306  aux7 = -(y2*z4 - z2*y4 - y1*z4 + y1*z2 + z1*y4 - z1*y2)
307  aux8 = (x2*z4 - z2*x4 - x1*z4 + x1*z2 + z1*x4 - z1*x2)
308  aux9 = -(x2*y4 - y2*x4 - x1*y4 + x1*y2 + y1*x4 - y1*x2)
309  aux10 = (y2*z3 - z2*y3 - y1*z3 + y1*z2 + z1*y3 - z1*y2)
310  aux11 =-(x2*z3 - z2*x3 - x1*z3 + x1*z2 + z1*x3 - z1*x2)
311  aux12 = (x2*y3 - y2*x3 - x1*y3 + x1*y2 + y1*x3 - y1*x2)
312 
313  r1 = 0.d0
314  r2 = 0.d0
315  r3 = 0.d0
316  r4 = 0.d0
317  r5 = 0.d0
318  r6 = 0.d0
319  r7 = 0.d0
320  r8 = 0.d0
321  r9 = 0.d0
322  r10 = 0.d0
323  r11 = 0.d0
324  r12 = 0.d0
325  r13 = 0.d0
326  r14 = 0.d0
327  r15 = 0.d0
328  r16 = 0.d0
329  r17 = 0.d0
330  r18 = 0.d0
331  r19 = 0.d0
332  r20 = 0.d0
333  r21 = 0.d0
334  r22 = 0.d0
335  r23 = 0.d0
336  r24 = 0.d0
337  r25 = 0.d0
338  r26 = 0.d0
339  r27 = 0.d0
340  r28 = 0.d0
341  r29 = 0.d0
342  r30 = 0.d0
343 
344  DO igpt = 1, 4
345 
346  g1 = gaussintpt(igpt,1)
347  g2 = gaussintpt(igpt,2)
348  g3 = gaussintpt(igpt,3)
349  g4 = gaussintpt(igpt,4)
350 
351 
352  xn1 = (4.d0*g1-1.d0) ! derivative of shape function
353  xn2 = (4.d0*g2-1.d0) ! dN_i/dzeta_i
354  xn3 = (4.d0*g3-1.d0)
355  xn4 = (4.d0*g4-1.d0)
356 ! xN5 = 4.d0*g1*g2
357 ! xN6 = 4.d0*g2*g3
358 ! xN7 = 4.d0*g3*g1
359 ! xN8 = 4.d0*g1*g4
360 ! xN9 = 4.d0*g2*g4
361 ! xN10= 4.d0*g3*g4
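! For reference: the corner shape functions of the 10-node tetrahedron are
! N_i = g_i*(2*g_i - 1), whose derivatives are dN_i/dg_i = 4*g_i - 1 (the
! xn_i above); the mid-edge functions are N_ab = 4*g_a*g_b (xN5..xN10).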
362 
363  b1 = aux1*xn1
364  b2 = aux2*xn1
365  b3 = aux3*xn1
366  b4 = aux4*xn2
367  b5 = aux5*xn2
368  b6 = aux6*xn2
369  b7 = aux7*xn3
370  b8 = aux8*xn3
371  b9 = aux9*xn3
372  b10 = aux10*xn4
373  b11 = aux11*xn4
374  b12 = aux12*xn4
375 
376  b13 = 4.d0*(g2*aux1 + g1*aux4)
377  b14 = 4.d0*(g2*aux2 + g1*aux5)
378  b15 = 4.d0*(g2*aux3 + g1*aux6)
379 
380  b16 = 4.d0*(g3*aux4 + g2*aux7)
381  b17 = 4.d0*(g3*aux5 + g2*aux8)
382  b18 = 4.d0*(g3*aux6 + g2*aux9)
383 
384  b19 = 4.d0*(g1*aux7 + g3*aux1)
385  b20 = 4.d0*(g1*aux8 + g3*aux2)
386  b21 = 4.d0*(g1*aux9 + g3*aux3)
387 
388  b22 = 4.d0*(g4*aux1 + g1*aux10)
389  b23 = 4.d0*(g4*aux2 + g1*aux11)
390  b24 = 4.d0*(g4*aux3 + g1*aux12)
391 
392  b25 = 4.d0*(g4*aux4 + g2*aux10)
393  b26 = 4.d0*(g4*aux5 + g2*aux11)
394  b27 = 4.d0*(g4*aux6 + g2*aux12)
395 
396  b28 = 4.d0*(g4*aux7 + g3*aux10)
397  b29 = 4.d0*(g4*aux8 + g3*aux11)
398  b30 = 4.d0*(g4*aux9 + g3*aux12)
399 
400 
401 !-----Calculate displacement gradient (H)
402  dudx = (b1*u1 + b4*u2 + b7*u3 + b10*u4 + b13*u5 + b16*u6 + b19*u7 + b22*u8 + b25*u9 + b28*u10)*vx6inv
403  dvdy = (b2*v1 + b5*v2 + b8*v3 + b11*v4 + b14*v5 + b17*v6 + b20*v7 + b23*v8 + b26*v9 + b29*v10)*vx6inv
404  dwdz = (b3*w1 + b6*w2 + b9*w3 + b12*w4 + b15*w5 + b18*w6 + b21*w7 + b24*w8 + b27*w9 + b30*w10)*vx6inv
405  dudy = (b2*u1 + b5*u2 + b8*u3 + b11*u4 + b14*u5 + b17*u6 + b20*u7 + b23*u8 + b26*u9 + b29*u10)*vx6inv
406  dvdx = (b1*v1 + b4*v2 + b7*v3 + b10*v4 + b13*v5 + b16*v6 + b19*v7 + b22*v8 + b25*v9 + b28*v10)*vx6inv
407  dvdz = (b3*v1 + b6*v2 + b9*v3 + b12*v4 + b15*v5 + b18*v6 + b21*v7 + b24*v8 + b27*v9 + b30*v10)*vx6inv
408  dwdy = (b2*w1 + b5*w2 + b8*w3 + b11*w4 + b14*w5 + b17*w6 + b20*w7 + b23*w8 + b26*w9 + b29*w10)*vx6inv
409  dudz = (b3*u1 + b6*u2 + b9*u3 + b12*u4 + b15*u5 + b18*u6 + b21*u7 + b24*u8 + b27*u9 + b30*u10)*vx6inv
410  dwdx = (b1*w1 + b4*w2 + b7*w3 + b10*w4 + b13*w5 + b16*w6 + b19*w7 + b22*w8 + b25*w9 + b28*w10)*vx6inv
411 
412 !
413 ! deformation gradients F
414 !
415  f11 = 1.d0 + ( dudx )
416  f22 = 1.d0 + ( dvdy )
417  f33 = 1.d0 + ( dwdz )
418  f12 = dudy
419  f21 = dvdx
420  f23 = dvdz
421  f32 = dwdy
422  f13 = dudz
423  f31 = dwdx
424 
425  f(1,1) = f11
426  f(2,2) = f22
427  f(3,3) = f33
428  f(1,2) = f12
429  f(1,3) = f13
430  f(2,1) = f21
431  f(2,3) = f23
432  f(3,1) = f31
433  f(3,2) = f32
434 
435  fdot(1,1) = 1.d0 + &
436  (b1*vel_u1 + b4*vel_u2 + b7*vel_u3 + b10*vel_u4 + b13*vel_u5 + &
437  b16*vel_u6 + b19*vel_u7 + b22*vel_u8 + b25*vel_u9 + b28*vel_u10)*vx6inv
438  fdot(2,2) = 1.d0 + &
439  (b2*vel_v1 + b5*vel_v2 + b8*vel_v3 + b11*vel_v4 + b14*vel_v5 + &
440  b17*vel_v6 + b20*vel_v7 + b23*vel_v8 + b26*vel_v9 + b29*vel_v10)*vx6inv
441  fdot(3,3) = 1.d0 + &
442  (b3*vel_w1 + b6*vel_w2 + b9*vel_w3 + b12*vel_w4 + b15*vel_w5 + &
443  b18*vel_w6 + b21*vel_w7 + b24*vel_w8 + b27*vel_w9 + b30*vel_w10)*vx6inv
444  fdot(1,2) = (b2*vel_u1 + b5*vel_u2 + b8*vel_u3 + b11*vel_u4 + b14*vel_u5 + &
445  b17*vel_u6 + b20*vel_u7 + b23*vel_u8 + b26*vel_u9 + b29*vel_u10)*vx6inv
446  fdot(2,1) = (b1*vel_v1 + b4*vel_v2 + b7*vel_v3 + b10*vel_v4 + b13*vel_v5 + &
447  b16*vel_v6 + b19*vel_v7 + b22*vel_v8 + b25*vel_v9 + b28*vel_v10)*vx6inv
448  fdot(2,3) = (b3*vel_v1 + b6*vel_v2 + b9*vel_v3 + b12*vel_v4 + b15*vel_v5 + &
449  b18*vel_v6 + b21*vel_v7 + b24*vel_v8 + b27*vel_v9 + b30*vel_v10)*vx6inv
450  fdot(3,2) = (b2*vel_w1 + b5*vel_w2 + b8*vel_w3 + b11*vel_w4 + b14*vel_w5 + &
451  b17*vel_w6 + b20*vel_w7 + b23*vel_w8 + b26*vel_w9 + b29*vel_w10)*vx6inv
452  fdot(1,3) = (b3*vel_u1 + b6*vel_u2 + b9*vel_u3 + b12*vel_u4 + b15*vel_u5 + &
453  b18*vel_u6 + b21*vel_u7 + b24*vel_u8 + b27*vel_u9 + b30*vel_u10)*vx6inv
454  fdot(3,1) = (b1*vel_w1 + b4*vel_w2 + b7*vel_w3 + b10*vel_w4 + b13*vel_w5 + &
455  b16*vel_w6 + b19*vel_w7 + b22*vel_w8 + b25*vel_w9 + b28*vel_w10)*vx6inv
456 
457 
458  CALL artificialdamping(numcstet, numnp, &
459  rho(j), cd_fastest, detfold(igpt,i), dt, f, fdot, vx6, strssvisco)
460 
461 !
462 ! Arruda-Boyce Nonlinear Elasticity Model
463 !
464 ! -- NOTE: mu(j) : shear modulus
465 
466 !-- Right Cauchy-Green deformation tensor:
467 !
468 !   C = F^T F,  i.e.  Cij = Fmi Fmj
469 
470  DO kk=1,3
471  DO ll=1,3
472  cij(kk,ll) = 0.d0
473  DO mm=1,3
474  cij(kk,ll)=cij(kk,ll)+f(mm,kk)*f(mm,ll)
475  ENDDO
476  ENDDO
477  ENDDO
478 
479  CALL arruda_boyce(cij, &
480  s11(igpt,i),s22(igpt,i),s33(igpt,i),s12(igpt,i),s23(igpt,i),s13(igpt,i),i, &
481  mu(j),kappa(j))
482 
483  s11(igpt,i) = s11(igpt,i) + strssvisco(1,1)
484  s22(igpt,i) = s22(igpt,i) + strssvisco(2,2)
485  s33(igpt,i) = s33(igpt,i) + strssvisco(3,3)
486  s12(igpt,i) = s12(igpt,i) + strssvisco(1,2)
487  s13(igpt,i) = s13(igpt,i) + strssvisco(1,3)
488  s23(igpt,i) = s23(igpt,i) + strssvisco(2,3)
489 
490  r1 = r1 + &
491  ( s11(igpt,i)*b1*(1.d0+dudx) + s22(igpt,i)*b2*dudy + s33(igpt,i)*b3*dudz &
492  + s12(igpt,i)*( b2*(1.d0+dudx) + b1*dudy ) &
493  + s23(igpt,i)*( b3*dudy + b2*dudz ) &
494  + s13(igpt,i)*( b3*(1.d0+dudx) + b1*dudz ) )
495  r2 = r2 +&
496  ( s11(igpt,i)*b1*dvdx + s22(igpt,i)*b2*(1.d0+dvdy) + s33(igpt,i)*b3*dvdz &
497  + s12(igpt,i)*( b1*(1.d0+dvdy) + b2*dvdx ) &
498  + s23(igpt,i)*( b3*(1.d0+dvdy) + b2*dvdz ) &
499  + s13(igpt,i)*( b3*dvdx + b1*dvdz ) )
500  r3 = r3 + &
501  ( s11(igpt,i)*b1*dwdx + s22(igpt,i)*b2*dwdy + s33(igpt,i)*b3*(1.d0+dwdz) &
502  + s12(igpt,i)*( b2*dwdx + b1*dwdy ) &
503  + s23(igpt,i)*( b3*dwdy + b2*(1.d0 + dwdz) ) &
504  + s13(igpt,i)*( b3*dwdx + b1*(1.d0 + dwdz) ) )
505 
506  r4 = r4 + &
507  ( s11(igpt,i)*b4*(1.d0+dudx) + s22(igpt,i)*b5*dudy + s33(igpt,i)*b6*dudz &
508  + s12(igpt,i)*( b5*(1.d0+dudx) + b4*dudy ) &
509  + s23(igpt,i)*( b6*dudy + b5*dudz ) &
510  + s13(igpt,i)*( b6*(1.d0+dudx) + b4*dudz ) )
511  r5 = r5 + &
512  ( s11(igpt,i)*b4*dvdx + s22(igpt,i)*b5*(1.d0+dvdy) + s33(igpt,i)*b6*dvdz &
513  + s12(igpt,i)*( b4*(1.d0+dvdy) + b5*dvdx ) &
514  + s23(igpt,i)*( b6*(1.d0+dvdy) + b5*dvdz ) &
515  + s13(igpt,i)*( b6*dvdx + b4*dvdz ) )
516  r6 = r6 + &
517  ( s11(igpt,i)*b4*dwdx + s22(igpt,i)*b5*dwdy + s33(igpt,i)*b6*(1.d0+dwdz) &
518  + s12(igpt,i)*( b5*dwdx + b4*dwdy ) &
519  + s23(igpt,i)*( b6*dwdy + b5*(1.d0 + dwdz) ) &
520  + s13(igpt,i)*( b6*dwdx + b4*(1.d0 + dwdz) ) )
521 
522  r7 = r7 + &
523  ( s11(igpt,i)*b7*(1.d0+dudx) + s22(igpt,i)*b8*dudy + s33(igpt,i)*b9*dudz &
524  + s12(igpt,i)*( b8*(1.d0+dudx) + b7*dudy ) &
525  + s23(igpt,i)*( b9*dudy + b8*dudz ) &
526  + s13(igpt,i)*( b9*(1.d0+dudx) + b7*dudz ) )
527  r8 = r8 + &
528  ( s11(igpt,i)*b7*dvdx + s22(igpt,i)*b8*(1.d0+dvdy) + s33(igpt,i)*b9*dvdz &
529  + s12(igpt,i)*( b7*(1.d0+dvdy) + b8*dvdx ) &
530  + s23(igpt,i)*( b9*(1.d0+dvdy) + b8*dvdz ) &
531  + s13(igpt,i)*( b9*dvdx + b7*dvdz ) )
532  r9 = r9 + &
533  ( s11(igpt,i)*b7*dwdx + s22(igpt,i)*b8*dwdy + s33(igpt,i)*b9*(1.d0+dwdz) &
534  + s12(igpt,i)*( b8*dwdx + b7*dwdy ) &
535  + s23(igpt,i)*( b9*dwdy + b8*(1.d0 + dwdz) ) &
536  + s13(igpt,i)*( b9*dwdx + b7*(1.d0 + dwdz) ) )
537 
538  r10 = r10 + &
539  ( s11(igpt,i)*b10*(1.d0+dudx) + s22(igpt,i)*b11*dudy+s33(igpt,i)*b12*dudz &
540  + s12(igpt,i)*( b11*(1.d0+dudx) + b10*dudy ) &
541  + s23(igpt,i)*( b12*dudy + b11*dudz ) &
542  + s13(igpt,i)*( b12*(1.d0+dudx) + b10*dudz ) )
543  r11 = r11 + &
544  ( s11(igpt,i)*b10*dvdx + s22(igpt,i)*b11*(1.d0+dvdy)+s33(igpt,i)*b12*dvdz &
545  + s12(igpt,i)*( b10*(1.d0+dvdy) + b11*dvdx ) &
546  + s23(igpt,i)*( b12*(1.d0+dvdy) + b11*dvdz ) &
547  + s13(igpt,i)*( b12*dvdx + b10*dvdz ) )
548  r12 = r12 + &
549  ( s11(igpt,i)*b10*dwdx + s22(igpt,i)*b11*dwdy+s33(igpt,i)*b12*(1.d0+dwdz) &
550  + s12(igpt,i)*( b11*dwdx + b10*dwdy ) &
551  + s23(igpt,i)*( b12*dwdy + b11*(1.d0 + dwdz) ) &
552  + s13(igpt,i)*( b12*dwdx + b10*(1.d0 + dwdz) ) )
553 
554  r13 = r13 + &
555  ( s11(igpt,i)*b13*(1.d0+dudx) + s22(igpt,i)*b14*dudy + s33(igpt,i)*b15*dudz &
556  + s12(igpt,i)*( b14*(1.d0+dudx) + b13*dudy ) &
557  + s23(igpt,i)*( b15*dudy + b14*dudz ) &
558  + s13(igpt,i)*( b15*(1.d0+dudx) + b13*dudz ) )
559  r14 = r14 + &
560  ( s11(igpt,i)*b13*dvdx + s22(igpt,i)*b14*(1.d0+dvdy) + s33(igpt,i)*b15*dvdz &
561  + s12(igpt,i)*( b13*(1.d0+dvdy) + b14*dvdx ) &
562  + s23(igpt,i)*( b15*(1.d0+dvdy) + b14*dvdz ) &
563  + s13(igpt,i)*( b15*dvdx + b13*dvdz ) )
564  r15 = r15 + &
565  ( s11(igpt,i)*b13*dwdx + s22(igpt,i)*b14*dwdy + s33(igpt,i)*b15*(1.d0+dwdz) &
566  + s12(igpt,i)*( b14*dwdx + b13*dwdy ) &
567  + s23(igpt,i)*( b15*dwdy + b14*(1.d0 + dwdz) ) &
568  + s13(igpt,i)*( b15*dwdx + b13*(1.d0 + dwdz) ) )
569 
570  r16 = r16 + &
571  ( s11(igpt,i)*b16*(1.d0+dudx) + s22(igpt,i)*b17*dudy + s33(igpt,i)*b18*dudz &
572  + s12(igpt,i)*( b17*(1.d0+dudx) + b16*dudy ) &
573  + s23(igpt,i)*( b18*dudy + b17*dudz ) &
574  + s13(igpt,i)*( b18*(1.d0+dudx) + b16*dudz ) )
575  r17 = r17 + &
576  ( s11(igpt,i)*b16*dvdx + s22(igpt,i)*b17*(1.d0+dvdy) + s33(igpt,i)*b18*dvdz &
577  + s12(igpt,i)*( b16*(1.d0+dvdy) + b17*dvdx ) &
578  + s23(igpt,i)*( b18*(1.d0+dvdy) + b17*dvdz ) &
579  + s13(igpt,i)*( b18*dvdx + b16*dvdz ) )
580  r18 = r18 + &
581  ( s11(igpt,i)*b16*dwdx + s22(igpt,i)*b17*dwdy + s33(igpt,i)*b18*(1.d0+dwdz) &
582  + s12(igpt,i)*( b17*dwdx + b16*dwdy ) &
583  + s23(igpt,i)*( b18*dwdy + b17*(1.d0 + dwdz) ) &
584  + s13(igpt,i)*( b18*dwdx + b16*(1.d0 + dwdz) ) )
585 
586  r19 = r19 + &
587  ( s11(igpt,i)*b19*(1.d0+dudx) + s22(igpt,i)*b20*dudy + s33(igpt,i)*b21*dudz &
588  + s12(igpt,i)*( b20*(1.d0+dudx) + b19*dudy ) &
589  + s23(igpt,i)*( b21*dudy + b20*dudz ) &
590  + s13(igpt,i)*( b21*(1.d0+dudx) + b19*dudz ) )
591  r20 = r20 + &
592  ( s11(igpt,i)*b19*dvdx + s22(igpt,i)*b20*(1.d0+dvdy) + s33(igpt,i)*b21*dvdz &
593  + s12(igpt,i)*( b19*(1.d0+dvdy) + b20*dvdx ) &
594  + s23(igpt,i)*( b21*(1.d0+dvdy) + b20*dvdz ) &
595  + s13(igpt,i)*( b21*dvdx + b19*dvdz ) )
596  r21 = r21 + &
597  ( s11(igpt,i)*b19*dwdx + s22(igpt,i)*b20*dwdy + s33(igpt,i)*b21*(1.d0+dwdz) &
598  + s12(igpt,i)*( b20*dwdx + b19*dwdy ) &
599  + s23(igpt,i)*( b21*dwdy + b20*(1.d0 + dwdz) ) &
600  + s13(igpt,i)*( b21*dwdx + b19*(1.d0 + dwdz) ) )
601 
602  r22 = r22 + &
603  ( s11(igpt,i)*b22*(1.d0+dudx) + s22(igpt,i)*b23*dudy+s33(igpt,i)*b24*dudz &
604  + s12(igpt,i)*( b23*(1.d0+dudx) + b22*dudy ) &
605  + s23(igpt,i)*( b24*dudy + b23*dudz ) &
606  + s13(igpt,i)*( b24*(1.d0+dudx) + b22*dudz ) )
607  r23 = r23 + &
608  ( s11(igpt,i)*b22*dvdx + s22(igpt,i)*b23*(1.d0+dvdy)+s33(igpt,i)*b24*dvdz &
609  + s12(igpt,i)*( b22*(1.d0+dvdy) + b23*dvdx ) &
610  + s23(igpt,i)*( b24*(1.d0+dvdy) + b23*dvdz ) &
611  + s13(igpt,i)*( b24*dvdx + b22*dvdz ) )
612  r24 = r24 + &
613  ( s11(igpt,i)*b22*dwdx + s22(igpt,i)*b23*dwdy+s33(igpt,i)*b24*(1.d0+dwdz) &
614  + s12(igpt,i)*( b23*dwdx + b22*dwdy ) &
615  + s23(igpt,i)*( b24*dwdy + b23*(1.d0 + dwdz) ) &
616  + s13(igpt,i)*( b24*dwdx + b22*(1.d0 + dwdz) ) )
617 
618  r25 = r25 + &
619  ( s11(igpt,i)*b25*(1.d0+dudx) + s22(igpt,i)*b26*dudy+s33(igpt,i)*b27*dudz &
620  + s12(igpt,i)*( b26*(1.d0+dudx) + b25*dudy ) &
621  + s23(igpt,i)*( b27*dudy + b26*dudz ) &
622  + s13(igpt,i)*( b27*(1.d0+dudx) + b25*dudz ) )
623  r26 = r26 + &
624  ( s11(igpt,i)*b25*dvdx + s22(igpt,i)*b26*(1.d0+dvdy)+s33(igpt,i)*b27*dvdz &
625  + s12(igpt,i)*( b25*(1.d0+dvdy) + b26*dvdx ) &
626  + s23(igpt,i)*( b27*(1.d0+dvdy) + b26*dvdz ) &
627  + s13(igpt,i)*( b27*dvdx + b25*dvdz ) )
628  r27 = r27 + &
629  ( s11(igpt,i)*b25*dwdx + s22(igpt,i)*b26*dwdy+s33(igpt,i)*b27*(1.d0+dwdz) &
630  + s12(igpt,i)*( b26*dwdx + b25*dwdy ) &
631  + s23(igpt,i)*( b27*dwdy + b26*(1.d0 + dwdz) ) &
632  + s13(igpt,i)*( b27*dwdx + b25*(1.d0 + dwdz) ) )
633 
634  r28 = r28 + &
635  ( s11(igpt,i)*b28*(1.d0+dudx) + s22(igpt,i)*b29*dudy+s33(igpt,i)*b30*dudz &
636  + s12(igpt,i)*( b29*(1.d0+dudx) + b28*dudy ) &
637  + s23(igpt,i)*( b30*dudy + b29*dudz ) &
638  + s13(igpt,i)*( b30*(1.d0+dudx) + b28*dudz ) )
639  r29 = r29 + &
640  ( s11(igpt,i)*b28*dvdx + s22(igpt,i)*b29*(1.d0+dvdy)+s33(igpt,i)*b30*dvdz &
641  + s12(igpt,i)*( b28*(1.d0+dvdy) + b29*dvdx ) &
642  + s23(igpt,i)*( b30*(1.d0+dvdy) + b29*dvdz ) &
643  + s13(igpt,i)*( b30*dvdx + b28*dvdz ) )
644  r30 = r30 + &
645  ( s11(igpt,i)*b28*dwdx + s22(igpt,i)*b29*dwdy+s33(igpt,i)*b30*(1.d0+dwdz) &
646  + s12(igpt,i)*( b29*dwdx + b28*dwdy ) &
647  + s23(igpt,i)*( b30*dwdy + b29*(1.d0 + dwdz) ) &
648  + s13(igpt,i)*( b30*dwdx + b28*(1.d0 + dwdz) ) )
649 
650  ENDDO
651 
652 ! Wi (i.e. weight) for 4-point Gauss integration is 1/4
653 
654 ! Wi * 1/6 because the volume of the reference tetrahedron in
655 ! volume coordinates is 1/6, so each contribution below is scaled by
656 ! 1/4 * 1/6 = 1/24 = 0.04166666666666667
657 ! ASSEMBLE THE INTERNAL FORCE VECTOR
658 !
659 ! local node 1
660  r_in(k1n1) = r_in(k1n1) - r1*0.04166666666666667d0
661  r_in(k2n1) = r_in(k2n1) - r2*0.04166666666666667d0
662  r_in(k3n1) = r_in(k3n1) - r3*0.04166666666666667d0
663 ! local node 2
664  r_in(k1n2) = r_in(k1n2) - r4*0.04166666666666667d0
665  r_in(k2n2) = r_in(k2n2) - r5*0.04166666666666667d0
666  r_in(k3n2) = r_in(k3n2) - r6*0.04166666666666667d0
667 ! local node 3
668  r_in(k1n3) = r_in(k1n3) - r7*0.04166666666666667d0
669  r_in(k2n3) = r_in(k2n3) - r8*0.04166666666666667d0
670  r_in(k3n3) = r_in(k3n3) - r9*0.04166666666666667d0
671 ! local node 4
672  r_in(k1n4) = r_in(k1n4) - r10*0.04166666666666667d0
673  r_in(k2n4) = r_in(k2n4) - r11*0.04166666666666667d0
674  r_in(k3n4) = r_in(k3n4) - r12*0.04166666666666667d0
675 ! local node 5
676  r_in(k1n5) = r_in(k1n5) - r13*0.04166666666666667d0
677  r_in(k2n5) = r_in(k2n5) - r14*0.04166666666666667d0
678  r_in(k3n5) = r_in(k3n5) - r15*0.04166666666666667d0
679 ! local node 6
680  r_in(k1n6) = r_in(k1n6) - r16*0.04166666666666667d0
681  r_in(k2n6) = r_in(k2n6) - r17*0.04166666666666667d0
682  r_in(k3n6) = r_in(k3n6) - r18*0.04166666666666667d0
683 ! local node 7
684  r_in(k1n7) = r_in(k1n7) - r19*0.04166666666666667d0
685  r_in(k2n7) = r_in(k2n7) - r20*0.04166666666666667d0
686  r_in(k3n7) = r_in(k3n7) - r21*0.04166666666666667d0
687 ! local node 8
688  r_in(k1n8) = r_in(k1n8) - r22*0.04166666666666667d0
689  r_in(k2n8) = r_in(k2n8) - r23*0.04166666666666667d0
690  r_in(k3n8) = r_in(k3n8) - r24*0.04166666666666667d0
691 ! local node 9
692  r_in(k1n9) = r_in(k1n9) - r25*0.04166666666666667d0
693  r_in(k2n9) = r_in(k2n9) - r26*0.04166666666666667d0
694  r_in(k3n9) = r_in(k3n9) - r27*0.04166666666666667d0
695 ! local node 10
696  r_in(k1n10) = r_in(k1n10) - r28*0.04166666666666667d0
697  r_in(k2n10) = r_in(k2n10) - r29*0.04166666666666667d0
698  r_in(k3n10) = r_in(k3n10) - r30*0.04166666666666667d0
699 
700  ENDDO
701  RETURN
702 END SUBROUTINE v3d10_nl_arruda_boyce_damping
703 
subroutine artificialdamping(numcstet, numnp, rho, cd_fastest, DetFold, dt, F, Fdot, Vx6, StrssVisco)
subroutine arruda_boyce(Cij, S11, S22, S33, S12, S23, S13, ielem, mu, kappa)
subroutine v3d10_nl_arruda_boyce_damping(coor, matcstet, lmcstet, R_in, d, S11, S22, S33, S12, S23, S13, numnp, nstart, nend, numcstet, numat_vol, mu, kappa, rho, cd_fastest, DetFold, velo, dt)
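
A minimal calling sketch follows. It assumes a driver that has already loaded a 10-node tetrahedral mesh into coor, lmcstet and matcstet and advanced d and velo to the current step; the program name, material values, and initialization shown here are illustrative assumptions, not part of Rocstar, and the build must also link arruda_boyce and artificialdamping.

      ! Hypothetical driver sketch; names and values below are assumptions, not Rocstar code.
      PROGRAM drive_v3d10_damping
        IMPLICIT NONE
        INTEGER, PARAMETER :: numnp = 35, numcstet = 12, numat_vol = 1
        INTEGER :: lmcstet(10,numcstet), matcstet(numcstet)
        REAL*8  :: coor(3,numnp), d(3*numnp), velo(3*numnp), r_in(3*numnp)
        REAL*8  :: s11(4,numcstet), s22(4,numcstet), s33(4,numcstet)
        REAL*8  :: s12(4,numcstet), s23(4,numcstet), s13(4,numcstet)
        REAL*8  :: detfold(4,numcstet)
        REAL*8  :: mu(numat_vol), kappa(numat_vol), rho(numat_vol)
        REAL*8  :: cd_fastest, dt

        ! ... fill coor, lmcstet, matcstet, d and velo from the mesh and solution ...
        matcstet   = 1
        mu(1)      = 1.0d6      ! illustrative shear modulus
        kappa(1)   = 1.0d9      ! illustrative bulk modulus
        rho(1)     = 1.0d3      ! illustrative density
        cd_fastest = 1.0d3      ! characteristic wave speed used by the damping model (assumed)
        dt         = 1.0d-6
        detfold    = 1.d0       ! per-Gauss-point det(F) history passed to artificialdamping (assumed)
        r_in       = 0.d0       ! internal force is accumulated into r_in

        ! process all elements: nstart = 1, nend = numcstet
        CALL v3d10_nl_arruda_boyce_damping(coor, matcstet, lmcstet, r_in, d, &
             s11, s22, s33, s12, s23, s13,                                   &
             numnp, 1, numcstet, numcstet, numat_vol,                        &
             mu, kappa, rho, cd_fastest, detfold, velo, dt)
      END PROGRAM drive_v3d10_damping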