      INTEGER, PARAMETER, PRIVATE :: m0=0, m1=1, n0=0
      REAL(dp), ALLOCATABLE, PRIVATE :: work1(:,:,:), work2(:,:)
      REAL(dp), PRIVATE :: cosmux, sinmux
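!
!     totzsps_par
!     Inverse Fourier transform of the stellarator-symmetric R, Z, and
!     lambda coefficients onto the real-space (zeta, theta) grid for the
!     local radial surfaces nsmin:nsmax. The synthesis is staged: toroidal
!     (n) sums accumulate into work1, then poloidal (m) sums build the
!     real-space arrays.
!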
      SUBROUTINE totzsps_par(rzl_array, r11, ru1, rv1, z11, zu1, zv1,
     &                       lu1, lv1, rcn1, zcn1, ier_flag)
      USE vmec_params, ONLY: jmin1, jlam, ntmax, rcc, rss, zsc, zcs,
      USE precon2d, ONLY: ictrl_prec2d
      USE parallel_include_module

      REAL(dp), DIMENSION(0:ntor,0:mpol1,ns,3*ntmax),
     &   TARGET, INTENT(INOUT) :: rzl_array
      REAL(dp), DIMENSION(nzeta,ntheta3,ns,0:1),
     &   INTENT(out) :: r11, ru1,
     &   rv1, z11, zu1, zv1, lu1, lv1, rcn1, zcn1
      INTEGER, INTENT(inout) :: ier_flag

      INTEGER :: n, m, mparity, k, i, l
      INTEGER :: ioff, joff, mj, ni, nsz
      INTEGER :: nsmin, nsmax, js
      REAL(dp), DIMENSION(:,:,:), POINTER ::
     &   rmncc, rmnss, zmncs, zmnsc, lmncs, lmnsc
      REAL(dp) :: tbroadon, tbroadoff

      rmncc => rzl_array(:,:,:,rcc)
      zmnsc => rzl_array(:,:,:,zsc+ntmax)
      lmnsc => rzl_array(:,:,:,zsc+2*ntmax)

      rmnss => rzl_array(:,:,:,rss)
      zmncs => rzl_array(:,:,:,zcs+ntmax)
      lmncs => rzl_array(:,:,:,zcs+2*ntmax)
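!     The six pointers alias the R, Z, and lambda coefficient blocks stacked
!     along the last dimension of rzl_array (groups at offsets 0, ntmax, and
!     2*ntmax of the 3*ntmax coefficient sets).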
      rzl_array(:,m1,1,:) = rzl_array(:,m1,2,:)

      ioff = lbound(rmncc,1)
      joff = lbound(rmncc,2)

      CALL convert_sym_par(rmnss(:,m1+joff,:), zmncs(:,m1+joff,:),
     &                     nsmin, nsmax)

      IF (lthreed .AND. jlam(m0) .GT. 1) THEN
         lmncs(:,m0+joff,1) = lmncs(:,m0+joff,2)

#if defined(CHI_FORCE)
      IF (ncurr .EQ. 1) THEN
         IF (ictrl_prec2d .EQ. 2) THEN
            lmnsc(n0+ioff,m0+joff,nsmin:nsmax) = chips(nsmin:nsmax)
         ELSE IF (ictrl_prec2d .NE. 0) THEN
            chips(nsmin:nsmax) = lmnsc(n0+ioff,m0+joff,nsmin:nsmax)

      ALLOCATE (work1(nzeta,12,nsmin:nsmax), stat=i)
      IF (i .ne. 0) stop 'Allocation error in VMEC2000 totzsps'
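!     Stage 1: toroidal sums. For each surface js, poloidal mode m, and
!     zeta index k, accumulate the n-sums of the R, Z, and lambda
!     coefficients into work1. cosnv/sinnv hold cos(n*zeta)/sin(n*zeta);
!     the cosnvn/sinnvn tables are assumed here to carry the extra factor
!     of n needed for the toroidal derivatives (rv1, zv1, lv1).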
               work1(k,1,js) = work1(k,1,js)
     &                       + rmncc(ni,mj,js)*cosnv(k,n)
               work1(k,6,js) = work1(k,6,js)
     &                       + zmnsc(ni,mj,js)*cosnv(k,n)
               work1(k,10,js) = work1(k,10,js)
     &                        + lmnsc(ni,mj,js)*cosnv(k,n)

               IF (.NOT.lthreed) cycle

               work1(k,4,js) = work1(k,4,js)
     &                       + rmnss(ni,mj,js)*cosnvn(k,n)
               work1(k,7,js) = work1(k,7,js)
     &                       + zmncs(ni,mj,js)*cosnvn(k,n)
               work1(k,11,js) = work1(k,11,js)
     &                        + lmncs(ni,mj,js)*cosnvn(k,n)

               work1(k,2,js) = work1(k,2,js)
     &                       + rmnss(ni,mj,js)*sinnv(k,n)
               work1(k,5,js) = work1(k,5,js)
     &                       + zmncs(ni,mj,js)*sinnv(k,n)
               work1(k,9,js) = work1(k,9,js)
     &                       + lmncs(ni,mj,js)*sinnv(k,n)

               work1(k,3,js) = work1(k,3,js)
     &                       + rmncc(ni,mj,js)*sinnvn(k,n)
               work1(k,8,js) = work1(k,8,js)
     &                       + zmnsc(ni,mj,js)*sinnvn(k,n)
               work1(k,12,js) = work1(k,12,js)
     &                        + lmnsc(ni,mj,js)*sinnvn(k,n)
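!     Stage 2: poloidal sums. Multiply the n-sums by cos(m*theta) /
!     sin(m*theta) and accumulate over m into the real-space arrays.
!     sinmum/cosmum are assumed to include the factor m for the
!     theta-derivatives (ru1, zu1, lu1); cosmux/sinmux fold in the
!     xmpq(m,1) weight applied to the rcn1 and zcn1 arrays used by the
!     spectral-condensation constraint.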
            cosmux = xmpq(m,1)*cosmu(i,m)
            sinmux = xmpq(m,1)*sinmu(i,m)

            r11(:,i,js,mparity) = r11(:,i,js,mparity)
     &                          + work1(:,1,js)*cosmu(i,m)
            ru1(:,i,js,mparity) = ru1(:,i,js,mparity)
     &                          + work1(:,1,js)*sinmum(i,m)
            rcn1(:,i,js,mparity) = rcn1(:,i,js,mparity)
     &                           + work1(:,1,js)*cosmux

            z11(:,i,js,mparity) = z11(:,i,js,mparity)
     &                          + work1(:,6,js)*sinmu(i,m)
            zu1(:,i,js,mparity) = zu1(:,i,js,mparity)
     &                          + work1(:,6,js)*cosmum(i,m)
            zcn1(:,i,js,mparity) = zcn1(:,i,js,mparity)
     &                           + work1(:,6,js)*sinmux

            lu1(:,i,js,mparity) = lu1(:,i,js,mparity)
     &                          + work1(:,10,js)*cosmum(i,m)

            IF (.not.lthreed) cycle

            r11(:,i,js,mparity) = r11(:,i,js,mparity)
     &                          + work1(:,2,js)*sinmu(i,m)
            ru1(:,i,js,mparity) = ru1(:,i,js,mparity)
     &                          + work1(:,2,js)*cosmum(i,m)
            rcn1(:,i,js,mparity) = rcn1(:,i,js,mparity)
     &                           + work1(:,2,js)*sinmux

            rv1(:,i,js,mparity) = rv1(:,i,js,mparity)
     &                          + work1(:,3,js)*cosmu(i,m)
     &                          + work1(:,4,js)*sinmu(i,m)
            z11(:,i,js,mparity) = z11(:,i,js,mparity)
     &                          + work1(:,5,js)*cosmu(i,m)
            zu1(:,i,js,mparity) = zu1(:,i,js,mparity)
     &                          + work1(:,5,js)*sinmum(i,m)
            zcn1(:,i,js,mparity) = zcn1(:,i,js,mparity)
     &                           + work1(:,5,js)*cosmux
            zv1(:,i,js,mparity) = zv1(:,i,js,mparity)
     &                          + work1(:,7,js)*cosmu(i,m)
     &                          + work1(:,8,js)*sinmu(i,m)

            lu1(:,i,js,mparity) = lu1(:,i,js,mparity)
     &                          + work1(:,9,js)*sinmum(i,m)
            lv1(:,i,js,mparity) = lv1(:,i,js,mparity)
     &                          - (work1(:,11,js)*cosmu(i,m)
     &                          +  work1(:,12,js)*sinmu(i,m))
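!     The m=1, n=0 coefficients of Z and R give dkappa = z01/r01 on the
!     innermost surface, an elongation-type estimate for the axis region;
!     a vanishing r01 there marks a bad initial state and sets ier_flag.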
      z01(nsmin:nsmax) = zmnsc(n0+ioff,m1+joff,nsmin:nsmax)
      r01(nsmin:nsmax) = rmncc(n0+ioff,m1+joff,nsmin:nsmax)

      IF (rank.EQ.0 .AND. r01(1).EQ.zero) THEN
         ier_flag = r01_bad_value_flag
      ELSE IF (rank.EQ.0 .AND. r01(1).NE.zero) THEN
         dkappa = z01(1)/r01(1)
      END IF

      CALL second0(tbroadon)
      CALL mpi_bcast(dkappa, 1, mpi_real8, 0, ns_comm, mpi_err)
      CALL second0(tbroadoff)
      broadcast_time = broadcast_time + (tbroadoff - tbroadon)

      CALL second0(tfftoff)
      totzsps_time = totzsps_time + (tfftoff - tffton)
      timer(tfft) = timer(tfft) + (tfftoff - tffton)

      END SUBROUTINE totzsps_par
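!
!     totzspa_par
!     Antisymmetric (lasym) counterpart of totzsps_par: transforms the
!     non-stellarator-symmetric R, Z, and lambda coefficients to real
!     space using the same two-stage n-then-m synthesis.
!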
      SUBROUTINE totzspa_par(rzl_array, r11, ru1, rv1, z11, zu1, zv1,
     1                       lu1, lv1, rcn1, zcn1)
      USE vmec_params, ONLY: jmin1, jlam, ntmax, rcs, rsc, zcc, zss
      USE precon2d, ONLY: ictrl_prec2d
      USE parallel_include_module

      REAL(dp), DIMENSION(0:ntor,0:mpol1,ns,3*ntmax),
     1   TARGET, INTENT(inout) :: rzl_array
      REAL(dp), DIMENSION(nzeta,ntheta3,ns,0:1), INTENT(out) ::
     1   r11, ru1, rv1, z11, zu1, zv1, lu1, lv1, rcn1, zcn1

      INTEGER :: m, n, mparity, k, i, l, j1
      INTEGER :: ioff, joff, mj, ni
      INTEGER :: nsmin, nsmax, js
      REAL(dp), DIMENSION(:,:,:), POINTER ::
     1   rmncs, rmnsc, zmncc, zmnss, lmncc, lmnss

      rmnsc => rzl_array(:,:,:,rsc)
      zmncc => rzl_array(:,:,:,zcc+ntmax)
      lmncc => rzl_array(:,:,:,zcc+2*ntmax)

      rmncs => rzl_array(:,:,:,rcs)
      zmnss => rzl_array(:,:,:,zss+ntmax)
      lmnss => rzl_array(:,:,:,zss+2*ntmax)

      ioff = lbound(rmnsc,1)
      joff = lbound(rmnsc,2)
      CALL convert_asym_par(rmnsc(:,m1+joff,:), zmncc(:,m1+joff,:),
     1                      nsmin, nsmax)

      z00b = zmncc(ioff,joff,ns)

      ALLOCATE (work1(nzeta,12,nsmin:nsmax), stat=i)
      IF (i .ne. 0) stop 'Allocation error in VMEC totzspa'
      IF (jlam(m0) .gt. 1) THEN
         lmncc(:,m0+joff,1) = lmncc(:,m0+joff,2)

               work1(k,1,js) = work1(k,1,js)
     &                       + rmnsc(ni,mj,js)*cosnv(k,n)
               work1(k,6,js) = work1(k,6,js)
     &                       + zmncc(ni,mj,js)*cosnv(k,n)
               work1(k,10,js) = work1(k,10,js)
     &                        + lmncc(ni,mj,js)*cosnv(k,n)

               IF (.NOT.lthreed) cycle

               work1(k,2,js) = work1(k,2,js)
     &                       + rmncs(ni,mj,js)*sinnv(k,n)
               work1(k,3,js) = work1(k,3,js)
     &                       + rmnsc(ni,mj,js)*sinnvn(k,n)
               work1(k,4,js) = work1(k,4,js)
     &                       + rmncs(ni,mj,js)*cosnvn(k,n)
               work1(k,5,js) = work1(k,5,js)
     &                       + zmnss(ni,mj,js)*sinnv(k,n)
               work1(k,7,js) = work1(k,7,js)
     &                       + zmnss(ni,mj,js)*cosnvn(k,n)
               work1(k,8,js) = work1(k,8,js)
     &                       + zmncc(ni,mj,js)*sinnvn(k,n)
               work1(k,9,js) = work1(k,9,js)
     &                       + lmnss(ni,mj,js)*sinnv(k,n)
               work1(k,11,js) = work1(k,11,js)
     &                        + lmnss(ni,mj,js)*cosnvn(k,n)
               work1(k,12,js) = work1(k,12,js)
     &                        + lmncc(ni,mj,js)*sinnvn(k,n)
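!     Poloidal synthesis of the antisymmetric pieces. Note the reversed
!     parity relative to totzsps_par: here R accumulates sin(m*theta)
!     terms and Z accumulates cos(m*theta) terms.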
            cosmux = xmpq(m,1)*cosmu(i,m)
            sinmux = xmpq(m,1)*sinmu(i,m)

            r11(:,i,js,mparity) = r11(:,i,js,mparity)
     &                          + work1(:,1,js)*sinmu(i,m)
            ru1(:,i,js,mparity) = ru1(:,i,js,mparity)
     &                          + work1(:,1,js)*cosmum(i,m)
            z11(:,i,js,mparity) = z11(:,i,js,mparity)
     &                          + work1(:,6,js)*cosmu(i,m)
            zu1(:,i,js,mparity) = zu1(:,i,js,mparity)
     &                          + work1(:,6,js)*sinmum(i,m)
            lu1(:,i,js,mparity) = lu1(:,i,js,mparity)
     &                          + work1(:,10,js)*sinmum(i,m)
            rcn1(:,i,js,mparity) = rcn1(:,i,js,mparity)
     &                           + work1(:,1,js)*sinmux
            zcn1(:,i,js,mparity) = zcn1(:,i,js,mparity)
     &                           + work1(:,6,js)*cosmux

            IF (.not.lthreed) cycle

            r11(:,i,js,mparity) = r11(:,i,js,mparity)
     &                          + work1(:,2,js)*cosmu(i,m)
            ru1(:,i,js,mparity) = ru1(:,i,js,mparity)
     &                          + work1(:,2,js)*sinmum(i,m)
            z11(:,i,js,mparity) = z11(:,i,js,mparity)
     &                          + work1(:,5,js)*sinmu(i,m)
            zu1(:,i,js,mparity) = zu1(:,i,js,mparity)
     &                          + work1(:,5,js)*cosmum(i,m)
            lu1(:,i,js,mparity) = lu1(:,i,js,mparity)
     &                          + work1(:,9,js)*cosmum(i,m)
            rcn1(:,i,js,mparity) = rcn1(:,i,js,mparity)
     &                           + work1(:,2,js)*cosmux
            zcn1(:,i,js,mparity) = zcn1(:,i,js,mparity)
     &                           + work1(:,5,js)*sinmux
            rv1(:,i,js,mparity) = rv1(:,i,js,mparity)
     &                          + work1(:,3,js)*sinmu(i,m)
     &                          + work1(:,4,js)*cosmu(i,m)
            zv1(:,i,js,mparity) = zv1(:,i,js,mparity)
     &                          + work1(:,7,js)*sinmu(i,m)
     &                          + work1(:,8,js)*cosmu(i,m)
            lv1(:,i,js,mparity) = lv1(:,i,js,mparity)
     &                          - work1(:,11,js)*sinmu(i,m)
     &                          - work1(:,12,js)*cosmu(i,m)

      CALL second0(tfftoff)
      totzspa_time = totzspa_time + (tfftoff - tffton)
      timer(tfft) = timer(tfft) + (tfftoff - tffton)

      END SUBROUTINE totzspa_par
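!
!     convert_sym_par
!     Replaces the m=1 pair (rmnss, zmncs) by its sum and difference on the
!     surfaces nsmin:nsmax, the internal representation used to impose the
!     m=1 constraint coupling the RSS and ZCS modes.
!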
      SUBROUTINE convert_sym_par(rmnss, zmncs, nsmin, nsmax)

      INTEGER, INTENT(IN) :: nsmin, nsmax
      REAL(dp), DIMENSION(0:ntor,ns), INTENT(INOUT) :: rmnss, zmncs

      REAL(dp), DIMENSION(0:ntor,nsmin:nsmax) :: temp

      temp(:,nsmin:nsmax) = rmnss(:,nsmin:nsmax)
      rmnss(:,nsmin:nsmax) = temp(:,nsmin:nsmax)
     &                     + zmncs(:,nsmin:nsmax)
      zmncs(:,nsmin:nsmax) = temp(:,nsmin:nsmax)
     &                     - zmncs(:,nsmin:nsmax)

      END SUBROUTINE convert_sym_par
      SUBROUTINE convert_asym_par(rmnsc, zmncc, nsmin, nsmax)

      INTEGER, INTENT(IN) :: nsmin, nsmax
      REAL(dp), DIMENSION(0:ntor,ns), INTENT(INOUT) :: rmnsc, zmncc

      REAL(dp), DIMENSION(0:ntor,nsmin:nsmax) :: temp

      temp(:,nsmin:nsmax) = rmnsc(:,nsmin:nsmax)
      rmnsc(:,nsmin:nsmax) = temp(:,nsmin:nsmax)
     &                     + zmncc(:,nsmin:nsmax)
      zmncc(:,nsmin:nsmax) = temp(:,nsmin:nsmax)
     &                     - zmncc(:,nsmin:nsmax)

      END SUBROUTINE convert_asym_par
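!
!     totzsps
!     Serial version of the symmetric transform. The layout differs from
!     the parallel routine: the radial index is innermost in
!     rzl_array(ns,0:ntor,0:mpol1,3*ntmax), and the real-space arrays are
!     flattened over (radius, zeta, theta) so the sums run over contiguous
!     blocks of work2.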
      SUBROUTINE totzsps(rzl_array, r11, ru1, rv1, z11, zu1, zv1,
     &                   lu1, lv1, rcn1, zcn1)
      USE vmec_params, ONLY: jmin1, jlam, ntmax, rcc, rss, zsc, zcs
      USE precon2d, ONLY: ictrl_prec2d

      REAL(dp), DIMENSION(ns,0:ntor,0:mpol1,3*ntmax),
     &   TARGET, INTENT(INOUT) :: rzl_array
      REAL(dp), DIMENSION(ns*nzeta*ntheta3,0:1),
     &   INTENT(OUT) :: r11, ru1,
     &   rv1, z11, zu1, zv1, lu1, lv1, rcn1, zcn1

      INTEGER :: n, m, mparity, k, i, j1, l, j1l, nsl
      INTEGER :: ioff, joff, mj, ni, nsz
      REAL(dp), DIMENSION(:,:,:), POINTER ::
     &   rmncc, rmnss, zmncs, zmnsc, lmncs, lmnsc

      rmncc => rzl_array(:,:,:,rcc)
      zmnsc => rzl_array(:,:,:,zsc+ntmax)
      lmnsc => rzl_array(:,:,:,zsc+2*ntmax)

      rmnss => rzl_array(:,:,:,rss)
      zmncs => rzl_array(:,:,:,zcs+ntmax)
      lmncs => rzl_array(:,:,:,zcs+2*ntmax)

      ioff = lbound(rmncc,2)
      joff = lbound(rmncc,3)

      CALL convert_sym(rmnss(:,:,m1+joff), zmncs(:,:,m1+joff))

      rzl_array(1,:,m1,:) = rzl_array(2,:,m1,:)

      IF (lthreed .and. jlam(m0) .gt. 1) THEN
         lmncs(1,:,m0+joff) = lmncs(2,:,m0+joff)

#if defined(CHI_FORCE)
      IF (ncurr .EQ. 1) THEN
         IF (ictrl_prec2d .EQ. 2) THEN
            lmnsc(2:ns,n0+ioff,m0+joff) = chips(2:ns)
         ELSE IF (ictrl_prec2d .NE. 0) THEN
            chips(2:ns) = lmnsc(2:ns,n0+ioff,m0+joff)

      ALLOCATE (work2(nsz,12), stat=i)
      IF (i .ne. 0) stop 'Allocation error in VMEC2000 totzsps'
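!     Same two-stage synthesis as totzsps_par, but vectorized over the
!     flattened index: each update touches the contiguous block j1l:nsl,
!     sweeping all radial surfaces at once for a given (n,k) pair in the
!     toroidal stage and all (radius, zeta) points at once for a given
!     (m,i) pair in the poloidal stage.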
            work2(j1l:nsl,1) = work2(j1l:nsl,1)
     1                       + rmncc(j1:ns,ni,mj)*cosnv(k,n)
            work2(j1l:nsl,6) = work2(j1l:nsl,6)
     1                       + zmnsc(j1:ns,ni,mj)*cosnv(k,n)
            work2(j1l:nsl,10) = work2(j1l:nsl,10)
     1                        + lmnsc(j1:ns,ni,mj)*cosnv(k,n)

            IF (.not.lthreed) cycle

            work2(j1l:nsl,4) = work2(j1l:nsl,4)
     1                       + rmnss(j1:ns,ni,mj)*cosnvn(k,n)
            work2(j1l:nsl,7) = work2(j1l:nsl,7)
     1                       + zmncs(j1:ns,ni,mj)*cosnvn(k,n)
            work2(j1l:nsl,11) = work2(j1l:nsl,11)
     1                        + lmncs(j1:ns,ni,mj)*cosnvn(k,n)

            work2(j1l:nsl,2) = work2(j1l:nsl,2)
     1                       + rmnss(j1:ns,ni,mj)*sinnv(k,n)
            work2(j1l:nsl,5) = work2(j1l:nsl,5)
     1                       + zmncs(j1:ns,ni,mj)*sinnv(k,n)
            work2(j1l:nsl,9) = work2(j1l:nsl,9)
     1                       + lmncs(j1:ns,ni,mj)*sinnv(k,n)

            work2(j1l:nsl,3) = work2(j1l:nsl,3)
     1                       + rmncc(j1:ns,ni,mj)*sinnvn(k,n)
            work2(j1l:nsl,8) = work2(j1l:nsl,8)
     1                       + zmnsc(j1:ns,ni,mj)*sinnvn(k,n)
            work2(j1l:nsl,12) = work2(j1l:nsl,12)
     1                        + lmnsc(j1:ns,ni,mj)*sinnvn(k,n)

            cosmux = xmpq(m,1)*cosmu(i,m)
            sinmux = xmpq(m,1)*sinmu(i,m)

            r11(j1l:nsl,mparity) = r11(j1l:nsl,mparity)
     1                           + work2(1:nsz,1)*cosmu(i,m)
            ru1(j1l:nsl,mparity) = ru1(j1l:nsl,mparity)
     1                           + work2(1:nsz,1)*sinmum(i,m)
            rcn1(j1l:nsl,mparity) = rcn1(j1l:nsl,mparity)
     1                            + work2(1:nsz,1)*cosmux

            z11(j1l:nsl,mparity) = z11(j1l:nsl,mparity)
     1                           + work2(1:nsz,6)*sinmu(i,m)
            zu1(j1l:nsl,mparity) = zu1(j1l:nsl,mparity)
     1                           + work2(1:nsz,6)*cosmum(i,m)
            zcn1(j1l:nsl,mparity) = zcn1(j1l:nsl,mparity)
     1                            + work2(1:nsz,6)*sinmux

            lu1(j1l:nsl,mparity) = lu1(j1l:nsl,mparity)
     1                           + work2(1:nsz,10)*cosmum(i,m)

            IF (.not.lthreed) cycle

            r11(j1l:nsl,mparity) = r11(j1l:nsl,mparity)
     1                           + work2(1:nsz,2)*sinmu(i,m)
            ru1(j1l:nsl,mparity) = ru1(j1l:nsl,mparity)
     1                           + work2(1:nsz,2)*cosmum(i,m)
            rcn1(j1l:nsl,mparity) = rcn1(j1l:nsl,mparity)
     1                            + work2(1:nsz,2)*sinmux

            rv1(j1l:nsl,mparity) = rv1(j1l:nsl,mparity)
     1                           + work2(1:nsz,3)*cosmu(i,m)
     1                           + work2(1:nsz,4)*sinmu(i,m)
            z11(j1l:nsl,mparity) = z11(j1l:nsl,mparity)
     1                           + work2(1:nsz,5)*cosmu(i,m)
            zu1(j1l:nsl,mparity) = zu1(j1l:nsl,mparity)
     1                           + work2(1:nsz,5)*sinmum(i,m)
            zcn1(j1l:nsl,mparity) = zcn1(j1l:nsl,mparity)
     1                            + work2(1:nsz,5)*cosmux
            zv1(j1l:nsl,mparity) = zv1(j1l:nsl,mparity)
     1                           + work2(1:nsz,7)*cosmu(i,m)
     1                           + work2(1:nsz,8)*sinmu(i,m)

            lu1(j1l:nsl,mparity) = lu1(j1l:nsl,mparity)
     1                           + work2(1:nsz,9)*sinmum(i,m)
            lv1(j1l:nsl,mparity) = lv1(j1l:nsl,mparity)
     1                           - work2(1:nsz,11)*cosmu(i,m)
     1                           - work2(1:nsz,12)*sinmu(i,m)
      z01(1:ns) = zmnsc(1:ns,n0+ioff,m1+joff)
      r01(1:ns) = rmncc(1:ns,n0+ioff,m1+joff)
      IF (r01(1) .eq. zero) THEN
         stop 'r01(0) = 0 in totzsps_SPH'
      END IF
      dkappa = z01(1)/r01(1)

      END SUBROUTINE totzsps
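!
!     convert_sym
!     Serial counterpart of convert_sym_par; presumably performs the same
!     sum/difference conversion of the m=1 (rmnss, zmncs) pair over the
!     full radial grid.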
      SUBROUTINE convert_sym(rmnss, zmncs)

      REAL(dp), DIMENSION(ns,0:ntor), INTENT(INOUT) :: rmnss, zmncs

      REAL(dp), DIMENSION(ns,0:ntor) :: temp

      END SUBROUTINE convert_sym
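!
!     totzspa
!     Serial antisymmetric (lasym) transform; mirrors totzspa_par but uses
!     the flattened work2(ns*nzeta,12) layout of the serial routines.
!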
      SUBROUTINE totzspa(rzl_array, r11, ru1, rv1, z11, zu1, zv1, lu1,
     1                   lv1, rcn1, zcn1)
      USE vmec_params, ONLY: jmin1, jlam, ntmax, rcs, rsc, zcc, zss
      USE precon2d, ONLY: ictrl_prec2d

      REAL(dp), DIMENSION(ns,0:ntor,0:mpol1,3*ntmax),
     1   TARGET, INTENT(inout) :: rzl_array
      REAL(dp), DIMENSION(ns*nzeta,ntheta3,0:1), INTENT(out) ::
     1   r11, ru1, rv1, z11, zu1, zv1, lu1, lv1, rcn1, zcn1

      INTEGER :: m, n, mparity, k, i, l, j1, j1l, nsl
      INTEGER :: ioff, joff, mj, ni
      REAL(dp), DIMENSION(:,:,:), POINTER ::
     1   rmncs, rmnsc, zmncc, zmnss, lmncc, lmnss

      rmnsc => rzl_array(:,:,:,rsc)
      zmncc => rzl_array(:,:,:,zcc+ntmax)
      lmncc => rzl_array(:,:,:,zcc+2*ntmax)

      rmncs => rzl_array(:,:,:,rcs)
      zmnss => rzl_array(:,:,:,zss+ntmax)
      lmnss => rzl_array(:,:,:,zss+2*ntmax)

      ioff = lbound(rmnsc,2)
      joff = lbound(rmnsc,3)
      CALL convert_asym(rmnsc(:,:,m1+joff), zmncc(:,:,m1+joff))

      z00b = zmncc(ns,ioff,joff)

      IF (jlam(m0) .GT. 1) THEN
         lmncc(1,:,m0+joff) = lmncc(2,:,m0+joff)

      ALLOCATE (work2(ns*nzeta,12), stat=i)
      IF (i .ne. 0) stop 'Allocation error in VMEC totzspa'
            work2(j1l:nsl,1) = work2(j1l:nsl,1)
     1                       + rmnsc(j1:ns,ni,mj)*cosnv(k,n)
            work2(j1l:nsl,6) = work2(j1l:nsl,6)
     1                       + zmncc(j1:ns,ni,mj)*cosnv(k,n)
            work2(j1l:nsl,10) = work2(j1l:nsl,10)
     1                        + lmncc(j1:ns,ni,mj)*cosnv(k,n)

            IF (.not.lthreed) cycle

            work2(j1l:nsl,2) = work2(j1l:nsl,2)
     1                       + rmncs(j1:ns,ni,mj)*sinnv(k,n)
            work2(j1l:nsl,3) = work2(j1l:nsl,3)
     1                       + rmnsc(j1:ns,ni,mj)*sinnvn(k,n)
            work2(j1l:nsl,4) = work2(j1l:nsl,4)
     1                       + rmncs(j1:ns,ni,mj)*cosnvn(k,n)
            work2(j1l:nsl,5) = work2(j1l:nsl,5)
     1                       + zmnss(j1:ns,ni,mj)*sinnv(k,n)
            work2(j1l:nsl,7) = work2(j1l:nsl,7)
     1                       + zmnss(j1:ns,ni,mj)*cosnvn(k,n)
            work2(j1l:nsl,8) = work2(j1l:nsl,8)
     1                       + zmncc(j1:ns,ni,mj)*sinnvn(k,n)
            work2(j1l:nsl,9) = work2(j1l:nsl,9)
     1                       + lmnss(j1:ns,ni,mj)*sinnv(k,n)
            work2(j1l:nsl,11) = work2(j1l:nsl,11)
     1                        + lmnss(j1:ns,ni,mj)*cosnvn(k,n)
            work2(j1l:nsl,12) = work2(j1l:nsl,12)
     1                        + lmncc(j1:ns,ni,mj)*sinnvn(k,n)

            cosmux = xmpq(m,1)*cosmu(i,m)
            sinmux = xmpq(m,1)*sinmu(i,m)

            r11(:,i,mparity) = r11(:,i,mparity) + work2(:,1)*sinmu(i,m)
            ru1(:,i,mparity) = ru1(:,i,mparity) + work2(:,1)*cosmum(i,m)
            z11(:,i,mparity) = z11(:,i,mparity) + work2(:,6)*cosmu(i,m)
            zu1(:,i,mparity) = zu1(:,i,mparity) + work2(:,6)*sinmum(i,m)
            lu1(:,i,mparity) = lu1(:,i,mparity)
     &                       + work2(:,10)*sinmum(i,m)
            rcn1(:,i,mparity) = rcn1(:,i,mparity)
     &                        + work2(:,1)*sinmux
            zcn1(:,i,mparity) = zcn1(:,i,mparity)
     &                        + work2(:,6)*cosmux

            IF (.not.lthreed) cycle

            r11(:,i,mparity) = r11(:,i,mparity)
     &                       + work2(:,2)*cosmu(i,m)
            ru1(:,i,mparity) = ru1(:,i,mparity)
     &                       + work2(:,2)*sinmum(i,m)
            z11(:,i,mparity) = z11(:,i,mparity)
     &                       + work2(:,5)*sinmu(i,m)
            zu1(:,i,mparity) = zu1(:,i,mparity)
     &                       + work2(:,5)*cosmum(i,m)
            lu1(:,i,mparity) = lu1(:,i,mparity)
     &                       + work2(:,9)*cosmum(i,m)
            rcn1(:,i,mparity) = rcn1(:,i,mparity)
     &                        + work2(:,2)*cosmux
            zcn1(:,i,mparity) = zcn1(:,i,mparity)
     &                        + work2(:,5)*sinmux
            rv1(:,i,mparity) = rv1(:,i,mparity)
     &                       + work2(:,3)*sinmu(i,m)
     &                       + work2(:,4)*cosmu(i,m)
            zv1(:,i,mparity) = zv1(:,i,mparity)
     &                       + work2(:,7)*sinmu(i,m)
     &                       + work2(:,8)*cosmu(i,m)
            lv1(:,i,mparity) = lv1(:,i,mparity)
     &                       - work2(:,11)*sinmu(i,m)
     &                       - work2(:,12)*cosmu(i,m)

      END SUBROUTINE totzspa
      SUBROUTINE convert_asym(rmnsc, zmncc)

      REAL(dp), DIMENSION(ns,0:ntor), INTENT(INOUT) :: rmnsc, zmncc

      REAL(dp), DIMENSION(ns,0:ntor) :: temp

      END SUBROUTINE convert_asym

      END MODULE totzsp_mod